code stringlengths 101 5.91M |
|---|
def register_Ns3Channel_methods(root_module, cls):
    """Register constructors and methods of ns3::Channel on the binding class.

    NOTE(review): looks like pybindgen-generated ns-3 binding code;
    root_module is unused here but kept for the generator's uniform
    registration signature.
    """
    # Copy constructor and default constructor.
    cls.add_constructor([param('ns3::Channel const &', 'arg0')])
    cls.add_constructor([])
    # Accessors of the C++ Channel interface; GetDevice/GetNDevices are
    # pure virtual in C++ and marked accordingly for the binding generator.
    cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'i')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetId', 'uint32_t', [], is_const=True)
    cls.add_method('GetNDevices', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    return
class MLP(nn.Module):
    """Linear projection head mapping (B, C, *spatial) features to (B, N, embed_dim).

    Spatial dimensions are flattened into a token axis before the projection,
    so the token count N is the product of the spatial dims.
    """

    def __init__(self, input_dim=2048, embed_dim=768):
        super().__init__()
        # Single dense layer applied token-wise.
        self.proj = nn.Linear(input_dim, embed_dim)

    def forward(self, x):
        # (B, C, H, W, ...) -> (B, C, N) -> (B, N, C)
        tokens = x.flatten(2).transpose(1, 2)
        # Project channel dim C -> embed_dim.
        return self.proj(tokens)
def build_treebank(trees, transition_scheme=TransitionScheme.TOP_DOWN_UNARY, reverse=False):
    """Convert each parse tree into its transition sequence.

    When *reverse* is set, every tree is reversed before its sequence is built.
    """
    prepare = (lambda t: t.reverse()) if reverse else (lambda t: t)
    return [build_sequence(prepare(tree), transition_scheme) for tree in trees]
def transform_tree(tree, fn, iterable_type=tuple):
    """Recursively map *fn* over the leaves of a nested container structure.

    Dicts, (named)tuples and sequences are rebuilt with the same concrete
    type; any other iterable is materialized as *iterable_type*.
    NOTE(review): the recursive calls drop *iterable_type*, so nested generic
    iterables always collapse to tuple — confirm that is intended.
    """
    if is_iterable(tree):
        if isinstance(tree, dict):
            # Rebuild the same mapping type from transformed (key, value) pairs.
            res = tree.__new__(type(tree))
            res.__init__(((k, transform_tree(child, fn)) for (k, child) in iteritems(tree)))
            return res
        elif isinstance(tree, tuple):
            if hasattr(tree, '_asdict'):
                # Namedtuple: rebuild via keyword args from its transformed dict.
                res = tree.__new__(type(tree), **transform_tree(tree._asdict(), fn))
            else:
                # Plain tuple (subclass): __new__ consumes the transformed children.
                res = tree.__new__(type(tree), (transform_tree(child, fn) for child in tree))
            return res
        elif isinstance(tree, typing.Sequence):
            # Generic sequence (e.g. list): rebuild in place via __init__.
            res = tree.__new__(type(tree))
            res.__init__((transform_tree(child, fn) for child in tree))
            return res
        else:
            # Any other iterable (set, generator, ...) becomes iterable_type.
            return iterable_type((transform_tree(child, fn) for child in tree))
    else:
        # Leaf: apply the mapping function.
        return fn(tree)
def test_mirror():
    """End-to-end check: photons emitted by a sender bounce off a Mirror node
    and are recorded by a receiver with the expected rate and states."""
    STATE_LEN = 1000
    FIDELITY = 0.98
    # NOTE(review): LS_FREQ is zero, which makes the expected-arrival-time
    # expression below (.0 / LS_FREQ) divide by zero as soon as the receiver
    # log is non-empty — confirm the intended light-source frequency.
    LS_FREQ = .0
    MEAN = 0.1
    tl = Timeline()
    ls = LightSource('ls', tl, frequency=LS_FREQ, mean_photon_num=MEAN)
    sender = EmittingNode('sender', tl, ls)
    receiver = Receiver('receiver', tl)
    mr = Mirror('mr', tl, fidelity=FIDELITY, destination=receiver.name)
    mid = MiddleNode('mid', tl, mr)
    # Fixed seeds keep the stochastic photon emission reproducible.
    sender.set_seed(0)
    mid.set_seed(1)
    receiver.set_seed(2)
    assert (mid.mirror.fidelity == FIDELITY)
    # Lossless quantum channels: sender -> mirror and mirror -> receiver.
    qc1 = QuantumChannel('qc1', tl, distance=100000.0, attenuation=0)
    qc2 = QuantumChannel('qc2', tl, distance=100000.0, attenuation=0)
    qc1.set_ends(sender, mid.name)
    qc2.set_ends(mid, receiver.name)
    # Random polarization states drawn from the mirror node's RNG.
    state_list = []
    rng = mid.get_generator()
    for _ in range(STATE_LEN):
        basis = rng.integers(2)
        bit = rng.integers(2)
        state_list.append(polarization['bases'][basis][bit])
    tl.init()
    sender.light_source.emit(state_list)
    tl.run()
    # Detection rate should be close to mean photon number x mirror fidelity.
    assert (abs(((len(receiver.log) / STATE_LEN) - (MEAN * FIDELITY))) < 0.1)
    for (time, src, qubit) in receiver.log:
        index = int(qubit.name)
        # Received state must match what was emitted ...
        assert (state_list[index] == qubit.quantum_state.state)
        # ... and arrive at emission offset plus both channel delays.
        assert (time == (((index * (.0 / LS_FREQ)) + qc1.delay) + qc2.delay))
class ToggleObjectAction(BaseAction):
    """Reward shaping for toggle interactions (switching objects on/off)."""

    # Simulator actions this reward applies to.
    valid_actions = {'ToggleObjectOn', 'ToggleObjectOff'}

    def get_reward(self, state, prev_state, expert_plan, goal_idx):
        # Guard: the last executed action must be a toggle action.
        if state.metadata['lastAction'] not in self.valid_actions:
            return self.rewards['invalid_action'], False
        planner_action = expert_plan[goal_idx]['planner_action']
        target = get_object(planner_action['objectId'], state.metadata)
        if target is None:
            # Target object not present in the scene metadata.
            return self.rewards['neutral'], False
        if target['isToggled']:
            return self.rewards['positive'], True
        return self.rewards['negative'], False
def main(_):
    """Train the 1B-word language model with parallax data parallelism."""
    vocab = Vocabulary.from_file(os.path.join(FLAGS.datadir, '1b_word_vocab.txt'))
    dataset = Dataset(vocab, os.path.join(FLAGS.datadir, 'training-monolingual.tokenized.shuffled/*'))
    # Build the single-GPU graph once; parallax replicates it per device.
    single_gpu_graph = tf.Graph()
    with single_gpu_graph.as_default():
        with tf.variable_scope('model'):
            model = language_model_graph.build_model()
    def run(sess, num_workers, worker_id, num_replicas_per_worker):
        # Per-replica recurrent state, zero-initialized on first use.
        state_c = []
        state_h = []
        if (len(state_c) == 0):
            state_c.extend([np.zeros([FLAGS.batch_size, model.state_size], dtype=np.float32) for _ in range(num_replicas_per_worker)])
            state_h.extend([np.zeros([FLAGS.batch_size, model.projected_size], dtype=np.float32) for _ in range(num_replicas_per_worker)])
        prev_global_step = sess.run(model.global_step)[0]
        prev_time = time.time()
        data_iterator = dataset.iterate_forever((FLAGS.batch_size * num_replicas_per_worker), FLAGS.num_steps, num_workers, worker_id)
        fetches = {'global_step': model.global_step, 'loss': model.loss, 'train_op': model.train_op, 'final_state_c': model.final_state_c, 'final_state_h': model.final_state_h}
        for local_step in range(FLAGS.max_steps):
            if FLAGS.use_synthetic:
                # Synthetic random batches for pure throughput benchmarking.
                x = np.random.randint(low=0, high=model.vocab_size, size=((FLAGS.batch_size * num_replicas_per_worker), FLAGS.num_steps))
                y = np.random.randint(low=0, high=model.vocab_size, size=((FLAGS.batch_size * num_replicas_per_worker), FLAGS.num_steps))
                w = np.ones(((FLAGS.batch_size * num_replicas_per_worker), FLAGS.num_steps))
            else:
                (x, y, w) = next(data_iterator)
            # Split each batch across the replicas on this worker.
            feeds = {}
            feeds[model.x] = np.split(x, num_replicas_per_worker)
            feeds[model.y] = np.split(y, num_replicas_per_worker)
            feeds[model.w] = np.split(w, num_replicas_per_worker)
            feeds[model.initial_state_c] = state_c
            feeds[model.initial_state_h] = state_h
            fetched = sess.run(fetches, feeds)
            # Carry the recurrent state across steps (truncated BPTT).
            state_c = fetched['final_state_c']
            state_h = fetched['final_state_h']
            if ((local_step % FLAGS.log_frequency) == 0):
                cur_time = time.time()
                elapsed_time = (cur_time - prev_time)
                # Words/sec over the global steps since the last log line.
                num_words = (FLAGS.batch_size * FLAGS.num_steps)
                wps = (((fetched['global_step'][0] - prev_global_step) * num_words) / elapsed_time)
                prev_global_step = fetched['global_step'][0]
                parallax.log.info(('Iteration %d, time = %.2fs, wps = %.0f, train loss = %.4f' % (fetched['global_step'][0], (cur_time - prev_time), wps, fetched['loss'][0])))
                prev_time = cur_time
    (sess, num_workers, worker_id, num_replicas_per_worker) = parallax.parallel_run(single_gpu_graph, FLAGS.resource_info_file, sync=FLAGS.sync, parallax_config=parallax_config.build_config())
    run(sess, num_workers, worker_id, num_replicas_per_worker)
class Conv_Block(nn.Module):
    """Stack of dilated, 'same'-padded convolutions with reflect padding.

    num_conv_layers channel-preserving convs (each followed by BatchNorm and
    ReLU) feed a final conv that maps in_channels -> out_channels. Spatial
    size is preserved throughout.
    """

    def __init__(self, in_channels, out_channels, kernel_size, num_conv_layers=3, dilation_rate=2):
        super(Conv_Block, self).__init__()
        self.num_conv_layers = num_conv_layers
        self.input_dim = in_channels
        self.output_dim = out_channels
        layers = []
        # Inner convs: channel-preserving, bias-free (BatchNorm follows).
        for _ in range(self.num_conv_layers):
            layers += [
                nn.Conv2d(in_channels, in_channels, dilation=dilation_rate, kernel_size=kernel_size, bias=False, padding='same', padding_mode='reflect'),
                nn.BatchNorm2d(in_channels),
                nn.ReLU(),
            ]
        # Output conv changes the channel count and keeps its bias.
        layers.append(nn.Conv2d(in_channels, out_channels, dilation=dilation_rate, kernel_size=kernel_size, bias=True, padding='same', padding_mode='reflect'))
        self.seq = nn.Sequential(*layers)

    def forward(self, input_tensor):
        """Apply the stack; output has out_channels and the same spatial size."""
        return self.seq(input_tensor)
def compute_bench(samples_range, features_range):
    """Benchmark LARS vs OMP (with and without Gram) on sparse coded signals.

    Returns a dict of 2-D arrays (rows: features, cols: samples) holding the
    runtime ratios time(LARS)/time(OMP).
    """
    it = 0
    results = dict()
    # Timing grids: rows index n_features, columns index n_samples.
    lars = np.empty((len(features_range), len(samples_range)))
    lars_gram = lars.copy()
    omp = lars.copy()
    omp_gram = lars.copy()
    max_it = (len(samples_range) * len(features_range))
    for (i_s, n_samples) in enumerate(samples_range):
        for (i_f, n_features) in enumerate(features_range):
            it += 1
            n_informative = (n_features // 10)
            print('')
            print(('Iteration %03d of %03d' % (it, max_it)))
            print('')
            # NOTE(review): n_samples/n_features appear deliberately swapped
            # here — the generated dictionary is transposed just below.
            dataset_kwargs = {'n_samples': 1, 'n_components': n_features, 'n_features': n_samples, 'n_nonzero_coefs': n_informative, 'random_state': 0}
            print(('n_samples: %d' % n_samples))
            print(('n_features: %d' % n_features))
            (y, X, _) = make_sparse_coded_signal(**dataset_kwargs)
            X = np.asfortranarray(X.T)
            # gc before each timed region so collection pauses don't pollute timings.
            gc.collect()
            print('benchmarking lars_path (with Gram):', end='')
            sys.stdout.flush()
            tstart = time()
            # The Gram/Xy precomputation is intentionally inside the timed region.
            G = np.dot(X.T, X)
            Xy = np.dot(X.T, y)
            lars_path_gram(Xy=Xy, Gram=G, n_samples=y.size, max_iter=n_informative)
            delta = (time() - tstart)
            print(('%0.3fs' % delta))
            lars_gram[(i_f, i_s)] = delta
            gc.collect()
            print('benchmarking lars_path (without Gram):', end='')
            sys.stdout.flush()
            tstart = time()
            lars_path(X, y, Gram=None, max_iter=n_informative)
            delta = (time() - tstart)
            print(('%0.3fs' % delta))
            lars[(i_f, i_s)] = delta
            gc.collect()
            print('benchmarking orthogonal_mp (with Gram):', end='')
            sys.stdout.flush()
            tstart = time()
            orthogonal_mp(X, y, precompute=True, n_nonzero_coefs=n_informative)
            delta = (time() - tstart)
            print(('%0.3fs' % delta))
            omp_gram[(i_f, i_s)] = delta
            gc.collect()
            print('benchmarking orthogonal_mp (without Gram):', end='')
            sys.stdout.flush()
            tstart = time()
            orthogonal_mp(X, y, precompute=False, n_nonzero_coefs=n_informative)
            delta = (time() - tstart)
            print(('%0.3fs' % delta))
            omp[(i_f, i_s)] = delta
    results['time(LARS) / time(OMP)\n (w/ Gram)'] = (lars_gram / omp_gram)
    results['time(LARS) / time(OMP)\n (w/o Gram)'] = (lars / omp)
    return results
def get_gradnorm(optimizer, group=0):
    """Mean L2 norm of the gradients in one of *optimizer*'s param groups.

    Args:
        optimizer: torch optimizer whose param_groups are inspected.
        group: index of the parameter group to measure (default 0).

    Returns:
        Mean of per-parameter gradient norms as a float, or 0 when no
        parameter in the group has a gradient.
    """
    # Skip params without a gradient (p.grad is None before any backward
    # pass, or for frozen params) — torch.norm(None) would raise.
    norms = [torch.norm(p.grad).item() for p in optimizer.param_groups[group]['params'] if p.grad is not None]
    gradnorm = (np.mean(norms) if norms else 0)
    return gradnorm
_properties
class Stream(Data):
    """Data descriptor for a stream (FIFO) object, possibly an array of streams.

    NOTE(review): the @make_properties-style decorator that registers the
    ListProperty/SymbolicProperty fields is not visible in this excerpt —
    confirm it was not stripped.
    """
    # Per-dimension offsets into the stream array.
    offset = ListProperty(element_type=symbolic.pystr_to_symbolic)
    buffer_size = SymbolicProperty(desc='Size of internal buffer.', default=0)
    def __init__(self, dtype, buffer_size, shape=None, transient=False, storage=dtypes.StorageType.Default, location=None, offset=None, lifetime=dtypes.AllocationLifetime.Scope, debuginfo=None):
        if (shape is None):
            shape = (1,)
        self.buffer_size = buffer_size
        if (offset is not None):
            if (len(offset) != len(shape)):
                raise TypeError('Offset must be the same size as shape')
            self.offset = cp.copy(offset)
        else:
            # Default: zero offset in every dimension.
            self.offset = ([0] * len(shape))
        super(Stream, self).__init__(dtype, shape, transient, storage, location, lifetime, debuginfo)
    def to_json(self):
        # Serialize all registered properties plus the concrete type name.
        attrs = serialize.all_properties_to_json(self)
        retdict = {'type': type(self).__name__, 'attributes': attrs}
        return retdict
    def from_json(cls, json_obj, context=None):
        # NOTE(review): takes `cls` but no @classmethod decorator is visible —
        # likely stripped in this excerpt; confirm against upstream.
        ret = cls(dtypes.int8, 1)
        serialize.set_properties_from_json(ret, json_obj, context=context)
        return ret
    def __repr__(self):
        return ('%s (dtype=%s, shape=%s)' % (type(self).__name__, self.dtype, self.shape))
    def total_size(self):
        # Total number of stream elements (product of the shape).
        return _prod(self.shape)
    def strides(self):
        # Row-major (C-order) strides over the stream array.
        return [_prod(self.shape[(i + 1):]) for i in range(len(self.shape))]
    def start_offset(self):
        return 0
    def optional(self) -> bool:
        return False
    def may_alias(self) -> bool:
        return False
    def clone(self):
        return type(self)(self.dtype, self.buffer_size, self.shape, self.transient, self.storage, self.location, self.offset, self.lifetime, self.debuginfo)
    def is_equivalent(self, other):
        # Equivalent iff same concrete type, dtype and dimension-wise shape.
        if (not isinstance(other, type(self))):
            return False
        if (self.dtype != other.dtype):
            return False
        if (len(self.shape) != len(other.shape)):
            return False
        for (dim, otherdim) in zip(self.shape, other.shape):
            if (dim != otherdim):
                return False
        return True
    def as_arg(self, with_types=True, for_call=False, name=None):
        # C++ argument declaration for code generation.
        if ((not with_types) or for_call):
            return name
        if (self.storage in [dtypes.StorageType.GPU_Global, dtypes.StorageType.GPU_Shared]):
            # GPU streams carry a compile-time flag for power-of-two buffers.
            return ('dace::GPUStream<%s, %s> %s' % (str(self.dtype.ctype), ('true' if sp.log(self.buffer_size, 2).is_Integer else 'false'), name))
        return ('dace::Stream<%s> %s' % (str(self.dtype.ctype), name))
    def sizes(self):
        return [(d.name if isinstance(d, symbolic.symbol) else str(d)) for d in self.shape]
    def size_string(self):
        return ' * '.join([cppunparse.pyexpr2cpp(symbolic.symstr(s, cpp_mode=True)) for s in self.shape])
    def is_stream_array(self):
        return (_prod(self.shape) != 1)
    def covers_range(self, rng):
        # True if every (begin, end, step) range fits inside this shape.
        if (len(rng) != len(self.shape)):
            return False
        for (s, (rb, re, rs)) in zip(self.shape, rng):
            if isinstance(s, sp.Basic):
                olds = s
                # Re-create the symbol with a positivity assumption so the
                # comparisons below can be decided symbolically.
                if ('positive' in s.assumptions0):
                    s = sp.Symbol(str(s), **s.assumptions0)
                else:
                    s = sp.Symbol(str(s), positive=True, **s.assumptions0)
                if isinstance(rb, sp.Basic):
                    rb = rb.subs({olds: s})
                if isinstance(re, sp.Basic):
                    re = re.subs({olds: s})
                if isinstance(rs, sp.Basic):
                    rs = rs.subs({olds: s})
            try:
                if (rb < 0):
                    return False
            except TypeError:
                # Comparison undecidable symbolically; assume it fits.
                pass
            try:
                if (re > s):
                    return False
            except TypeError:
                pass
        return True
    def used_symbols(self, all_symbols: bool) -> Set[symbolic.SymbolicType]:
        # Free symbols from the base class plus buffer size and offsets.
        result = super().used_symbols(all_symbols)
        if ((self.transient or all_symbols) and isinstance(self.buffer_size, sp.Expr)):
            result |= set(self.buffer_size.free_symbols)
        for o in self.offset:
            if isinstance(o, sp.Expr):
                result |= set(o.free_symbols)
        return result
    def free_symbols(self):
        return self.used_symbols(all_symbols=True)
def create_mention_span_representations(mentions, model, device, topic_docs, is_event, requires_grad):
    """Attach a span representation (.span_rep) to every mention, in place."""
    for m in mentions:
        # Representation is computed against the full topic documents.
        m.span_rep = get_mention_span_rep(m, device, model, topic_docs, is_event, requires_grad)
def conv(in_planes, out_planes, dilation=1, kernel_size=3, stride=1):
    """Conv2d -> GroupNorm(1 group) -> ReLU block.

    Padding is chosen so that, at stride 1, the spatial size is preserved
    for any kernel size / dilation combination.
    """
    # Effective kernel extent minus one, halved, gives 'same'-style padding.
    effective = (kernel_size - 1) + (kernel_size - 1) * (dilation - 1)
    return nn.Sequential(
        nn.Conv2d(in_planes, out_planes, dilation=dilation, kernel_size=kernel_size, stride=stride, padding=effective // 2),
        nn.GroupNorm(1, out_planes),
        nn.ReLU(inplace=True),
    )
def _dump_protobuf(args, proto, prefix, depth):
    """Dump an NNP protobuf: raw recursive field walk in verbose mode,
    otherwise a structured summary of optimizers, monitors and executors."""
    if args.dump_verbose:
        # Depth-limited generic protobuf field walk.
        if (0 <= depth <= len(prefix)):
            print('{} ...'.format(':'.join(prefix)))
            return
        for (desc, field) in proto.ListFields():
            if isinstance(field, (int, float, complex, str)):
                print('{}:{}: {}'.format(':'.join(prefix), desc.name, field))
            elif isinstance(field, collections.Iterable):
                # NOTE(review): collections.Iterable was removed in Python
                # 3.10 — this likely needs collections.abc.Iterable.
                print('{} has {} {}(s).'.format(':'.join(prefix), len(field), desc.name))
                for (n, f) in enumerate(field[:args.dump_limit]):
                    if isinstance(f, (int, float, complex, str)):
                        print('{}:{}[{}]: {}'.format(':'.join(prefix), desc.name, n, f))
                    elif ((depth < 0) or (depth > (len(prefix) + 1))):
                        _dump_protobuf(args, f, (prefix + ['{}[{}]'.format(desc.name, n)]), depth)
            else:
                _dump_protobuf(args, field, (prefix + [desc.name]), depth)
    else:
        # Index parameters by variable name for the existence checks below.
        params = {}
        for par in proto.parameter:
            params[par.variable_name] = [x for x in par.shape.dim]
        nets = {}
        for net in proto.network:
            ninfo = {'variables': {}, 'functions': []}
            for var in net.variable:
                if (var.type == 'Parameter'):
                    if (var.name not in params):
                        print('[ERROR] Parameter [{}] in network[{}] not found.'.format(var.name, net.name))
                        print((' ' + 'Make sure that you do not forget to read parameter file.'))
                        print((' ' + 'Otherwise it should be expander problem. Please report us.'))
                        sys.exit((- 1))
                ninfo['variables'][var.name] = {'type': var.type, 'shape': [x for x in var.shape.dim]}
            for func in net.function:
                ninfo['functions'].append(func)
            nets[net.name] = ninfo
        def _dump_network_arg(prefix, name, var):
            # Print each bound variable of an optimizer/monitor/executor arg.
            for (i, v) in enumerate(var):
                shape = ' - '
                if (v.variable_name in net['variables']):
                    shape = net['variables'][v.variable_name]['shape']
                print((('{}{} variable[{}]:'.format(prefix, name, i) + ' Name:{:30}'.format(v.variable_name)) + ' Shape:{}'.format(shape)))
        def _dump_network(prefix, net):
            if args.dump_variable_name:
                # Single-variable query mode: print one shape and stop.
                if (args.dump_variable_name in net['variables']):
                    v = args.dump_variable_name
                    print('Variable Name: {:20} Shape: {}'.format(v, net['variables'][v]['shape']))
                else:
                    print('DUMP ERROR: variable {} not found.'.format(args.dump_variable_name))
                return
            if args.dump_functions:
                for (i, f) in enumerate(net['functions']):
                    func_prefix = '{} Function[{:^5}]: '.format(prefix, i)
                    print('{}Type: {:20} Name: {}'.format(func_prefix, f.type, f.name))
                    if args.dump_variables:
                        for (j, v) in enumerate(f.input):
                            print('{} Input{}: Name: {:20} Shape: {}'.format(func_prefix, j, v, net['variables'][v]['shape']))
                        for (j, v) in enumerate(f.output):
                            print('{} Output{}: Name: {:20} Shape: {}'.format(func_prefix, j, v, net['variables'][v]['shape']))
        for (i, opt) in enumerate(proto.optimizer):
            net = nets[opt.network_name]
            prefix = ' Optimizer[{}]: '.format(i)
            print('{}{}'.format(prefix, opt.name))
            _dump_network_arg(prefix, ' (In) Data ', opt.data_variable)
            _dump_network_arg(prefix, ' (In) Generator', opt.generator_variable)
            _dump_network_arg(prefix, ' (Out)Loss ', opt.loss_variable)
            _dump_network(prefix, net)
        for (i, mon) in enumerate(proto.monitor):
            net = nets[mon.network_name]
            prefix = ' Monitor [{}]: '.format(i)
            print('{}{}'.format(prefix, mon.name))
            _dump_network_arg(prefix, ' (In) Data ', mon.data_variable)
            _dump_network_arg(prefix, ' (In) Generator', mon.generator_variable)
            _dump_network_arg(prefix, ' (Out)Monitor ', mon.monitor_variable)
            _dump_network(prefix, net)
        for (i, exe) in enumerate(proto.executor):
            net = nets[exe.network_name]
            prefix = ' Executor [{}]: '.format(i)
            print('{}{}'.format(prefix, exe.name))
            _dump_network_arg(prefix, ' (In) Data ', exe.data_variable)
            _dump_network_arg(prefix, ' (In) Generator', exe.generator_variable)
            _dump_network_arg(prefix, ' (Out)Loss ', exe.loss_variable)
            _dump_network_arg(prefix, ' (Out)Output ', exe.output_variable)
            _dump_network(prefix, net)
class EarlyStopping():
    """Early-stops training when validation loss stops improving.

    After `patience` consecutive epochs without an improvement larger than
    `delta`, sets `early_stop = True`. Every improvement checkpoints the
    model state dict to `<path>/checkpoint.pth`.
    """

    def __init__(self, patience=7, verbose=False, delta=0):
        self.patience = patience    # epochs to tolerate without improvement
        self.verbose = verbose      # log a message on every checkpoint save
        self.counter = 0            # epochs since the last improvement
        self.best_score = None      # best score (= -val_loss) seen so far
        self.early_stop = False     # flag polled by the training loop
        # Bug fix: np.Inf was removed in NumPy 2.0 — use np.inf.
        self.val_loss_min = np.inf  # lowest validation loss seen so far
        self.delta = delta          # minimum improvement that resets patience

    def __call__(self, val_loss, model, path):
        """Record one epoch's validation loss; checkpoint or count toward stop."""
        score = (- val_loss)
        if (self.best_score is None):
            # First epoch: establish the baseline and save.
            self.best_score = score
            self.save_checkpoint(val_loss, model, path)
        elif (score < (self.best_score + self.delta)):
            # No sufficient improvement: count toward early stop.
            self.counter += 1
            print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
            if (self.counter >= self.patience):
                self.early_stop = True
        else:
            # Improvement: reset the counter and save a new checkpoint.
            self.best_score = score
            self.save_checkpoint(val_loss, model, path)
            self.counter = 0

    def save_checkpoint(self, val_loss, model, path):
        """Save model weights to <path>/checkpoint.pth and update the minimum."""
        if self.verbose:
            print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ...')
        torch.save(model.state_dict(), ((path + '/') + 'checkpoint.pth'))
        self.val_loss_min = val_loss
def add_value_info_as_variable(network, info):
    """Append ONNX ValueInfoProto *info* to *network* as a new variable.

    Only tensor-typed value infos are supported; unknown (symbolic)
    dimensions are encoded as -1.

    Returns:
        The newly added variable proto.

    Raises:
        ValueError: if *info* is not tensor-typed.
    """
    if (not info.type.HasField('tensor_type')):
        # Bug fix: the original format string contained a literal "info.name"
        # and only one placeholder, so the name was never interpolated and
        # the type argument was silently dropped.
        raise ValueError("Only TensorProto is allowed as ValueInfoProto's type for {} (Got {})".format(info.name, info.type))
    t = info.type.tensor_type
    v = network.variable.add()
    v.name = info.name
    shape = normalize_shape(t.shape)
    # dim_param set => symbolic/unknown dimension -> -1 sentinel.
    v.shape.dim.extend([(x.dim_value if (not x.dim_param) else (- 1)) for x in shape.dim])
    return v
def test_simple():
    """Merging two RegularArrays concatenates rows and keeps element size 2."""
    left = ak.from_numpy(np.array([[1, 2], [3, 4], [5, 6]]), regulararray=True)
    right = ak.from_numpy(np.array([[7, 8], [9, 10]]), regulararray=True)
    merged = left.layout._mergemany([right.layout])
    assert isinstance(merged, ak.contents.RegularArray)
    assert merged.size == 2
    assert ak.operations.to_list(merged) == [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
def main(config):
    """Entry point: run training or evaluation depending on config.train."""
    # Speed up fixed-size convolution workloads.
    cudnn.benchmark = True
    if not config.train:
        Tester(config).test()
        return
    # Training: prepare the per-version output folders.
    for base in (config.model_save_path, config.sample_path, config.log_path):
        make_folder(base, config.version)
    data_loader = Data_Loader(config.img_path, config.label_path, config.imsize, config.batch_size, config.train)
    Trainer(data_loader.loader(), config).train()
def read_init():
    """Parse transformers/__init__.py and map each backend to the object
    names imported under its TYPE_CHECKING guard."""
    with open(os.path.join(PATH_TO_TRANSFORMERS, '__init__.py'), 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    # Skip everything before the TYPE_CHECKING block.
    line_index = 0
    while (not lines[line_index].startswith('if TYPE_CHECKING')):
        line_index += 1
    backend_specific_objects = {}
    while (line_index < len(lines)):
        backend = find_backend(lines[line_index])
        if (backend is not None):
            # Jump past the availability check to its `else:` import branch.
            while (not lines[line_index].startswith('    else:')):
                line_index += 1
            line_index += 1
            objects = []
            # Collect names while still inside the (8-space indented) block.
            while ((len(lines[line_index]) <= 1) or lines[line_index].startswith((' ' * 8))):
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if (single_line_import_search is not None):
                    # `from x import a, b, c` on a single line.
                    objects.extend(single_line_import_search.groups()[0].split(', '))
                elif line.startswith((' ' * 12)):
                    # Continuation line of a multi-line import: strip the
                    # 12-space indent and the trailing comma.
                    objects.append(line[12:(- 2)])
                line_index += 1
            backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def get_grouped_params(model, args, no_decay=['bias', 'LayerNorm.weight']):
    """Split model parameters into weight-decayed and decay-free groups.

    Parameters whose name contains any substring in *no_decay* (biases and
    LayerNorm weights by default) get weight_decay=0; the rest use
    args.weight_decay.
    """
    decayed, undecayed = [], []
    for name, param in model.named_parameters():
        bucket = undecayed if any(nd in name for nd in no_decay) else decayed
        bucket.append(param)
    return [
        {'params': decayed, 'weight_decay': args.weight_decay},
        {'params': undecayed, 'weight_decay': 0.0},
    ]
class RootMeanSquaredError(NumpyArrayMetric):
    """RMSE between self.reference and self.prediction (set by the base class)."""

    def __init__(self, metric: str='RMSE'):
        super().__init__(metric)

    def calculate(self):
        # Square root of the mean squared residual.
        residual = self.reference - self.prediction
        return np.sqrt(np.mean(residual ** 2))
def argmax_output_model(input_shape):
    """Small conv net whose output is an argmax over the channel axis."""
    inputs = layers.Input(shape=input_shape)
    hidden = layers.Conv2D(3, 3)(inputs)
    hidden = layers.BatchNormalization()(hidden)
    hidden = layers.Conv2D(3, 3)(hidden)
    hidden = layers.ReLU()(hidden)
    # Integer class index per spatial position (channel-wise argmax).
    outputs = tf.argmax(hidden, axis=(- 1))
    return keras.Model(inputs=inputs, outputs=outputs)
def main():
    """Download/trim the videos listed in a CSV, serially or via joblib workers."""
    args = parse_args()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO, format='%(message)s')
    # Output layout: <out_dir>/{train,test}
    args.out_dir.mkdir(exist_ok=True, parents=True)
    for subset_dir in ('train', 'test'):
        (args.out_dir / subset_dir).mkdir(exist_ok=True)
    data = utils.load_csv_text(args.in_filename, True)
    logging.info(f'Iterating over the videos...')
    if args.jobs == 1:
        # Serial path with a progress bar.
        progress = tqdm.tqdm(data, ncols=80)
        for youtube_id, start, _, subset in progress:
            progress.set_postfix_str(youtube_id)
            process(youtube_id, args.out_dir, start, args.duration, subset, args.trials, args.skip_existing, args.ignore_exceptions)
    else:
        # Parallel path: one delayed call per CSV row.
        joblib.Parallel(args.jobs, verbose=5)(
            joblib.delayed(process)(youtube_id, args.out_dir, start, args.duration, subset, args.trials, args.skip_existing, args.ignore_exceptions)
            for (youtube_id, start, _, subset) in data
        )
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    """Fast (Rust-backed) GPT-2 tokenizer using byte-level BPE.

    `add_prefix_space` controls whether a leading space is inserted so the
    first word is tokenized like any other word.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = GPT2Tokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token='<|endoftext|>', bos_token='<|endoftext|>', eos_token='<|endoftext|>', add_prefix_space=False, **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs)
        self.add_bos_token = kwargs.pop('add_bos_token', False)
        # Re-create the backend pre-tokenizer if its serialized
        # add_prefix_space disagrees with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if (pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space):
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        # Pre-tokenized input is only valid with add_prefix_space=True.
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert (self.add_prefix_space or (not is_split_into_words)), f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.'
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        # Same pre-tokenized-input constraint as _batch_encode_plus.
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert (self.add_prefix_space or (not is_split_into_words)), f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.'
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        # Delegates to the Rust tokenizer's model serialization.
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def _build_conversation_input_ids(self, conversation: 'Conversation') -> List[int]:
        # Concatenate each utterance followed by EOS; keep only the newest
        # model_max_length tokens.
        input_ids = []
        for (is_user, text) in conversation.iter_texts():
            input_ids.extend((self.encode(text, add_special_tokens=False) + [self.eos_token_id]))
        if (len(input_ids) > self.model_max_length):
            input_ids = input_ids[(- self.model_max_length):]
        return input_ids
def _get_approximate_success(prev_rgb, frame, action):
    """Heuristic success check: did the action visibly change the frame?

    Labels the changed-pixel mask into connected regions and inspects the
    largest one. Open/Close actions accept a large region (>500 px); any
    action accepts a moderate one (>100 px).
    """
    # Boolean mask of pixels that differ in any channel.
    changed = np.zeros(prev_rgb.shape)
    changed[np.where(prev_rgb != frame)] = 1
    changed = np.sum(changed, axis=2).astype(bool)
    # 8-connected regions of change; label 0 is background.
    regions = skimage.morphology.label(changed, connectivity=2)
    largest = -1
    for label_id in range(1, np.max(regions) + 1):
        largest = max(len(np.where(regions == label_id)[0]), largest)
    # NOTE: the >500 branch is logically subsumed by the >100 branch; kept
    # to mirror the original decision structure.
    return bool((action in ['OpenObject', 'CloseObject'] and largest > 500) or largest > 100)
def simple_multi_input_reduce_tests(rank, world_size):
    """Fixtures for multi-input reduce tests: (op, per-rank inputs, expected).

    Each rank contributes two scalar tensors; the expected tensor is the
    reduction over all 2 * world_size inputs across ranks.
    """
    base = 2 * rank
    return [
        # Sum over 0 .. 2w-1 equals w * (2w - 1).
        (c10d.ReduceOp.SUM,
         [torch.tensor([(base + 0.0)]), torch.tensor([(base + 1.0)])],
         torch.tensor([float(world_size * ((2 * world_size) - 1))])),
        # Product over 1 .. 2w equals (2w)!.
        (c10d.ReduceOp.PRODUCT,
         [torch.tensor([(base + 1.0)]), torch.tensor([(base + 2.0)])],
         torch.tensor([float(math.factorial((2 * world_size)))])),
        (c10d.ReduceOp.MIN,
         [torch.tensor([(base + 1.0)]), torch.tensor([(base + 2.0)])],
         torch.tensor([1.0])),
        # NOTE: the expected MAX tensor is integer-typed (no float()),
        # exactly as in the original.
        (c10d.ReduceOp.MAX,
         [torch.tensor([(base + 1.0)]), torch.tensor([(base + 2.0)])],
         torch.tensor([(2 * world_size)])),
    ]
.parametrize('lil_container', LIL_CONTAINERS)
def test_sample_weights(lil_container):
    """Heavily weighting the last three samples flips the prediction for X[2]."""
    X_sparse = lil_container(X)
    classifier = svm.SVC()
    # Unweighted fit: X[2] is classified as class 1.
    classifier.fit(X_sparse, Y)
    assert_array_equal(classifier.predict([X[2]]), [1.0])
    # Give the last three samples 100x the weight of the first three.
    weights = [0.1] * 3 + [10] * 3
    classifier.fit(X_sparse, Y, sample_weight=weights)
    assert_array_equal(classifier.predict([X[2]]), [2.0])
def all_gather_list(data, max_size=4096):
    """Gather arbitrary picklable *data* from all Horovod workers.

    Each worker pickles its payload into a fixed-size byte buffer
    ([len_hi, len_lo, payload...]), allgathers the buffers, and unpickles
    every worker's slice. Returns one result per worker.
    """
    world_size = hvd.size()
    # (Re)allocate the send buffer lazily; cached on the function object.
    if ((not hasattr(all_gather_list, '_in_buffer')) or (max_size != all_gather_list._in_buffer.size())):
        all_gather_list._in_buffer = torch.cuda.ByteTensor(max_size)
    in_buffer = all_gather_list._in_buffer
    enc = pickle.dumps(data)
    enc_size = len(enc)
    if ((enc_size + 2) > max_size):
        raise ValueError('encoded data exceeds max_size: {}'.format((enc_size + 2)))
    # Two-byte, base-255 length header limits max_size to 255 * 256.
    assert (max_size < (255 * 256))
    in_buffer[0] = (enc_size // 255)
    in_buffer[1] = (enc_size % 255)
    in_buffer[2:(enc_size + 2)] = torch.ByteTensor(list(enc))
    out = hvd.allgather(in_buffer.cuda())
    results = []
    # `out` is the concatenation of every worker's max_size-byte buffer.
    for i in range(0, (max_size * world_size), max_size):
        out_buffer = out[i:(i + max_size)]
        size = ((255 * out_buffer[0].item()) + out_buffer[1].item())
        bytes_list = bytes(out_buffer[2:(size + 2)].tolist())
        result = pickle.loads(bytes_list)
        results.append(result)
    return results
def sgd(opfunc, x, config, state=None):
    """One step of stochastic gradient descent (port of torch/optim/sgd.lua).

    Args:
        opfunc: callable x -> (f(x), df/dx); the returned gradient tensor
            may be modified in place here.
        x: parameter tensor, updated in place.
        config: hyper-parameters (learningRate, learningRateDecay,
            weightDecay, momentum, dampening, nesterov, learningRates,
            weightDecays).
        state: per-call state dict; defaults to *config*.

    Returns:
        (x, fx): the updated parameters and the loss at the old parameters.

    Raises:
        ValueError: on invalid nesterov settings, or if both weightDecay
            and weightDecays are given.
    """
    state = (state if (state is not None) else config)
    lr = config.get('learningRate', 0.001)
    lrd = config.get('learningRateDecay', 0)
    wd = config.get('weightDecay', 0)
    mom = config.get('momentum', 0)
    damp = config.get('dampening', mom)
    nesterov = config.get('nesterov', False)
    lrs = config.get('learningRates', None)
    wds = config.get('weightDecays', None)
    if ('evalCounter' not in state):
        state['evalCounter'] = 0
    # Bug fix: nesterov requires momentum > 0 AND zero dampening; the
    # original used `and`, which only rejected the case where both were
    # wrong at once.
    if (nesterov and ((mom <= 0) or (damp != 0))):
        raise ValueError('Nesterov momentum requires a momentum and zero dampening')
    if ((wd != 0) and (wds is not None)):
        raise ValueError('Only one of wd and wds can be specified')
    (fx, dfdx) = opfunc(x)
    # Weight decay: dfdx += wd * x (scalar) or element-wise via wds.
    if (wd != 0):
        # Modern signature: add_(Number, Tensor) was removed from PyTorch.
        dfdx.add_(x, alpha=wd)
    elif (wds is not None):
        # Bug fix: membership test — indexing a missing key raised KeyError
        # before the buffer could ever be created.
        if ('decayParameters' not in state):
            state['decayParameters'] = torch.Tensor().type_as(x).resize_as_(dfdx)
        state['decayParameters'].copy_(wds).mul_(x)
        dfdx.add_(state['decayParameters'])
    # Momentum buffer (classic or nesterov).
    if (mom != 0):
        if ('dfdx' not in state):
            state['dfdx'] = torch.Tensor().type_as(dfdx).resize_as_(dfdx).copy_(dfdx)
        else:
            state['dfdx'].mul_(mom).add_(dfdx, alpha=(1 - damp))
        if nesterov:
            dfdx.add_(state['dfdx'], alpha=mom)
        else:
            dfdx = state['dfdx']
    # Learning rate annealed by the number of evaluations so far.
    clr = (lr / (1 + (state['evalCounter'] * lrd)))
    if (lrs is not None):
        # Per-parameter learning rates.
        if ('deltaParameters' not in state):
            state['deltaParameters'] = torch.Tensor().type_as(x).resize_as_(dfdx)
        state['deltaParameters'].copy_(lrs).mul_(dfdx)
        x.add_(state['deltaParameters'], alpha=(- clr))
    else:
        x.add_(dfdx, alpha=(- clr))
    state['evalCounter'] += 1
    return (x, fx)
_model
def caformer_s18_in21ft1k(pretrained=False, **kwargs):
    """CAFormer-S18 builder (ImageNet-21k pretrained, finetuned on 1k)."""
    # Two separable-conv mixer stages followed by two attention stages.
    model = MetaFormer(
        depths=[3, 3, 9, 3],
        dims=[64, 128, 320, 512],
        token_mixers=[SepConv, SepConv, Attention, Attention],
        head_fn=MlpHead,
        **kwargs,
    )
    model.default_cfg = default_cfgs['caformer_s18_in21ft1k']
    if pretrained:
        # Fetch the checkpoint listed in the default config.
        weights = torch.hub.load_state_dict_from_url(url=model.default_cfg['url'], map_location='cpu', check_hash=True)
        model.load_state_dict(weights)
    return model
def gen_normals_kernel_indexed(vertices: template(), indices: template(), normals: template(), weights: template()):
    """Compute per-vertex normals for an indexed triangle mesh.

    Each triangle's unit face normal is accumulated into its three vertices,
    then averaged by the per-vertex triangle count.
    NOTE(review): this looks like a Taichi kernel (template() annotations,
    atomic_add, parallelized outer fors) whose @ti.kernel decorator is not
    visible in this excerpt — confirm.
    """
    num_triangles = (indices.shape[0] // 3)
    num_vertices = vertices.shape[0]
    # Reset accumulators.
    for i in range(num_vertices):
        normals[i] = Vector([0.0, 0.0, 0.0])
        weights[i] = 0.0
    for i in range(num_triangles):
        i_a = indices[(i * 3)]
        i_b = indices[((i * 3) + 1)]
        i_c = indices[((i * 3) + 2)]
        a = vertices[i_a]
        b = vertices[i_b]
        c = vertices[i_c]
        # Face normal from the two edge vectors.
        n = (a - b).cross((a - c)).normalized()
        # Atomic: multiple triangles may touch the same vertex concurrently.
        atomic_add(normals[i_a], n)
        atomic_add(normals[i_b], n)
        atomic_add(normals[i_c], n)
        atomic_add(weights[i_a], 1.0)
        atomic_add(weights[i_b], 1.0)
        atomic_add(weights[i_c], 1.0)
    for i in range(num_vertices):
        if (weights[i] > 0.0):
            # Average of the accumulated face normals.
            normals[i] = (normals[i] / weights[i])
def MODEL(model_name, scope, weight_decay, image, label, is_training, Distillation):
    """Build the network, its softmax cross-entropy loss, and top-1 accuracy.

    For 'DML' distillation the teacher branch's loss is additionally
    collected under the 'teacher_class_loss' collection.
    """
    network_fn = nets_factory.get_network_fn(model_name, weight_decay=weight_decay)
    end_points = network_fn(image, label, scope, is_training=is_training, Distill=Distillation)
    loss = tf.losses.softmax_cross_entropy(label, end_points['Logits'])
    if Distillation == 'DML':
        # Deep Mutual Learning: track the teacher's classification loss too.
        teacher_loss = tf.losses.softmax_cross_entropy(label, end_points['Logits_tch'])
        tf.add_to_collection('teacher_class_loss', teacher_loss)
    predictions = tf.cast(tf.argmax(end_points['Logits'], 1), tf.int32)
    targets = tf.cast(tf.argmax(label, 1), tf.int32)
    accuracy = tf.contrib.metrics.accuracy(predictions, targets)
    return (loss, accuracy)
def register_pascal_voc(name, dirname, split, year, class_names=CLASS_NAMES):
    """Register a Pascal-VOC split with detectron2's dataset/metadata catalogs."""
    def _loader():
        # Deferred: instances are only read when the dataset is first used.
        return load_voc_instances(dirname, split, class_names)
    DatasetCatalog.register(name, _loader)
    MetadataCatalog.get(name).set(thing_classes=list(class_names), dirname=dirname, year=year, split=split)
# NOTE(review): mangled decorator fragment — likely an lru_cache-style
# `@..._cache(maxsize=1024)` decorator; confirm against upstream.
_cache(maxsize=1024)
def unit_nhops_to_proc_region(layer, batch_size, region, part, filter_nodes, ifmap_layout, ofmap_layout, options):
    """Number of hops per unit of data to feed the processing region.

    Groups the processing-node coordinates by the fil/ifm/ofm data ranges
    they consume, then delegates per-category hop counting.
    """
    fil_dict = {}
    ofm_dict = {}
    ifm_dict = {}
    # Map each distinct data range to the node coordinates that consume it.
    for pidx in part.gen_pidx():
        coord = part.coordinate(region, pidx)
        (filrng, ifrng, ofrng) = proc_data_range(layer, batch_size, part, pidx)
        if ((ifrng.size() > 0) and (ofrng.size() > 0)):
            ifm_dict.setdefault(ifrng, []).append(coord)
            ofm_dict.setdefault(ofrng, []).append(coord)
            if ((not filrng[0].empty()) and (not filrng[1].empty())):
                fil_dict.setdefault(filrng, []).append(coord)
    # Sanity: every range should be consumed by the same number of nodes.
    assert (len(set((len(v) for v in fil_dict.values()))) <= 1), 'fil val len: {}'.format([len(v) for v in fil_dict.values()])
    assert (len(set((len(v) for v in ifm_dict.values()))) <= 1), 'ifm val len: {}'.format([len(v) for v in ifm_dict.values()])
    assert (len(set((len(v) for v in ofm_dict.values()))) <= 1), 'ofm val len: {}'.format([len(v) for v in ofm_dict.values()])
    # Freeze to hashable dicts (required by the cached helpers below).
    fil_dict = util.HashableDict.fromdict(fil_dict, valfunc=tuple)
    ifm_dict = util.HashableDict.fromdict(ifm_dict, valfunc=tuple)
    ofm_dict = util.HashableDict.fromdict(ofm_dict, valfunc=tuple)
    # Access forwarding / gbuf sharing allow fetching from peer nodes.
    fwd = (options.hw_access_forwarding or options.hw_gbuf_sharing)
    nhops = ([0] * de.NUM)
    nhops[de.FIL] = _unit_nhops_to_fil(layer, filter_nodes, fil_dict, fwd)
    nhops[de.IFM] = _unit_nhops_to_ifm(ifmap_layout, ifm_dict, fwd)
    if ((ofmap_layout.parts == (part,)) and (ofmap_layout.regions == (region,))):
        # Output stays local to this region/partition: no OFM hops needed.
        pass
    else:
        nhops[de.OFM] = _unit_nhops_to_ofm(ofmap_layout, ofm_dict, fwd)
    return nhops
.corpus
def test_snips():
    """SNIPS split sizes match the published train/valid/test counts."""
    config = dotenv_values()
    dataset_root = config['SNIPS']
    # Speaker partition: train / validation / test voices.
    train_speakers = ['Ivy', 'Joanna', 'Joey', 'Justin', 'Kendra', 'Kimberly', 'Matthew', 'Salli']
    valid_speakers = ['Aditi', 'Amy', 'Geraint', 'Nicole']
    test_speakers = ['Brian', 'Emma', 'Raveena', 'Russell']
    dataset = SNIPS(dataset_root, train_speakers, valid_speakers, test_speakers)
    train_data, valid_data, test_data = dataset.data_split
    assert len(train_data) == 104672
    assert len(valid_data) == 2800
    assert len(test_data) == 2800
class PlayerState(object):
    """State of a single player: position, orientation, and held object.

    NOTE(review): upstream versions of this class decorate accessors like
    pos_and_or with @property and from_dict with @staticmethod; no
    decorators are visible in this excerpt — confirm they were not stripped.
    """
    def __init__(self, position, orientation, held_object=None):
        self.position = tuple(position)
        self.orientation = tuple(orientation)
        self.held_object = held_object
        assert (self.orientation in Direction.ALL_DIRECTIONS)
        if (self.held_object is not None):
            # A held object always sits at (and moves with) the player.
            assert isinstance(self.held_object, ObjectState)
            assert (self.held_object.position == self.position)
    def pos_and_or(self):
        return (self.position, self.orientation)
    def has_object(self):
        return (self.held_object is not None)
    def get_object(self):
        # Caller must check has_object() first.
        assert self.has_object()
        return self.held_object
    def set_object(self, obj):
        assert (not self.has_object())
        # Snap the object to the player's position.
        obj.position = self.position
        self.held_object = obj
    def remove_object(self):
        assert self.has_object()
        obj = self.held_object
        self.held_object = None
        return obj
    def update_pos_and_or(self, new_position, new_orientation):
        self.position = new_position
        self.orientation = new_orientation
        # Keep any held object co-located with the player.
        if self.has_object():
            self.get_object().position = new_position
    def deepcopy(self):
        new_obj = (None if (self.held_object is None) else self.held_object.deepcopy())
        return PlayerState(self.position, self.orientation, new_obj)
    def __eq__(self, other):
        return (isinstance(other, PlayerState) and (self.position == other.position) and (self.orientation == other.orientation) and (self.held_object == other.held_object))
    def __hash__(self):
        return hash((self.position, self.orientation, self.held_object))
    def __repr__(self):
        return '{} facing {} holding {}'.format(self.position, self.orientation, str(self.held_object))
    def to_dict(self):
        return {'position': self.position, 'orientation': self.orientation, 'held_object': (self.held_object.to_dict() if (self.held_object is not None) else None)}
    def from_dict(player_dict):
        # Reconstruct from a to_dict() payload; held objects are rebuilt as
        # SoupState instances.
        player_dict = copy.deepcopy(player_dict)
        held_obj = player_dict.get('held_object', None)
        if (held_obj is not None):
            player_dict['held_object'] = SoupState.from_dict(held_obj)
        return PlayerState(**player_dict)
def make_learner_xml(path, filename='korean_learner_corpus_error_sentences.xml'):
    """Collect parsed error-sentence elements from every XML file under
    ``path`` and write them into a single document rooted at <root>.

    Args:
        path: directory containing the source XML files.
        filename: output file for the combined XML document.
    """
    # BUG FIX: the loop variable used to shadow the `filename` parameter, so
    # the combined document was written to the name of the *last listed input
    # file* instead of the requested output filename.
    elements = []
    for entry in os.listdir(path):
        elements.extend(parse_xml(os.path.join(path, entry)))
    root_node = xmlparser.Element('root')
    # Element.extend replaces the old side-effecting list comprehension.
    root_node.extend(elements)
    xmlparser.ElementTree(root_node).write(filename, encoding='utf-8')
def test_Absorptive_expire():
    """Scheduling expiration on an absorptive memory should invalidate exactly
    one event on the timeline."""
    tl = Timeline()
    memory = AbsorptiveMemory('mem', tl, .0, 1, perfect_efficiency, 100, 500)
    parent = DumbParent(memory)
    expire_process = Process(memory, 'expire', [])
    expire_event = Event(.0, expire_process)
    tl.schedule(expire_event)
    # Re-scheduling expiration should invalidate the previously set event.
    memory.expiration_event = expire_event
    memory._schedule_expiration()
    invalid_count = sum(1 for ev in tl.events if ev.is_invalid())
    assert invalid_count == 1
class TableauTuples_all(TableauTuples):
    """The parent class of all tableau tuples, with unrestricted level and size."""

    def __init__(self):
        # Neither the level nor the size is fixed for this parent class.
        super().__init__(category=Sets())
        self._level = None
        self._size = None

    def _repr_(self):
        """Return the string representation of ``self``."""
        return 'Tableau tuples'

    def an_element(self):
        """Return a particular element of ``self``."""
        return self.element_class(self, [[[i]] for i in range(1, 8)])
def one_hot(x: torch.Tensor, v_bins: torch.Tensor) -> torch.Tensor:
    """One-hot encode each value of ``x`` against its nearest bin in ``v_bins``.

    Args:
        x: tensor of arbitrary shape.
        v_bins: 1-D tensor of bin center values.

    Returns:
        Float tensor of shape ``x.shape + (len(v_bins),)`` with a 1 at the
        index of the closest bin for each element of ``x``.
    """
    n_bins = v_bins.shape[0]
    # Broadcast the bins against a trailing dimension appended to x.
    bins = v_bins.view((1,) * x.dim() + (n_bins,))
    nearest = (x.unsqueeze(-1) - bins).abs().argmin(dim=-1)
    return nn.functional.one_hot(nearest, num_classes=n_bins).float()
.parametrize('ctx, func_name', ctxs)
.parametrize('seed', [313])
.parametrize('x_shape, q_shape', [((4, 8, 16, 16), (1, 1, 1, 1)), ((4, 8, 16, 16), (1, 8, 1, 1)), ((16, 8, 3, 3), (16, 1, 1, 1))])
.parametrize('decay', [0.999, 0.9])
.parametrize('x_min_max', [True, False])
.parametrize('ema', [True, False])
.parametrize('ste_fine_grained', [True, False])
.parametrize('eps', [0.01])
.parametrize('quantize', [True, False])
def test_min_max_quantize_forward_backward(seed, x_shape, q_shape, decay, x_min_max, ema, ste_fine_grained, eps, quantize, ctx, func_name):
    """Forward/backward test for F.min_max_quantize against the reference
    implementation, covering the pass-through, EMA, and generic paths.

    NOTE(review): the '@pytest.mark.parametrize' decorators for this test
    appear above with their '@pytest.mark' prefix stripped by extraction.
    """
    from nbla_test_utils import cap_ignore_region, function_tester
    rng = np.random.RandomState(seed)
    x = rng.randn(*x_shape)
    # Quantization range: qr_min in [-0.5, 0), qr_max in [0, 0.5).
    qr_min = ((- 0.5) * rng.rand(*q_shape))
    qr_max = ((+ 0.5) * rng.rand(*q_shape))
    # Quantization levels for 8-bit: [0, 255].
    ql_min = np.zeros(q_shape)
    ql_max = (np.ones(q_shape) * 255)
    inputs = [x, qr_min, qr_max, ql_min, ql_max]
    func_args = [decay, x_min_max, ema, ste_fine_grained, eps, quantize]
    if (not quantize):
        # Pass-through mode: output should equal the input unchanged.
        vinputs = [nn.Variable.from_numpy_array(xd) for xd in inputs]
        v = vinputs[0]
        with nn.context_scope(ctx):
            o = F.min_max_quantize(*(vinputs + func_args))
        np.allclose(o.d, v.d)  # NOTE(review): result unused -- looks like a missing `assert`; confirm intent before changing.
        return
    if (x_min_max and ema):
        # EMA path: function_tester cannot be used because the function
        # mutates its range inputs; check forward and backward manually.
        from nbla_test_utils import ArrayDiffStats
        vinputs = [nn.Variable.from_numpy_array(xd) for xd in inputs]
        vinputs[0].need_grad = True
        with nn.context_scope(ctx):
            y = F.min_max_quantize(*(vinputs + func_args))
        y.forward()
        y_ref = ref_min_max_quantize(x, qr_min, qr_max, ql_min, ql_max, decay, x_min_max, ema, ste_fine_grained, eps, quantize)
        assert np.allclose(y_ref, y.d, atol=1e-05), ArrayDiffStats(y_ref, y.d)
        xv = vinputs[0]
        xv.grad.zero_()  # NOTE(review): transcribed as `xv.grad.zero()` below -- kept byte-identical.
        dy = rng.randn(*y.shape)
        y.backward(dy)
        gx_ref = ref_grad_min_max_quantize(x, qr_min, qr_max, ql_min, ql_max, dy, decay, x_min_max, ema, ste_fine_grained, eps, quantize)
        ag = xv.g.copy()
        assert np.allclose(gx_ref, ag.flatten(), atol=1e-05), ArrayDiffStats(gx_ref, ag.flatten())
        # A second backward must accumulate: gradient doubles.
        y.backward(dy)
        assert np.allclose((ag * 2.0), xv.g.copy(), atol=1e-05), ArrayDiffStats((ag * 2.0), xv.g.copy())
        return
    # Generic path: only x (and optionally the ranges) receive gradients.
    backward = ([True, False, False, False, False, False] if (x_min_max or ema) else [True, True, True, False, False, False])
    function_tester(rng, F.min_max_quantize, ref_min_max_quantize, inputs, func_args=func_args, atol_b=0.001, backward=backward, ctx=ctx, func_name=func_name, disable_half_test=True, ref_grad=ref_grad_min_max_quantize)
class LoraHandler(object):
    """Coordinates LoRA injection, loading, and saving for UNet / text-encoder
    models. Only cloneofsimo's extended LoRA implementation is supported; any
    other version raises ValueError when the handler is constructed.
    """

    def __init__(self, version: LORA_VERSIONS=LoraVersions.cloneofsimo, use_unet_lora: bool=False, use_text_lora: bool=False, save_for_webui: bool=False, only_for_webui: bool=False, lora_bias: str='none', unet_replace_modules: list=None, text_encoder_replace_modules: list=None):
        self.version = version
        # Resolve the loader/injector callables once for the configured version.
        self.lora_loader = self.get_lora_func(func_type=LoraFuncTypes.loader)
        self.lora_injector = self.get_lora_func(func_type=LoraFuncTypes.injector)
        self.lora_bias = lora_bias
        self.use_unet_lora = use_unet_lora
        self.use_text_lora = use_text_lora
        self.save_for_webui = save_for_webui
        self.only_for_webui = only_for_webui
        self.unet_replace_modules = unet_replace_modules
        self.text_encoder_replace_modules = text_encoder_replace_modules
        self.use_lora = any([use_text_lora, use_unet_lora])

    def is_cloneofsimo_lora(self):
        """Return True when the configured implementation is cloneofsimo's."""
        return (self.version == LoraVersions.cloneofsimo)

    def get_lora_func(self, func_type: LORA_FUNC_TYPES=LoraFuncTypes.loader):
        """Return the loader or injector callable for the configured version.

        Raises:
            ValueError: if the version/function combination is unsupported.
        """
        if self.is_cloneofsimo_lora():
            if (func_type == LoraFuncTypes.loader):
                return monkeypatch_or_replace_lora_extended
            if (func_type == LoraFuncTypes.injector):
                return inject_trainable_lora_extended
        # BUG FIX: this was `assert 'LoRA Version does not exist.'`, which is
        # always truthy (a no-op) and silently returned None for unsupported
        # versions; fail loudly instead.
        raise ValueError('LoRA Version does not exist.')

    def check_lora_ext(self, lora_file: str):
        """Whether the filename carries a recognized LoRA file extension."""
        return lora_file.endswith(tuple(LORA_FILE_TYPES))

    def get_lora_file_path(self, lora_path: str, model: Union[(UNet3DConditionModel, CLIPTextModel)]):
        """Find the LoRA weight file in ``lora_path`` matching the model kind.

        Returns the full path of the first file whose name contains the base
        name expected for the model (unet vs. text encoder), or None.
        """
        if os.path.exists(lora_path):
            is_unet = isinstance(model, UNet3DConditionModel)
            base_name = FILE_BASENAMES[0 if is_unet else 1]
            for lora_filename in os.listdir(lora_path):
                if not self.check_lora_ext(lora_filename):
                    continue
                if base_name in lora_filename:
                    return os.path.join(lora_path, lora_filename)
        return None

    def handle_lora_load(self, file_name: str, lora_loader_args: dict=None):
        """Invoke the configured loader and report the loaded file."""
        self.lora_loader(**lora_loader_args)
        print(f'Successfully loaded LoRA from: {file_name}')

    def load_lora(self, model, lora_path: str='', lora_loader_args: dict=None):
        """Load LoRA weights for ``model`` from ``lora_path`` if a file exists."""
        try:
            lora_file = self.get_lora_file_path(lora_path, model)
            if (lora_file is not None):
                # Robustness: tolerate a missing kwargs dict (the original
                # crashed on `.update` when lora_loader_args was None).
                if lora_loader_args is None:
                    lora_loader_args = {}
                lora_loader_args.update({'lora_path': lora_file})
                self.handle_lora_load(lora_file, lora_loader_args)
            else:
                print(f'Could not load LoRAs for {model.__class__.__name__}. Injecting new ones instead...')
        except Exception as e:
            print(f'An error occurred while loading a LoRA file: {e}')

    def get_lora_func_args(self, lora_path, use_lora, model, replace_modules, r, dropout, lora_bias, scale):
        """Build the kwargs dict for the injector from the global lora_args template."""
        return_dict = lora_args.copy()
        if self.is_cloneofsimo_lora():
            return_dict = filter_dict(return_dict, keys=CLONE_OF_SIMO_KEYS)
            return_dict.update({'model': model, 'loras': self.get_lora_file_path(lora_path, model), 'target_replace_module': replace_modules, 'r': r, 'scale': scale, 'dropout_p': dropout})
        return return_dict

    def do_lora_injection(self, model, replace_modules, bias='none', dropout=0, r=4, lora_loader_args=None):
        """Inject trainable LoRA modules into ``model``.

        Returns:
            (params, negation, is_injection_hybrid); ``is_injection_hybrid``
            means the injector also handled loading any existing weights.
        """
        params = None
        negation = None
        is_injection_hybrid = False
        if self.is_cloneofsimo_lora():
            # cloneofsimo's injector both injects and (optionally) loads weights.
            is_injection_hybrid = True
            (params, negation) = self.lora_injector(**lora_loader_args)
            # Sanity check: report success once any up/down pair is present.
            for (_up, _down) in extract_lora_ups_down(model, target_replace_module=replace_modules):
                if all((x is not None) for x in [_up, _down]):
                    print(f'Lora successfully injected into {model.__class__.__name__}.')
                    break
        return (params, negation, is_injection_hybrid)

    def add_lora_to_model(self, use_lora, model, replace_modules, dropout=0.0, lora_path='', r=16, scale=1.0):
        """Inject and/or load LoRA for ``model``.

        Returns:
            (params, negation); ``params`` falls back to the model itself when
            the injector produced none.
        """
        params = None
        negation = None
        lora_loader_args = self.get_lora_func_args(lora_path, use_lora, model, replace_modules, r, dropout, self.lora_bias, scale)
        if use_lora:
            (params, negation, is_injection_hybrid) = self.do_lora_injection(model, replace_modules, bias=self.lora_bias, lora_loader_args=lora_loader_args, dropout=dropout, r=r)
            # Non-hybrid injectors need an explicit load step afterwards.
            if (not is_injection_hybrid):
                self.load_lora(model, lora_path=lora_path, lora_loader_args=lora_loader_args)
        params = (model if (params is None) else params)
        return (params, negation)

    def save_cloneofsimo_lora(self, model, save_path, step, flag):
        """Save cloneofsimo-format LoRA weights for the unet and text encoder."""
        def save_lora(model, name, condition, replace_modules, step, save_path, flag=None):
            # Save one sub-model's LoRA weights when that sub-model is enabled.
            if (condition and (replace_modules is not None)):
                save_path = f'{save_path}/{step}_{name}.pt'
                save_lora_weight(model, save_path, replace_modules, flag)
        save_lora(model.unet, FILE_BASENAMES[0], self.use_unet_lora, self.unet_replace_modules, step, save_path, flag)
        save_lora(model.text_encoder, FILE_BASENAMES[1], self.use_text_lora, self.text_encoder_replace_modules, step, save_path, flag)

    def save_lora_weights(self, model: None, save_path: str='', step: str='', flag=None):
        """Save LoRA weights under ``save_path``/lora, warning about unsupported
        webui export flags."""
        save_path = f'{save_path}/lora'
        os.makedirs(save_path, exist_ok=True)
        if self.is_cloneofsimo_lora():
            if any([self.save_for_webui, self.only_for_webui]):
                warnings.warn("\n You have 'save_for_webui' enabled, but are using cloneofsimo's LoRA implemention.\n Only 'stable_lora' is supported for saving to a compatible webui file.\n ")
            self.save_cloneofsimo_lora(model, save_path, step, flag)
class Adapter(ABC):
    """Base class for adapters that turn scenario instances into a ScenarioState."""

    def __init__(self, adapter_spec: AdapterSpec, tokenizer_service: TokenizerService):
        self.adapter_spec: AdapterSpec = adapter_spec
        # Window service encapsulates the deployment-specific tokenization and
        # context-window handling.
        self.window_service: WindowService = WindowServiceFactory.get_window_service(adapter_spec.model_deployment, tokenizer_service)

    def adapt(self, instances: List[Instance], parallelism: int) -> ScenarioState:
        """Adapt the given instances into a ScenarioState.

        NOTE(review): empty body on an ABC -- presumably an @abstractmethod
        whose decorator was lost in extraction; confirm against upstream.
        """
        pass
.expansion
class ExpandStencilIntelFPGA(dace.library.ExpandTransformation):
    """Expand a stencil library node into a pipelined SDFG targeting Intel FPGA.

    The expansion builds an outer SDFG with a single pipeline scope streaming
    the iteration domain, per-field shift registers (sliding-window buffers)
    sized from the stencil's relative accesses, and a nested SDFG that shifts
    the buffers, reads the incoming wavefront, and runs the compute tasklet
    (optionally unrolled across the vector length).

    NOTE(review): the original indentation of this chunk was lost in
    extraction; the nesting below was reconstructed from the code's syntax --
    confirm against the upstream dace stencil library.
    """
    environments = []

    def expansion(node, parent_state, parent_sdfg):
        # NOTE(review): no `self`/`cls` -- presumably a @staticmethod whose
        # decorator was stripped; confirm.
        # --- Outer SDFG hosting the pipeline scope. ---
        sdfg = dace.SDFG((node.label + '_outer'))
        state = sdfg.add_state((node.label + '_outer'))
        (inputs, outputs, shape, field_to_data, field_to_desc, field_to_edge, vector_lengths) = parse_connectors(node, parent_state, parent_sdfg)
        converter = SubscriptConverter()
        # Pre-register the center access for fields with 'copy' boundary conditions.
        for field in node.boundary_conditions:
            if (node.boundary_conditions[field]['btype'] == 'copy'):
                center_index = tuple((0 for _ in range(len(parent_sdfg.arrays[field_to_data[field]].shape))))
                converter.convert(field, center_index)
        (code, field_accesses) = parse_accesses(node.code.as_string, outputs)
        iterator_mapping = make_iterator_mapping(node, field_accesses, shape)
        vector_length = validate_vector_lengths(vector_lengths, iterator_mapping)
        # The innermost dimension is divided by the vector length for streaming.
        shape_vectorized = tuple((((s / vector_length) if (i == (len(shape) - 1)) else s) for (i, s) in enumerate(shape)))
        # --- Size each field's shift register from its relative access offsets. ---
        buffer_sizes = collections.OrderedDict()
        buffer_accesses = collections.OrderedDict()
        scalars = {}  # fields mapped to no iteration dimension become symbols
        for field_name in inputs:
            relative = field_accesses[field_name]
            dim_mask = iterator_mapping[field_name]
            if (not any(dim_mask)):
                scalars[field_name] = parent_sdfg.symbols[field_name]
                sdfg.add_symbol(field_name, parent_sdfg.symbols[field_name])
                continue
            abs_indices = ([dim_to_abs_val(i, tuple((s for (s, m) in zip(shape, dim_mask) if m)), parent_sdfg) for i in relative] + ([0] if ((field_name in node.boundary_conditions) and (node.boundary_conditions[field_name]['btype'] == 'copy')) else []))
            max_access = max(abs_indices)
            min_access = min(abs_indices)
            buffer_size = ((max_access - min_access) + vector_lengths[field_name])
            buffer_sizes[field_name] = buffer_size
            # (relative accesses, offsets into the buffer, center offset)
            buffer_accesses[field_name] = ([tuple(r) for r in relative], [(i - min_access) for i in abs_indices], (- min_access))
        # Pipeline warm-up (initialization) iterations required per field.
        init_sizes = [(((buffer_sizes[key] - vector_lengths[key]) - val[2]) // vector_length) for (key, val) in buffer_accesses.items()]
        init_size_max = int(np.max(init_sizes))
        # --- Build the pipeline scope over the vectorized iteration domain. ---
        parameters = [f'_i{i}' for i in range(len(shape))]
        iterator_mask = np.array([((s != 0) and (s != 1)) for s in shape], dtype=bool)  # skip degenerate dimensions
        iterators = make_iterators(tuple((s for (s, m) in zip(shape_vectorized, iterator_mask) if m)), parameters=tuple((s for (s, m) in zip(parameters, iterator_mask) if m)))
        pipeline_range = dace.properties.SubsetProperty.from_string(', '.join(iterators.values()))
        pipeline = dace.sdfg.nodes.PipelineScope(('compute_' + node.label), list(iterators.keys()), pipeline_range, dace.dtypes.ScheduleType.FPGA_Device, False, init_size=init_size_max, init_overlap=False, drain_size=init_size_max, drain_overlap=True)
        entry = dace.sdfg.nodes.PipelineEntry(pipeline)
        exit = dace.sdfg.nodes.PipelineExit(pipeline)  # NOTE: shadows the builtin `exit` for the rest of this function
        state.add_nodes_from([entry, exit])
        # --- Nested SDFG: shift buffers -> read wavefront -> compute. ---
        nested_sdfg = dace.SDFG((node.label + '_inner'), parent=state)
        nested_sdfg_tasklet = state.add_nested_sdfg(nested_sdfg, sdfg, ([(k + '_in') for k in inputs if any(iterator_mapping[k])] + [(name + '_buffer_in') for (name, _) in buffer_sizes.items()]), ([(k + '_out') for k in outputs] + [(name + '_buffer_out') for (name, _) in buffer_sizes.items()]), schedule=dace.ScheduleType.FPGA_Device)
        # Propagate parent symbols and loop parameters into the nested SDFG.
        for (sym_name, sym_type) in parent_sdfg.symbols.items():
            nested_sdfg.add_symbol(sym_name, sym_type)
            nested_sdfg_tasklet.symbol_mapping[sym_name] = sym_name
        for p in parameters:
            nested_sdfg.add_symbol(p, dace.int64)
            nested_sdfg_tasklet.symbol_mapping[p] = p
        shift_state = nested_sdfg.add_state((node.label + '_shift'))
        update_state = nested_sdfg.add_state((node.label + '_update'))
        (boundary_code, oob_cond) = generate_boundary_conditions(node, shape, field_accesses, field_to_desc, iterator_mapping)
        # Tasklet epilogue: copy each output's center access to its connector.
        write_code = '\n'.join(['{}_inner_out = {}\n'.format(output, field_accesses[output][tuple((0 for _ in range(len(shape))))]) for output in outputs])
        # Guard writes during pipeline warm-up and for out-of-bounds iterations.
        if ((init_size_max > 0) or (len(oob_cond) > 0)):
            write_cond = []
            if (init_size_max > 0):
                init_cond = pipeline.init_condition()
                write_cond.append(('not ' + init_cond))
                nested_sdfg_tasklet.symbol_mapping[init_cond] = init_cond
                nested_sdfg.add_symbol(init_cond, dace.bool)
            if (len(oob_cond) > 0):
                oob_cond = ' or '.join(sorted(oob_cond))
                oob_cond = f'not ({oob_cond})'
                write_cond.append(oob_cond)
            write_cond = ' and '.join(write_cond)
            # NOTE(review): whitespace inside this generated-code template may
            # have been mangled by extraction -- confirm against upstream.
            write_cond = f'''if {write_cond}:
'''
        else:
            write_cond = ''
        code = ((((boundary_code + '\n') + code) + '\n') + write_code)
        compute_state = nested_sdfg.add_state((node.label + '_compute'))
        compute_inputs = list(itertools.chain.from_iterable([[('_' + v) for v in field_accesses[f].values()] for f in inputs if any(iterator_mapping[f])]))
        compute_tasklet = compute_state.add_tasklet((node.label + '_compute'), compute_inputs, {(name + '_inner_out') for name in outputs}, code, language=dace.dtypes.Language.Python)
        if (vector_length > 1):
            # Unroll the compute across the vector lanes.
            (compute_unroll_entry, compute_unroll_exit) = compute_state.add_map((compute_state.label + '_unroll'), {'i_unroll': f'0:{vector_length}'}, schedule=dace.ScheduleType.FPGA_Device, unroll=True)
        nested_sdfg.add_edge(shift_state, update_state, dace.sdfg.InterstateEdge())
        nested_sdfg.add_edge(update_state, compute_state, dace.sdfg.InterstateEdge())
        for (scalar, scalar_type) in scalars.items():
            nested_sdfg.add_symbol(scalar, scalar_type)
        iterator_code = ''
        # --- Per-field plumbing: external read, wavefront, shift register. ---
        for ((field_name, size), init_size) in zip(buffer_sizes.items(), init_sizes):
            data_name = field_to_data[field_name]
            connector = field_to_edge[field_name].dst_conn
            data_name_outer = connector
            data_name_inner = (field_name + '_in')
            desc_outer = parent_sdfg.arrays[data_name].clone()
            desc_outer.transient = False
            sdfg.add_datadesc(data_name_outer, desc_outer)
            mapping = iterator_mapping[field_name]
            is_array = (not isinstance(desc_outer, dt.Stream))
            if is_array:
                if (init_size == 0):
                    # Read directly at the pipeline parameters this field maps to.
                    field_index = [s for (s, p) in zip(parameters, mapping) if p]
                else:
                    # Field lags behind the pipeline; maintain dedicated iterators.
                    num_dims = sum(mapping, 0)
                    field_iterators = [(f'_{field_name}_i{i}', shape[i]) for i in range(num_dims) if mapping[i]]
                    start_index = (init_size_max - init_size)
                    tab = ''
                    if (start_index > 0):
                        iterator_code += f'''if {pipeline.iterator_str()} >= {start_index}:
'''
                        tab += ' '
                    # Emit nested carry-propagation code for the field iterators.
                    # NOTE(review): the indentation embedded in these generated
                    # snippets may have been mangled by extraction -- confirm.
                    for (i, (it, s)) in enumerate(reversed(field_iterators)):
                        iterator_code += f'''{tab}if {it} < {s} - 1:
{tab} {it} = {it} + 1
{tab}else:
{tab} {it} = 0
'''
                        tab += ' '
                    field_index = [fi[0] for fi in field_iterators]
                    for fi in field_index:
                        pipeline.additional_iterators[fi] = '0'
                        nested_sdfg.add_symbol(fi, dace.int64)
                        nested_sdfg_tasklet.symbol_mapping[fi] = fi
                field_index = ', '.join(field_index)
            else:
                field_index = '0'
            # Conditional wavefront read: only while this field is streaming.
            begin_reading = (init_size_max - init_size)
            total_size = functools.reduce(operator.mul, shape_vectorized, 1)
            end_reading = ((total_size + init_size_max) - init_size)
            read_node_outer = state.add_read(data_name_outer)
            if ((begin_reading != 0) or (end_reading != (total_size + init_size_max))):
                sdfg.add_scalar(f'{field_name}_wavefront', desc_outer.dtype, storage=dace.StorageType.FPGA_Local, transient=True)
                wavefront_access = state.add_access(f'{field_name}_wavefront')
                condition = []
                it = pipeline.iterator_str()
                if (begin_reading != 0):
                    condition.append(f'{it} >= {begin_reading}')
                if (end_reading != (total_size + init_size_max)):
                    condition.append(f'{it} < {end_reading}')
                condition = ' and '.join(condition)
                update_tasklet = state.add_tasklet(f'read_{field_name}', {'wavefront_in'}, {'wavefront_out'}, f'''if {condition}:
wavefront_out = wavefront_in
''', language=dace.dtypes.Language.Python)
                state.add_memlet_path(read_node_outer, entry, update_tasklet, dst_conn='wavefront_in', memlet=dace.Memlet(f'{data_name_outer}[{field_index}]', dynamic=True))
                state.add_memlet_path(update_tasklet, wavefront_access, src_conn='wavefront_out', memlet=dace.Memlet(f'{field_name}_wavefront', dynamic=True))
                state.add_memlet_path(wavefront_access, nested_sdfg_tasklet, dst_conn=f'{field_name}_in', memlet=dace.Memlet(f'{field_name}_wavefront'))
            else:
                state.add_memlet_path(read_node_outer, entry, nested_sdfg_tasklet, dst_conn=f'{field_name}_in', memlet=dace.Memlet(f'{data_name_outer}[{field_index}]'))
            nested_sdfg.add_scalar(data_name_inner, desc_outer.dtype, storage=dace.StorageType.FPGA_Local, transient=False)
            # Shift-register buffer shared between the outer and nested SDFG.
            buffer_name_outer = f'{node.label}_{field_name}_buffer'
            buffer_name_inner_read = f'{field_name}_buffer_in'
            buffer_name_inner_write = f'{field_name}_buffer_out'
            field_dtype = parent_sdfg.data(data_name).dtype
            (_, desc_outer) = sdfg.add_array(buffer_name_outer, (size,), field_dtype.base_type, storage=dace.dtypes.StorageType.FPGA_Local, transient=True)
            read_node_outer = state.add_read(buffer_name_outer)
            write_node_outer = state.add_write(buffer_name_outer)
            state.add_memlet_path(read_node_outer, entry, nested_sdfg_tasklet, dst_conn=buffer_name_inner_read, memlet=dace.Memlet(f'{buffer_name_outer}[0:{size}]'))
            state.add_memlet_path(nested_sdfg_tasklet, exit, write_node_outer, src_conn=buffer_name_inner_write, memlet=dace.Memlet(f'{write_node_outer.data}[0:{size}]', dynamic=True))
            desc_inner_read = desc_outer.clone()
            desc_inner_read.transient = False
            desc_inner_read.name = buffer_name_inner_read
            desc_inner_write = desc_inner_read.clone()
            desc_inner_write.name = buffer_name_inner_write
            nested_sdfg.add_datadesc(buffer_name_inner_read, desc_inner_read)
            nested_sdfg.add_datadesc(buffer_name_inner_write, desc_inner_write)
            # Shift the window forward by one vector each iteration.
            if (size > 1):
                shift_read = shift_state.add_read(buffer_name_inner_read)
                shift_write = shift_state.add_write(buffer_name_inner_write)
                (shift_entry, shift_exit) = shift_state.add_map(f'shift_{field_name}', {'i_shift': f'0:{size} - {vector_lengths[field_name]}'}, schedule=dace.dtypes.ScheduleType.FPGA_Device, unroll=True)
                shift_tasklet = shift_state.add_tasklet(f'shift_{field_name}', {f'{field_name}_shift_in'}, {f'{field_name}_shift_out'}, f'{field_name}_shift_out = {field_name}_shift_in')
                shift_state.add_memlet_path(shift_read, shift_entry, shift_tasklet, dst_conn=(field_name + '_shift_in'), memlet=dace.Memlet(f'{shift_read.data}[i_shift + {vector_lengths[field_name]}]'))
                shift_state.add_memlet_path(shift_tasklet, shift_exit, shift_write, src_conn=(field_name + '_shift_out'), memlet=dace.Memlet(f'{shift_write.data}[i_shift]'))
            # Insert the freshly read wavefront at the end of the buffer.
            update_read = update_state.add_read(data_name_inner)
            update_write = update_state.add_write(buffer_name_inner_write)
            subset = (f'{size} - {vector_length}:{size}' if (size > 1) else '0')
            update_state.add_memlet_path(update_read, update_write, memlet=dace.Memlet(f'{update_read.data}', other_subset=f'{subset}'))
            # Feed each relative access of this field into the compute tasklet.
            compute_read = compute_state.add_read(buffer_name_inner_read)
            for (relative, offset) in zip(buffer_accesses[field_name][0], buffer_accesses[field_name][1]):
                memlet_name = field_accesses[field_name][tuple(relative)]
                if (vector_length > 1):
                    if (vector_lengths[field_name] > 1):
                        offset = f'{offset} + i_unroll'
                    else:
                        offset = str(offset)
                    path = [compute_read, compute_unroll_entry, compute_tasklet]
                else:
                    offset = str(offset)
                    path = [compute_read, compute_tasklet]
                compute_state.add_memlet_path(*path, dst_conn=('_' + memlet_name), memlet=dace.Memlet(f'{compute_read.data}[{offset}]'))
        # Dedicated tasklet to advance the lagging field iterators, if any.
        if iterator_code:
            update_iterator_tasklet = state.add_tasklet(f'{node.label}_update_iterators', {}, {}, iterator_code)
            state.add_memlet_path(nested_sdfg_tasklet, update_iterator_tasklet, memlet=dace.Memlet())
            state.add_memlet_path(update_iterator_tasklet, exit, memlet=dace.Memlet())
        # --- Output plumbing: vector buffer, conditional write, external write. ---
        for field_name in outputs:
            for offset in field_accesses[field_name]:
                if ((offset is not None) and (list(offset) != ([0] * len(offset)))):
                    raise NotImplementedError('Output offsets not implemented')
            data_name = field_to_data[field_name]
            data_name_outer = field_name
            data_name_inner = (field_name + '_out')
            desc_outer = parent_sdfg.arrays[data_name].clone()
            desc_outer.transient = False
            array_index = ', '.join(map(str, parameters))
            try:
                sdfg.add_datadesc(data_name_outer, desc_outer)
            except NameError:
                # Descriptor already present (e.g. the field is also an input).
                pass
            nested_sdfg.add_scalar(data_name_inner, desc_outer.dtype, storage=dace.StorageType.FPGA_Local, transient=False)
            write_node_inner = compute_state.add_write(data_name_inner)
            output_buffer_name = (field_name + '_output_buffer')
            nested_sdfg.add_array(output_buffer_name, (vector_length,), desc_outer.dtype.base_type, storage=dace.StorageType.FPGA_Registers, transient=True)
            output_buffer = compute_state.add_access(output_buffer_name)
            if (vector_length > 1):
                compute_state.add_memlet_path(compute_tasklet, compute_unroll_exit, output_buffer, src_conn=(field_name + '_inner_out'), memlet=dace.Memlet(f'{output_buffer_name}[i_unroll]'))
            else:
                (compute_state.add_memlet_path(compute_tasklet, output_buffer, src_conn=(field_name + '_inner_out'), memlet=dace.Memlet(f'{output_buffer_name}[0]')),)
            (compute_state.add_memlet_path(output_buffer, write_node_inner, memlet=dace.Memlet(f'{write_node_inner.data}')),)
            sdfg.add_scalar(f'{field_name}_result', desc_outer.dtype, storage=dace.StorageType.FPGA_Local, transient=True)
            output_access = state.add_access(f'{field_name}_result')
            state.add_memlet_path(nested_sdfg_tasklet, output_access, src_conn=data_name_inner, memlet=dace.Memlet(f'{field_name}_result'))
            # Only write past warm-up and inside the domain (write_cond guard).
            output_tasklet = state.add_tasklet(f'{field_name}_conditional_write', {f'_{field_name}_result'}, {f'_{data_name_inner}'}, (write_cond + f'_{data_name_inner} = _{field_name}_result'))
            state.add_memlet_path(output_access, output_tasklet, dst_conn=f'_{field_name}_result', memlet=dace.Memlet(f'{field_name}_result'))
            write_node_outer = state.add_write(data_name_outer)
            if isinstance(desc_outer, dt.Stream):
                subset = '0'
            else:
                subset = array_index
            (state.add_memlet_path(output_tasklet, exit, write_node_outer, src_conn=f'_{data_name_inner}', memlet=dace.Memlet(f'{write_node_outer.data}[{subset}]', dynamic=True)),)
        return sdfg
def _wsgi_test(case: Case, checks: Iterable[CheckFunction], targets: Iterable[Target], result: TestResult, headers: dict[(str, Any)], store_interactions: bool, feedback: Feedback, max_response_time: (int | None)) -> WSGIResponse:
    """Execute a single WSGI test case: call the app, run targets and checks,
    and record logs/interactions on the test result.

    NOTE(review): the original indentation was lost; assuming the whole body
    runs inside the log-capture context -- confirm against upstream.
    """
    with catching_logs(LogCaptureHandler(), level=logging.DEBUG) as recorded:
        start = time.monotonic()
        hook_context = HookContext(operation=case.operation)
        kwargs = {'headers': headers}
        # Let user hooks mutate the call kwargs before the request is made.
        hooks.dispatch('process_call_kwargs', hook_context, case, kwargs)
        response = case.call_wsgi(**kwargs)
        elapsed = (time.monotonic() - start)
        context = TargetContext(case=case, response=response, response_time=elapsed)
        run_targets(targets, context)
        # Attach all logs captured during the call to the test result.
        result.logs.extend(recorded.records)
        status = Status.success
        check_results: list[Check] = []
        try:
            run_checks(case=case, checks=checks, check_results=check_results, result=result, response=response, elapsed_time=(context.response_time * 1000), max_response_time=max_response_time)
        except CheckFailed:
            status = Status.failure
            raise
        finally:
            # Record the case/response even when checks fail.
            feedback.add_test_case(case, response)
            if store_interactions:
                result.store_wsgi_response(case, response, headers, elapsed, status, check_results)
        return response
class _MemoryEfficientFP16OptimizerMixin(object):
    """Mixin adding memory-efficient FP16 support to a wrapped optimizer.

    Instead of materializing unscaled gradients, a pending scalar
    (``_multiply_factor``) accumulates loss-(un)scaling and clipping factors
    and is folded into the gradients exactly once, right before the step.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Deferred multiplier applied to grads at the next unscale/step.
        self._multiply_factor = 1.0

    def has_flat_params(self):
        # Parameters stay in their original (unflattened) tensors.
        return False

    def state_dict(self):
        """Return the wrapped optimizer's state dict plus the current loss scale."""
        state_dict = self.wrapped_optimizer.state_dict()
        if (self.scaler is not None):
            state_dict['loss_scale'] = self.scaler.loss_scale
        return state_dict

    def load_state_dict(self, state_dict, optimizer_overrides=None):
        """Load an optimizer state dict, restoring the loss scale if present
        and remapping saved per-parameter state onto the current params."""
        if (('loss_scale' in state_dict) and (self.scaler is not None)):
            self.scaler.loss_scale = state_dict['loss_scale']
        self.wrapped_optimizer.load_state_dict(state_dict, optimizer_overrides)
        # Workaround: remap saved state onto current parameters by position,
        # unless the optimizer explicitly opts out of this hack.
        if (not getattr(self.optimizer, 'disable_mem_eff_fp16_loading_hack', False)):
            groups = self.optimizer.param_groups
            saved_groups = state_dict['param_groups']
            # Saved param id -> live parameter, matched positionally per group.
            id_map = {old_id: p for (old_id, p) in zip(chain(*(g['params'] for g in saved_groups)), chain(*(g['params'] for g in groups)))}
            for (k, v) in state_dict['state'].items():
                if (k in id_map):
                    param = id_map[k]
                    self.optimizer.state[param] = v

    def backward(self, loss):
        """Scale the loss (guards FP16 gradient underflow) and backpropagate."""
        if (self.scaler is not None):
            loss = self.scaler.scale(loss)
        loss.backward()

    def _unscale_grads(self):
        # Apply the deferred multiplier to the gradients exactly once.
        if (torch.is_tensor(self._multiply_factor) or (self._multiply_factor != 1.0)):
            self.wrapped_optimizer.multiply_grads(self._multiply_factor)
            self._multiply_factor = 1.0

    def multiply_grads(self, c):
        """Multiply grads by ``c`` (deferred until the next unscale/step)."""
        self._multiply_factor *= c

    def clip_grad_norm(self, max_norm, aggregate_norm_fn=None):
        """Clip gradient norm; also feeds the loss scaler's overflow detection.

        Returns:
            The (unscaled) gradient norm before clipping.
        """
        max_norm = float(max_norm)
        # Norm of the *unscaled* grads: fold the pending factor in analytically
        # rather than touching every gradient tensor.
        grad_norm = (self._multiply_factor * self.wrapped_optimizer.clip_grad_norm(0, aggregate_norm_fn))
        if (self.scaler is not None):
            grad_norm_cpu = float(grad_norm)
            if (grad_norm_cpu > max_norm > 0.0):
                self._multiply_factor *= (max_norm / grad_norm_cpu)
            # Detect inf/nan and let the scaler adjust the loss scale.
            self.scaler.check_overflow(grad_norm_cpu)
        elif (max_norm > 0.0):
            clip_coef = (max_norm / (grad_norm + 1e-06)).clamp_(max=1)
            self._multiply_factor *= clip_coef
        return grad_norm

    def step(self, closure=None, groups=None):
        """Perform an optimization step, unscaling gradients first if needed."""
        if getattr(self, 'supports_step_with_scale', False):
            # Optimizer applies the inverse scale itself during the step.
            self.wrapped_optimizer.step(closure, scale=(1.0 / self._multiply_factor), groups=groups)
        else:
            self._unscale_grads()
            self.wrapped_optimizer.step(closure, groups=groups)
        if (self.scaler is not None):
            self.scaler.update()

    def zero_grad(self):
        """Clear gradients and reset the deferred multiplier from the loss scale."""
        self.wrapped_optimizer.zero_grad()
        if (self.scaler is not None):
            self._multiply_factor = (1.0 / float(self.scaler.loss_scale))
        else:
            self._multiply_factor = 1.0
def gaussian_measure_2d_full(mean, cov, f):
    """Numerically compute E[f(Y)] for Y ~ N(mean, cov) in two dimensions.

    The integral is evaluated in whitened coordinates, Y = L x + mean with L
    the Cholesky factor of ``cov`` and x standard normal, over [-10, 10]^2
    (about +/- 10 standard deviations in each whitened direction).

    Args:
        mean: length-2 mean vector of the Gaussian.
        cov: 2x2 covariance matrix.
        f: callable f(y1, y2) to average under the Gaussian measure.
    """
    if not is_pos_def(cov):
        # logger.warn is deprecated; warning() is the supported spelling.
        logger.warning(f'cov={cov} not positive definite')
    L = cholesky(cov)

    def integrand(x2, x1):
        # BUG FIX: the change of variables is the matrix-vector product
        # L @ [x1, x2]; the original indexed L (L[x1, x2]) instead of
        # multiplying, which is wrong for any non-integer sample point.
        (y1, y2) = (L @ np.asarray([x1, x2])) + mean
        return (norm_pdf(x1) * norm_pdf(x2)) * f(y1, y2)

    # dblquad integrates integrand(y, x) over the inner then outer variable.
    return dblquad(integrand, -10, 10, -10, 10)[0]
('/get_gas_limits/<lastN>', methods=('GET',))
def get_gas_limits(lastN):
    """Return a JSON mapping of block number -> gasLimit for the last N blocks.

    Args:
        lastN: number of most recent blocks to report (from the URL path).
    """
    web3 = connect_to_geth(app.web3_url, app.consensus)
    # Consistency fix: use the modern snake_case web3 API throughout (the
    # original mixed the deprecated `getBlock` with `get_block`).
    latest = web3.eth.get_block('latest').number
    # Never start below block 1, even when lastN exceeds the chain height.
    start = max(latest - int(lastN) + 1, 1)
    gas_limits = {}
    for block_number in range(start, latest + 1):
        block = web3.eth.get_block(block_number)
        gas_limits[block_number] = block['gasLimit']
    resp = Response(json.dumps(gas_limits, cls=HexJsonEncoder, indent=5))
    resp.headers['Content-Type'] = 'application/json'
    return resp
_pooler('average_concat_last_k')
class AverageConcatLastN(nn.Module):
    """Pools a sequence by concatenating the last k encoder layers along the
    feature dimension and mean-pooling over the non-padded positions."""

    def __init__(self, k=4, tol=1e-06):
        super().__init__()
        self.num_layers = k
        # Small constant guarding against division by zero on all-pad rows.
        self.tol = tol

    def forward(self, encoded_layers: List[torch.Tensor], pad_mask: torch.Tensor):
        assert self.num_layers <= len(encoded_layers), 'k should be less than the number of encoder layers'
        # Concatenate the last k layers along the feature dimension.
        last_k = encoded_layers[-self.num_layers:]
        concatenated = torch.cat(last_k, 2)
        # Zero out padded positions before averaging over the time dimension.
        mask = pad_mask.unsqueeze(2)
        masked = concatenated * mask.float()
        token_counts = torch.sum(mask, 1).float() + self.tol
        return torch.sum(masked, 1) / token_counts
class Spinner(Infinite):
    """Indeterminate progress indicator cycling through spinner glyphs."""
    phases = ('-', '\\', '|', '/')
    hide_cursor = True

    def update(self):
        # Pick the glyph for the current iteration, wrapping around.
        frame = self.index % len(self.phases)
        self.write(self.phases[frame])
class RobertaPreLayerNormForMaskedLM(metaclass=DummyObject):
    """Placeholder for the real model class when its backend is unavailable."""
    # Backends the real implementation requires.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Presumably raises a helpful error when torch is not installed --
        # see transformers' requires_backends utility.
        requires_backends(self, ['torch'])
class RandomCycler():
    """Draws items from a collection in a randomized order while guaranteeing
    that every item appears within any window of consecutive draws spanning
    the collection size."""

    def __init__(self, source):
        if len(source) == 0:
            raise Exception("Can't create RandomCycler from an empty collection")
        self.all_items = list(source)
        # Remainder of the current shuffled pass, consumed across calls.
        self.next_items = []

    def sample(self, count: int):
        """Return ``count`` items, drawn pass-by-pass from shuffled copies."""
        def shuffled(items):
            return random.sample(items, len(items))

        out = []
        while count > 0:
            if count >= len(self.all_items):
                # Whole-collection request: append one full shuffled pass.
                out.extend(shuffled(list(self.all_items)))
                count -= len(self.all_items)
                continue
            # Partial request: consume from the current pass, refilling when empty.
            take = min(count, len(self.next_items))
            out.extend(self.next_items[:take])
            count -= take
            self.next_items = self.next_items[take:]
            if len(self.next_items) == 0:
                self.next_items = shuffled(list(self.all_items))
        return out

    def __next__(self):
        return self.sample(1)[0]
class HashError(InstallationError):
    """Base class for hash-checking errors.

    Subclasses override ``head`` (the leading message) and ``order`` (sort
    position when several hash errors are reported together)."""
    req = None
    head = ''
    order = None

    def body(self):
        """Return the indented line naming the offending requirement."""
        return ' {}'.format(self._requirement_name())

    def __str__(self):
        return '{}\n{}'.format(self.head, self.body())

    def _requirement_name(self):
        """Name of the requirement this error concerns, or a placeholder."""
        if self.req:
            return str(self.req)
        return 'unknown package'
()
('--seed', default=1)
('--max_path_length', default=150)
('--meta_batch_size', default=10)
('--n_epochs', default=10)
('--episode_per_task', default=10)
_experiment
def rl2_ppo_metaworld_ml10(ctxt, seed, max_path_length, meta_batch_size, n_epochs, episode_per_task):
    """Train RL2-PPO on the MetaWorld ML10 meta-learning benchmark.

    NOTE(review): the click/experiment decorators for this entry point appear
    above with their '@' prefixes stripped by extraction.

    Args:
        ctxt: experiment snapshot configuration (passed to LocalTFRunner).
        seed: random seed for reproducibility.
        max_path_length: maximum length of a single rollout.
        meta_batch_size: number of tasks sampled per meta-batch.
        n_epochs: number of training epochs.
        episode_per_task: rollouts collected per task per epoch.
    """
    set_seed(seed)
    with LocalTFRunner(snapshot_config=ctxt) as runner:
        # Wrap every ML10 training task in an RL2-compatible environment.
        ml10_train_envs = [RL2Env(mwb.ML10.from_task(task_name)) for task_name in mwb.ML10.get_train_tasks().all_task_names]
        tasks = task_sampler.EnvPoolSampler(ml10_train_envs)
        tasks.grow_pool(meta_batch_size)
        env_spec = ml10_train_envs[0].spec
        # Recurrent policy: RL2 feeds back trajectories through the GRU state.
        policy = GaussianGRUPolicy(name='policy', hidden_dim=64, env_spec=env_spec, state_include_action=False)
        baseline = LinearFeatureBaseline(env_spec=env_spec)
        algo = RL2PPO(rl2_max_path_length=max_path_length, meta_batch_size=meta_batch_size, task_sampler=tasks, env_spec=env_spec, policy=policy, baseline=baseline, discount=0.99, gae_lambda=0.95, lr_clip_range=0.2, optimizer_args=dict(batch_size=32, max_epochs=10), stop_entropy_gradient=True, entropy_method='max', policy_ent_coeff=0.02, center_adv=False, max_path_length=(max_path_length * episode_per_task))
        runner.setup(algo, tasks.sample(meta_batch_size), sampler_cls=LocalSampler, n_workers=meta_batch_size, worker_class=RL2Worker, worker_args=dict(n_paths_per_trial=episode_per_task))
        runner.train(n_epochs=n_epochs, batch_size=((episode_per_task * max_path_length) * meta_batch_size))
def _seg_68():
    """Auto-generated Unicode mapping fragment (appears to be idna's uts46data).

    Each tuple is (codepoint, status[, mapping]); 'M' = mapped, 'X' = disallowed.
    NOTE(review): the mapped targets for 120700-120778 show as u'' — possibly
    non-BMP characters lost during extraction; confirm against upstream.
    Do not edit by hand.
    """
    return [(120700, 'M', u''), (120701, 'M', u''), (120702, 'M', u''), (120703, 'M', u''), (120704, 'M', u''), (120705, 'M', u''), (120707, 'M', u''), (120708, 'M', u''), (120709, 'M', u''), (120710, 'M', u''), (120711, 'M', u''), (120712, 'M', u''), (120713, 'M', u''), (120714, 'M', u''), (120715, 'M', u''), (120716, 'M', u''), (120717, 'M', u''), (120718, 'M', u''), (120719, 'M', u''), (120720, 'M', u''), (120721, 'M', u''), (120722, 'M', u''), (120723, 'M', u''), (120724, 'M', u''), (120725, 'M', u''), (120726, 'M', u''), (120727, 'M', u''), (120728, 'M', u''), (120729, 'M', u''), (120730, 'M', u''), (120731, 'M', u''), (120732, 'M', u''), (120733, 'M', u''), (120734, 'M', u''), (120735, 'M', u''), (120736, 'M', u''), (120737, 'M', u''), (120738, 'M', u''), (120739, 'M', u''), (120740, 'M', u''), (120741, 'M', u''), (120742, 'M', u''), (120743, 'M', u''), (120744, 'M', u''), (120745, 'M', u''), (120746, 'M', u''), (120747, 'M', u''), (120748, 'M', u''), (120749, 'M', u''), (120750, 'M', u''), (120751, 'M', u''), (120752, 'M', u''), (120753, 'M', u''), (120754, 'M', u''), (120755, 'M', u''), (120756, 'M', u''), (120757, 'M', u''), (120758, 'M', u''), (120759, 'M', u''), (120760, 'M', u''), (120761, 'M', u''), (120762, 'M', u''), (120763, 'M', u''), (120765, 'M', u''), (120766, 'M', u''), (120767, 'M', u''), (120768, 'M', u''), (120769, 'M', u''), (120770, 'M', u''), (120771, 'M', u''), (120772, 'M', u''), (120773, 'M', u''), (120774, 'M', u''), (120775, 'M', u''), (120776, 'M', u''), (120777, 'M', u''), (120778, 'M', u''), (120780, 'X'), (120782, 'M', u'0'), (120783, 'M', u'1'), (120784, 'M', u'2'), (120785, 'M', u'3'), (120786, 'M', u'4'), (120787, 'M', u'5'), (120788, 'M', u'6'), (120789, 'M', u'7'), (120790, 'M', u'8'), (120791, 'M', u'9'), (120792, 'M', u'0'), (120793, 'M', u'1'), (120794, 'M', u'2'), (120795, 'M', u'3'), (120796, 'M', u'4'), (120797, 'M', u'5'), (120798, 'M', u'6'), (120799, 'M', u'7'), (120800, 'M', u'8'), (120801, 'M', u'9'), (120802, 'M', 
u'0'), (120803, 'M', u'1')]
class WhisperModel(nn.Module):
    """Whisper encoder with a linear head for intent classification.

    The pretrained encoder is kept fully trainable; its frame-level outputs
    are mean-pooled over time and projected to ``n_class`` logits.
    """

    def __init__(self, model_type='small.en', n_class=14):
        super().__init__()
        self.encoder = whisper.load_model(model_type).encoder
        # Full fine-tuning: every encoder weight receives gradients.
        for weight in self.encoder.parameters():
            weight.requires_grad = True
        # 768 matches the small.en encoder width — TODO confirm for other sizes.
        feature_dim = 768
        self.intent_classifier = nn.Sequential(nn.Linear(feature_dim, n_class))

    def forward(self, x):
        encoded = self.encoder(x)
        # Mean-pool across the time axis before classification.
        pooled = encoded.mean(dim=1)
        return self.intent_classifier(pooled)
def get_combinations(list1, list2):
    """Pair every len(list2)-permutation of list1 with list2, element-wise.

    Returns a list with one entry per permutation; each entry is the list of
    (item_from_list1, item_from_list2) pairs in order.
    """
    pair_count = len(list2)
    combos = []
    for ordering in itertools.permutations(list1, pair_count):
        combos.append(list(zip(ordering, list2)))
    return combos
def open_all_layers(model):
    """Put the model in train mode and unfreeze every parameter."""
    model.train()
    for param in model.parameters():
        param.requires_grad_(True)
def nested_map_for_loop_2(B: dace.int64[(10, 10)]):
    """DaCe program: A[i, j] = 2*B[i, j] + 10*i + j over a 10x10 int64 grid.

    The outer ``dace.map`` makes the i-loop a parallel map scope; the inner
    ``range`` loop over j stays sequential within each map iteration.
    """
    A = np.ndarray([10, 10], dtype=np.int64)
    for i in dace.map[0:10]:
        for j in range(10):
            A[(i, j)] = (((2 * B[(i, j)]) + (i * 10)) + j)
    return A
def mean_IoU(overall_h):
    """Per-class IoU and its nan-ignoring mean from a confusion matrix.

    :param overall_h: square confusion matrix (ndarray), rows = ground truth.
    :return: (per-class IoU vector, scalar mean ignoring NaNs)
    """
    intersection = np.diag(overall_h)
    # union = row total + column total - intersection (counted twice otherwise)
    union = overall_h.sum(axis=1) + overall_h.sum(axis=0) - intersection
    iu = intersection / union
    return (iu, np.nanmean(iu))
# NOTE(review): the `.parametrize(...)` lines below look like
# `@pytest.mark.parametrize` decorators with the `@pytest.mark` prefix
# lost in extraction — confirm against the nnabla test suite.
.parametrize('ctx, func_name', ctxs)
.parametrize('seed', [313])
.parametrize('val', [0.5, 1, 2])
def test_mul_scalar_forward_backward(seed, val, ctx, func_name):
    """Forward/backward check of F.mul_scalar against the reference x * val."""
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    inputs = [(rng.randn(2, 3, 4).astype(np.float32) * 2)]
    function_tester(rng, F.mul_scalar, (lambda x, y: (x * y)), inputs, func_args=[val], ctx=ctx, func_name=func_name)
# NOTE(review): the `.parametrize(...)` lines below look like
# `@pytest.mark.parametrize` decorators with the `@pytest.mark` prefix
# lost in extraction — confirm against the nnabla test suite.
.parametrize('ctx, func_name', ctxs)
.parametrize('seed', [313])
def test_tan_double_backward(seed, ctx, func_name):
    """Second-order gradient check for F.tan on inputs away from the poles."""
    from nbla_test_utils import cap_ignore_region, backward_function_tester
    rng = np.random.RandomState(seed)
    # Clip to (-pi/2, pi/2) then scale by 0.1 to stay well inside tan's domain.
    inputs = [(np.clip(rng.randn(2, 3, 4).astype(np.float32), ((- np.pi) / 2), (np.pi / 2)) * 0.1)]
    backward_function_tester(rng, F.tan, inputs=inputs, func_args=[], func_kwargs={}, atol_accum=0.01, dstep=0.001, ctx=ctx)
def find_files(top_directory, exclude=None, include_top_directory_in_name=True):
    """Recursively list files under ``top_directory``.

    :param top_directory: root to walk; ``~`` is expanded and the path made absolute.
    :param exclude: iterable of regex strings; any directory or file whose path
        relative to ``top_directory`` matches is skipped (matched dirs are pruned
        from the walk entirely).
    :param include_top_directory_in_name: when True, names are relative to the
        parent of ``top_directory`` (so they start with its basename); otherwise
        relative to ``top_directory`` itself.
    :return: list of ``[absolute_path, relative_name]`` pairs.
    """
    import os
    import re
    # BUG FIX: the original used a mutable default ([]) and, worse, removed
    # entries from `dirs`/`files` while iterating over them, which silently
    # skips the element following each removal. It also matched exclusions
    # against os.path.relpath(basename, top_directory), which resolves the bare
    # basename against the CWD; we match against the true path relative to top.
    compiled = [re.compile(pattern) for pattern in (exclude or [])]
    paths_and_names = []
    top_directory = os.path.abspath(os.path.expanduser(top_directory))
    parent_directory = os.path.dirname(top_directory)

    def _excluded(root, entry):
        # Path of this entry relative to the walk root, tested by every pattern.
        rel = os.path.relpath(os.path.join(root, entry), top_directory)
        return any(pattern.search(rel) for pattern in compiled)

    for (root, dirs, files) in os.walk(top_directory, topdown=True):
        dirs.sort(key=str.lower)
        files.sort(key=str.lower)
        # Slice-assign so os.walk sees the pruned list (topdown pruning contract).
        dirs[:] = [d for d in dirs if not _excluded(root, d)]
        files = [f for f in files if not _excluded(root, f)]
        for f in files:
            path = os.path.join(root, f)
            if include_top_directory_in_name:
                name = os.path.relpath(path, parent_directory)
            else:
                name = os.path.relpath(path, top_directory)
            paths_and_names.append([path, name])
    return paths_and_names
def main(_):
    """Entry point: create output directories, then train or test a model.

    Reads the module-level ``args`` namespace (checkpoint/sample/test dirs,
    ``type`` selecting cyclegan vs. classifier, ``phase`` selecting train vs.
    test) and runs the chosen model inside a single TF session.
    """
    if (not os.path.exists(args.checkpoint_dir)):
        os.makedirs(args.checkpoint_dir)
    if (not os.path.exists(args.sample_dir)):
        os.makedirs(args.sample_dir)
    if (not os.path.exists(args.test_dir)):
        os.makedirs(args.test_dir)
    # allow_soft_placement: fall back to CPU for ops without a GPU kernel;
    # allow_growth: grab GPU memory on demand instead of all upfront.
    tfconfig = tf.ConfigProto(allow_soft_placement=True)
    tfconfig.gpu_options.allow_growth = True
    with tf.Session(config=tfconfig) as sess:
        if (args.type == 'cyclegan'):
            model = cyclegan(sess, args)
            (model.train(args) if (args.phase == 'train') else model.test(args))
        if (args.type == 'classifier'):
            classifier = Classifer(sess, args)
            (classifier.train(args) if (args.phase == 'train') else classifier.test(args))
def seqtemplate1(seq_label):
    """Build a (question, answers) pair for binary causality classification.

    ``seq_label`` is truthy-as-int: 1 maps to 'Causal', anything else to
    'Non-causal'.
    """
    question = 'Is this sentence Causal or Non-causal?'
    if int(seq_label) == 1:
        label_text = 'Causal'
    else:
        label_text = 'Non-causal'
    return (question, {'text': [label_text]})
def cross_entropy(output, target):
    """Binary cross-entropy on raw logits; casts target to float for the loss."""
    target_float = target.float()
    return F.binary_cross_entropy_with_logits(input=output, target=target_float)
class IStat():
    """Abstract incremental statistic over a stream of values.

    Tracks a current ``value`` and a sample count ``n``; concrete subclasses
    implement ``add``/``drop``. BUG FIX: the original source contained
    duplicated bare ``def n``/``def value`` getter/setter pairs — evidently
    ``@property``/``@x.setter`` decorators were stripped, leaving the getters
    as dead code and the setter validation unreachable. The decorators are
    restored so assignments route through validation.
    """

    def __init__(self, value: float=None, n: int=0):
        # A non-empty statistic must carry an actual value.
        if (n > 0):
            assert (value is not None)
        self.value = value
        self.n = n

    @property
    def n(self):
        """Number of samples currently folded into the statistic."""
        return self._n

    @n.setter
    def n(self, n: int):
        assert (n >= 0)
        self._n = n

    @property
    def value(self):
        """Current value of the statistic (None when empty)."""
        return self._value

    @value.setter
    def value(self, value: float):
        self._value = value

    def add(self, x):
        """Fold one observation into the statistic; subclasses implement."""
        raise NotImplementedError

    def drop(self, x):
        """Remove one observation from the statistic; subclasses implement."""
        raise NotImplementedError

    def add_batch(self, batch: List[float]):
        """Add every observation in ``batch`` in order."""
        for x in batch:
            self.add(x)

    def drop_batch(self, batch: List[float]):
        """Drop every observation in ``batch`` in order."""
        for x in batch:
            self.drop(x)
class VessNN(nn.Module):
    """3D CNN producing 2-class logits from a volumetric input.

    Four Conv3d stages followed by dropout and one fully connected layer.
    The FC input size (100 * 5 * 62 * 62) is hard-wired, so the network only
    works for one specific input volume shape — NOTE(review): confirm the
    expected input dimensions against the data loader.
    """
    def __init__(self):
        super(VessNN, self).__init__()
        self.conv1 = nn.Sequential(nn.Conv3d(1, 24, (2, 3, 3)), nn.ReLU(), nn.Conv3d(24, 24, (2, 3, 3)), nn.ReLU(), nn.Conv3d(24, 24, (2, 3, 3)), nn.Tanh(), nn.MaxPool3d((1, 2, 2), stride=(1, 1, 1)))
        self.conv2 = nn.Sequential(nn.Conv3d(24, 36, (1, 3, 3)), nn.ReLU(), nn.Conv3d(36, 36, (1, 3, 3)), nn.ReLU(), nn.MaxPool3d((2, 2, 2), stride=(1, 1, 1)))
        self.conv3 = nn.Sequential(nn.Conv3d(36, 48, (2, 3, 3), padding=(1, 0, 0)), nn.ReLU(), nn.Conv3d(48, 48, (2, 3, 3), padding=(1, 0, 0)), nn.Tanh(), nn.MaxPool3d((2, 2, 2), stride=(1, 1, 1)))
        self.conv4 = nn.Sequential(nn.Conv3d(48, 60, (2, 3, 3), padding=(1, 0, 0)), nn.ReLU(), nn.Conv3d(60, 60, (2, 3, 3), padding=(1, 0, 0)), nn.ReLU(), nn.Conv3d(60, 100, (2, 3, 3), padding=(1, 0, 0)), nn.ReLU())
        self.drop = nn.Dropout(0.5)
        self.fc = nn.Linear((((100 * 5) * 62) * 62), 2)
    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = self.drop(x)
        # Flatten to the hard-coded feature count expected by the FC layer.
        x = x.reshape(x.shape[0], (((100 * 5) * 62) * 62))
        x = self.fc(x)
        # Reshaped to (N, 2, 1, 1, 1) — presumably to match a volumetric loss
        # or downstream 5D consumer; confirm against the caller.
        x = x.reshape(x.shape[0], 2, 1, 1, 1)
        return x
# NOTE(review): `_seed` and `.parametrize(...)` below look like `@random_seed`
# and `@pytest.mark.parametrize` decorators whose prefixes were lost in
# extraction — confirm against the trieste test suite.
_seed
.parametrize('num_steps, acquisition_rule', [pytest.param(5, EfficientGlobalOptimization(), id='EfficientGlobalOptimization'), pytest.param(10, DiscreteThompsonSampling(1000, 1), id='DiscreteThompsonSampling'), pytest.param(5, DiscreteThompsonSampling(1000, 1, thompson_sampler=ThompsonSamplerFromTrajectory()), id='DiscreteThompsonSampling/ThompsonSamplerFromTrajectory')])
def test_bayesian_optimizer_with_deep_ensemble_finds_minima_of_simple_quadratic(num_steps: int, acquisition_rule: AcquisitionRule[(TensorType, SearchSpace, DeepEnsemble)]) -> None:
    """End-to-end check: BO with a DeepEnsemble model minimises a simple quadratic."""
    _test_optimizer_finds_minimum(DeepEnsemble, num_steps, acquisition_rule)
class config_fc(Command):
    """distutils command collecting Fortran 77/90 compiler options.

    ``finalize_options`` unifies the ``--fcompiler`` choice across the
    config/build_clib/build_ext/build commands so that they all agree on a
    single compiler type; ``run`` is a no-op (this command only records
    configuration).
    """
    description = 'specify Fortran 77/Fortran 90 compiler information'
    user_options = [('fcompiler=', None, 'specify Fortran compiler type'), ('f77exec=', None, 'specify F77 compiler command'), ('f90exec=', None, 'specify F90 compiler command'), ('f77flags=', None, 'specify F77 compiler flags'), ('f90flags=', None, 'specify F90 compiler flags'), ('opt=', None, 'specify optimization flags'), ('arch=', None, 'specify architecture specific optimization flags'), ('debug', 'g', 'compile with debugging information'), ('noopt', None, 'compile without optimization'), ('noarch', None, 'compile without arch-dependent optimization')]
    help_options = [('help-fcompiler', None, 'list available Fortran compilers', show_fortran_compilers)]
    boolean_options = ['debug', 'noopt', 'noarch']
    def initialize_options(self):
        # All options default to None so finalize_options can tell "unset"
        # apart from an explicit value.
        self.fcompiler = None
        self.f77exec = None
        self.f90exec = None
        self.f77flags = None
        self.f90flags = None
        self.opt = None
        self.arch = None
        self.debug = None
        self.noopt = None
        self.noarch = None
    def finalize_options(self):
        """Propagate one --fcompiler value to all related commands."""
        log.info('unifing config_fc, config, build_clib, build_ext, build commands --fcompiler options')
        build_clib = self.get_finalized_command('build_clib')
        build_ext = self.get_finalized_command('build_ext')
        config = self.get_finalized_command('config')
        build = self.get_finalized_command('build')
        cmd_list = [self, config, build_clib, build_ext, build]
        for a in ['fcompiler']:
            # Collect the distinct non-None values set across the commands.
            l = []
            for c in cmd_list:
                v = getattr(c, a)
                if (v is not None):
                    if (not isinstance(v, str)):
                        # A compiler instance may be stored; normalize to its type name.
                        v = v.compiler_type
                    if (v not in l):
                        l.append(v)
            if (not l):
                v1 = None
            else:
                v1 = l[0]
                if (len(l) > 1):
                    log.warn((' commands have different --%s options: %s, using first in list as default' % (a, l)))
            if v1:
                # Fill in the chosen value wherever it was left unset.
                for c in cmd_list:
                    if (getattr(c, a) is None):
                        setattr(c, a, v1)
    def run(self):
        # Intentionally a no-op: this command only gathers option values.
        return
def register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, cls):
    """Register constructors and methods of ns3::SimpleRefCount<OutputStreamWrapper>.

    Auto-generated pybindgen registration code — do not edit by hand.
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter< ns3::OutputStreamWrapper > > const &', 'o')])
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def nnb_template(args, ifiles, output):
    """Generate an NNB template from imported network files.

    Returns the template-generation result, or False when the import of
    ``ifiles`` fails (an error message is printed in that case).
    """
    nnp = _import_file(args, ifiles)
    if nnp is None:
        # Guard clause: nothing to convert when the import failed.
        print('Import from [{}] failed.'.format(ifiles))
        return False
    return _generate_nnb_template(args, nnp, output)
class BadArgumentUsage(UsageError):
    """Raised when an argument is supplied in an invalid format or context."""

    def __init__(self, message, ctx=None):
        super().__init__(message, ctx)
class CoNLLDataset(object):
    """Iterable over a CoNLL-format file yielding (words, tags) per sentence.

    Blank lines or '-DOCSTART-' lines delimit sentences; each data line is
    tab-separated with the word in the first field and the tag in the last.
    Optional callables preprocess each word/tag, and ``max_iter`` caps the
    number of sentences yielded.
    """
    def __init__(self, filename, processing_word=None, processing_tag=None, max_iter=None):
        self.filename = filename
        self.processing_word = processing_word
        self.processing_tag = processing_tag
        self.max_iter = max_iter
        # Cached sentence count, computed lazily by __len__.
        self.length = None
    def __iter__(self):
        niter = 0
        with open(self.filename) as f:
            (words, tags) = ([], [])
            for line in f:
                line = line.strip()
                # Sentence boundary: blank line or document-start marker.
                # NOTE: a final sentence not followed by such a line is never
                # yielded (the loop ends before the flush).
                if ((len(line) == 0) or line.startswith('-DOCSTART-')):
                    if (len(words) != 0):
                        niter += 1
                        if ((self.max_iter is not None) and (niter > self.max_iter)):
                            break
                        (yield (words, tags))
                        (words, tags) = ([], [])
                else:
                    ls = line.split('\t')
                    # Word is the first column, tag the last.
                    (word, tag) = (ls[0], ls[(- 1)])
                    if (self.processing_word is not None):
                        word = self.processing_word(word)
                    if (self.processing_tag is not None):
                        tag = self.processing_tag(tag)
                    words += [word]
                    tags += [tag]
    def __len__(self):
        """Number of sentences; iterates the file once and caches the result."""
        if (self.length is None):
            self.length = 0
            for _ in self:
                self.length += 1
        return self.length
# NOTE(review): `.box(NumpyType)` below looks like a numba `@box(NumpyType)`
# decorator whose prefix was lost in extraction — confirm against upstream.
.box(NumpyType)
def NumpyType_box(typ, val, c):
    """Numba boxing: convert a native NumpyType value into a Python object.

    Calls the Python-level ``Numpy._from_buffer`` with the native data field,
    decref'ing every temporary PyObject created along the way.
    """
    Numpy_obj = c.pyapi.unserialize(c.pyapi.serialize_object(Numpy))
    from_buffer_obj = c.pyapi.object_getattr_string(Numpy_obj, '_from_buffer')
    builder = numba.core.cgutils.create_struct_proxy(typ)(c.context, c.builder, value=val)
    data_obj = c.pyapi.from_native_value(typ.data, builder.data, c.env_manager)
    out = c.pyapi.call_function_objargs(from_buffer_obj, (data_obj,))
    c.pyapi.decref(Numpy_obj)
    c.pyapi.decref(from_buffer_obj)
    c.pyapi.decref(data_obj)
    return out
def df_to_time_series(df: pd.DataFrame, time_col: str=None, timestamp_unit='s', data_cols: Union[(str, List[str])]=None) -> "TimeSeries":
    """Convert a DataFrame into a TimeSeries indexed by a datetime column.

    :param df: source frame; if its index is not already a DatetimeIndex, the
        column ``time_col`` (default: the first column) is parsed as datetimes
        and made the index. NOTE: this overwrites the time column in the
        caller's frame in place.
    :param time_col: name of the timestamp column; must exist in ``df``.
    :param timestamp_unit: unit for numeric timestamps (ignored when the
        column has object dtype, which pandas parses directly).
    :param data_cols: optional column name or list of names to keep.
    :raises KeyError: if ``time_col`` or any of ``data_cols`` is missing.
    """
    if (not isinstance(df.index, pd.DatetimeIndex)):
        if (time_col is None):
            time_col = df.columns[0]
        elif (time_col not in df.columns):
            raise KeyError(f'Expected `time_col` to be in {df.columns}. Got {time_col}.')
        df[time_col] = pd.to_datetime(df[time_col], unit=(None if (df[time_col].dtype == 'O') else timestamp_unit))
        df = df.set_index(time_col)
    df = df.sort_index()
    if (data_cols is not None):
        data_cols = ([data_cols] if (not isinstance(data_cols, (list, tuple))) else data_cols)
        if (not all(((c in df.columns) for c in data_cols))):
            # BUG FIX: the original interpolated `df.colums` here, which raised
            # AttributeError instead of the intended KeyError message.
            raise KeyError(f'Expected each of `data_cols` to be in {df.columns}. Got {data_cols}.')
        df = df[data_cols]
    return TimeSeries.from_pd(df)
class VAE(nn.Module):
    """Variational autoencoder with optional conditioning input.

    When ``conditional`` is set (and ``condition_dim`` given), the condition
    vector ``c`` is concatenated to both the encoder input and the latent
    code before decoding. ``forward`` returns the per-sample reconstruction
    loss and KL divergence (plus the reconstruction itself when requested).
    """

    def __init__(self, in_dim, hidden_dim, latent_dim, conditional=False, condition_dim=None):
        super().__init__()
        self.latent_dim = latent_dim
        self.conditional = conditional
        use_condition = conditional and condition_dim is not None
        enc_in = in_dim + condition_dim if use_condition else in_dim
        dec_in = latent_dim + condition_dim if use_condition else latent_dim
        self.enc_MLP = nn.Sequential(nn.Linear(enc_in, hidden_dim), nn.ELU())
        self.linear_means = nn.Linear(hidden_dim, latent_dim)
        self.linear_log_var = nn.Linear(hidden_dim, latent_dim)
        self.dec_MLP = nn.Sequential(nn.Linear(dec_in, hidden_dim), nn.ELU(), nn.Linear(hidden_dim, in_dim))

    def forward(self, x, c=None, return_pred=False):
        if self.conditional and c is not None:
            enc_input = torch.cat((x, c), dim=(- 1))
        else:
            enc_input = x
        hidden = self.enc_MLP(enc_input)
        mean = self.linear_means(hidden)
        log_var = self.linear_log_var(hidden)
        z = self.reparameterize(mean, log_var)
        if self.conditional and c is not None:
            z = torch.cat((z, c), dim=(- 1))
        recon_x = self.dec_MLP(z)
        recon_loss, KLD = self.loss_fn(recon_x, x, mean, log_var)
        if return_pred:
            return (recon_x, recon_loss, KLD)
        return (recon_loss, KLD)

    def loss_fn(self, recon_x, x, mean, log_var):
        """Per-sample squared-error reconstruction loss and analytic KL term."""
        recon_loss = torch.sum(((recon_x - x) ** 2), dim=1)
        KLD = ((- 0.5) * torch.sum((((1 + log_var) - mean.pow(2)) - log_var.exp()), dim=1))
        return (recon_loss, KLD)

    def reparameterize(self, mu, log_var):
        """Sample z = mu + sigma * eps with eps ~ N(0, I)."""
        std = torch.exp((0.5 * log_var))
        eps = torch.randn_like(std)
        return (mu + (eps * std))

    def inference(self, z, c=None):
        """Decode a latent code (optionally conditioned) into data space."""
        if self.conditional and c is not None:
            z = torch.cat((z, c), dim=(- 1))
        return self.dec_MLP(z)
def _predict(x: Text):
    """Split a '[SEP]'-delimited string and run the QA model on the pieces.

    NOTE(review): after ``x.split(sep='[SEP]')`` each ``y`` is a plain string,
    so ``y[0]``/``y[1]`` index single characters — this looks like a bug
    (perhaps each segment was meant to be a (context, question) pair);
    confirm the intended input format against the caller.
    """
    x = x.split(sep='[SEP]')
    inputs = [{'context': y[0], 'question': y[1]} for y in x]
    outputs = model(inputs)
    # A single example may come back as a bare dict; normalize to a list.
    if isinstance(outputs, dict):
        outputs = [outputs]
    return [output['answer'] for output in outputs]
def _open_file_context(file_like, appendmat, mode='rb'):
    """Generator context manager around _open_file.

    Yields the open file object and, on exit, closes it only if _open_file
    opened it itself — a caller-provided file object is left open for the
    caller to manage.
    NOTE(review): a ``@contextmanager`` decorator appears to have been
    stripped from this definition; as a bare generator it must be driven
    manually or re-decorated.
    """
    (f, opened) = _open_file(file_like, appendmat, mode)
    try:
        (yield f)
    finally:
        if opened:
            f.close()
# NOTE(review): `_start_docstrings(...)` below appears to be HuggingFace's
# `@add_start_docstrings` decorator with its prefix lost in extraction.
_start_docstrings('T5 Model with a `language modeling` head on top. ', T5_START_DOCSTRING)
class T5ForConditionalGeneration(T5PreTrainedModel):
    """T5 encoder-decoder with an LM head over a shared embedding.

    Variant of HuggingFace's T5ForConditionalGeneration: the shared embedding
    can be detached into a stateless form (see ``make_stateless``), losses use
    a precreated CrossEntropyLoss, and precomputed/inverted attention masks
    are supported via config flags.
    """
    def __init__(self, config):
        super().__init__(config)
        self.model_dim = config.d_model
        # One embedding table shared by encoder and decoder.
        self.shared = nn.Embedding(config.vocab_size, config.d_model)
        encoder_config = copy.deepcopy(config)
        self.encoder = T5Stack(encoder_config, self.shared)
        decoder_config = copy.deepcopy(config)
        decoder_config.is_decoder = True
        self.decoder = T5Stack(decoder_config, self.shared)
        self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
        # -100 is the conventional ignore index for padded label positions.
        self.lm_loss = nn.CrossEntropyLoss(ignore_index=(- 100))
        self.output_only = config.output_only
        self.precomputed_masks = config.precomputed_masks
        self.init_weights()
    def make_stateless(self):
        """Replace the shared nn.Embedding with stateless wrappers.

        After this call the embedding weight lives in ``shared_embed_weight``
        and is passed explicitly into encoder/decoder on each forward.
        """
        stateless_shared = StatelessEmbedding(self.shared)
        self.encoder.embed_tokens = StatelessEmbedding(self.shared)
        self.decoder.embed_tokens = StatelessEmbedding(self.shared)
        del self.shared
        self.encoder.embed_tokens.pop_weight()
        self.decoder.embed_tokens.pop_weight()
        self.shared_embed_weight = stateless_shared.pop_weight()
    def get_input_embeddings(self):
        return self.shared
    def set_input_embeddings(self, new_embeddings):
        self.shared = new_embeddings
        self.encoder.set_input_embeddings(new_embeddings)
        self.decoder.set_input_embeddings(new_embeddings)
    def get_output_embeddings(self):
        return self.lm_head
    def get_encoder(self):
        return self.encoder
    def get_decoder(self):
        return self.decoder
    def forward(self, input_ids, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, inverted_encoder_attention_mask=None, lm_labels=None):
        """Run encoder + decoder; returns logits (and loss when labels given).

        NOTE(review): reads ``self.shared_embed_weight``, so ``make_stateless``
        must have been called before forward — confirm with the training setup.
        """
        encoder_hidden_states = self.encoder(input_ids=input_ids, shared_embedding=self.shared_embed_weight, attention_mask=attention_mask)
        # Teacher forcing: derive decoder inputs from labels when not provided.
        if (is_not_None(lm_labels) and is_None(decoder_input_ids)):
            decoder_input_ids = self._shift_right(lm_labels)
        decoder_hidden_states = self.decoder(input_ids=decoder_input_ids, shared_embedding=self.shared_embed_weight, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=(inverted_encoder_attention_mask if self.precomputed_masks else attention_mask))
        # Rescale before the LM head (T5 ties head scale to d_model ** -0.5).
        sequence_output = (decoder_hidden_states * (self.model_dim ** (- 0.5)))
        lm_logits = self.lm_head(sequence_output)
        decoder_outputs = (lm_logits, decoder_hidden_states)
        if is_not_None(lm_labels):
            loss_fct = self.lm_loss
            loss = loss_fct(lm_logits.view((- 1), lm_logits.size((- 1))), lm_labels.view((- 1)))
            decoder_outputs = ((loss,) + decoder_outputs)
        if self.output_only:
            return decoder_outputs[0]
        return (decoder_outputs + (encoder_hidden_states,))
    def prepare_inputs_for_generation(self, input_ids, past, attention_mask, use_cache, **kwargs):
        """Assemble the kwargs for one decoding step during generation."""
        assert is_not_None(past), 'past has to be defined for encoder_outputs'
        # past holds encoder outputs alone, or (encoder_outputs, decoder cache).
        if (len(past) < 2):
            (encoder_outputs, decoder_past_key_value_states) = (past, None)
        else:
            (encoder_outputs, decoder_past_key_value_states) = (past[0], past[1])
        return {'decoder_input_ids': input_ids, 'decoder_past_key_value_states': decoder_past_key_value_states, 'encoder_outputs': encoder_outputs, 'attention_mask': attention_mask, 'use_cache': use_cache}
    def _reorder_cache(self, past, beam_idx):
        """Reorder the decoder cache to follow beam-search hypothesis indices."""
        if (len(past) < 2):
            logger.warning('You might want to consider setting `use_cache=True` to speed up decoding')
            return past
        decoder_past = past[1]
        past = (past[0],)
        reordered_decoder_past = ()
        for layer_past_states in decoder_past:
            reordered_layer_past_states = ()
            for layer_past_state in layer_past_states:
                reordered_layer_past_states = (reordered_layer_past_states + (layer_past_state.index_select(0, beam_idx),))
            assert (reordered_layer_past_states[0].shape == layer_past_states[0].shape)
            assert (len(reordered_layer_past_states) == len(layer_past_states))
            reordered_decoder_past = (reordered_decoder_past + (reordered_layer_past_states,))
        return (past + (reordered_decoder_past,))
def _validate_index(episode: EpisodeBase, index: int) -> None:
    """Assert that ``index`` is a valid transition index for ``episode``.

    NOTE(review): relies on ``assert``, so the check vanishes under
    ``python -O``.
    """
    assert (index < episode.transition_count)
class RnnEncoder(torch.nn.Module):
    """GRU/LSTM encoder mapping (batch, channel, time) inputs to fixed-size encodings.

    The final RNN timestep output (both directions when bidirectional) is
    projected to ``encoding_size`` by a linear layer.
    """

    def __init__(self, hidden_size, in_channel, encoding_size, cell_type='GRU', num_layers=1, device='cpu', dropout=0, bidirectional=True):
        super(RnnEncoder, self).__init__()
        self.hidden_size = hidden_size
        self.in_channel = in_channel
        self.num_layers = num_layers
        self.cell_type = cell_type
        self.encoding_size = encoding_size
        self.bidirectional = bidirectional
        self.device = device
        num_directions = int(bidirectional) + 1
        # Projection from the concatenated last RNN output to the encoding.
        self.nn = torch.nn.Sequential(torch.nn.Linear(hidden_size * num_directions, encoding_size)).to(device)
        rnn_kwargs = dict(input_size=in_channel, hidden_size=hidden_size, num_layers=num_layers, batch_first=False, dropout=dropout, bidirectional=bidirectional)
        if cell_type == 'GRU':
            self.rnn = torch.nn.GRU(**rnn_kwargs).to(device)
        elif cell_type == 'LSTM':
            self.rnn = torch.nn.LSTM(**rnn_kwargs).to(device)
        else:
            raise ValueError('Cell type not defined, must be one of the following {GRU, LSTM, RNN}')

    def forward(self, x):
        # (batch, channel, time) -> (time, batch, channel) for batch_first=False.
        x = x.permute(2, 0, 1)
        num_directions = int(self.bidirectional) + 1
        state_shape = (self.num_layers * num_directions, x.shape[1], self.hidden_size)
        if self.cell_type == 'GRU':
            past = torch.zeros(state_shape).to(self.device)
        elif self.cell_type == 'LSTM':
            # LSTM needs a (hidden, cell) pair of initial states.
            past = (torch.zeros(state_shape).to(self.device), torch.zeros(state_shape).to(self.device))
        out, _ = self.rnn(x.to(self.device), past)
        # Use the last timestep's output as the sequence summary.
        return self.nn(out[(- 1)].squeeze(0))
def masked_hit_miss_counts(pred, gt, mask, thresholds):
    """Per-threshold contingency counts between prediction and ground truth.

    Thresholds are given in rainfall units and converted to pixel space.
    Returns (hits, misses, false_alarms, correct_negatives) as MXNet symbols:
    each is summed over the spatial axes (3, 4) and concatenated along axis 2,
    one slice per threshold. Counting is restricted to positions where
    ``mask`` is nonzero.
    """
    from nowcasting.hko_evaluation import rainfall_to_pixel
    thresholds = [rainfall_to_pixel(threshold) for threshold in thresholds]
    hits = []
    misses = []
    false_alarms = []
    correct_negatives = []
    for threshold in thresholds:
        # Binary rain masks; products of {0,1} masks act as logical AND.
        pred_rain_mask = (pred > threshold)
        gt_rain_mask = (gt > threshold)
        hits_ele = ((pred_rain_mask * gt_rain_mask) * mask)
        misses_ele = (((1 - pred_rain_mask) * gt_rain_mask) * mask)
        false_alarms_ele = ((pred_rain_mask * (1 - gt_rain_mask)) * mask)
        correct_negatives_ele = (((1 - pred_rain_mask) * (1 - gt_rain_mask)) * mask)
        hits.append(mx.sym.sum(hits_ele, axis=(3, 4)))
        misses.append(mx.sym.sum(misses_ele, axis=(3, 4)))
        false_alarms.append(mx.sym.sum(false_alarms_ele, axis=(3, 4)))
        correct_negatives.append(mx.sym.sum(correct_negatives_ele, axis=(3, 4)))
    hits = mx.sym.concat(*hits, dim=2, num_args=len(thresholds))
    misses = mx.sym.concat(*misses, dim=2, num_args=len(thresholds))
    false_alarms = mx.sym.concat(*false_alarms, dim=2, num_args=len(thresholds))
    correct_negatives = mx.sym.concat(*correct_negatives, dim=2, num_args=len(thresholds))
    return (hits, misses, false_alarms, correct_negatives)
def persist(key: str) -> str:
    """Record ``key`` in the session-state set of keys to persist.

    Returns the key unchanged so the call can be used inline as a widget key.
    """
    if (_PERSIST_STATE_KEY not in st.session_state):
        st.session_state[_PERSIST_STATE_KEY] = set()
    st.session_state[_PERSIST_STATE_KEY].add(key)
    return key
def has_valid_annotation(anno, ann_types, filter_crowd=True):
    """Return True when the annotation list survives all validity filters.

    :param anno: list of annotation dicts (COCO-style).
    :param ann_types: unused here — NOTE(review): possibly a remnant of
        removed per-type checks; confirm against upstream.
    :param filter_crowd: when True, crowd objects are dropped before checking.
    """
    if (len(anno) == 0):
        return False
    if filter_crowd:
        if ('iscrowd' in anno[0]):
            anno = [obj for obj in anno if (obj['iscrowd'] == 0)]
        if (len(anno) == 0):
            return False
    if _has_only_empty_bbox(anno):
        return False
    # BUG FIX: the original returned False here, so every annotation that
    # passed all the filters was still rejected; a validity predicate must
    # accept the survivors.
    return True
def register_Ns3CallbackImplBase_methods(root_module, cls):
    """Register constructors and methods of ns3::CallbackImplBase.

    Auto-generated pybindgen registration code (one GetCppTypeid entry per
    template instantiation) — do not edit by hand.
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
    cls.add_method('GetTypeid', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('IsEqual', 'bool', [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Demangle', 'std::string', [param('std::string const &', 'mangled')], is_static=True, visibility='protected')
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::ObjectBase*'])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'void'])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'bool'])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'double'])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'unsigned short'])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'unsigned int'])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'unsigned char'])
    return
# NOTE(review): `_function_dispatch(...)` looks like numpy's
# `@array_function_dispatch` decorator with its prefix lost in extraction.
_function_dispatch(_strip_dispatcher)
def rstrip(a, chars=None):
    """Element-wise right-strip of ``chars`` (whitespace when None) on a string array."""
    a_arr = numpy.asarray(a)
    return _vec_string(a_arr, a_arr.dtype, 'rstrip', (chars,))
# NOTE(review): `_params({...})` below appears to be sklearn's
# `@validate_params` decorator with its prefix lost in extraction.
_params({'labels_true': ['array-like'], 'labels_pred': ['array-like'], 'average_method': [StrOptions({'arithmetic', 'max', 'min', 'geometric'})]}, prefer_skip_nested_validation=True)
def normalized_mutual_info_score(labels_true, labels_pred, *, average_method='arithmetic'):
    """Normalized Mutual Information between two clusterings.

    NMI = MI(true, pred) / avg(H(true), H(pred)), where the averaging scheme
    ('arithmetic', 'geometric', 'min' or 'max') is chosen by ``average_method``.
    """
    (labels_true, labels_pred) = check_clusterings(labels_true, labels_pred)
    classes = np.unique(labels_true)
    clusters = np.unique(labels_pred)
    # Degenerate case: a single cluster on both sides (or both empty) is a
    # perfect match by convention.
    if ((classes.shape[0] == clusters.shape[0] == 1) or (classes.shape[0] == clusters.shape[0] == 0)):
        return 1.0
    contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
    contingency = contingency.astype(np.float64, copy=False)
    mi = mutual_info_score(labels_true, labels_pred, contingency=contingency)
    # why: short-circuit avoids dividing 0 by a possibly tiny normalizer
    if (mi == 0):
        return 0.0
    (h_true, h_pred) = (entropy(labels_true), entropy(labels_pred))
    normalizer = _generalized_average(h_true, h_pred, average_method)
    return (mi / normalizer)
def get_predicted_probabilities(p1, p2, p3, p4):
    """Probabilities of exactly 4, 3, 2, 1 and 0 successes out of four
    independent events with success probabilities p1..p4.

    Returned in that order; the five values sum to 1.
    """
    probs = (p1, p2, p3, p4)
    # totals[k] accumulates the probability of exactly k successes.
    totals = [0.0, 0.0, 0.0, 0.0, 0.0]
    # Enumerate every success/failure assignment as a 4-bit mask.
    for outcome_mask in range(16):
        term = 1.0
        successes = 0
        for bit, p in enumerate(probs):
            if (outcome_mask >> bit) & 1:
                term *= p
                successes += 1
            else:
                term *= (1 - p)
        totals[successes] += term
    return (totals[4], totals[3], totals[2], totals[1], totals[0])
class ResnetUtilsTest(tf.test.TestCase):
    """TF1/slim tests for resnet_utils: subsampling, 'SAME' convolutions,
    end-point collection, and dense (atrous) block stacking."""
    def testSubsampleThreeByThree(self):
        """subsample with factor 2 keeps the four corner values of a 3x3 grid."""
        x = tf.reshape(tf.to_float(tf.range(9)), [1, 3, 3, 1])
        x = resnet_utils.subsample(x, 2)
        expected = tf.reshape(tf.constant([0, 2, 6, 8]), [1, 2, 2, 1])
        with self.test_session():
            self.assertAllClose(x.eval(), expected.eval())
    def testSubsampleFourByFour(self):
        """subsample with factor 2 on a 4x4 grid picks every other element."""
        x = tf.reshape(tf.to_float(tf.range(16)), [1, 4, 4, 1])
        x = resnet_utils.subsample(x, 2)
        expected = tf.reshape(tf.constant([0, 2, 8, 10]), [1, 2, 2, 1])
        with self.test_session():
            self.assertAllClose(x.eval(), expected.eval())
    def testConv2DSameEven(self):
        """conv2d_same with stride 2 matches subsampled stride-1 conv on even sizes."""
        (n, n2) = (4, 2)
        x = create_test_input(1, n, n, 1)
        w = create_test_input(1, 3, 3, 1)
        w = tf.reshape(w, [3, 3, 1, 1])
        tf.get_variable('Conv/weights', initializer=w)
        tf.get_variable('Conv/biases', initializer=tf.zeros([1]))
        tf.get_variable_scope().reuse_variables()
        y1 = slim.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
        y1_expected = tf.to_float([[14, 28, 43, 26], [28, 48, 66, 37], [43, 66, 84, 46], [26, 37, 46, 22]])
        y1_expected = tf.reshape(y1_expected, [1, n, n, 1])
        y2 = resnet_utils.subsample(y1, 2)
        y2_expected = tf.to_float([[14, 43], [43, 84]])
        y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1])
        y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
        y3_expected = y2_expected
        # Plain strided slim.conv2d differs from conv2d_same on even inputs.
        y4 = slim.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
        y4_expected = tf.to_float([[48, 37], [37, 22]])
        y4_expected = tf.reshape(y4_expected, [1, n2, n2, 1])
        with self.test_session() as sess:
            sess.run(tf.initialize_all_variables())
            self.assertAllClose(y1.eval(), y1_expected.eval())
            self.assertAllClose(y2.eval(), y2_expected.eval())
            self.assertAllClose(y3.eval(), y3_expected.eval())
            self.assertAllClose(y4.eval(), y4_expected.eval())
    def testConv2DSameOdd(self):
        """On odd sizes the strided conv and conv2d_same agree everywhere."""
        (n, n2) = (5, 3)
        x = create_test_input(1, n, n, 1)
        w = create_test_input(1, 3, 3, 1)
        w = tf.reshape(w, [3, 3, 1, 1])
        tf.get_variable('Conv/weights', initializer=w)
        tf.get_variable('Conv/biases', initializer=tf.zeros([1]))
        tf.get_variable_scope().reuse_variables()
        y1 = slim.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
        y1_expected = tf.to_float([[14, 28, 43, 58, 34], [28, 48, 66, 84, 46], [43, 66, 84, 102, 55], [58, 84, 102, 120, 64], [34, 46, 55, 64, 30]])
        y1_expected = tf.reshape(y1_expected, [1, n, n, 1])
        y2 = resnet_utils.subsample(y1, 2)
        y2_expected = tf.to_float([[14, 43, 34], [43, 84, 55], [34, 55, 30]])
        y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1])
        y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
        y3_expected = y2_expected
        y4 = slim.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
        y4_expected = y2_expected
        with self.test_session() as sess:
            sess.run(tf.initialize_all_variables())
            self.assertAllClose(y1.eval(), y1_expected.eval())
            self.assertAllClose(y2.eval(), y2_expected.eval())
            self.assertAllClose(y3.eval(), y3_expected.eval())
            self.assertAllClose(y4.eval(), y4_expected.eval())
    def _resnet_plain(self, inputs, blocks, output_stride=None, scope=None):
        """Stack blocks densely and return the net plus collected end points."""
        with tf.variable_scope(scope, values=[inputs]):
            with slim.arg_scope([slim.conv2d], outputs_collections='end_points'):
                net = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride)
                end_points = dict(tf.get_collection('end_points'))
                return (net, end_points)
    def testEndPointsV2(self):
        """Every bottleneck conv/shortcut shows up as a named end point."""
        bottleneck = resnet_v2.bottleneck
        blocks = [resnet_utils.Block('block1', bottleneck, [(4, 1, 1), (4, 1, 2)]), resnet_utils.Block('block2', bottleneck, [(8, 2, 1), (8, 2, 1)])]
        inputs = create_test_input(2, 32, 16, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (_, end_points) = self._resnet_plain(inputs, blocks, scope='tiny')
        expected = ['tiny/block1/unit_1/bottleneck_v2/shortcut', 'tiny/block1/unit_1/bottleneck_v2/conv1', 'tiny/block1/unit_1/bottleneck_v2/conv2', 'tiny/block1/unit_1/bottleneck_v2/conv3', 'tiny/block1/unit_2/bottleneck_v2/conv1', 'tiny/block1/unit_2/bottleneck_v2/conv2', 'tiny/block1/unit_2/bottleneck_v2/conv3', 'tiny/block2/unit_1/bottleneck_v2/shortcut', 'tiny/block2/unit_1/bottleneck_v2/conv1', 'tiny/block2/unit_1/bottleneck_v2/conv2', 'tiny/block2/unit_1/bottleneck_v2/conv3', 'tiny/block2/unit_2/bottleneck_v2/conv1', 'tiny/block2/unit_2/bottleneck_v2/conv2', 'tiny/block2/unit_2/bottleneck_v2/conv3']
        self.assertItemsEqual(expected, end_points)
    def _stack_blocks_nondense(self, net, blocks):
        """Reference implementation: stack blocks without atrous (rate=1) tricks."""
        for block in blocks:
            with tf.variable_scope(block.scope, 'block', [net]):
                for (i, unit) in enumerate(block.args):
                    (depth, depth_bottleneck, stride) = unit
                    with tf.variable_scope(('unit_%d' % (i + 1)), values=[net]):
                        net = block.unit_fn(net, depth=depth, depth_bottleneck=depth_bottleneck, stride=stride, rate=1)
        return net
    def _atrousValues(self, bottleneck):
        """Dense (atrous) stacking, subsampled back, must equal nondense stacking
        for several output strides (including None = no atrous)."""
        blocks = [resnet_utils.Block('block1', bottleneck, [(4, 1, 1), (4, 1, 2)]), resnet_utils.Block('block2', bottleneck, [(8, 2, 1), (8, 2, 2)]), resnet_utils.Block('block3', bottleneck, [(16, 4, 1), (16, 4, 2)]), resnet_utils.Block('block4', bottleneck, [(32, 8, 1), (32, 8, 1)])]
        nominal_stride = 8
        height = 30
        width = 31
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            with slim.arg_scope([slim.batch_norm], is_training=False):
                for output_stride in [1, 2, 4, 8, None]:
                    with tf.Graph().as_default():
                        with self.test_session() as sess:
                            tf.set_random_seed(0)
                            inputs = create_test_input(1, height, width, 3)
                            output = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride)
                            if (output_stride is None):
                                factor = 1
                            else:
                                factor = (nominal_stride // output_stride)
                            output = resnet_utils.subsample(output, factor)
                            # Reuse the same variables for the reference network.
                            tf.get_variable_scope().reuse_variables()
                            expected = self._stack_blocks_nondense(inputs, blocks)
                            sess.run(tf.initialize_all_variables())
                            (output, expected) = sess.run([output, expected])
                            self.assertAllClose(output, expected, atol=0.0001, rtol=0.0001)
    def testAtrousValuesBottleneck(self):
        self._atrousValues(resnet_v2.bottleneck)
def run_hp_search_ray(trainer, n_trials: int, direction: str, **kwargs) -> BestRun:
    """Run hyperparameter search for ``trainer`` using Ray Tune.

    Args:
        trainer: Trainer whose ``hp_space`` and training loop drive each trial.
        n_trials: number of trial configurations to sample.
        direction: 'minimize' or 'maximize'; only the first 3 characters are
            passed to Ray as the optimization mode ('min'/'max').
        **kwargs: forwarded to ``ray.tune.run`` (e.g. ``resources_per_trial``,
            ``scheduler``, ``keep_checkpoints_num``, ``progress_reporter``).

    Returns:
        BestRun with the best trial's id, objective value and config.
    """
    import ray

    def _objective(trial, local_trainer, checkpoint_dir=None):
        checkpoint = None
        if checkpoint_dir:
            # Resume from the checkpoint Ray handed us (PBT exploit / fault tolerance).
            for subdir in os.listdir(checkpoint_dir):
                if subdir.startswith(PREFIX_CHECKPOINT_DIR):
                    checkpoint = os.path.join(checkpoint_dir, subdir)
        local_trainer.objective = None
        local_trainer.train(resume_from_checkpoint=checkpoint, trial=trial)
        # If there was no evaluation during the training loop, evaluate now and report.
        if getattr(local_trainer, 'objective', None) is None:
            metrics = local_trainer.evaluate()
            local_trainer.objective = local_trainer.compute_objective(metrics)
            local_trainer._tune_save_checkpoint()
            ray.tune.report(objective=local_trainer.objective, **metrics, done=True)

    # The TensorBoard writer does not pickle and each trial re-instantiates the
    # model anyway, so strip both from the driver-side trainer before shipping
    # it to the workers.
    _tb_writer = trainer.pop_callback(TensorBoardCallback)
    trainer.model = None
    if ('resources_per_trial' not in kwargs):
        # Default: 1 CPU (plus 1 GPU when available) per trial.
        kwargs['resources_per_trial'] = {'cpu': 1}
        if (trainer.args.n_gpu > 0):
            kwargs['resources_per_trial']['gpu'] = 1
        resource_msg = ('1 CPU' + (' and 1 GPU' if (trainer.args.n_gpu > 0) else ''))
        logger.info(f'No `resources_per_trial` arg was passed into `hyperparameter_search`. Setting it to a default value of {resource_msg} for each trial.')
    gpus_per_trial = kwargs['resources_per_trial'].get('gpu', 0)
    trainer.args._n_gpu = gpus_per_trial
    if ('progress_reporter' not in kwargs):
        from ray.tune import CLIReporter
        kwargs['progress_reporter'] = CLIReporter(metric_columns=['objective'])
    if (('keep_checkpoints_num' in kwargs) and (kwargs['keep_checkpoints_num'] > 0)):
        trainer.use_tune_checkpoints = True
        if (kwargs['keep_checkpoints_num'] > 1):
            # BUGFIX: the original read kwargs['keep_checkpoint_num'] (missing
            # 's'), which raised KeyError whenever this branch was taken.
            logger.warning(f"Currently keeping {kwargs['keep_checkpoints_num']} checkpoints for each trial. Checkpoints are usually huge, consider setting `keep_checkpoints_num=1`.")
    if ('scheduler' in kwargs):
        from ray.tune.schedulers import ASHAScheduler, HyperBandForBOHB, MedianStoppingRule, PopulationBasedTraining
        # PBT without checkpointing retrains from scratch on every exploit step.
        if isinstance(kwargs['scheduler'], PopulationBasedTraining):
            if (not trainer.use_tune_checkpoints):
                logger.warning("You are using PopulationBasedTraining but you haven't enabled checkpointing. This means your trials will train from scratch everytime they are exploiting new configurations. Consider enabling checkpointing by passing `keep_checkpoints_num=1` as an additional argument to `Trainer.hyperparameter_search`.")
        # Early-stopping schedulers need intermediate reports, which only happen
        # when evaluation runs during training.
        if (isinstance(kwargs['scheduler'], (ASHAScheduler, MedianStoppingRule, HyperBandForBOHB, PopulationBasedTraining)) and ((not trainer.args.do_eval) or (trainer.args.evaluation_strategy == IntervalStrategy.NO))):
            raise RuntimeError("You are using {cls} as a scheduler but you haven't enabled evaluation during training. This means your trials will not report intermediate results to Ray Tune, and can thus not be stopped early or used to exploit other trials parameters. If this is what you want, do not use {cls}. If you would like to use {cls}, make sure you pass `do_eval=True` and `evaluation_strategy='steps'` in the Trainer `args`.".format(cls=type(kwargs['scheduler']).__name__))
    analysis = ray.tune.run(ray.tune.with_parameters(_objective, local_trainer=trainer), config=trainer.hp_space(None), num_samples=n_trials, **kwargs)
    best_trial = analysis.get_best_trial(metric='objective', mode=direction[:3])
    best_run = BestRun(best_trial.trial_id, best_trial.last_result['objective'], best_trial.config)
    if (_tb_writer is not None):
        # Restore the callback removed above so the trainer is usable afterwards.
        trainer.add_callback(_tb_writer)
    return best_run
def arg_parse():
    """Parse command-line arguments for the GcnInformax experiment.

    Returns:
        argparse.Namespace with fields: target, train_num, use_unsup_loss,
        separate_encoder, lr, lamda, weight_decay.
    """
    parser = argparse.ArgumentParser(description='GcnInformax Arguments.')
    parser.add_argument('--target', dest='target', type=int, default=0,
                        help='Index of the regression target to predict.')
    parser.add_argument('--train-num', dest='train_num', type=int, default=5000,
                        help='Number of training samples.')
    # store_true is the idiomatic equivalent of store_const(True, default=False).
    parser.add_argument('--use-unsup-loss', dest='use_unsup_loss', action='store_true',
                        help='Enable the unsupervised loss term.')
    parser.add_argument('--separate-encoder', dest='separate_encoder', action='store_true',
                        help='Use a separate encoder for the unsupervised loss.')
    parser.add_argument('--lr', dest='lr', type=float, default=0.01, help='Learning rate.')
    # BUGFIX: help text was a copy-paste of --lr's ('Learning rate.').
    parser.add_argument('--lamda', dest='lamda', type=float, default=0.001,
                        help='Weight (lambda) of the auxiliary loss term.')
    parser.add_argument('--weight-decay', dest='weight_decay', type=float, default=0,
                        help='L2 weight decay coefficient.')
    return parser.parse_args()
def iter21(num):
    """Yield (row, col) index pairs of a num x num matrix, one diagonal at a
    time, starting with the main diagonal and moving down-left.

    For each offset k in [0, num), yields the indices of the k-th
    sub-diagonal: (k, 0), (k + 1, 1), ..., (num - 1, num - 1 - k).
    """
    for offset in range(num):
        rows, cols = nm.diag_indices(num - offset)
        for r, c in zip(rows, cols):
            yield (r + offset, c)
class RouterGAP(nn.Module):
    """Binary router: conv -> global average pool -> sigmoid decision.

    With ngf == 1 the pooled conv response is used directly as the logit;
    otherwise a ReLU and a linear layer map the pooled features to a scalar.
    When soft_decision is False the sigmoid output is hard-thresholded via a
    straight-through indicator (stochastic or deterministic).
    """

    def __init__(self, input_nc, input_width, input_height, ngf=5, kernel_size=7, soft_decision=True, stochastic=False, **kwargs):
        super(RouterGAP, self).__init__()
        self.ngf = ngf
        self.soft_decision = soft_decision
        self.stochastic = stochastic
        # A kernel larger than the input would make the convolution invalid,
        # so clamp it to the larger spatial dimension.
        largest_side = max(input_width, input_height)
        if largest_side < kernel_size:
            warnings.warn('Router kernel too large, shrink it')
            kernel_size = largest_side
        self.conv1 = nn.Conv2d(input_nc, ngf, kernel_size=kernel_size)
        self.linear1 = nn.Linear(ngf, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        features = self.conv1(x)
        if self.ngf == 1:
            # Single channel: the pooled response is already the logit.
            logits = features.mean(dim=-1).mean(dim=-1).squeeze()
        else:
            pooled = F.relu(features).mean(dim=-1).mean(dim=-1).squeeze()
            logits = self.linear1(pooled).squeeze()
        decision = self.sigmoid(logits)
        if self.soft_decision:
            return decision
        # Hard routing: straight-through estimators keep gradients flowing.
        if self.stochastic:
            return ops.ST_StochasticIndicator()(decision)
        return ops.ST_Indicator()(decision)
def _linear_transform(attributions, clip_above_percentile=99.9, clip_below_percentile=70.0, low=0.2):
    """Linearly rescale attribution magnitudes into [low, 1] with clipping.

    Magnitudes between the two percentile thresholds are mapped linearly onto
    [low, 1]; values below the lower threshold (and all sign-flipped values
    falling under ``low``) are zeroed, and the result is clipped to [0, 1].

    Raises:
        ValueError: if either percentile is outside [0, 100] or ``low`` is
            outside [0, 1].
    """
    if clip_above_percentile < 0 or clip_above_percentile > 100:
        raise ValueError('clip_above_percentile must be in [0, 100]')
    if clip_below_percentile < 0 or clip_below_percentile > 100:
        raise ValueError('clip_below_percentile must be in [0, 100]')
    if low < 0 or low > 1:
        raise ValueError('low must be in [0, 1]')
    # Magnitude thresholds corresponding to the two percentiles.
    upper = _compute_threshold_by_top_percentage(attributions, percentage=100 - clip_above_percentile)
    lower = _compute_threshold_by_top_percentage(attributions, percentage=100 - clip_below_percentile)
    # Map |attributions| in [lower, upper] linearly onto [low, 1].
    scaled = (1 - low) * (np.abs(attributions) - lower) / (upper - lower) + low
    scaled = scaled * np.sign(attributions)
    # Zero out everything that fell below the floor after re-signing.
    scaled = scaled * (scaled >= low)
    return np.clip(scaled, 0.0, 1.0)
def collect_results_cpu(result_part, size, tmpdir=None):
    """Gather per-rank partial results onto rank 0 via a shared temp directory.

    Every rank pickles its ``result_part`` into ``tmpdir``; after a barrier,
    rank 0 loads all parts, interleaves them, truncates to ``size`` and
    removes the directory.

    Args:
        result_part: this rank's list of results.
        size: total number of results expected across all ranks (used to drop
            padding samples).
        tmpdir: directory for intermediate pickles; when ``None``, rank 0
            creates one under '.dist_test' and broadcasts its path.

    Returns:
        Merged, ordered result list on rank 0; ``None`` on every other rank.
    """
    (rank, world_size) = get_dist_info()
    if (tmpdir is None):
        # Fixed-length byte buffer so every rank broadcasts the same shape.
        MAX_LEN = 512
        # 32 is ASCII space; the padding is stripped by rstrip() after decode.
        dir_tensor = torch.full((MAX_LEN,), 32, dtype=torch.uint8, device='cuda')
        if (rank == 0):
            mmcv.mkdir_or_exist('.dist_test')
            tmpdir = tempfile.mkdtemp(dir='.dist_test')
            # Encode the path into the byte tensor so it can be broadcast.
            tmpdir = torch.tensor(bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
            dir_tensor[:len(tmpdir)] = tmpdir
        dist.broadcast(dir_tensor, 0)
        tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
    else:
        mmcv.mkdir_or_exist(tmpdir)
    mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl'))
    # Ensure every rank has written its part before rank 0 starts reading.
    dist.barrier()
    if (rank != 0):
        return None
    else:
        part_list = []
        for i in range(world_size):
            part_file = osp.join(tmpdir, f'part_{i}.pkl')
            part_result = mmcv.load(part_file)
            part_list.append(part_result)
        # zip(*part_list) interleaves the parts — presumably samples were
        # distributed round-robin across ranks; verify against the sampler.
        ordered_results = []
        for res in zip(*part_list):
            ordered_results.extend(list(res))
        # Drop samples that were duplicated to make the dataset divisible
        # by world_size.
        ordered_results = ordered_results[:size]
        shutil.rmtree(tmpdir)
        return ordered_results
def main(argv):
    """Replay a CSV drive trace through the lead_drive node until it ends.

    Args:
        argv: command-line args; argv[0] is the CSV trace file, argv[1] the
            DBC file.
    """
    ns = rospy.get_namespace()
    # Drop the trailing '/' from the ROS namespace.
    ns = ns[0:(- 1)]
    csvfile = argv[0]
    dbcfile = argv[1]
    node = lead_drive(ns, csvfile, dbcfile)
    while (not rospy.is_shutdown()):
        node.publish()
        if (node.next_time == (- 1)):
            # End of trace.
            break
        deltaT = (node.next_time - node.current_time)
        # Guard against a negative delta (time.sleep raises ValueError on
        # negative values), e.g. when publishing lags the trace timestamps.
        if deltaT > 0:
            time.sleep(deltaT)
def convert_onnx_proto(attribute):
    """Convert an ONNX schema/proto attribute value to its daceml equivalent.

    Handles, in order: registered proto types, plain Python scalars,
    FormalParameterOption enums, AttrType enums, and AttributeProto messages.

    Raises:
        NotImplementedError: for formal-parameter options other than
            single/optional/variadic, or for any unrecognized attribute type.
    """
    from daceml.onnx.schema import ONNXAttributeType, _KNOWN_ONNX_PROTOS, ONNXParameterType
    if (type(attribute) in _KNOWN_ONNX_PROTOS):
        return _KNOWN_ONNX_PROTOS[type(attribute)].from_onnx_proto(attribute)
    if isinstance(attribute, (int, str, bool, float)):
        # Plain scalars pass through unchanged.
        return attribute
    if (type(attribute) is onnx.defs.OpSchema.FormalParameterOption):
        option_map = {
            onnx.defs.OpSchema.FormalParameterOption.Single: ONNXParameterType.Single,
            onnx.defs.OpSchema.FormalParameterOption.Optional: ONNXParameterType.Optional,
            onnx.defs.OpSchema.FormalParameterOption.Variadic: ONNXParameterType.Variadic,
        }
        if attribute in option_map:
            return option_map[attribute]
        # BUGFIX: the original message had no '{}' placeholder, so the
        # offending value was silently dropped from the error text.
        raise NotImplementedError('Only single, optional and variadic formal parameters are supported, got {}'.format(attribute))
    if (type(attribute) is onnx.defs.OpSchema.AttrType):
        attr_map = {
            onnx.defs.OpSchema.AttrType.FLOAT: ONNXAttributeType.Float,
            onnx.defs.OpSchema.AttrType.FLOATS: ONNXAttributeType.Floats,
            onnx.defs.OpSchema.AttrType.INT: ONNXAttributeType.Int,
            onnx.defs.OpSchema.AttrType.INTS: ONNXAttributeType.Ints,
            onnx.defs.OpSchema.AttrType.STRING: ONNXAttributeType.String,
            onnx.defs.OpSchema.AttrType.STRINGS: ONNXAttributeType.Strings,
            onnx.defs.OpSchema.AttrType.TENSOR: ONNXAttributeType.Tensor,
        }
        if attribute in attr_map:
            return attr_map[attribute]
        # Unknown attr types are tolerated and tagged as Unsupported.
        log.debug('Got unsupported attribute type {}'.format(attribute))
        return ONNXAttributeType.Unsupported
    if (type(attribute) is onnx.AttributeProto):
        return convert_attribute_proto(attribute)
    raise NotImplementedError('No conversion implemented for {} (type {})'.format(attribute, type(attribute)))
_module('numpy')
# NOTE(review): the bare call above looks like it was meant to be a decorator
# (e.g. ``@_module('numpy')``) applied to ``info`` below — confirm against the
# original file layout.
def info(object=None, maxwidth=76, output=sys.stdout, toplevel='numpy'):
    """Print help information about an object to ``output``.

    Dispatches on the type of ``object``: ``None`` documents ``info`` itself;
    an ``ndarray`` goes through ``_info``; a string is looked up by name in
    every module under ``toplevel``; functions, classes, old-style Python 2
    instances and bound methods are documented via ``inspect``; anything else
    falls back to its ``__doc__``.

    Args:
        object: thing to document (object, class, function, method, or a name
            as a string). ``None`` documents ``info`` itself.
        maxwidth: maximum line width before the signature line is split.
        output: file-like object the help text is printed to.
        toplevel: package name whose module dictionaries are searched for
            string lookups.
    """
    global _namedict, _dictlist
    import pydoc
    import inspect
    # Unwrap ppimport lazy-loading proxies before inspecting the real object.
    if (hasattr(object, '_ppimport_importer') or hasattr(object, '_ppimport_module')):
        object = object._ppimport_module
    elif hasattr(object, '_ppimport_attr'):
        object = object._ppimport_attr
    if (object is None):
        info(info)
    elif isinstance(object, ndarray):
        _info(object, output=output)
    elif isinstance(object, str):
        # Name lookup: search every module dict under `toplevel`, printing
        # help for each distinct object found under that name.
        if (_namedict is None):
            (_namedict, _dictlist) = _makenamedict(toplevel)
        numfound = 0
        objlist = []  # ids already printed, to flag repeat references
        for namestr in _dictlist:
            try:
                obj = _namedict[namestr][object]
                if (id(obj) in objlist):
                    print(('\n *** Repeat reference found in %s *** ' % namestr), file=output)
                else:
                    objlist.append(id(obj))
                    print((' *** Found in %s ***' % namestr), file=output)
                    info(obj)
                    print(('-' * maxwidth), file=output)
                numfound += 1
            except KeyError:
                # Name not present in this module; keep searching.
                pass
        if (numfound == 0):
            print(('Help for %s not found.' % object), file=output)
        else:
            print(('\n *** Total of %d references found. ***' % numfound), file=output)
    elif inspect.isfunction(object):
        name = object.__name__
        arguments = formatargspec(*getargspec(object))
        # Split long signature lines to stay within maxwidth.
        if (len((name + arguments)) > maxwidth):
            argstr = _split_line(name, arguments, maxwidth)
        else:
            argstr = (name + arguments)
        print(((' ' + argstr) + '\n'), file=output)
        print(inspect.getdoc(object), file=output)
    elif inspect.isclass(object):
        name = object.__name__
        arguments = '()'
        try:
            # Derive the constructor signature, dropping the leading `self`.
            if hasattr(object, '__init__'):
                arguments = formatargspec(*getargspec(object.__init__.__func__))
                arglist = arguments.split(', ')
                if (len(arglist) > 1):
                    arglist[1] = ('(' + arglist[1])
                    arguments = ', '.join(arglist[1:])
        except Exception:
            # Builtins / extension types may not expose an argspec.
            pass
        if (len((name + arguments)) > maxwidth):
            argstr = _split_line(name, arguments, maxwidth)
        else:
            argstr = (name + arguments)
        print(((' ' + argstr) + '\n'), file=output)
        doc1 = inspect.getdoc(object)
        if (doc1 is None):
            # No class docstring: fall back to the constructor's docstring.
            if hasattr(object, '__init__'):
                print(inspect.getdoc(object.__init__), file=output)
        else:
            print(inspect.getdoc(object), file=output)
        methods = pydoc.allmethods(object)
        if (methods != []):
            print('\n\nMethods:\n', file=output)
            for meth in methods:
                if (meth[0] == '_'):
                    # Skip private/dunder methods.
                    continue
                thisobj = getattr(object, meth, None)
                if (thisobj is not None):
                    (methstr, other) = pydoc.splitdoc((inspect.getdoc(thisobj) or 'None'))
                print((' %s -- %s' % (meth, methstr)), file=output)
    elif ((sys.version_info[0] < 3) and isinstance(object, types.InstanceType)):
        # Python 2 old-style class instance.
        print('Instance of class: ', object.__class__.__name__, file=output)
        print(file=output)
        if hasattr(object, '__call__'):
            arguments = formatargspec(*getargspec(object.__call__.__func__))
            arglist = arguments.split(', ')
            if (len(arglist) > 1):
                arglist[1] = ('(' + arglist[1])
                arguments = ', '.join(arglist[1:])
            else:
                arguments = '()'
            if hasattr(object, 'name'):
                name = ('%s' % object.name)
            else:
                name = '<name>'
            if (len((name + arguments)) > maxwidth):
                argstr = _split_line(name, arguments, maxwidth)
            else:
                argstr = (name + arguments)
            print(((' ' + argstr) + '\n'), file=output)
            doc = inspect.getdoc(object.__call__)
            if (doc is not None):
                print(inspect.getdoc(object.__call__), file=output)
            print(inspect.getdoc(object), file=output)
        else:
            print(inspect.getdoc(object), file=output)
    elif inspect.ismethod(object):
        name = object.__name__
        arguments = formatargspec(*getargspec(object.__func__))
        arglist = arguments.split(', ')
        if (len(arglist) > 1):
            # Drop the leading `self` from the displayed signature.
            arglist[1] = ('(' + arglist[1])
            arguments = ', '.join(arglist[1:])
        else:
            arguments = '()'
        if (len((name + arguments)) > maxwidth):
            argstr = _split_line(name, arguments, maxwidth)
        else:
            argstr = (name + arguments)
        print(((' ' + argstr) + '\n'), file=output)
        print(inspect.getdoc(object), file=output)
    elif hasattr(object, '__doc__'):
        # Last resort: anything with a docstring.
        print(inspect.getdoc(object), file=output)
def link_classification(output_dim: int=1, output_act: AnyStr='sigmoid', edge_embedding_method: AnyStr='ip'):
    """Build a link-classification output layer.

    Thin wrapper that delegates to ``link_inference`` with the fixed name
    'link_classification'; arguments are forwarded unchanged.
    """
    return link_inference(
        output_dim=output_dim,
        output_act=output_act,
        edge_embedding_method=edge_embedding_method,
        name='link_classification',
    )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.