code stringlengths 101 5.91M |
|---|
class GLPNImageProcessor(BaseImageProcessor):
    """Image processor for GLPN.

    Optionally resizes images so that both spatial sides are rounded *down*
    to a multiple of ``size_divisor``, and optionally rescales pixel values
    by ``1 / 255``.
    """

    model_input_names = ['pixel_values']

    def __init__(self, do_resize: bool = True, size_divisor: int = 32, resample=PILImageResampling.BILINEAR, do_rescale: bool = True, **kwargs) -> None:
        # Store per-instance defaults, then let the base class consume kwargs.
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image: np.ndarray, size_divisor: int, resample, data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        """Resize ``image`` so height and width are multiples of ``size_divisor``.

        Sides are floored to the nearest multiple (e.g. 513 -> 512 for a
        divisor of 32), so the output is never larger than the input.
        """
        height, width = get_image_size(image)
        target_size = ((height // size_divisor) * size_divisor, (width // size_divisor) * size_divisor)
        return resize(image, target_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        """Multiply pixel values by ``scale`` (typically ``1 / 255``)."""
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(self, images: Union['PIL.Image.Image', TensorType, List['PIL.Image.Image'], List[TensorType]], do_resize: Optional[bool] = None, size_divisor: Optional[int] = None, resample=None, do_rescale: Optional[bool] = None, return_tensors: Optional[Union[TensorType, str]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        """Preprocess one image or a batch of images for GLPN.

        Any option left as ``None`` falls back to the instance default set in
        ``__init__``. Returns a :class:`BatchFeature` holding ``pixel_values``.
        """
        if do_resize is None:
            do_resize = self.do_resize
        if do_rescale is None:
            do_rescale = self.do_rescale
        if size_divisor is None:
            size_divisor = self.size_divisor
        if resample is None:
            resample = self.resample
        if do_resize and size_divisor is None:
            raise ValueError('size_divisor is required for resizing')
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError('Invalid image(s)')
        # All transformations below operate on numpy arrays.
        images = [to_numpy_array(img) for img in images]
        if do_resize:
            images = [self.resize(img, size_divisor=size_divisor, resample=resample) for img in images]
        if do_rescale:
            images = [self.rescale(img, scale=(1 / 255)) for img in images]
        images = [to_channel_dimension_format(img, data_format) for img in images]
        return BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors)
class LSTM(rf.Module):
    """A single-layer LSTM.

    Weights use the layout expected by the backend ``lstm`` primitive:
    ``ff_weight``  is (4 * out_dim, in_dim),
    ``rec_weight`` is (4 * out_dim, out_dim),
    and the optional ``bias`` is (4 * out_dim,).
    """

    def __init__(self, in_dim: Dim, out_dim: Dim, *, with_bias: bool = True):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        # Input-to-hidden and hidden-to-hidden weights, Glorot-initialized.
        self.ff_weight = rf.Parameter((4 * self.out_dim, self.in_dim))
        self.ff_weight.initial = rf.init.Glorot()
        self.rec_weight = rf.Parameter((4 * self.out_dim, self.out_dim))
        self.rec_weight.initial = rf.init.Glorot()
        self.bias = None
        if with_bias:
            self.bias = rf.Parameter((4 * self.out_dim,))
            self.bias.initial = 0.0

    def __call__(self, source: Tensor, *, state: LstmState, spatial_dim: Dim) -> Tuple[Tensor, LstmState]:
        """Apply the LSTM over ``spatial_dim``.

        :param source: input tensor; must contain ``in_dim`` among its dims.
        :param state: previous state with ``h`` (hidden) and ``c`` (cell) set.
        :param spatial_dim: the time/sequence dim to iterate over.
        :return: (output, new state).
        :raises ValueError: if the state is incomplete or ``in_dim`` is missing.
        """
        # Bug fix: the original used ``not state.h`` / ``not state.c``, which
        # truth-tests a Tensor (ambiguous; typically raises for multi-element
        # tensors). The intent is only to verify both attributes were provided,
        # so compare against None explicitly.
        if state.h is None or state.c is None:
            raise ValueError(f'{self}: state {state} needs attributes ``h`` (hidden) and ``c`` (cell).')
        if self.in_dim not in source.dims:
            raise ValueError(f'{self}: input {source} does not have in_dim {self.in_dim}')
        # Delegate the actual recurrence to the backend primitive.
        result, (new_state_h, new_state_c) = source._raw_backend.lstm(source=source, state_c=state.c, state_h=state.h, ff_weight=self.ff_weight, rec_weight=self.rec_weight, bias=self.bias, spatial_dim=spatial_dim, in_dim=self.in_dim, out_dim=self.out_dim)
        new_state = LstmState(h=new_state_h, c=new_state_c)
        return result, new_state

    def default_initial_state(self, *, batch_dims: Sequence[Dim]) -> LstmState:
        """Zero-filled hidden and cell state over ``batch_dims`` + ``out_dim``."""
        return LstmState(h=rf.zeros(list(batch_dims) + [self.out_dim], feature_dim=self.out_dim), c=rf.zeros(list(batch_dims) + [self.out_dim], feature_dim=self.out_dim))
def test_zip_and_unzip():
    """zip/unzip round-trip: a full-depth zip broadcasts y into x's sublists,
    while depth_limit=1 pairs whole rows; unzip recovers the original fields."""
    x = ak.Array([[1, 2, 3], [], [4, 5], [6], [7, 8, 9, 10]])
    y = ak.Array([1.1, 2.2, 3.3, 4.4, 5.5])
    zipped_deep = ak.operations.zip({'x': x, 'y': y})
    zipped_shallow = ak.operations.zip({'x': x, 'y': y}, depth_limit=1)
    xx, yy = ak.operations.unzip(zipped_shallow)
    # All results must be genuine awkward layouts.
    for arr in (zipped_deep, zipped_shallow, xx, yy):
        assert isinstance(arr.layout, ak.contents.Content)
    assert to_list(zipped_deep) == [
        [{'x': 1, 'y': 1.1}, {'x': 2, 'y': 1.1}, {'x': 3, 'y': 1.1}],
        [],
        [{'x': 4, 'y': 3.3}, {'x': 5, 'y': 3.3}],
        [{'x': 6, 'y': 4.4}],
        [{'x': 7, 'y': 5.5}, {'x': 8, 'y': 5.5}, {'x': 9, 'y': 5.5}, {'x': 10, 'y': 5.5}],
    ]
    assert to_list(zipped_shallow) == [
        {'x': [1, 2, 3], 'y': 1.1},
        {'x': [], 'y': 2.2},
        {'x': [4, 5], 'y': 3.3},
        {'x': [6], 'y': 4.4},
        {'x': [7, 8, 9, 10], 'y': 5.5},
    ]
    # unzip must give back the inputs unchanged.
    assert to_list(xx) == [[1, 2, 3], [], [4, 5], [6], [7, 8, 9, 10]]
    assert to_list(yy) == [1.1, 2.2, 3.3, 4.4, 5.5]
def ratio2weight(targets, ratio):
    """Compute per-attribute sample weights from positive-label ratios.

    Positive labels are weighted by ``exp(1 - ratio)``-style terms and
    negatives by ``exp(ratio)``, combined as ``exp(pos + neg)``; entries whose
    target is greater than 1 (ignored/unknown labels) get weight 0.
    """
    positive_part = targets * (1 - ratio)
    negative_part = (1 - targets) * ratio
    weights = torch.exp(negative_part + positive_part)
    # Targets > 1 mark labels to be ignored by the loss: zero them out.
    weights[targets > 1] = 0.0
    return weights
class TFCamembertForSequenceClassification(metaclass=DummyObject):
    """Import-time placeholder used when the TensorFlow backend is missing.

    The ``DummyObject`` metaclass makes attribute access on the class itself
    report the missing backend as well.
    """

    # Backends this dummy stands in for.
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        # Raises with a message telling the user to install TensorFlow.
        requires_backends(self, ['tf'])
class TestExecCommand(object):
    """Tests for ``numpy.distutils.exec_command`` (a deprecated API).

    Each ``check_*`` helper runs shell commands through
    ``exec_command.exec_command`` and asserts on the ``(status, output)``
    pair; ``test_basic`` dispatches per platform with stdout/stderr redirected
    and the DeprecationWarning asserted.

    Fix: ``check_basic`` was declared ``def check_basic(self, *kws)`` (a
    tuple) while its body expands ``**kws`` — any invocation raised
    ``TypeError: argument after ** must be a mapping``. The signature is now
    keyword-variadic like the other ``check_*`` helpers.
    """

    def setup(self):
        # Path to the current Python interpreter, used to spawn subprocesses.
        self.pyexe = get_pythonexe()

    def check_nt(self, **kws):
        """Windows: %VAR% expansion and stderr capture."""
        (s, o) = exec_command.exec_command('cmd /C echo path=%path%')
        assert_((s == 0))
        assert_((o != ''))
        # stderr of the child is captured into the returned output.
        (s, o) = exec_command.exec_command(('"%s" -c "import sys;sys.stderr.write(sys.platform)"' % self.pyexe))
        assert_((s == 0))
        assert_((o == 'win32'))

    def check_posix(self, **kws):
        """POSIX: echo, env-var passing via keyword args, and failures."""
        (s, o) = exec_command.exec_command('echo Hello', **kws)
        assert_((s == 0))
        assert_((o == 'Hello'))
        # An unset variable expands to the empty string.
        (s, o) = exec_command.exec_command('echo $AAA', **kws)
        assert_((s == 0))
        assert_((o == ''))
        # Extra keyword args become environment variables for the child.
        (s, o) = exec_command.exec_command('echo "$AAA"', AAA='Tere', **kws)
        assert_((s == 0))
        assert_((o == 'Tere'))
        (s, o) = exec_command.exec_command('echo "$AAA"', **kws)
        assert_((s == 0))
        assert_((o == ''))
        # Variables from os.environ are visible, and kwargs override them
        # only for the single call.
        if ('BBB' not in os.environ):
            os.environ['BBB'] = 'Hi'
            (s, o) = exec_command.exec_command('echo "$BBB"', **kws)
            assert_((s == 0))
            assert_((o == 'Hi'))
            (s, o) = exec_command.exec_command('echo "$BBB"', BBB='Hey', **kws)
            assert_((s == 0))
            assert_((o == 'Hey'))
            (s, o) = exec_command.exec_command('echo "$BBB"', **kws)
            assert_((s == 0))
            assert_((o == 'Hi'))
            del os.environ['BBB']
            (s, o) = exec_command.exec_command('echo "$BBB"', **kws)
            assert_((s == 0))
            assert_((o == ''))
        # A nonexistent command fails with a nonzero status and some output.
        (s, o) = exec_command.exec_command('this_is_not_a_command', **kws)
        assert_((s != 0))
        assert_((o != ''))
        (s, o) = exec_command.exec_command('echo path=$PATH', **kws)
        assert_((s == 0))
        assert_((o != ''))
        (s, o) = exec_command.exec_command(('"%s" -c "import sys,os;sys.stderr.write(os.name)"' % self.pyexe), **kws)
        assert_((s == 0))
        assert_((o == 'posix'))

    def check_basic(self, **kws):
        """Exit-status propagation and stderr capture basics."""
        (s, o) = exec_command.exec_command(('"%s" -c "raise \'Ignore me.\'"' % self.pyexe), **kws)
        assert_((s != 0))
        assert_((o != ''))
        (s, o) = exec_command.exec_command(('"%s" -c "import sys;sys.stderr.write(\'0\');sys.stderr.write(\'1\');sys.stderr.write(\'2\')"' % self.pyexe), **kws)
        assert_((s == 0))
        assert_((o == '012'))
        (s, o) = exec_command.exec_command(('"%s" -c "import sys;sys.exit(15)"' % self.pyexe), **kws)
        assert_((s == 15))
        assert_((o == ''))
        # NOTE: the quote placement below looks wrong but is valid shell —
        # "print('Heipa'" and the unquoted ) concatenate to print('Heipa').
        (s, o) = exec_command.exec_command(('"%s" -c "print(\'Heipa\'")' % self.pyexe), **kws)
        assert_((s == 0))
        assert_((o == 'Heipa'))

    def check_execute_in(self, **kws):
        """``execute_in=`` runs the command with the given working directory."""
        with tempdir() as tmpdir:
            fn = 'file'
            tmpfile = os.path.join(tmpdir, fn)
            f = open(tmpfile, 'w')
            f.write('Hello')
            f.close()
            # Without execute_in the relative path does not resolve.
            (s, o) = exec_command.exec_command(('"%s" -c "f = open(\'%s\', \'r\'); f.close()"' % (self.pyexe, fn)), **kws)
            assert_((s != 0))
            assert_((o != ''))
            # With execute_in=tmpdir the file is found and read.
            (s, o) = exec_command.exec_command(('"%s" -c "f = open(\'%s\', \'r\'); print(f.read()); f.close()"' % (self.pyexe, fn)), execute_in=tmpdir, **kws)
            assert_((s == 0))
            assert_((o == 'Hello'))

    def test_basic(self):
        """Run the platform-appropriate checks with and without tee."""
        # Silence the helper output and assert the API's DeprecationWarning.
        with redirect_stdout(StringIO()):
            with redirect_stderr(StringIO()):
                with assert_warns(DeprecationWarning):
                    if (os.name == 'posix'):
                        self.check_posix(use_tee=0)
                        self.check_posix(use_tee=1)
                    elif (os.name == 'nt'):
                        self.check_nt(use_tee=0)
                        self.check_nt(use_tee=1)
                    self.check_execute_in(use_tee=0)
                    self.check_execute_in(use_tee=1)
def _format_health_check_suggestion(label: str) -> str:
    """Return a hint telling the user how to suppress the given health check."""
    flag = bold(f'`--hypothesis-suppress-health-check={label}`')
    return f'Bypass this health check using {flag}.'
class BertForTokenClassification():
    """Import-time stand-in used when PyTorch is not installed.

    Instantiating it (or calling ``from_pretrained``) raises a message
    explaining that torch is required.
    """

    def __init__(self, *args, **kwargs):
        requires_pytorch(self)

    def from_pretrained(self, *args, **kwargs):
        # NOTE(review): defined as an instance method here; the real model's
        # from_pretrained is a classmethod — confirm callers never invoke
        # this on the class itself.
        requires_pytorch(self)
class Playlist():
    """Interactive Spotify playlist builder.

    Prompts on stdin for a playlist name and three seed songs, then pulls
    recommendations and audio features via the Spotify client, sequences
    tracks with a learned model, displays the result, and plots a 3-D PCA of
    the feature trajectory.

    NOTE(review): relies on module-level globals (``sp``, ``features``,
    ``std_scaler``, ``y_j``, ``model``, ``pca``, ``npi``, ``pd``, ``np``,
    ``cdist``, ``tqdm``, ``display``, ``go``) — confirm they are defined
    before instantiating.
    """

    # Registry of every playlist created in this session, keyed by name.
    playlists = {}

    def __init__(self):
        # Interactive: blocks on stdin for the name and three seed songs.
        self.name = input('Playlist Name:')
        Playlist.playlists[self.name] = self
        self.init_song_strings = []      # raw user-entered search strings
        self.search_results = []         # resolved seed tracks (id/artists/name)
        self.recommended_track_ids = []  # de-duplicated recommendation dicts
        self.trax = []                   # per-track audio-feature dicts
        self.df = None                   # DataFrame of all candidate tracks
        self.playlist = None             # DataFrame of the sequenced playlist
        self.init_song_strings.append(input('Song 1: '))
        self.init_song_strings.append(input('Song 2: '))
        self.init_song_strings.append(input('Song 3: '))
        # The whole pipeline runs eagerly at construction time.
        self.get_recommendations()
        self.get_features()
        self.transform()
        self.build_playlist()
        self.show_playlist()
        self.do_pca()

    def get_recommendations(self):
        """Resolve the seed strings and collect de-duplicated recommendations."""
        print('Getting Recommendations...')
        for ss in self.init_song_strings:
            # Take the top search hit for each user-entered string.
            r = sp.search(ss, limit=1)['tracks']['items'][0]
            self.search_results.append({'id': r['id'], 'artists': [i['name'] for i in r['artists']], 'name': r['name']})
        # 100 recommendations per individual seed, skipping duplicates.
        for id_ in tqdm(self.search_results):
            results = sp.recommendations(seed_tracks=[id_['id']], limit=100)
            for r in results['tracks']:
                if (r['id'] not in [i['id'] for i in self.recommended_track_ids]):
                    self.recommended_track_ids.append({'id': r['id'], 'artists': [i['name'] for i in r['artists']], 'name': r['name']})
        print('Getting a Few More...')
        # One more query seeded by all three tracks at once.
        results_2 = sp.recommendations(seed_tracks=[id_['id'] for id_ in self.search_results], limit=100)
        count = 0
        for r in results_2['tracks']:
            if (r['id'] not in [i['id'] for i in self.recommended_track_ids]):
                count += 1
                self.recommended_track_ids.append({'id': r['id'], 'artists': [i['name'] for i in r['artists']], 'name': r['name']})
        print('There were', count, 'more!')

    def get_features(self):
        """Fetch audio features for seeds and recommendations (batched by 100)."""
        print('Getting Initial Song Features')
        for id_ in tqdm(self.search_results):
            dict_ = sp.audio_features(id_['id'])[0]
            dict_.update(id_)
            self.trax.append(dict_)
        print('Getting Recommended Song Features')
        n = 100
        results = []
        # Split the ids into chunks of at most 100 (the API's batch limit).
        broken_list = [self.recommended_track_ids[(i * n):((i + 1) * n)] for i in range((((len(self.recommended_track_ids) + n) - 1) // n))]
        for list_ in broken_list:
            results += sp.audio_features([id_['id'] for id_ in list_])
        # Merge id/artists/name back into each feature dict, in order.
        for (i, id_) in enumerate(self.recommended_track_ids):
            results[i].update(id_)
            self.trax.append(results[i])

    def transform(self):
        """Build the candidate DataFrame and scale its feature columns."""
        print('Applying Transformations...')
        columns = (['id', 'artists', 'name', 'tempo', 'time_signature', 'key'] + features)
        self.df = pd.DataFrame(self.trax)[columns].dropna()
        # Power-transform then standardize the numeric features (``y_j`` is
        # presumably a fitted Yeo-Johnson transformer — confirm).
        self.df[features[1:]] = std_scaler.transform(y_j.transform(self.df[features[1:]]))
        # Seed the playlist with the three search results (first three rows).
        self.playlist = self.df.iloc[0:3].copy()

    def rnn_predict(self):
        """Predict the next track's feature vector from the playlist so far."""
        # Model consumes a batch of one sequence; keep only the last timestep.
        return model.predict(np.array([np.array(self.playlist[features])]))[(0, (- 1))]

    def tempo_similarity(t1, t2):
        """Cosine similarity of tempos on a log2 scale (octave-invariant).

        NOTE(review): defined without ``self`` and invoked as
        ``Playlist.tempo_similarity(...)`` — effectively a static method.
        """
        if (t1 <= 0):
            return (- 1)
        # Zero out non-positive candidate tempos before the ratio.
        t2 *= (t2 > 0)
        return np.cos(((2 * np.pi) * np.log2((t1 / t2))))

    def key_similarity(s1, s2):
        """Similarity of musical keys via distance on the circle of fifths.

        NOTE(review): static-style method (no ``self``). Minor keys
        (mode == 0) are shifted +3 semitones, i.e. mapped to the relative
        major, before comparing.
        """
        k1 = s1['key']
        k2 = s2['key']
        m1 = s1['mode']
        m2 = s2['mode']
        k1 += (3 * (m1 == 0))
        k2 += (3 * (m2 == 0))
        (k1, k2) = np.remainder((k1, k2), 12)
        # Pitch class -> position on the circle of fifths.
        circle_of_fifths = {0: 0, 7: 1, 2: 2, 9: 3, 4: 4, 11: 5, 6: 6, 1: 7, 8: 8, 3: 9, 10: 10, 5: 11}
        diff = np.abs((circle_of_fifths[k1] - npi.remap(k2, list(circle_of_fifths.keys()), list(circle_of_fifths.values()))))
        # Wrap distances greater than 6 around the circle.
        diff = np.abs((((diff > 6) * 12) - diff))
        return (1 - ((((diff == 0) + diff) - 1) / 2.5))

    def argmin_song(self, songs):
        """Pick the candidate minimizing distance to the model prediction,
        offset by key and tempo similarity to the current last track."""
        song = self.playlist.iloc[(- 1)]
        # Mixing weights for the criteria; delta scales the predicted target.
        alpha = 1
        beta = 1
        gamma = 1
        delta = 1.2
        distance = cdist([(self.rnn_predict() * delta)], songs[features[1:]])[0]
        key_similarity = Playlist.key_similarity(song, songs)
        tempo_similarity = Playlist.tempo_similarity(song['tempo'], songs['tempo']).values
        # Lower is better: distance minus the similarity bonuses.
        return songs.reset_index().iloc[np.argmin((((alpha * distance) - (beta * key_similarity)) - (gamma * tempo_similarity)))]

    def build_playlist(self):
        """Greedily append the 10 best-matching tracks to the seed playlist."""
        print('Determining Best Song Sequence...')
        for i in tqdm(range(10)):
            # Candidates: every track not already in the playlist.
            songs = self.df[(~ self.df['id'].isin(self.playlist['id'].to_list()))]
            # NOTE(review): DataFrame.append was removed in pandas 2.x —
            # this requires an older pandas; confirm the pinned version.
            self.playlist = self.playlist.append(self.argmin_song(songs), ignore_index=True)

    def show_playlist(self):
        """Display artists/name/id of the sequenced playlist (notebook helper)."""
        display(self.playlist[['artists', 'name', 'id']])

    def do_pca(self):
        """Plot a 3-D PCA trajectory of the playlist's scaled features."""
        print('Visualizing...')
        (x, y, z) = pca.transform(self.playlist[features[1:]]).T
        fig = go.Figure(data=[go.Scatter3d(x=x, y=y, z=z, mode='lines+markers', text=((self.playlist['artists'].apply((lambda x: ', '.join(x))) + ' - ') + self.playlist['name'].astype(str)), marker=dict(size=5, color=z, colorscale='Viridis', opacity=0.8), line=dict(color='#000000', width=1))])
        (fig.update_layout(margin=dict(l=0, r=0, b=0, t=0), scene={'xaxis_title': 'PC0', 'yaxis_title': 'PC1', 'zaxis_title': 'PC2'}),)
        fig.show()
def pass_data_iteratively(model, graphs, minibatch_size=64):
    """Run ``model`` over ``graphs`` in minibatches and concatenate the outputs.

    :param model: callable taking a list of graphs and returning a tensor.
    :param graphs: sequence of graph objects.
    :param minibatch_size: number of graphs per forward pass.
    :return: outputs of all minibatches concatenated along axis 0.
    """
    indices = np.arange(len(graphs))
    chunks = []
    for start in range(0, len(graphs), minibatch_size):
        batch_indices = indices[start:start + minibatch_size]
        if len(batch_indices) == 0:
            continue
        batch = [graphs[j] for j in batch_indices]
        chunks.append(model(batch))
    return tf.concat(chunks, 0)
def compare_functions_2v(func, nloop=500, test=True, xs=xs, nmxs=nmxs, ys=ys, nmys=nmys, xl=xl, nmxl=nmxl, yl=yl, nmyl=nmyl):
    """Benchmark a two-argument masked-array function on small and large inputs.

    NOTE(review): the default arguments bind module-level benchmark fixtures
    (``xs``, ``nmxs``, ...) at import time, and ``timer`` receives a source
    string built from ``locals()`` (``module``/``funcname``/``data``) that is
    evaluated elsewhere — do not rename these locals. ``test`` appears unused
    here; confirm against the original benchmark script.
    """
    funcname = func.__name__
    print(('-' * 50))
    print(('%s on small arrays' % funcname))
    # The data string names the small masked-array fixtures by name.
    (module, data) = ('numpy.ma', 'nmxs,nmys')
    timer(('%(module)s.%(funcname)s(%(data)s)' % locals()), v=('%11s' % module), nloop=nloop)
    # Repeat with the large-array fixtures.
    print(('%s on large arrays' % funcname))
    (module, data) = ('numpy.ma', 'nmxl,nmyl')
    timer(('%(module)s.%(funcname)s(%(data)s)' % locals()), v=('%11s' % module), nloop=nloop)
    return
def plot_throughput_reductions(data):
    """Plot effective throughput reduction against the number of jobs.

    Expects ``data`` to contain ``num_jobs``, ``effective_throughput_reductions``
    and ``style`` columns; draws one styled line per ``style`` value on a
    log2 x-axis.
    """
    plt.figure(figsize=(4.5, 3))
    ax = plt.subplot2grid((1, 1), (0, 0), colspan=1)
    sns.lineplot(x='num_jobs', y='effective_throughput_reductions', style='style', hue='style', data=data, ci=None, markers=True, legend=False)
    ax.set_xlabel('Number of jobs')
    ax.set_ylabel('Effective throughput\nreduction')
    # NOTE(review): 'basex' was renamed to 'base' in matplotlib 3.3 —
    # confirm the pinned matplotlib version.
    ax.set_xscale('log', basex=2)
    job_counts = [4, 8, 16, 32, 64]
    ax.set_xticks(job_counts)
    ax.set_xticklabels(job_counts)
    ax.set_xlim([4, 64])
    ax.set_yticks([0.2, 0.4, 0.6, 0.8, 1.0])
    ax.set_ylim([0, 1])
    sns.despine()
class TestContinuousMLPBaseline(TfGraphTestCase):
    """Tests for garage's ContinuousMLPBaseline with the regressor mocked out.

    Fix: the ``parametrize`` markers had lost their ``@pytest.mark.`` prefix
    (they appeared as bare ``.parametrize(...)`` expressions, which is not
    valid Python); the decorators are restored. Assumes the file imports
    ``pytest`` at module level, as garage test modules do.
    """

    @pytest.mark.parametrize('obs_dim', [[1], [2], [1, 1], [2, 2]])
    def test_fit(self, obs_dim):
        """After fitting on two single-step paths, predict() returns the targets."""
        box_env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim))
        with mock.patch('garage.tf.baselines.continuous_mlp_baseline.ContinuousMLPRegressor', new=SimpleMLPRegressor):
            cmb = ContinuousMLPBaseline(env_spec=box_env.spec)
        paths = [{'observations': [np.full(obs_dim, 1)], 'returns': [1]}, {'observations': [np.full(obs_dim, 2)], 'returns': [2]}]
        cmb.fit(paths)
        obs = {'observations': [np.full(obs_dim, 1), np.full(obs_dim, 2)]}
        prediction = cmb.predict(obs)
        assert np.array_equal(prediction, [1, 2])

    @pytest.mark.parametrize('obs_dim', [[1], [2], [1, 1], [2, 2]])
    def test_param_values(self, obs_dim):
        """Parameter values round-trip between two baseline instances."""
        box_env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim))
        with mock.patch('garage.tf.baselines.continuous_mlp_baseline.ContinuousMLPRegressor', new=SimpleMLPRegressor):
            cmb = ContinuousMLPBaseline(env_spec=box_env.spec)
            new_cmb = ContinuousMLPBaseline(env_spec=box_env.spec, name='ContinuousMLPBaseline2')
        # Manually perturb the second baseline so the parameter sets differ.
        with tf.compat.v1.variable_scope('ContinuousMLPBaseline2', reuse=True):
            return_var = tf.compat.v1.get_variable('SimpleMLPModel/return_var')
        return_var.load(1.0)
        old_param_values = cmb.get_param_values()
        new_param_values = new_cmb.get_param_values()
        assert (not np.array_equal(old_param_values, new_param_values))
        # Copying the first baseline's parameters must make them equal again.
        new_cmb.set_param_values(old_param_values)
        new_param_values = new_cmb.get_param_values()
        assert np.array_equal(old_param_values, new_param_values)

    @pytest.mark.parametrize('obs_dim', [[1], [2], [1, 1], [2, 2]])
    def test_get_params_internal(self, obs_dim):
        """get_params_internal() matches the trainable variables in scope."""
        box_env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim))
        with mock.patch('garage.tf.baselines.continuous_mlp_baseline.ContinuousMLPRegressor', new=SimpleMLPRegressor):
            cmb = ContinuousMLPBaseline(env_spec=box_env.spec)
        params_interal = cmb.get_params_internal()
        trainable_params = tf.compat.v1.trainable_variables(scope='ContinuousMLPBaseline')
        assert np.array_equal(params_interal, trainable_params)

    def test_is_pickleable(self):
        """A pickled/unpickled baseline predicts the same values."""
        box_env = GarageEnv(DummyBoxEnv(obs_dim=(1,)))
        with mock.patch('garage.tf.baselines.continuous_mlp_baseline.ContinuousMLPRegressor', new=SimpleMLPRegressor):
            cmb = ContinuousMLPBaseline(env_spec=box_env.spec)
        obs = {'observations': [np.full(1, 1), np.full(1, 1)]}
        with tf.compat.v1.variable_scope('ContinuousMLPBaseline', reuse=True):
            return_var = tf.compat.v1.get_variable('SimpleMLPModel/return_var')
        return_var.load(1.0)
        prediction = cmb.predict(obs)
        h = pickle.dumps(cmb)
        # Restore into a fresh graph/session and compare predictions.
        with tf.compat.v1.Session(graph=tf.Graph()):
            cmb_pickled = pickle.loads(h)
            prediction2 = cmb_pickled.predict(obs)
            assert np.array_equal(prediction, prediction2)
def mask_rcnn_fcn_head_v1up4convs(dim_in, roi_xform_func, spatial_scale):
    """Mask R-CNN ``v1up`` head variant with 4 convolution layers.

    Thin wrapper delegating to the generic X-conv builder.
    """
    num_convs = 4
    return mask_rcnn_fcn_head_v1upXconvs(dim_in, roi_xform_func, spatial_scale, num_convs)
class UnbufferedStream(object):
    """Wrap a stream so that every write is immediately flushed."""

    def __init__(self, stream):
        # Underlying stream that all operations are forwarded to.
        self.stream = stream

    def write(self, data):
        """Write ``data`` to the wrapped stream and flush right away."""
        underlying = self.stream
        underlying.write(data)
        underlying.flush()

    def __getattr__(self, attr):
        # Delegate every other attribute (read, close, encoding, ...) to the
        # wrapped stream.
        return getattr(self.stream, attr)
def shuffle(*arrays):
    """Shuffle several arrays with one shared random permutation.

    ``None`` entries pass through unchanged. All non-None arrays must have
    the same first-dimension length; the same permutation is applied to each,
    keeping rows aligned across arrays.
    """
    permutation = None
    n_samples = None
    shuffled = []
    for array in arrays:
        if array is None:
            shuffled.append(array)
            continue
        # Draw the permutation once, from the first non-None array.
        if permutation is None:
            n_samples = array.shape[0]
            permutation = np.random.permutation(n_samples)
        assert array.shape[0] == n_samples
        shuffled.append(array[permutation])
    return shuffled
class SymEngineMatrixHashTest(TestCase):
    """Checks that symengine Matrix hashing is nonzero and distinguishes
    matrices that differ in entries or in shape."""

    # NOTE(review): `_only` looks like the garbled tail of a decorator on
    # test_matrix_hash (plausibly ``@symengine_only`` given the class name) —
    # confirm against the original source; as written it is a bare name
    # reference.
    _only

    def test_matrix_hash(self) -> None:
        """Hashes are nonzero and pairwise distinct for three different matrices."""
        hash1 = hash(sf.sympy.Matrix([[0, 1], [2, 3]]))
        # Same shape, one differing entry.
        hash2 = hash(sf.sympy.Matrix([[0, 1], [2, 4]]))
        # Same entries as hash1 but a different shape (1x4).
        hash3 = hash(sf.sympy.Matrix([[0, 1, 2, 3]]))
        self.assertNotEqual(hash1, 0)
        self.assertNotEqual(hash2, 0)
        self.assertNotEqual(hash3, 0)
        self.assertNotEqual(hash1, hash2)
        self.assertNotEqual(hash1, hash3)
        self.assertNotEqual(hash2, hash3)
class _Parser():
    """LALR parser driver: feeds tokens from a lexer into a parser state."""

    def __init__(self, parse_table, callbacks, debug=False):
        self.parse_table = parse_table
        self.callbacks = callbacks
        # When True, dump the state stack on unexpected exceptions.
        self.debug = debug

    def parse(self, lexer, start, value_stack=None, state_stack=None):
        """Build a fresh parser state for ``start`` and run it to completion."""
        parse_conf = ParseConf(self.parse_table, self.callbacks, start)
        parser_state = ParserState(parse_conf, lexer, state_stack, value_stack)
        return self.parse_from_state(parser_state)

    def parse_from_state(self, state):
        """Drive ``state`` over the lexer's tokens and return the final result.

        On UnexpectedInput, attaches an interactive ``puppet`` to the
        exception (when ParserPuppet is importable) so callers can resume.
        """
        try:
            token = None
            for token in state.lexer.lex(state):
                state.feed_token(token)
            # Synthesize the end-of-input token, borrowing position info from
            # the last real token when the input was non-empty.
            token = (Token.new_borrow_pos('$END', '', token) if token else Token('$END', '', 0, 1, 1))
            return state.feed_token(token, True)
        except UnexpectedInput as e:
            try:
                e.puppet = ParserPuppet(self, state, state.lexer)
            except NameError:
                # ParserPuppet is an optional name; skip attaching the puppet.
                pass
            raise e
        except Exception as e:
            if self.debug:
                # Diagnostic dump of the LALR state stack before re-raising.
                print('')
                print('STATE STACK DUMP')
                print('')
                for (i, s) in enumerate(state.state_stack):
                    print(('%d)' % i), s)
                print('')
            raise
@_REGISTRY.register()
class VideoTestDataset(data.Dataset):
    """Video test dataset: loads per-folder LQ/GT frame sequences for testing.

    Fix: the registration decorator had lost its leading ``@`` — a bare
    ``_REGISTRY.register()`` statement before the class calls the factory and
    discards it, so the dataset was never registered. Restored the decorator.

    Supported datasets (by ``opt['name']``): Vid4, REDS4, REDSofficial, SPMCS.
    When ``cache_data`` is set, all frames are decoded into memory up front;
    otherwise only paths are stored and frames are read lazily in
    ``__getitem__``.
    """

    def __init__(self, opt):
        super(VideoTestDataset, self).__init__()
        self.opt = opt
        self.cache_data = opt['cache_data']
        (self.gt_root, self.lq_root) = (opt['dataroot_gt'], opt['dataroot_lq'])
        # Flat, per-frame bookkeeping used by __getitem__/__len__.
        self.data_info = {'lq_path': [], 'gt_path': [], 'folder': [], 'idx': [], 'border': []}
        self.file_client = None
        self.io_backend_opt = opt['io_backend']
        assert (self.io_backend_opt['type'] != 'lmdb'), 'No need to use lmdb during validation/test.'
        logger = get_root_logger()
        logger.info(f"Generate data info for VideoTestDataset - {opt['name']}")
        (self.imgs_lq, self.imgs_gt) = ({}, {})
        # Either take subfolders from a meta-info file or scan the roots.
        if ('meta_info_file' in opt):
            with open(opt['meta_info_file'], 'r') as fin:
                subfolders = [line.split(' ')[0] for line in fin]
            subfolders_lq = [osp.join(self.lq_root, key) for key in subfolders]
            subfolders_gt = [osp.join(self.gt_root, key) for key in subfolders]
        else:
            subfolders_lq = sorted(glob.glob(osp.join(self.lq_root, '*')))
            subfolders_gt = sorted(glob.glob(osp.join(self.gt_root, '*')))
        if (opt['name'].lower() in ['vid4', 'reds4', 'redsofficial', 'spmcs']):
            for (subfolder_lq, subfolder_gt) in zip(subfolders_lq, subfolders_gt):
                subfolder_name = osp.basename(subfolder_lq)
                img_paths_lq = sorted(list(scandir(subfolder_lq, full_path=True)))
                img_paths_gt = sorted(list(scandir(subfolder_gt, full_path=True)))
                max_idx = len(img_paths_lq)
                assert (max_idx == len(img_paths_gt)), f'Different number of images in lq ({max_idx}) and gt folders ({len(img_paths_gt)})'
                self.data_info['lq_path'].extend(img_paths_lq)
                self.data_info['gt_path'].extend(img_paths_gt)
                self.data_info['folder'].extend(([subfolder_name] * max_idx))
                for i in range(max_idx):
                    self.data_info['idx'].append(f'{i}/{max_idx}')
                # Mark frames near the clip boundary (within num_frame // 2).
                border_l = ([0] * max_idx)
                for i in range((self.opt['num_frame'] // 2)):
                    border_l[i] = 1
                    border_l[((max_idx - i) - 1)] = 1
                self.data_info['border'].extend(border_l)
                if self.cache_data:
                    logger.info(f'Cache {subfolder_name} for VideoTestDataset...')
                    self.imgs_lq[subfolder_name] = read_img_seq(img_paths_lq)
                    self.imgs_gt[subfolder_name] = read_img_seq(img_paths_gt)
                else:
                    self.imgs_lq[subfolder_name] = img_paths_lq
                    self.imgs_gt[subfolder_name] = img_paths_gt
        else:
            raise ValueError(f"Non-supported video test dataset: {type(opt['name'])}")

    def __getitem__(self, index):
        """Return the LQ frame window and GT frame for flat frame ``index``."""
        folder = self.data_info['folder'][index]
        (idx, max_idx) = self.data_info['idx'][index].split('/')
        (idx, max_idx) = (int(idx), int(max_idx))
        border = self.data_info['border'][index]
        lq_path = self.data_info['lq_path'][index]
        # Neighboring-frame indices around idx, padded per opt['padding'].
        select_idx = generate_frame_indices(idx, max_idx, self.opt['num_frame'], padding=self.opt['padding'])
        if self.cache_data:
            imgs_lq = self.imgs_lq[folder].index_select(0, torch.LongTensor(select_idx))
            img_gt = self.imgs_gt[folder][idx]
        else:
            img_paths_lq = [self.imgs_lq[folder][i] for i in select_idx]
            imgs_lq = read_img_seq(img_paths_lq)
            img_gt = read_img_seq([self.imgs_gt[folder][idx]])
            img_gt.squeeze_(0)
        return {'lq': imgs_lq, 'gt': img_gt, 'folder': folder, 'idx': self.data_info['idx'][index], 'border': border, 'lq_path': lq_path}

    def __len__(self):
        return len(self.data_info['gt_path'])
def astrange_to_symrange(astrange, arrays, arrname=None):
    """Convert an AST subscript range into symbolic (begin, end, step) triples.

    Ends are inclusive. Returns None when the array's shape is unknown.
    Mutates ``astrange`` in place by padding omitted trailing dimensions.

    :param astrange: List of slice entries — (begin, end, step) tuples of AST
        nodes (any of which may be None) or single-index AST nodes — or None
        for "the whole array".
    :param arrays: Mapping of array name -> data descriptor (provides shapes).
    :param arrname: Name of the indexed array, or None when no array context
        is available.

    NOTE(review): several paths dereference ``arrdesc`` (the ``astrange is
    None`` case, missing-slice padding, negative-index wrapping), but it is
    only bound when ``arrname`` is not None — calls with ``arrname=None``
    must pass a fully specified, non-negative range; confirm callers
    guarantee this.
    """
    if (arrname is not None):
        arrdesc = arrays[arrname]
        if (arrdesc.shape is None):
            return None
    if (astrange is None):
        # Whole-array range: one (0, dim - 1, 1) triple per dimension.
        return [(symbolic.pystr_to_symbolic(0), (symbolic.pystr_to_symbolic(symbolic.symbol_name_or_value(s)) - 1), symbolic.pystr_to_symbolic(1)) for s in arrdesc.shape]
    # Pad omitted trailing dimensions with full slices.
    missing_slices = (len(arrdesc.shape) - len(astrange))
    if (missing_slices < 0):
        raise ValueError('Mismatching shape {} - range {} dimensions'.format(arrdesc.shape, astrange))
    for i in range(missing_slices):
        astrange.append((None, None, None))
    result = ([None] * len(astrange))
    for (i, r) in enumerate(astrange):
        if isinstance(r, tuple):
            (begin, end, skip) = r
            # Begin: defaults to 0; negative values wrap around the dimension.
            if (begin is None):
                begin = symbolic.pystr_to_symbolic(0)
            else:
                begin = symbolic.pystr_to_symbolic(unparse(begin))
                if ((begin < 0) == True):
                    begin += arrdesc.shape[i]
            # End: AST slices are exclusive, so subtract 1 to make the bound
            # inclusive; when omitted, default to the last element (which
            # requires a known array).
            if ((end is None) and (arrname is None)):
                raise SyntaxError('Cannot define range without end')
            elif (end is not None):
                end = (symbolic.pystr_to_symbolic(unparse(end)) - 1)
                if ((end < 0) == True):
                    end += arrdesc.shape[i]
            else:
                end = (symbolic.pystr_to_symbolic(symbolic.symbol_name_or_value(arrdesc.shape[i])) - 1)
            # Step: defaults to 1.
            if (skip is None):
                skip = symbolic.pystr_to_symbolic(1)
            else:
                skip = symbolic.pystr_to_symbolic(unparse(skip))
        else:
            # Single index: degenerate (i, i, 1) range, with negative wrap.
            begin = symbolic.pystr_to_symbolic(unparse(r))
            if ((begin < 0) == True):
                begin += arrdesc.shape[i]
            end = begin
            skip = symbolic.pystr_to_symbolic(1)
        result[i] = (begin, end, skip)
    return result
class SchubertPolynomialRing_xbasis(CombinatorialFreeModule):
    """The Schubert polynomial ring over ``R`` in the X (Schubert) basis,
    with basis elements indexed by permutations.

    Fix: the decorator on ``one_basis`` was garbled to the bare name
    ``_method``; restored Sage's ``@cached_method`` (the identity basis index
    is constant, so caching it is the standard Sage pattern).
    """

    Element = SchubertPolynomial_class

    def __init__(self, R):
        """Set up the free module over ``R`` with Permutations as keys."""
        self._name = 'Schubert polynomial ring with X basis'
        self._repr_option_bracket = False
        CombinatorialFreeModule.__init__(self, R, Permutations(), category=GradedAlgebrasWithBasis(R), prefix='X')

    @cached_method
    def one_basis(self):
        """Index of the basis element of 1: the identity permutation [1]."""
        return self._indices([1])

    def _element_constructor_(self, x):
        """Coerce ``x`` (permutation, list, polynomial, key polynomial) into
        the Schubert basis.

        :raises ValueError: if a list input is not a valid permutation.
        :raises TypeError: for unsupported input types.
        """
        if isinstance(x, list):
            if (x not in Permutations()):
                raise ValueError(f'the input {x} is not a valid permutation')
            # Normalize by dropping trailing fixed points.
            perm = Permutation(x).remove_extra_fixed_points()
            return self._from_dict({perm: self.base_ring().one()})
        elif isinstance(x, Permutation):
            perm = x.remove_extra_fixed_points()
            return self._from_dict({perm: self.base_ring().one()})
        elif isinstance(x, MPolynomial):
            # Delegate the basis conversion to symmetrica.
            return symmetrica.t_POLYNOM_SCHUBERT(x)
        elif isinstance(x, InfinitePolynomial):
            R = x.polynomial().parent()
            # Rebuild over reversed generators so variable order matches
            # what symmetrica expects.
            S = PolynomialRing(R.base_ring(), names=list(map(repr, reversed(R.gens()))))
            return symmetrica.t_POLYNOM_SCHUBERT(S(x.polynomial()))
        elif isinstance(x, KeyPolynomial):
            return self(x.expand())
        else:
            raise TypeError

    def some_elements(self):
        """A few representative elements, useful for generic tests."""
        return [self.one(), (self([1]) + (2 * self([2, 1]))), (self([4, 2, 1, 3]) - self([3, 2, 1]))]

    def product_on_basis(self, left, right):
        """Product of two basis elements, computed by symmetrica."""
        return symmetrica.mult_schubert_schubert(left, right)
def create_save_path(args):
    """Create a timestamped save directory and copy config/parser into it.

    Mutates ``args.save_dir`` in place (appends ``/<model_name><timestamp>``)
    and returns the saved name built from the original directory's stem.
    """
    from pathlib import Path
    import shutil

    model_name = args.model.model_name
    timestamp = time.strftime('%Y-%m-%d-%H_%M_%S', time.localtime(time.time()))
    suffix = '/{}'.format(model_name) + timestamp
    saved_name = Path(args.save_dir).stem + suffix
    args.save_dir = args.save_dir + suffix
    if os.path.exists(args.save_dir):
        print(f'Warning: the folder {args.save_dir} exists.')
    else:
        print('Creating {}'.format(args.save_dir))
        os.makedirs(args.save_dir)
    # Snapshot the run configuration and the parser sources alongside outputs.
    shutil.copyfile(args.conf, args.save_dir + '/config.yaml')
    os.makedirs(args.save_dir + '/parser')
    copy_tree('parser/', args.save_dir + '/parser')
    return saved_name
class InPlaceABNSync(ABN):
    """InPlace activated batch norm synchronized across multiple GPUs.

    The device holding ``devices[0]`` acts as master; the remaining devices
    are workers that exchange statistics with the master through queues.
    """

    def __init__(self, num_features, devices=None, eps=1e-05, momentum=0.1, affine=True, activation='leaky_relu', slope=0.01):
        super(InPlaceABNSync, self).__init__(num_features, eps, momentum, affine, activation, slope)
        # Default to all visible CUDA devices when none are given.
        if devices:
            self.devices = devices
        else:
            self.devices = list(range(torch.cuda.device_count()))
        self.worker_ids = self.devices[1:]
        # One shared queue into the master, one single-slot queue per worker.
        self.master_queue = Queue(len(self.worker_ids))
        self.worker_queues = [Queue(1) for _ in self.worker_ids]

    def forward(self, x):
        """Run synchronized inplace ABN; queue wiring depends on x's device."""
        device = x.get_device()
        if device == self.devices[0]:
            extra = {
                'is_master': True,
                'master_queue': self.master_queue,
                'worker_queues': self.worker_queues,
                'worker_ids': self.worker_ids,
            }
        else:
            extra = {
                'is_master': False,
                'master_queue': self.master_queue,
                'worker_queue': self.worker_queues[self.worker_ids.index(device)],
            }
        return inplace_abn_sync(x, self.weight, self.bias, self.running_mean, self.running_var, extra, self.training, self.momentum, self.eps, self.activation, self.slope)

    def __repr__(self):
        rep = '{name}({num_features}, eps={eps}, momentum={momentum}, affine={affine}, devices={devices}, activation={activation}'
        # The slope is only meaningful for leaky_relu.
        rep += ', slope={slope})' if self.activation == 'leaky_relu' else ')'
        return rep.format(name=self.__class__.__name__, **self.__dict__)
class Maze(environment.Environment):
def __init__(self, options={}):
environment.Environment.__init__(self, options=options)
self.configure(options)
self.valid_actions = list(maze_action_enum.keys())
self.valid_observations = xrange(0, (self.max_observation() + 1))
self.valid_rewards = xrange(0, (self.max_reward + 1))
self.teleport_agent()
self.reward = 0
self.calculate_observation()
def calculate_observation(self):
if (self.observation_encoding == cUninformative):
self.observation = oNull
elif (self.observation_encoding == cWalls):
self.observation = 0
if ((self.col == 0) or (self.maze_layout[self.row][(self.col - 1)] == cWall)):
self.observation += oLeftWall
if ((self.row == 0) or (self.maze_layout[(self.row - 1)][self.col] == cWall)):
self.observation += oUpWall
if (((self.col + 1) == self.num_cols) or (self.maze_layout[self.row][(self.col + 1)] == cWall)):
self.observation += oRightWall
if (((self.row + 1) == self.num_rows) or (self.maze_layout[(self.row + 1)][self.col] == cWall)):
self.observation += oDownWall
elif (self.observation_encoding == cCoordinates):
self.observation = ((self.row * self.num_cols) + self.col)
def configure(self, options):
self.num_rows = options.get('maze-num-rows', None)
if (self.num_rows is None):
sys.stderr.write(("ERROR: configuration does not contain a 'maze-num-rows' value. Exiting." + os.linesep))
sys.exit(1)
self.num_rows = int(self.num_rows)
self.num_cols = options.get('maze-num-cols', None)
if (self.num_cols is None):
sys.stderr.write(("ERROR: configuration does not contain a 'maze-num-cols' value. Exiting." + os.linesep))
sys.exit(1)
self.num_cols = int(self.num_cols)
assert (self.num_rows > 0)
assert (self.num_cols > 0)
encoding = options.get('maze-observation-encoding', 'uninformative')
if (encoding == 'uninformative'):
self.observation_encoding = cUninformative
elif (encoding == 'walls'):
self.observation_encoding = cWalls
elif (encoding == 'coordinates'):
self.observation_encoding = cCoordinates
else:
sys.stderr.write((("ERROR: Unknown observation encoding: '%s'" % str(encoding)) + os.linesep))
sys.exit(1)
teleport_impossible = True
min_reward = float('inf')
self.max_reward = float('-inf')
self.maze_rewards = {}
self.maze_layout = {}
self.teleport_to_locations = []
for r in xrange(0, self.num_rows):
reward_option_name = ('maze-rewards%d' % (r + 1))
rewards = options.get(reward_option_name, None)
if (rewards is None):
sys.stderr.write((('ERROR: configuration does not contain a ' + ("'%s' value as a num_rows value of '%d' implies. Exiting." % (reward_option_name, self.num_rows))) + os.linesep))
sys.exit(1)
layout_option_name = ('maze-layout%d' % (r + 1))
layout = options.get(layout_option_name, None)
if (layout is None):
sys.stderr.write((('ERROR: configuration does not contain a ' + ("'%s' value as a num_rows value of '%d' implies. Exiting." % (layout_option_name, self.num_rows))) + os.linesep))
sys.exit(1)
if (r not in self.maze_layout):
self.maze_layout[r] = {}
if (r not in self.maze_rewards):
self.maze_rewards[r] = {}
layout_list = list(layout)
rewards_list = rewards.split(',')
if (len(layout_list) != self.num_cols):
sys.stderr.write(((("ERROR: configuration value '%s' (%s)" % (layout_option_name, layout)) + ("contains too %s entries. (Needs '%d'.) Exiting." % (('few' if (len(layout_list) < self.num_cols) else 'many'), self.num_cols))) + os.linesep))
sys.exit(1)
if (len(rewards_list) != self.num_cols):
sys.stderr.write(((("ERROR: configuration value '%s' (%s)" % (reward_option_name, rewards)) + ("contains too %s entries. (Needs '%d'.) Exiting." % (('few' if (len(rewards_list) < self.num_cols) else 'many'), self.num_cols))) + os.linesep))
sys.exit(1)
for c in xrange(0, self.num_cols):
this_layout = layout_list[c]
this_reward = int(rewards_list[c])
self.maze_layout[r][c] = this_layout
self.maze_rewards[r][c] = this_reward
if (this_layout == cTeleportTo):
teleport_impossible = False
self.teleport_to_locations += [(r, c)]
min_reward = (min_reward if (min_reward < this_reward) else this_reward)
self.max_reward = (self.max_reward if (self.max_reward > this_reward) else this_reward)
if teleport_impossible:
sys.stderr.write('ERROR: There must be at least one square the agent can teleport to.')
sys.exit(1)
self.max_reward -= min_reward
for r in xrange(0, self.num_rows):
for c in xrange(0, self.num_cols):
self.maze_rewards[r][c] = (self.maze_rewards[r][c] - min_reward)
def max_observation(self):
if (self.observation_encoding == cUninformative):
return oNull
elif (self.observation_encoding == cWalls):
return (((oLeftWall + oUpWall) + oRightWall) + oDownWall)
elif (self.observation_encoding == cCoordinates):
return ((self.num_rows * self.num_cols) - 1)
def perform_action(self, action):
assert self.is_valid_action(action)
self.action = action
self.teleported = False
self.wall_collision = False
self.row_to = (((- 1) if (action == aUp) else 0) + (1 if (action == aDown) else 0))
self.row_to = min(max((self.row_to + self.row), 0), (self.num_rows - 1))
self.col_to = (((- 1) if (action == aLeft) else 0) + (1 if (action == aRight) else 0))
self.col_to = min(max((self.col_to + self.col), 0), (self.num_cols - 1))
self.wall_collision = (self.maze_layout[self.row_to][self.col_to] == cWall)
if (not self.wall_collision):
self.row = self.row_to
self.col = self.col_to
if (self.maze_layout[self.row][self.col] == cTeleportFrom):
self.teleport_agent()
self.reward = self.maze_rewards[self.row_to][self.col_to]
self.calculate_observation()
return (self.observation, self.reward)
def print(self):
    """Return a human-readable dump of the agent state followed by the maze
    grid, with the agent's square drawn as 'A'."""
    message = 'row = %d, col = %d, observation = %d, reward = %d' % (
        self.row, self.col, self.observation, self.reward)
    if self.teleported:
        message += ', teleported'
    if self.wall_collision:
        message += ', wall collision'
    message += os.linesep
    for r in xrange(self.num_rows):
        for c in xrange(self.num_cols):
            message += 'A' if (r == self.row and c == self.col) else self.maze_layout[r][c]
        message += os.linesep
    return message
def teleport_agent(self):
    """Move the agent to a randomly chosen teleport destination.

    The destination is drawn by util.choice from the (row, col) pairs
    collected while parsing the layout; sets the `teleported` flag.
    """
    self.teleported = True
    (self.row, self.col) = util.choice(self.teleport_to_locations)
def register_Ns3LtePhyTag_methods(root_module, cls):
    """Register the ns3::LtePhyTag API on its PyBindGen class wrapper.

    PyBindGen-generated-style registration: the call sequence mirrors the
    scanned C++ API (ns3::Tag virtual interface plus a cell-id accessor),
    so the bodies are intentionally left verbatim.
    """
    ## Copy constructor.
    cls.add_constructor([param('ns3::LtePhyTag const &', 'arg0')])
    ## Default constructor.
    cls.add_constructor([])
    ## Constructor taking the serving cell id.
    cls.add_constructor([param('uint16_t', 'cellId')])
    ## ns3::Tag virtual interface.
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True)
    cls.add_method('GetCellId', 'uint16_t', [], is_const=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True)
    return
def test_constructor_mutable_arg_count(test_case_mock, constructor_mock):
    """A constructor statement with one variable-reference argument must
    report a mutable-argument count of 1."""
    args = {'test': MagicMock(vr.VariableReference)}
    statement = stmt.ConstructorStatement(test_case_mock, constructor_mock, args)
    assert statement._mutable_argument_count() == 1
def resample_folder(input_folder, output_folder, fs, regex):
    """Resample, peak-normalize and re-save every audio file under
    `input_folder` matching glob pattern `regex`.

    Each output keeps its relative path under `output_folder`; the peak
    value is appended to the stem as `_peak_<peak>.wav` so normalization
    can be undone later.  Only the first channel is kept.

    Fix: the original recomputed `Path(f).relative_to(Path(input_folder))`
    three times per file; it is now computed once.
    """
    files = glob.glob(os.path.join(input_folder, regex), recursive=True)
    for f in tqdm.tqdm(files):
        (audio, fs_read) = torchaudio.load(f)
        audio = audio[0].numpy()
        audio = signal.resample_poly(audio, fs, fs_read)
        # Peak-normalize; NOTE(review): a silent file (peak == 0) yields
        # NaNs here, exactly as in the original — confirm inputs are non-silent.
        peak = np.max(np.abs(audio))
        audio = audio / peak
        audio = torch.from_numpy(audio).float()
        rel = Path(f).relative_to(Path(input_folder))
        relative_path = os.path.join(rel.parent, rel.stem + '_peak_{}.wav'.format(peak))
        os.makedirs(Path(os.path.join(output_folder, rel)).parent, exist_ok=True)
        torchaudio.save(os.path.join(output_folder, relative_path), audio.reshape(1, -1), fs)
def update_cfg(base_cfg, update_cfg):
    """Return a deep copy of `base_cfg` with `update_cfg`'s entries merged
    on top; neither input is modified."""
    merged = copy.deepcopy(base_cfg)
    merged.update(update_cfg)
    return merged
def get_args():
    """Parse command-line arguments for the TriviaQA/SQuAD preprocessing CLI.

    Returns the argparse.Namespace of parsed options.

    Fix: the `--sample_size` help string was a copy-paste of the seed's
    ('Random seed'); it now describes the option.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--triviaqa_file', help='Triviaqa file')
    parser.add_argument('--squad_file', help='Squad file')
    parser.add_argument('--wikipedia_dir', help='Wikipedia doc dir')
    parser.add_argument('--web_dir', help='Web doc dir')
    parser.add_argument('--seed', default=10, type=int, help='Random seed')
    parser.add_argument('--max_num_tokens', default=800, type=int, help='Maximum number of tokens from a document')
    parser.add_argument('--sample_size', default=80000, type=int, help='Number of examples to sample')
    parser.add_argument('--tokenizer', default='tokenizers/punkt/english.pickle', help='Sentence tokenizer')
    args = parser.parse_args()
    return args
def find_match_ref_at_step(collab_attr_list, all_collborators):
    """For every collaborator, collect the attributes whose object id was
    already seen on an earlier collaborator at the same step.

    Returns a dict mapping collaborator name -> list of shared attribute
    names; duplicates are also reported on stdout.
    """
    matched_ref_dict = {name: [] for name in all_collborators.keys()}
    previous_collaborator = ''
    for attr in collab_attr_list:
        seen_ids = []
        for collborator_name in all_collborators.keys():
            attr_id = all_collborators[collborator_name][attr]
            if attr_id in seen_ids:
                # Same object id as a previous collaborator: record and warn.
                matched_ref_dict[collborator_name].append(attr)
                print((f'{bcolors.FAIL} ... Reference test failed - {collborator_name} sharing same ' + f'{attr} reference with {previous_collaborator} {bcolors.ENDC}'))
            else:
                seen_ids.append(attr_id)
            previous_collaborator = collborator_name
    return matched_ref_dict
def load_h5_data_label_normal(h5_filename):
    """Load the 'data', 'label' and 'normal' datasets from an HDF5 file.

    Returns the tuple (data, label, normal) as in-memory numpy arrays.

    Fix: the original leaked the file handle; the context manager closes it.
    The `[:]` slices copy each dataset into memory, so closing is safe.
    """
    with h5py.File(h5_filename, 'r') as f:
        data = f['data'][:]
        label = f['label'][:]
        normal = f['normal'][:]
    return (data, label, normal)
def refine(graph: Graph, node_weight_function: NodeWeightFunction, edge_weight_function: EdgeWeightFunction, round_limit=(- 1)):
    """Iteratively refine a pipeline partition by moving border nodes
    forward/backward across stage boundaries until no move improves the
    objective (or `round_limit` rounds have run; negative = unlimited).

    Returns (best_objective, percent_relative_improvement).

    BUGFIX: the loop condition used `round_limit < rounds`, which is False
    on the very first iteration for any positive limit, so refinement never
    ran; it now correctly checks `rounds < round_limit`.
    """
    re_assign_partition_indices(graph)
    refiner = Refiner(graph, node_weight_function, edge_weight_function)
    rounds = 0
    num_moved = 1
    total_moves = 0
    while (num_moved > 0) and ((round_limit < 0) or (rounds < round_limit)):
        rounds += 1
        num_moved = 0
        # Forward pass: try to push producers of cross-stage edges forward.
        for (stage_id, borders) in reversed(refiner.stage_borders.items()):
            (outgoing_edges, outgoing_nodes, incoming_edges, incoming_nodes) = borders
            invalid_local_nodes = set()
            valid_local_noedes = set()
            for e in sorted(outgoing_edges, key=(lambda x: (x[0].topo_sort_id, (- x[1].topo_sort_id))), reverse=True):
                node = e[0]
                dst_stage = e[1].stage_id
                # NOTE(review): the memoization below can never trigger —
                # nodes are only added to either set inside a branch that
                # requires membership in invalid_local_nodes first, so both
                # sets stay empty.  Preserved verbatim; confirm intent.
                if node not in valid_local_noedes:
                    if (node not in valid_local_noedes) and (node in invalid_local_nodes):
                        if refiner.is_fwd_move_valid_local(node):
                            valid_local_noedes.add(node)
                        else:
                            invalid_local_nodes.add(node)
                            continue
                if not refiner.is_fwd_move_valid_topo(node, dst_stage):
                    continue
                moved = refiner.update_on_move(nodes=[node], new_stage_id=dst_stage, escape_minima=False)
                if moved:
                    num_moved += 1
        num_moved_fwd = num_moved
        # Backward pass: try to pull consumers of cross-stage edges backward.
        for (stage_id, borders) in reversed(refiner.stage_borders.items()):
            (outgoing_edges, outgoing_nodes, incoming_edges, incoming_nodes) = borders
            for e in sorted(incoming_edges, key=(lambda x: (x[0].topo_sort_id, (- x[1].topo_sort_id))), reverse=False):
                dst_stage = e[0].stage_id
                node = e[1]
                if not refiner.is_bwd_move_valid_topo(node, dst_stage):
                    continue
                moved = refiner.update_on_move(nodes=[node], new_stage_id=dst_stage, escape_minima=False)
                if moved:
                    num_moved += 1
        num_moved_bwd = num_moved - num_moved_fwd
        total_moves += num_moved
        print(f'Round {rounds}: num_moved {num_moved}, (fwd {num_moved_fwd}, bwd {num_moved_bwd})')
    pori = refiner.percents_of_relative_objective_improvement()
    print(f'Refinement ended after {rounds} rounds and {total_moves} moves. Relative improvement: {pori:.2%}')
    return (refiner.best_objective, pori)
class DropoutParameter(message.Message):
    """Protobuf message class for Caffe's DropoutParameter.

    Generated-protobuf style: the metaclass materializes all fields and
    accessors from the compiled descriptor — do not edit by hand.
    """
    # Python-2-style metaclass hook used by older protobuf codegen.
    __metaclass__ = reflection.GeneratedProtocolMessageType
    # Compiled descriptor produced by protoc for this message type.
    DESCRIPTOR = _DROPOUTPARAMETER
def _adam_delta(optimizer, model, grads):
deltas = {}
for group in optimizer.param_groups:
for param in group['params']:
grad = grads[param]
state = optimizer.state[param]
(exp_avg, exp_avg_sq) = (state['exp_avg'], state['exp_avg_sq'])
(beta1, beta2) = group['betas']
step = (state['step'] + 1)
if (group['weight_decay'] != 0):
grad = (grad + (group['weight_decay'] * param.data))
exp_avg = ((exp_avg * beta1) + ((1.0 - beta1) * grad))
exp_avg_sq = ((exp_avg_sq * beta2) + (((1.0 - beta2) * grad) * grad))
denom = (exp_avg_sq.sqrt() + group['eps'])
bias_correction1 = (1.0 - (beta1 ** step))
bias_correction2 = (1.0 - (beta2 ** step))
step_size = ((group['lr'] * math.sqrt(bias_correction2)) / bias_correction1)
deltas[param] = (((- step_size) * exp_avg) / denom)
param_to_name = {param: name for (name, param) in model.named_parameters()}
return {param_to_name[param]: delta for (param, delta) in deltas.items()} |
def common_parent_scope(sdict: ScopeDictType, scope_a: NodeType, scope_b: NodeType) -> NodeType:
    """Return the innermost scope that contains both `scope_a` and `scope_b`
    (None if the only common ancestor is the top-level scope).

    BUGFIX: `reversed()` was applied directly to a `zip` iterator, which
    raises TypeError in Python 3; the pairs are now materialized first.
    """
    # Trivial cases: identical, or one scope contains the other.
    if scope_a is scope_b:
        return scope_a
    if scope_contains_scope(sdict, scope_a, scope_b):
        return scope_a
    if scope_contains_scope(sdict, scope_b, scope_a):
        return scope_b
    # Otherwise walk the two scope paths from the back while they agree.
    spath_a = _scope_path(sdict, scope_a)
    spath_b = _scope_path(sdict, scope_b)
    common = None
    for (spa, spb) in reversed(list(zip(spath_a, spath_b))):
        if spa is spb:
            common = spa
        else:
            break
    return common
class MolMap(Base):
    """2-D molecular feature-map generator.

    Embeds precomputed pairwise feature distances (descriptors or
    fingerprints) into 2-D with t-SNE/UMAP/MDS, then assigns the embedded
    points to a scatter or grid feature map that `transform` fills per
    molecule.

    BUGFIX: `np.int` (removed in NumPy 1.24) replaced with the builtin
    `int`; identical truncation behavior.
    """

    def __init__(self, ftype='descriptor', flist=None, fmap_type='grid', fmap_shape=None, split_channels=True, metric='cosine', var_thr=0.0001):
        """Set up the feature list, distance matrix and map assigner.

        ftype: 'descriptor' or 'fingerprint'.
        flist: optional subset of feature IDs; defaults to all available.
        fmap_type: 'grid' (Scatter2Grid) or 'scatter' (Scatter2Array).
        fmap_shape: (h, w) for scatter maps; auto-sized when None.
        split_channels: keep feature subtypes in separate channels.
        metric: distance metric key for the precomputed matrix.
        var_thr: drop features whose recorded variance is <= this threshold.
        """
        super().__init__()
        assert ftype in ['descriptor', 'fingerprint'], 'no such feature type supported!'
        assert fmap_type in ['scatter', 'grid'], 'no such feature map type supported!'
        self.ftype = ftype
        self.metric = metric
        self.method = None
        self.isfit = False
        # Precomputed pairwise feature-distance matrix for this ftype/metric.
        dist_matrix = load_config(ftype, metric)
        feature_order = dist_matrix.index.tolist()
        feat_seq_dict = dict(zip(feature_order, range(len(feature_order))))
        # Scaling stats; drop near-constant features.
        scale_info = load_config(ftype, 'scale')
        scale_info = scale_info[(scale_info['var'] > var_thr)]
        slist = scale_info.index.tolist()
        if not flist:
            flist = list(dist_matrix.columns)
        # Keep requested features that survived the variance filter, in the
        # canonical feature order.
        final_list = list(set(slist) & set(flist))
        final_list.sort(key=lambda x: feat_seq_dict.get(x))
        dist_matrix = dist_matrix.loc[final_list][final_list]
        self.dist_matrix = dist_matrix
        self.flist = final_list
        self.scale_info = scale_info.loc[final_list]
        if ftype == 'fingerprint':
            self.extract = fext()
        else:
            self.extract = dext()
        self.fmap_type = fmap_type
        if fmap_type == 'grid':
            S = Scatter2Grid()
        else:
            if fmap_shape is None:
                N = len(self.flist)
                # Default scatter canvas: 2*sqrt(N) on each side.
                l = int(np.sqrt(N)) * 2
                fmap_shape = (l, l)
            S = Scatter2Array(fmap_shape)
        self._S = S
        self.split_channels = split_channels

    def _fit_embedding(self, method='tsne', n_components=2, random_state=1, verbose=2, n_neighbors=30, min_dist=0.1, **kwargs):
        """Embed the feature-distance matrix into `n_components` dims with
        t-SNE, UMAP or MDS, storing the embedding in self.df_embedding."""
        dist_matrix = self.dist_matrix
        if 'metric' in kwargs.keys():
            metric = kwargs.get('metric')
            kwargs.pop('metric')
        else:
            # The distance matrix is already pairwise distances.
            metric = 'precomputed'
        if method == 'tsne':
            embedded = TSNE(n_components=n_components, random_state=random_state, metric=metric, verbose=verbose, **kwargs)
        elif method == 'umap':
            embedded = UMAP(n_components=n_components, n_neighbors=n_neighbors, min_dist=min_dist, verbose=verbose, random_state=random_state, metric=metric, **kwargs)
        elif method == 'mds':
            # MDS takes 'dissimilarity' instead of 'metric'.
            if 'metric' in kwargs.keys():
                kwargs.pop('metric')
            if 'dissimilarity' in kwargs.keys():
                dissimilarity = kwargs.get('dissimilarity')
                kwargs.pop('dissimilarity')
            else:
                dissimilarity = 'precomputed'
            embedded = MDS(metric=True, n_components=n_components, verbose=verbose, dissimilarity=dissimilarity, random_state=random_state, **kwargs)
        embedded = embedded.fit(dist_matrix)
        df = pd.DataFrame(embedded.embedding_, index=self.flist, columns=['x', 'y'])
        # Attach feature subtype info; subtypes become the map channels.
        typemap = self.extract.bitsinfo.set_index('IDs')
        df = df.join(typemap)
        df['Channels'] = df['Subtypes']
        self.df_embedding = df
        self.embedded = embedded

    def fit(self, method='umap', min_dist=0.1, n_neighbors=30, verbose=2, random_state=1, **kwargs):
        """Fit the 2-D embedding and the scatter/grid assigner.

        method: one of 'tsne', 'umap', 'mds'.
        """
        # Embedding is always 2-D for the feature map.
        if 'n_components' in kwargs.keys():
            kwargs.pop('n_components')
        assert method in ['tsne', 'umap', 'mds'], 'no support such method!'
        self.method = method
        self._fit_embedding(method=method, n_neighbors=n_neighbors, random_state=random_state, min_dist=min_dist, verbose=verbose, n_components=2, **kwargs)
        if self.fmap_type == 'scatter':
            print_info('Applying naive scatter feature map...')
            self._S.fit(self.df_embedding, self.split_channels, channel_col='Channels')
            print_info('Finished')
        else:
            print_info('Applying grid feature map(assignment), this may take several minutes(1~30 min)')
            self._S.fit(self.df_embedding, self.split_channels, channel_col='Channels')
            print_info('Finished')
        self.isfit = True
        self.fmap_shape = self._S.fmap_shape

    def transform(self, smiles, scale=True, scale_method='minmax'):
        """Transform one SMILES string into its feature map.

        scale_method: 'standard' or 'minmax' (descriptors only).
        Returns a NaN-free numpy array, or None when not fitted.
        """
        if not self.isfit:
            print_error('please fit first!')
            return
        arr = self.extract.transform(smiles)
        df = pd.DataFrame(arr).T
        df.columns = self.extract.bitsinfo.IDs
        # Scaling only makes sense for continuous descriptors; `&` is fine
        # here since both operands are booleans.
        if (scale & (self.ftype == 'descriptor')):
            if scale_method == 'standard':
                df = self.StandardScaler(df, self.scale_info['mean'], self.scale_info['std'])
            else:
                df = self.MinMaxScaleClip(df, self.scale_info['min'], self.scale_info['max'])
        df = df[self.flist]
        vector_1d = df.values[0]
        fmap = self._S.transform(vector_1d)
        return np.nan_to_num(fmap)

    def batch_transform(self, smiles_list, scale=True, scale_method='minmax', n_jobs=4):
        """Transform a list of SMILES in parallel; returns a stacked array."""
        P = Parallel(n_jobs=n_jobs)
        res = P((delayed(self.transform)(smiles, scale, scale_method) for smiles in tqdm(smiles_list, ascii=True)))
        X = np.stack(res)
        return X

    def rearrangement(self, orignal_X, target_mp):
        """Re-arrange feature maps produced by this MolMap into the layout of
        another MolMap with the same feature list.

        orignal_X: array of shape (samples, w, h, channels).
        Returns the stacked re-arranged maps.
        """
        assert self.flist == target_mp.flist, print_error('Input features list is different, can not re-arrangement, check your flist by mp.flist method')
        assert len(orignal_X.shape) == 4, print_error('Input X has error shape, please reshape to (samples, w, h, channels)')
        # Inverse of this map's feature->pixel assignment.
        idx = self._S.df.sort_values('indices').idx.tolist()
        idx = np.argsort(idx)
        N = len(orignal_X)
        M = len(self.flist)
        res = []
        for i in tqdm(range(N), ascii=True):
            # Collapse channels, flatten, recover the 1-D feature vector,
            # then project through the target map's assigner.
            x = orignal_X[i].sum(axis=(- 1))
            vector_1d_ordered = x.reshape((- 1))
            vector_1d_ordered = vector_1d_ordered[:M]
            vector_1d = vector_1d_ordered[idx]
            fmap = target_mp._S.transform(vector_1d)
            res.append(fmap)
        return np.stack(res)

    def plot_scatter(self, htmlpath='./', htmlname=None, radius=3):
        """Render the embedding as an interactive scatter plot (HTML)."""
        (df_scatter, H_scatter) = vismap.plot_scatter(self, htmlpath=htmlpath, htmlname=htmlname, radius=radius)
        self.df_scatter = df_scatter
        return H_scatter

    def plot_grid(self, htmlpath='./', htmlname=None):
        """Render the grid assignment (HTML); no-op for scatter maps."""
        if self.fmap_type != 'grid':
            return
        (df_grid, H_grid) = vismap.plot_grid(self, htmlpath=htmlpath, htmlname=htmlname)
        self.df_grid = df_grid
        return H_grid

    def load(self, filename):
        """Load a fitted MolMap from `filename` (delegates to Base)."""
        return self._load(filename)

    def save(self, filename):
        """Persist this MolMap to `filename` (delegates to Base)."""
        return self._save(filename)
def prepare(element):
    """Extract the 'image' entry of a dataset element and cast it to float32."""
    return tf.cast(element['image'], tf.float32)
class FreeModule_ambient_field(FreeModule_generic_field, FreeModule_ambient_pid):
    """Ambient free module (vector space) over a field."""

    def __init__(self, base_field, dimension, sparse=False, category=None):
        """Construct via the ambient-PID base class (a field is a PID)."""
        FreeModule_ambient_pid.__init__(self, base_field, dimension, sparse=sparse, category=category)

    def _repr_(self):
        """String representation, noting sparseness."""
        template = ('Sparse vector space of dimension %s over %s'
                    if self.is_sparse()
                    else 'Vector space of dimension %s over %s')
        return template % (self.dimension(), self.base_ring())

    def ambient_vector_space(self):
        """An ambient space is its own ambient vector space."""
        return self

    def base_field(self):
        """The base ring of a vector space is its base field."""
        return self.base_ring()

    def _element_constructor_(self, e, *args, **kwds):
        """Coerce `e` into this space.

        Elements of a finite field with matching base ring and degree are
        converted via their vector representation; everything else is
        delegated to the generic-field constructor.
        """
        try:
            parent = e.parent()
            if (isinstance(parent, FiniteField)
                    and parent.base_ring() == self.base_ring()
                    and parent.degree() == self.degree()):
                return self(e._vector_())
        except AttributeError:
            # `e` has no parent()/_vector_(): fall through to generic path.
            pass
        return FreeModule_generic_field._element_constructor_(self, e, *args, **kwds)
def __recompute_bwweights(G, M, E, D, T):
    """Recompute Tor consensus bandwidth weights (dir-spec §3.8.3 casework).

    G/M/E/D are aggregate bandwidths of Guard-only, Middle-only, Exit-only
    and Guard+Exit ("dual") relays; T is the total (presumably G+M+E+D —
    TODO confirm against caller).  Wxy is the weight for using a relay of
    class y in position x (g=guard, m=middle, e=exit, d=dual), expressed
    out of `weightscale`.  Returns
    (casename, Wgg, Wgd, Wee, Wed, Wmg, Wme, Wmd).
    Raises ValueError when the solved weights fail the consistency checks.
    """
    weightscale = 10000  # weights sum to this per position
    if (((3 * E) >= T) and ((3 * G) >= T)):
        # Case 1: both guard and exit capacity are plentiful (>= T/3 each):
        # split dual relays evenly across all three positions.
        casename = 'Case 1 (Wgd=Wmd=Wed)'
        Wgd = Wed = Wmd = (weightscale / 3)
        Wee = ((weightscale * ((E + G) + M)) / (3 * E))
        Wme = (weightscale - Wee)
        Wmg = ((weightscale * (((2 * G) - E) - M)) / (3 * G))
        Wgg = (weightscale - Wmg)
        check = __check_weights_errors(Wgg, Wgd, Wmg, Wme, Wmd, Wee, Wed, weightscale, G, M, E, D, T, 10, True)
        if (check != bww_errors.NO_ERROR):
            raise ValueError('ERROR: {0} Wgd={1}, Wed={2}, Wmd={3}, Wee={4}, Wme={5}, Wmg={6}, Wgg={7}'.format(bww_errors[check], Wgd, Wed, Wmd, Wee, Wme, Wmg, Wgg))
    elif (((3 * E) < T) and ((3 * G) < T)):
        # Case 2: both guard and exit capacity are scarce (< T/3 each).
        R = min(E, G)  # the scarcer of the two
        S = max(E, G)  # the more plentiful of the two
        if ((R + D) < S):
            # Case 2a: even all dual bandwidth cannot balance the scarcer
            # side — dedicate classes fully to their own position and give
            # all dual bandwidth to the scarce side.
            Wgg = Wee = weightscale
            Wmg = Wme = Wmd = 0
            if (E < G):
                casename = 'Case 2a (E scarce)'
                Wed = weightscale
                Wgd = 0
            else:
                casename = 'Case 2a (G scarce)'
                Wed = 0
                Wgd = weightscale
        else:
            # Case 2b: duals can balance; try sub-case solutions in order.
            casename = 'Case 2b1 (Wgg=weightscale, Wmd=Wgd)'
            Wee = ((weightscale * ((E - G) + M)) / E)
            Wed = ((weightscale * (((D - (2 * E)) + (4 * G)) - (2 * M))) / (3 * D))
            Wme = ((weightscale * (G - M)) / E)
            Wmg = 0
            Wgg = weightscale
            Wmd = Wgd = ((weightscale - Wed) / 2)
            check = __check_weights_errors(Wgg, Wgd, Wmg, Wme, Wmd, Wee, Wed, weightscale, G, M, E, D, T, 10, True)
            if (check != bww_errors.NO_ERROR):
                # 2b1 failed: pin both Wgg and Wee to the full scale.
                casename = 'Case 2b2 (Wgg=weightscale, Wee=weightscale)'
                Wgg = Wee = weightscale
                Wed = ((weightscale * (((D - (2 * E)) + G) + M)) / (3 * D))
                Wmd = ((weightscale * (((D - (2 * M)) + G) + E)) / (3 * D))
                Wme = Wmg = 0
                if (Wmd < 0):
                    # 2b3: negative middle-dual weight is clamped to zero.
                    casename = 'case 2b3 (Wmd=0)'
                    Wmd = 0
                Wgd = ((weightscale - Wed) - Wmd)
                check = __check_weights_errors(Wgg, Wgd, Wmg, Wme, Wmd, Wee, Wed, weightscale, G, M, E, D, T, 10, True)
            # A middle-balance error is tolerated in case 2b.
            if ((check != bww_errors.NO_ERROR) and (check != bww_errors.BALANCE_MID_ERROR)):
                raise ValueError('ERROR: {0} Wgd={1}, Wed={2}, Wmd={3}, Wee={4}, Wme={5}, Wmg={6}, Wgg={7}'.format(bww_errors[check], Wgd, Wed, Wmd, Wee, Wme, Wmg, Wgg))
    else:
        # Case 3: exactly one of guard/exit capacity is scarce.
        S = min(E, G)
        # Sanity check that we really are in the one-scarce-side case.
        if ((not (((3 * E) < T) or ((3 * G) < T))) or (not (((3 * G) >= T) or ((3 * E) >= T)))):
            raise ValueError('ERROR: Bandwidths have inconsistent values G={0}, M={1}, E={2}, D={3}, T={4}'.format(G, M, E, D, T))
        if ((3 * (S + D)) < T):
            # Case 3a: scarce side + duals still below T/3 — dedicate the
            # scarce class and all duals to the scarce position.
            if (G < E):
                casename = 'Case 3a (G scarce)'
                Wgg = Wgd = weightscale
                Wmd = Wed = Wmg = 0
                if (E < M):
                    Wme = 0
                else:
                    Wme = ((weightscale * (E - M)) / (2 * E))
                Wee = (weightscale - Wme)
            else:
                casename = 'Case 3a (E scarce)'
                Wee = Wed = weightscale
                Wmd = Wgd = Wme = 0
                if (G < M):
                    Wmg = 0
                else:
                    Wmg = ((weightscale * (G - M)) / (2 * G))
                Wgg = (weightscale - Wmg)
        else:
            # Case 3b: duals suffice to top up the scarce side.
            if (G < E):
                casename = 'Case 3bg (G scarce, Wgg=weightscale, Wmd == Wed'
                Wgg = weightscale
                Wgd = ((weightscale * (((D - (2 * G)) + E) + M)) / (3 * D))
                Wmg = 0
                Wee = ((weightscale * (E + M)) / (2 * E))
                Wme = (weightscale - Wee)
                Wmd = Wed = ((weightscale - Wgd) / 2)
                check = __check_weights_errors(Wgg, Wgd, Wmg, Wme, Wmd, Wee, Wed, weightscale, G, M, E, D, T, 10, True)
            else:
                casename = 'Case 3be (E scarce, Wee=weightscale, Wmd == Wgd'
                Wee = weightscale
                Wed = ((weightscale * (((D - (2 * E)) + G) + M)) / (3 * D))
                Wme = 0
                Wgg = ((weightscale * (G + M)) / (2 * G))
                Wmg = (weightscale - Wgg)
                Wmd = Wgd = ((weightscale - Wed) / 2)
                check = __check_weights_errors(Wgg, Wgd, Wmg, Wme, Wmd, Wee, Wed, weightscale, G, M, E, D, T, 10, True)
            if check:
                raise ValueError('ERROR: {0} Wgd={1}, Wed={2}, Wmd={3}, Wee={4}, Wme={5}, Wmg={6}, Wgg={7}'.format(bww_errors[check], Wgd, Wed, Wmd, Wee, Wme, Wmg, Wgg))
    return (casename, Wgg, Wgd, Wee, Wed, Wmg, Wme, Wmd)
def write_labels(dirpath, dictionary):
    """Write constituency span labels ('labels.txt') and dependency span
    labels ('dlabels.txt') for every tree pair found under `dirpath`."""
    print(('Writing labels for trees in ' + dirpath))
    with open(os.path.join(dirpath, 'labels.txt'), 'w') as labels, open(os.path.join(dirpath, 'dlabels.txt'), 'w') as dlabels:
        (const_trees, dep_trees, toks) = load_trees(dirpath)
        for idx in xrange(len(const_trees)):
            ctree = const_trees[idx]
            dtree = dep_trees[idx]
            ctree.set_spans()
            dtree.set_spans(toks[idx])
            # Constituency labels: one slot per tree node.
            spans = [None] * ctree.size()
            labs = [None] * ctree.size()
            ctree.get_labels(spans, labs, dictionary)
            labels.write(' '.join(map(str, labs)) + '\n')
            # Dependency labels reuse the constituency root span; one slot
            # per token, defaulting to '#'.
            dtree.span = ctree.span
            num_toks = len(toks[idx])
            spans = [None] * num_toks
            labs = ['#'] * num_toks
            dtree.get_labels(spans, labs, dictionary)
            dlabels.write(' '.join(map(str, labs)) + '\n')
def get_activation_distance_stats(activations_0, activations_1, layer_name=''):
    """Print mean/max/min/std of pairwise activation distances between two
    sets of neuron activations (rows of layer 0 vs. layer 1)."""
    if layer_name != '':
        print('In layer {}: getting activation distance statistics'.format(layer_name))
    # Square root turns cost_matrix's squared distances into distances.
    dist = cost_matrix(activations_0, activations_1) ** (1 / 2)
    per_row_mean = torch.mean(dist, dim=(- 1))
    per_row_max = torch.max(dist, dim=(- 1))[0]
    per_row_min = torch.min(dist, dim=(- 1))[0]
    per_row_std = torch.std(dist, dim=(- 1))
    print('Statistics of the distance from neurons of layer 1 (averaged across nodes of layer 0): \n')
    print('Max : {}, Mean : {}, Min : {}, Std: {}'.format(
        torch.mean(per_row_max), torch.mean(per_row_mean),
        torch.mean(per_row_min), torch.mean(per_row_std)))
class GeneralBlock(ControlFlow):
    """General (sequential) control-flow block: a list of child regions
    emitted in order, plus bookkeeping about which inter-state gotos can be
    dropped or replaced by structured break/continue statements."""
    elements: List[ControlFlow]  # child regions, in emission order
    gotos_to_ignore: Sequence[Edge[InterstateEdge]]  # edges that need no goto
    gotos_to_continue: Sequence[Edge[InterstateEdge]]  # edges emitted as `continue;`
    gotos_to_break: Sequence[Edge[InterstateEdge]]  # edges emitted as `break;`
    assignments_to_ignore: Sequence[Edge[InterstateEdge]]  # edges whose assignments are skipped
    sequential: bool  # presumably True when elements run strictly in order -- TODO confirm

    def as_cpp(self, codegen, symbols) -> str:
        """Emit C++ for all child elements plus the transition code
        (gotos/break/continue) between consecutive states."""
        expr = ''
        for (i, elem) in enumerate(self.elements):
            expr += elem.as_cpp(codegen, symbols)
            # Only single states need explicit transition emission.
            if isinstance(elem, SingleState):
                sdfg = elem.state.parent
                out_edges = sdfg.out_edges(elem.state)
                for (j, e) in enumerate(out_edges):
                    if (e not in self.gotos_to_ignore):
                        # Find the fall-through successor (if any) so the
                        # transition can skip a redundant goto to it.
                        successor = None
                        if (j == (len(out_edges) - 1)):
                            if ((i + 1) < len(self.elements)):
                                successor = self.elements[(i + 1)].first_state
                            elif (i == (len(self.elements) - 1)):
                                next_block = _find_next_block(self)
                                if (next_block is not None):
                                    successor = next_block.first_state
                        expr += elem.generate_transition(sdfg, e, successor)
                    else:
                        # Goto suppressed; still emit its assignments unless
                        # those are suppressed too.
                        if (e not in self.assignments_to_ignore):
                            expr += elem.generate_transition(sdfg, e, assignments_only=True)
                        # Structured replacements for the suppressed goto.
                        if (e in self.gotos_to_break):
                            expr += 'break;\n'
                        elif (e in self.gotos_to_continue):
                            expr += 'continue;\n'
                # If every path out of the state was handled above, no
                # fall-off-the-end goto is needed.
                if elem.last_state:
                    continue
                if ((len(out_edges) == 2) and (out_edges[0].data.condition_sympy() == sp.Not(out_edges[1].data.condition_sympy()))):
                    # Complementary if/else pair covers all cases.
                    continue
                if ((len(out_edges) == 1) and out_edges[0].data.is_unconditional()):
                    continue
                # Otherwise jump to the SDFG exit label.
                expr += f'''goto __state_exit_{sdfg.sdfg_id};
'''
        return expr

    def first_state(self) -> SDFGState:
        """First state of the first child element (None when empty)."""
        if (not self.elements):
            return None
        return self.elements[0].first_state

    def children(self) -> List[ControlFlow]:
        """Direct child control-flow regions."""
        return self.elements
def gpu_mem_usage():
    """Return the peak GPU memory allocated by torch, in GiB (0 when CUDA
    is unavailable)."""
    if not torch.cuda.is_available():
        return 0
    bytes_per_gib = 1024 * 1024 * 1024
    return torch.cuda.max_memory_allocated() / bytes_per_gib
class LearnerModelParallel(nn.Module):
    """Spread a learner's network sections across all visible GPUs.

    Sections are greedily assigned, largest first, to the currently
    least-loaded device (load measured in trainable-parameter count).
    Assumes `sections` is a dict keyed by contiguous 1-based int ids, each
    value exposing `.network` and `.device` -- TODO confirm against Learner.
    """

    def __init__(self, module, sections):
        super(LearnerModelParallel, self).__init__()
        # Top-level module lives on the default CUDA device.
        self.module = module.cuda()
        self.sections = sections
        self.num_sections = len(self.sections)
        self._scatter_sections()

    def _scatter_sections(self):
        """Assign each section's network to a GPU, balancing parameter load."""
        num_parameters = (lambda s: sum((p.numel() for p in s.parameters() if p.requires_grad)))
        gpu_ids = list(range(torch.cuda.device_count()))
        available_devices = [i for i in gpu_ids]
        # Largest sections first so the greedy assignment balances better.
        sorted_sections = sorted(self.sections, key=(lambda x: num_parameters(self.sections[x].network)), reverse=True)
        device_load = dict(((i, 0) for i in available_devices))
        for id in sorted_sections:
            # Pick the device with the smallest accumulated parameter count.
            device = min(device_load, key=device_load.get)
            device_load[device] += num_parameters(self.sections[id].network)
            device = torch.device('cuda:{}'.format(device))
            self.sections[id].network.to(device)
            self.sections[id].device = device

    def cpu(self):
        """Move every section network and the top-level module back to CPU."""
        for (i, s) in self.sections.items():
            self.sections[i].network = s.network.cpu()
            self.sections[i].device = torch.device('cpu')
        self.module = self.module.cpu()

    def _scatter_features(self, features):
        """Detach each section's input and move it to that section's device.

        Section `id` reads features[id - 1] (1-based section ids).
        """
        for id in self.sections:
            inp = features[(id - 1)]
            features[(id - 1)] = inp.detach().cuda(self.sections[id].device, non_blocking=True)

    def forward(self, features):
        """Run all sections in parallel on their devices.

        The last feature entry is dropped before dispatch — presumably the
        final output slot that has no consuming section; TODO confirm.
        Returns an OrderedDict keyed by 1-based section id.
        """
        self._scatter_features(features)
        modules = self.sections.values()
        inputs = list(features.values())[:(- 1)]
        outputs = parallel_apply(modules, inputs)
        outputs = OrderedDict((((i + 1), outputs[i]) for i in range(len(outputs))))
        return outputs
def main():
    """CLI entry point: embed sequences from a data file with the chosen
    model and pickle the results to the output path."""
    import argparse
    import pickle as pkl
    parser = argparse.ArgumentParser()
    parser.add_argument('datafile', type=str, help='sequences to embed')
    parser.add_argument('model', type=str, help='which model to use')
    parser.add_argument('--load-from', type=str, default=None, help='file from which to load pretrained weights')
    parser.add_argument('--task', default=None, help='If running a forward pass through existing task datasets, refer to the task with this flag')
    parser.add_argument('--output', default='outputs.pkl', type=str, help='file to output results to')
    args = parser.parse_args()
    results = run_embed(args.datafile, args.model, args.load_from, args.task)
    with open(args.output, 'wb') as handle:
        pkl.dump(results, handle)
class MADGRAD(torch.optim.Optimizer):
    """MADGRAD: momentumized, adaptive, dual-averaged gradient optimizer.

    Supports dense and sparse gradients (sparse only with momentum == 0 and
    no weight decay).

    BUGFIX: the class body contained a stray `_grad()` call where `step`'s
    `@torch.no_grad()` decorator had been mangled away — as written the
    class definition raised NameError.  The decorator is restored.
    """

    def __init__(self, params: _params_t, lr: float = 0.01, momentum: float = 0.9,
                 weight_decay: float = 0, eps: float = 1e-06, decoupled_decay: bool = False):
        """Validate hyperparameters and initialize the base Optimizer.

        Raises ValueError on out-of-range momentum/lr/weight_decay/eps.
        """
        if momentum < 0 or momentum >= 1:
            raise ValueError(f'Momentum {momentum} must be in the range [0,1]')
        if lr <= 0:
            raise ValueError(f'Learning rate {lr} must be positive')
        if weight_decay < 0:
            raise ValueError(f'Weight decay {weight_decay} must be non-negative')
        if eps < 0:
            raise ValueError(f'Eps must be non-negative')
        defaults = dict(lr=lr, eps=eps, momentum=momentum, weight_decay=weight_decay, decoupled_decay=decoupled_decay)
        super().__init__(params, defaults)

    def supports_memory_efficient_fp16(self) -> bool:
        # NOTE(review): upstream MADGRAD exposes these two as @property;
        # here they are plain methods — confirm against callers before
        # changing the calling convention.
        return False

    def supports_flat_params(self) -> bool:
        return True

    @torch.no_grad()
    def step(self, closure: Optional[Callable[([], float)]] = None) -> Optional[float]:
        """Perform one optimization step.

        closure: optional callable that re-evaluates the model and returns
        the loss (evaluated with grad enabled).  Returns the closure's loss,
        or None.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()
        for group in self.param_groups:
            eps = group['eps']
            lr = group['lr'] + eps
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            ck = 1 - momentum  # convex-combination coefficient for momentum
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad
                if momentum != 0.0 and grad.is_sparse:
                    raise RuntimeError('momentum != 0 is not compatible with sparse gradients')
                state = self.state[p]
                if len(state) == 0:
                    # Lazy state init: dual average s and sum of squared grads.
                    state['step'] = 0
                    state['grad_sum_sq'] = torch.zeros_like(p)
                    state['s'] = torch.zeros_like(p)
                    if momentum != 0:
                        state['x0'] = torch.clone(p).detach()
                state['step'] += 1
                grad_sum_sq = state['grad_sum_sq']
                s = state['s']
                # Dual-averaging step size grows with sqrt(k).
                lamb = lr * math.sqrt(state['step'])
                if weight_decay != 0:
                    if group['decoupled_decay']:
                        # AdamW-style decoupled decay applied to the weights.
                        p.mul_(1.0 - group['lr'] * weight_decay)
                    else:
                        if grad.is_sparse:
                            raise RuntimeError('weight_decay option is not compatible with sparse gradients')
                        # Classic L2: fold decay into the gradient.
                        grad.add_(p, alpha=weight_decay)
                if grad.is_sparse:
                    # Sparse path: update only the masked (non-zero) entries.
                    grad = grad.coalesce()
                    grad_val = grad._values()
                    p_masked = p.sparse_mask(grad)
                    grad_sum_sq_masked = grad_sum_sq.sparse_mask(grad)
                    s_masked = s.sparse_mask(grad)
                    # Cube-root RMS before this step's gradient is added.
                    rms_masked_vals = grad_sum_sq_masked._values().pow(1 / 3).add_(eps)
                    x0_masked_vals = p_masked._values().addcdiv(s_masked._values(), rms_masked_vals, value=1)
                    grad_sq = grad * grad
                    grad_sum_sq.add_(grad_sq, alpha=lamb)
                    grad_sum_sq_masked.add_(grad_sq, alpha=lamb)
                    rms_masked_vals = grad_sum_sq_masked._values().pow_(1 / 3).add_(eps)
                    s.add_(grad, alpha=lamb)
                    s_masked._values().add_(grad_val, alpha=lamb)
                    p_kp1_masked_vals = x0_masked_vals.addcdiv(s_masked._values(), rms_masked_vals, value=-1)
                    p_masked._values().add_(p_kp1_masked_vals, alpha=-1)
                    p.add_(p_masked, alpha=-1)
                else:
                    if momentum == 0:
                        # Recover the implicit initial point x0 from p and s.
                        rms = grad_sum_sq.pow(1 / 3).add_(eps)
                        x0 = p.addcdiv(s, rms, value=1)
                    else:
                        x0 = state['x0']
                    grad_sum_sq.addcmul_(grad, grad, value=lamb)
                    rms = grad_sum_sq.pow(1 / 3).add_(eps)
                    s.add_(grad, alpha=lamb)
                    if momentum == 0:
                        p.copy_(x0.addcdiv(s, rms, value=-1))
                    else:
                        # Momentum: move p toward the dual-averaged iterate z.
                        z = x0.addcdiv(s, rms, value=-1)
                        p.mul_(1 - ck).add_(z, alpha=ck)
        return loss
def visualize_predictions(frame_sequence, one_hot_pred, one_hot_gt, many_hot_pred=None, many_hot_gt=None):
    """Render each scene's frames into one tiled image annotated with
    ground-truth transition markers and per-frame prediction bars.

    frame_sequence: batch of scenes; each scene is indexed as
    (scene_len, H, W, ...) — assumed image-like, TODO confirm dtype/layout.
    one_hot_pred / one_hot_gt: per-scene, per-frame scores / 0-1 labels.
    many_hot_pred: optional second prediction track (drawn in yellow).
    many_hot_gt: accepted for signature symmetry but unused here.
    Returns a stacked uint8 array of the annotated tile images.
    """
    batch_size = len(frame_sequence)
    images = []
    for i in range(batch_size):
        scene = frame_sequence[i]
        scene_labels = one_hot_gt[i]
        scene_one_hot_pred = one_hot_pred[i]
        scene_many_hot_pred = (many_hot_pred[i] if (many_hot_pred is not None) else None)
        (scene_len, ih, iw) = scene.shape[:3]
        # Widest grid width <= sqrt(scene_len) that divides scene_len evenly.
        # (The comprehension's `i` shadows the outer loop index; harmless in
        # Python 3 since comprehensions have their own scope.)
        grid_width = max([i for i in range(int((scene_len ** 0.5)), 0, (- 1)) if ((scene_len % i) == 0)])
        grid_height = (scene_len // grid_width)
        # Tile the frames into one (grid_h*ih, grid_w*iw, ...) image.
        scene = scene.reshape(([grid_height, grid_width] + list(scene.shape[1:])))
        scene = np.concatenate(np.split(np.concatenate(np.split(scene, grid_height), axis=2)[0], grid_width), axis=2)[0]
        img = Image.fromarray(scene.astype(np.uint8))
        draw = ImageDraw.Draw(img)
        j = 0  # flat frame index while walking the grid
        for h in range(grid_height):
            for w in range(grid_width):
                if (scene_labels[j] == 1):
                    # Green 'T' marks a ground-truth transition frame.
                    draw.text(((5 + (w * iw)), (h * ih)), 'T', fill=(0, 255, 0))
                # Black strip along the tile's right edge as the bar background.
                draw.rectangle([((((w * iw) + iw) - 1), (h * ih)), ((((w * iw) + iw) - 6), (((h * ih) + ih) - 1))], fill=(0, 0, 0))
                # Green bar: one-hot prediction score scaled to tile height.
                draw.rectangle([((((w * iw) + iw) - 4), (h * ih)), ((((w * iw) + iw) - 5), ((h * ih) + ((ih - 1) * scene_one_hot_pred[j])))], fill=(0, 255, 0))
                # Yellow bar: many-hot prediction score (0 when absent).
                draw.rectangle([((((w * iw) + iw) - 2), (h * ih)), ((((w * iw) + iw) - 3), ((h * ih) + ((ih - 1) * (scene_many_hot_pred[j] if (scene_many_hot_pred is not None) else 0))))], fill=(255, 255, 0))
                j += 1
        images.append(np.array(img))
    images = np.stack(images, 0)
    return images
def main_random(split, logit_file, is_training=False, num_retain=10, force_diff=FORCE_DIFF_CONFIG):
    """Build a generation dataset by keeping a *random* sample of candidate
    logical forms per question (random baseline for ranked retention).

    Writes the resulting examples to outputs/grail_<split>_randgen.json and
    prints dataset size plus spec/execution-hit ratios.
    """
    import random
    random.seed(123)
    candidate_info = load_candidates_file(f'outputs/grail_{split}_candidates-ranking.jsonline')
    logit_info = torch.load(logit_file)
    gen_dataset = []
    num_spec = 0
    top_ex_cnt = 0
    for entry in candidate_info:
        target_expr = entry['target_expr']
        candidates = entry['candidates']
        if not candidates:
            continue
        qid = entry['qid']
        if qid not in logit_info:
            continue
        logits = logit_info[qid]
        # Ranking is computed (as in the ranked variant) but the retained
        # indices are drawn uniformly at random.
        ranked = torch.argsort(-logits).tolist()
        chosen = random.sample(range(len(ranked)), min(num_retain, len(ranked)))
        top_candidates = [candidates[i] for i in chosen]
        top_pred = top_candidates[0]['logical_form']
        top_is_ex = top_candidates[0]['ex']
        if top_pred != target_expr and top_is_ex:
            # Prefer an executable top prediction over the gold target.
            gen_target = top_pred
            num_spec += 1
        else:
            gen_target = target_expr
        if top_is_ex:
            top_ex_cnt += 1
        gen_dataset.append({'qid': qid, 'genation_target': gen_target, 'top_candidates': top_candidates, 'target_full_expr': target_expr})
    print(len(gen_dataset))
    print(num_spec / len(gen_dataset))
    print(top_ex_cnt / len(gen_dataset))
    dump_json(gen_dataset, f'outputs/grail_{split}_randgen.json')
def gen_rest_table_index(obj, names=None, sort=True, only_local_functions=True, root=None):
    """Build a reST csv-table index of functions/methods.

    `obj` is either a class/module (whose sub-functions are listed) or an
    explicit list of callables; `names` overrides display names.  Methods
    are linked with :meth:, plain functions with :func:.  Returns the table
    as a single string ending in a newline.
    """
    if names is None:
        names = {}
    if inspect.isclass(obj) or inspect.ismodule(obj):
        (list_of_entries, names) = list_of_subfunctions(obj, only_local_functions=only_local_functions)
    else:
        list_of_entries = obj
    assert isinstance(list_of_entries, list)

    def display_name(entry):
        # Explicit override first, then the callable's own __name__.
        return names.get(entry, getattr(entry, '__name__', ''))

    rows = ['.. csv-table::', ' :class: contentstable', ' :widths: 30, 70', ' :delim: \n']
    if sort:
        list_of_entries.sort(key=display_name)
    # When rendering for a class, plain functions are linked as its methods.
    obj_or_root_is_class = False
    class_name = module_name = None
    if inspect.isclass(root):
        obj_or_root_is_class = True
        class_name = root.__name__
        module_name = root.__module__
    elif inspect.isclass(obj):
        obj_or_root_is_class = True
        class_name = obj.__name__
        module_name = obj.__module__
    for entry in list_of_entries:
        if inspect.ismethod(entry):
            link = ':meth:`~{module}.{cls}.{func}`'.format(module=entry.im_class.__module__, cls=entry.im_class.__name__, func=display_name(entry))
        elif _isfunction(entry) and obj_or_root_is_class:
            link = ':meth:`~{module}.{cls}.{func}`'.format(module=module_name, cls=class_name, func=display_name(entry))
        elif _isfunction(entry):
            link = ':func:`~{module}.{func}`'.format(module=entry.__module__, func=display_name(entry))
        else:
            continue
        doc = entry.__doc__
        extracted = _extract_embedded_position(doc)
        if extracted:
            doc = extracted[0]
        if doc:
            # First paragraph of the docstring, collapsed to one line.
            desc = doc.split('\n\n')[0]
            desc = ' '.join(part.strip() for part in desc.splitlines()).strip()
        else:
            desc = 'NO DOCSTRING'
        rows.append(' {} {}'.format(link, desc.lstrip()))
    return '\n'.join(rows) + '\n'
def asy_calc_old(create_loss, nbins):
    """Build a binned loss with frozen shape parameters and wrap it in an
    old-style asymptotic calculator; returns (Nsig, calculator)."""
    loss, params = create_loss(npeak=10, nbins=nbins)
    nsig, nbkg, mean, sigma = params
    # Freeze the shape parameters so only the yields float in the fit.
    mean.floating = False
    sigma.floating = False
    return nsig, AsymptoticCalculatorOld(loss, Minuit())
def build_criterion(args):
    """Build a weighted cross-entropy criterion from config `args`.

    The end-of-sequence class (`args.eos_index`) is weighted by
    `args.eos_loss_coef`; all other classes get weight 1, and targets equal
    to `args.padding_index` are ignored.

    Robustness fix: the original unconditionally moved the criterion to
    'cuda' and crashed on CPU-only machines; it now falls back to CPU when
    CUDA is unavailable.
    """
    weight = torch.ones(args.num_classes)
    weight[args.eos_index] = args.eos_loss_coef
    criterion = nn.CrossEntropyLoss(weight=weight, ignore_index=args.padding_index)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    criterion = criterion.to(device)
    return criterion
class InspectDialogPhenomena(object):
def __init__(self, config):
    """Store config/paths and load the NLP tools used for inspection.

    Loads spaCy's `en_core_web_sm` pipeline and an ELMo-based constituency
    parser — both can be slow at construction time.
    """
    super().__init__()
    self.config = config
    self.data_dir = config.data_dir
    self.save_data_dir = config.save_data_dir
    # Weather/time words — presumably matched against utterances by other
    # methods of this class (not visible in this chunk).
    self.weather_list = ['rainy', 'sunny', 'daytime', 'day', 'night']
    # Pronouns considered hard to resolve.
    self.difficult_pronouns = ['other', 'it']
    self.nlp = spacy.load('en_core_web_sm')
    self.const_parser = pretrained.span_based_constituency_parsing_with_elmo_joshi_2018()
    # Constituency root labels heuristically treated as full clauses.
    self.heuristic_root_cp = ['S', 'SQ', 'SBARQ', 'SINV']
def create_stats_dic(self, data_type: str):
data_path = self._get_json_path(self.data_dir, data_type)
print(f'Reading json {data_path}')
json_data = json.load(open(data_path))
questions = json_data['data']['questions']
print('Total questions: ', len(questions))
answers = json_data['data']['answers']
print('Total answers: ', len(answers))
dialogs = json_data['data']['dialogs']
stats_dic = {'image_id': [], 'pronouns_ques_dialog': [], 'pronouns_ans_dialog': [], 'pronouns_ques': [], 'pronouns_ans': [], 'pronouns_dialog': [], 'non_pleonastic_pronouns_ques_dialog': [], 'non_pleonastic_pronouns_ans_dialog': [], 'non_pleonastic_pronouns_ques': [], 'non_pleonastic_pronouns_ans': [], 'non_pleonastic_pronouns_dialog': [], 'ellipsis_ques_dialog': [], 'ellipsis_ans_dialog': [], 'ellipsis_ques': [], 'ellipsis_ans': [], 'ellipsis_dialog': [], 'pronouns_caption': [], 'non_pleonastic_pronouns_caption': [], 'ellipsis_caption': [], 'ques_list_dialog': [], 'ans_list_dialog': [], 'caption_list_dialog': []}
for dialog_id in tqdm(range(len(dialogs))):
image_id = dialogs[dialog_id]['image_id']
dialog_for_image = dialogs[dialog_id]['dialog']
caption = dialogs[dialog_id]['caption']
ellipsis_caption = self._get_ellipsis(caption)
(non_pleonastic_pronouns_caption, pronouns_caption) = self._get_pronouns(caption)
pronouns_ques_dialog = 0
pronouns_ans_dialog = 0
pronouns_dialog = 0
non_pleonastic_pronouns_ques_dialog = 0
non_pleonastic_pronouns_ans_dialog = 0
non_pleonastic_pronouns_dialog = 0
ellipsis_ques_dialog = 0
ellipsis_ans_dialog = 0
ellipsis_dialog = 0
ques_list_dialog = []
ans_list_dialog = []
for round_id in range(len(dialog_for_image)):
question = questions[dialog_for_image[round_id]['question']]
answer = answers[dialog_for_image[round_id]['answer']]
ques_list_dialog.append(question)
ans_list_dialog.append(answer)
ellipsis_ques = self._get_ellipsis(question)
(non_pleonastic_pronouns_ques, pronouns_ques) = self._get_pronouns(question)
ellipsis_ans = self._get_ellipsis(answer)
(non_pleonastic_pronouns_ans, pronouns_ans) = self._get_pronouns(answer)
stats_dic['non_pleonastic_pronouns_ques'].append(non_pleonastic_pronouns_ques)
stats_dic['non_pleonastic_pronouns_ans'].append(non_pleonastic_pronouns_ans)
stats_dic['pronouns_ques'].append(pronouns_ques)
stats_dic['pronouns_ans'].append(pronouns_ans)
stats_dic['ellipsis_ques'].append(ellipsis_ques)
stats_dic['ellipsis_ans'].append(ellipsis_ans)
pronouns_ques_dialog += pronouns_ques
pronouns_ans_dialog += pronouns_ans
pronouns_dialog += (pronouns_ques + pronouns_ans)
non_pleonastic_pronouns_ques_dialog += non_pleonastic_pronouns_ques
non_pleonastic_pronouns_ans_dialog += non_pleonastic_pronouns_ans
non_pleonastic_pronouns_dialog += (non_pleonastic_pronouns_ques + non_pleonastic_pronouns_ans)
ellipsis_ques_dialog += ellipsis_ques
ellipsis_ans_dialog += ellipsis_ans
ellipsis_dialog += (ellipsis_ques + ellipsis_ans)
stats_dic['image_id'].append(image_id)
stats_dic['ellipsis_caption'].append(ellipsis_caption)
stats_dic['non_pleonastic_pronouns_caption'].append(non_pleonastic_pronouns_caption)
stats_dic['pronouns_caption'].append(pronouns_caption)
stats_dic['pronouns_ques_dialog'].append(pronouns_ques_dialog)
stats_dic['pronouns_ans_dialog'].append(pronouns_ans_dialog)
stats_dic['pronouns_dialog'].append(pronouns_dialog)
stats_dic['non_pleonastic_pronouns_ques_dialog'].append(non_pleonastic_pronouns_ques_dialog)
stats_dic['non_pleonastic_pronouns_ans_dialog'].append(non_pleonastic_pronouns_ans_dialog)
stats_dic['non_pleonastic_pronouns_dialog'].append(non_pleonastic_pronouns_dialog)
stats_dic['ellipsis_ques_dialog'].append(ellipsis_ques_dialog)
stats_dic['ellipsis_ans_dialog'].append(ellipsis_ans_dialog)
stats_dic['ellipsis_dialog'].append(ellipsis_dialog)
stats_dic['ques_list_dialog'].append(ques_list_dialog)
stats_dic['ans_list_dialog'].append(ans_list_dialog)
stats_dic['caption_list_dialog'].append(caption)
pkl_file_path = self._save_file_path(save_data_dir=self.save_data_dir, data_type=data_type)
self.pickle_dump(pkl_file_path, stats_dic)
return
def get_analysis(self, data_type: str):
pkl_file_path = self._save_file_path(save_data_dir=self.save_data_dir, data_type=data_type)
print('Reading stats dic data from: ', pkl_file_path)
stats_dic = self.pickle_load(pkl_file_path)
per_turn_keys = ['non_pleonastic_pronouns_ques', 'non_pleonastic_pronouns_ans', 'pronouns_ques', 'pronouns_ans', 'ellipsis_ques', 'ellipsis_ans']
per_turn_stats = dict(((k, stats_dic[k]) for k in per_turn_keys if (k in stats_dic)))
per_dialog_keys = ['pronouns_ques_dialog', 'pronouns_ans_dialog', 'pronouns_dialog', 'non_pleonastic_pronouns_ques_dialog', 'non_pleonastic_pronouns_ans_dialog', 'ellipsis_ques_dialog', 'ellipsis_ans_dialog', 'ellipsis_dialog', 'ellipsis_caption', 'non_pleonastic_pronouns_caption', 'pronouns_caption']
per_dialog_stats = dict(((k, stats_dic[k]) for k in per_dialog_keys if (k in stats_dic)))
per_turn_stats_df = pd.DataFrame.from_dict(per_turn_stats)
per_turn_stats_df_describe = per_turn_stats_df.describe()
per_dialog_stats_df = pd.DataFrame.from_dict(per_dialog_stats)
per_dialog_stats_df_describe = per_dialog_stats_df.describe()
write_file_path = self._save_file_path(save_data_dir=self.save_data_dir, data_type=data_type, ext='xlsx')
self.write_excel_df(write_file_path, [per_turn_stats_df_describe, per_dialog_stats_df_describe], ['turn_level', 'dialog_level'])
write_file_path = self._save_file_path(save_data_dir=self.save_data_dir, data_type=data_type, file_type_name='percent', ext='xlsx')
writer = pd.ExcelWriter(write_file_path, engine='xlsxwriter')
self.write_value_counts_df_excel_offset(write_file_path=write_file_path, sheet_name='Dialog level', key_list=per_dialog_keys, write_df=per_dialog_stats_df, writer=writer)
self.write_value_counts_df_excel_offset(write_file_path=write_file_path, sheet_name='Turn level', key_list=per_turn_keys, write_df=per_turn_stats_df, writer=writer)
writer.save()
print(get_column_stats(per_dialog_stats_df, 'non_pleonastic_pronouns_ques_dialog'))
print(get_column_stats(per_turn_stats_df, 'non_pleonastic_pronouns_ques'))
def write_value_counts_df_excel_offset(write_file_path: str, sheet_name: str, key_list: List, write_df, writer=None, start_pos: int=0, offset: int=3, engine: str='xlsxwriter'):
if (writer is None):
writer = pd.ExcelWriter(write_file_path, engine=engine)
workbook = writer.book
worksheet = workbook.add_worksheet(sheet_name)
writer.sheets[sheet_name] = worksheet
for key in key_list:
column_stats_df = get_column_stats(write_df, key)
worksheet.write_string(start_pos, 0, key)
column_stats_df.to_excel(writer, sheet_name=sheet_name, startrow=(start_pos + 1), startcol=0)
start_pos = ((start_pos + offset) + len(column_stats_df.index))
return
def _get_pronouns(self, text):
doc = self.nlp(text)
non_pleonastic_pronoun = 0
pronouns = 0
for token in doc:
if (token.pos_ == 'PRON'):
pronouns += 1
non_pleonastic_pronoun += 1
if ((token.text == 'it') and any(((weather_element in text) for weather_element in self.weather_list))):
non_pleonastic_pronoun -= 1
if ('other' in text):
pronouns += 1
non_pleonastic_pronoun += 1
return (non_pleonastic_pronoun, pronouns)
def _get_ellipsis(self, text):
ellipsis = 0
const_results = self.const_parser.predict(text)
root = const_results['trees'].replace('(', '').split(' ')[0]
if (root not in self.heuristic_root_cp):
ellipsis = 1
return ellipsis
def pickle_dump(save_file_path: str, pickle_obj: Any):
with open(save_file_path, 'wb') as outfile:
pkl.dump(pickle_obj, outfile)
return
def pickle_load(file_path: str):
with open(file_path, 'rb') as infile:
pickle_obj = pkl.load(infile)
return pickle_obj
def _get_json_path(data_dir: str, data_type: str, split: str='1.0') -> str:
json_path = f'{data_dir}/visdial_{split}_{data_type}.json'
return json_path
def _save_file_path(save_data_dir: str, data_type: str, split: str='1.0', file_type_name: str='analysis', ext: str='pkl') -> str:
file_path = f'{save_data_dir}/visdial_{split}_{data_type}_{file_type_name}.{ext}'
return file_path
def write_excel_df(save_file_path: str, df_list: List, sheet_name_list: List):
writer = pd.ExcelWriter(save_file_path, engine='xlsxwriter')
assert (len(df_list) == len(sheet_name_list))
for index in range(len(df_list)):
df_list[index].to_excel(writer, sheet_name=sheet_name_list[index])
writer.save()
return |
def up_stage(inputs, skip, filters, kernel_size=3, activation='relu', padding='SAME'):
    """Decoder stage: upsample, merge with the skip connection, then refine.

    The input is upsampled, projected with a 2x2 conv, concatenated with
    ``skip`` and passed through two group-normalized convolutions.  Spatial
    dropout is applied with ``training=True`` so it stays active at inference
    time as well (Monte-Carlo dropout).
    """
    x = UpSampling2D()(inputs)
    x = Conv2D(filters, 2, activation=activation, padding=padding)(x)
    x = GroupNormalization()(x)
    x = concatenate([skip, x])
    x = GroupNormalization()(x)
    # Two identical conv + group-norm refinement steps.
    for _ in range(2):
        x = Conv2D(filters, kernel_size, activation=activation, padding=padding)(x)
        x = GroupNormalization()(x)
    return SpatialDropout2D(0.5)(x, training=True)
def reg_component(name, c):
    """Assign a fresh id to component *c* and record it in the module-level
    registries under *name*."""
    global _Id, _Components, _ComponentNames, _Name2Component
    c.id = _Id
    _Id += 1
    _Components.append(c)
    _ComponentNames.add(name)
    _Name2Component[name] = c
    if VERBOSE:
        print("New component: '%s'" % name)
.parametrize('alphas', ALPHAS)
def test_compute_quantiles_2D_and_3D(alphas: NDArray):
    """compute_quantiles must give identical results for a column vector and
    for that vector tiled to one column per alpha."""
    column = np.random.rand(1000, 1)
    tiled = np.repeat(column, len(alphas), axis=1)
    q_column = compute_quantiles(column, alphas)
    q_tiled = compute_quantiles(tiled, alphas)
    assert (q_column == q_tiled).all()
def log_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes):
    """Backward rule for elementwise log: d/dx log(x) = 1/x, so dx = dy / x.

    Only the upstream gradient and the primal input are used; the shape and
    output arguments are part of the backward-hook signature.
    """
    upstream = grad_inputs[0]
    primal = inputs[0]
    return upstream / primal
_model
def res2next50(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Construct a Res2NeXt-50 model (Bottle2neck, scale 4, 8x4d)."""
    cfg = default_cfgs['res2next50']
    model = ResNet(
        Bottle2neck, [3, 4, 6, 3],
        base_width=4, cardinality=8,
        num_classes=num_classes, in_chans=in_chans,
        block_args=dict(scale=4), **kwargs)
    model.default_cfg = cfg
    if pretrained:
        load_pretrained(model, cfg, num_classes, in_chans)
    return model
def init_dist(backend='nccl', **kwargs):
    """Initialize torch.distributed, pinning this process to a GPU by rank.

    Reads the rank from the RANK environment variable and uses the 'spawn'
    multiprocessing start method.
    """
    if mp.get_start_method(allow_none=True) != 'spawn':
        mp.set_start_method('spawn')
    rank = int(os.environ['RANK'])
    num_gpus = torch.cuda.device_count()
    # Round-robin GPU assignment across local processes.
    torch.cuda.set_device(rank % num_gpus)
    dist.init_process_group(backend=backend, **kwargs)
def register_Ns3MmWaveMacPduTag_methods(root_module, cls):
    """Register pybindgen constructors and methods for ns3::MmWaveMacPduTag.

    Generated-style binding code: each call declares one C++ constructor or
    method (name, return type, parameters, const/virtual/static flags) on the
    wrapper class.
    """
    # Copy constructor, default constructor, and SfnSf-based constructors.
    cls.add_constructor([param('ns3::MmWaveMacPduTag const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SfnSf', 'sfn')])
    cls.add_constructor([param('ns3::SfnSf', 'sfn'), param('uint8_t', 'symStart'), param('uint8_t', 'numSym')])
    # ns3::Tag interface (serialization + type id).
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetNumSym', 'uint8_t', [])
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetSfn', 'ns3::SfnSf', [], is_const=True)
    cls.add_method('GetSymStart', 'uint8_t', [])
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True)
    # Accessors for subframe number, symbol start and symbol count.
    cls.add_method('SetNumSym', 'void', [param('uint8_t', 'numSym')])
    cls.add_method('SetSfn', 'void', [param('ns3::SfnSf', 'sfn')])
    cls.add_method('SetSymStart', 'void', [param('uint8_t', 'symStart')])
    return
class RandomDataSplit(BaseTransform):
    """Transform that assigns random train/test/val node masks per class.

    For each class, up to ``num_nodes_per_class`` nodes are sampled; of those,
    ``train_ratio`` go to train, ``test_ratio`` to test, and the remainder to
    validation.
    """

    def __init__(self, num_nodes_per_class, train_ratio=0.7, test_ratio=0.2):
        self.num_nodes_per_class = num_nodes_per_class
        self.train_ratio = train_ratio
        self.test_ratio = test_ratio

    def __call__(self, data: Data) -> Data:
        y = data.y
        num_classes = (y.max().item() + 1)
        num_train_nodes_per_class = int((self.num_nodes_per_class * self.train_ratio))
        num_test_nodes_per_class = int((self.num_nodes_per_class * self.test_ratio))
        train_mask = torch.zeros_like(y, dtype=torch.bool)
        test_mask = torch.zeros_like(y, dtype=torch.bool)
        val_mask = torch.zeros_like(y, dtype=torch.bool)
        for c in range(num_classes):
            idx = (y == c).nonzero(as_tuple=False).view((- 1))
            # BUG FIX: always shuffle.  Previously classes with fewer than
            # num_nodes_per_class nodes were split in index order, i.e. the
            # split was not random for small classes.
            idx = idx[torch.randperm(idx.size(0))]
            if (idx.size(0) >= self.num_nodes_per_class):
                idx = idx[:self.num_nodes_per_class]
            train_mask[idx[:num_train_nodes_per_class]] = True
            test_mask[idx[num_train_nodes_per_class:(num_train_nodes_per_class + num_test_nodes_per_class)]] = True
            val_mask[idx[(num_train_nodes_per_class + num_test_nodes_per_class):]] = True
        data.train_mask = train_mask
        data.test_mask = test_mask
        data.val_mask = val_mask
        return data
def norm_point_xyxy(point, *, w, h):
    """Normalize an (x, y) pixel coordinate to [0, 1] by the image size,
    clamping coordinates that fall outside the image."""
    def _clamp01(v):
        return max(0.0, min(v, 1.0))
    x, y = point
    return (_clamp01(x / w), _clamp01(y / h))
def make_install_req_from_link(link, template):
    """Create an InstallRequirement for *link*, inheriting all options from
    the (non-editable) *template* requirement."""
    assert (not template.editable), 'template is editable'
    # Prefer the templated requirement string; fall back to the raw URL.
    line = str(template.req) if template.req else link.url
    options = dict(
        install_options=template.install_options,
        global_options=template.global_options,
        hashes=template.hash_options,
    )
    ireq = install_req_from_line(
        line,
        user_supplied=template.user_supplied,
        comes_from=template.comes_from,
        use_pep517=template.use_pep517,
        isolated=template.isolated,
        constraint=template.constraint,
        options=options,
    )
    ireq.original_link = template.original_link
    ireq.link = link
    return ireq
_criterion('nat_loss')
class LabelSmoothedDualImitationCriterion(FairseqCriterion):
    """Label-smoothed criterion for non-autoregressive translation models.

    Each entry of the model's output dict may carry its own logits, targets,
    mask and smoothing factor (or a precomputed 'loss'); the per-output losses
    are summed and also logged individually.

    BUG FIX: the non-reduced logging branch previously indexed ``l[['loss']]``
    (a list used as a dict key -> TypeError); it now reads ``l['loss']``.
    The parameter-less hooks are marked ``@staticmethod``.
    """

    def __init__(self, task, label_smoothing):
        super().__init__(task)
        self.label_smoothing = label_smoothing

    @staticmethod
    def add_args(parser):
        """Add criterion-specific arguments to the argument parser."""
        parser.add_argument('--label-smoothing', default=0.0, type=float, metavar='D', help='epsilon for label smoothing, 0 means no label smoothing')

    def _compute_loss(self, outputs, targets, masks=None, label_smoothing=0.0, name='loss', factor=1.0):
        """Compute a (optionally label-smoothed) loss for one output head.

        Hard integer targets use NLL; distribution targets use KL divergence.
        Returns a dict with the scaled loss, the raw NLL and the factor.
        """
        def mean_ds(x: Tensor, dim=None) -> Tensor:
            # Mean in float32 for stability, cast back to the input dtype.
            return (x.float().mean().type_as(x) if (dim is None) else x.float().mean(dim).type_as(x))
        if (masks is not None):
            (outputs, targets) = (outputs[masks], targets[masks])
        if ((masks is not None) and (not masks.any())):
            # Nothing to supervise in this batch.
            nll_loss = torch.tensor(0)
            loss = nll_loss
        else:
            logits = F.log_softmax(outputs, dim=(- 1))
            if (targets.dim() == 1):
                losses = F.nll_loss(logits, targets.to(logits.device), reduction='none')
            else:
                losses = F.kl_div(logits, targets.to(logits.device), reduction='none')
                losses = losses.sum((- 1))
            nll_loss = mean_ds(losses)
            if (label_smoothing > 0):
                # Interpolate between the data loss and a uniform prior.
                loss = ((nll_loss * (1 - label_smoothing)) - (mean_ds(logits) * label_smoothing))
            else:
                loss = nll_loss
        loss = (loss * factor)
        return {'name': name, 'loss': loss, 'nll_loss': nll_loss, 'factor': factor}

    def _custom_loss(self, loss, name='loss', factor=1.0):
        """Wrap a precomputed loss tensor in the standard loss dict."""
        return {'name': name, 'loss': loss, 'factor': factor}

    def forward(self, model, sample, reduce=True):
        """Compute the total loss for a sample.

        Returns (loss, sample_size, logging_output); sample_size is fixed to
        1 so gradients are averaged per batch rather than per token.
        """
        (nsentences, ntokens) = (sample['nsentences'], sample['ntokens'])
        (src_tokens, src_lengths) = (sample['net_input']['src_tokens'], sample['net_input']['src_lengths'])
        (tgt_tokens, prev_output_tokens) = (sample['target'], sample['prev_target'])
        outputs = model(src_tokens, src_lengths, prev_output_tokens, tgt_tokens)
        (losses, nll_loss) = ([], [])
        for obj in outputs:
            if (outputs[obj].get('loss', None) is None):
                _losses = self._compute_loss(outputs[obj].get('out'), outputs[obj].get('tgt'), outputs[obj].get('mask', None), outputs[obj].get('ls', 0.0), name=(obj + '-loss'), factor=outputs[obj].get('factor', 1.0))
            else:
                _losses = self._custom_loss(outputs[obj].get('loss'), name=(obj + '-loss'), factor=outputs[obj].get('factor', 1.0))
            losses += [_losses]
            if outputs[obj].get('nll_loss', False):
                nll_loss += [_losses.get('nll_loss', 0.0)]
        loss = sum((l['loss'] for l in losses))
        nll_loss = (sum((l for l in nll_loss)) if (len(nll_loss) > 0) else loss.new_tensor(0))
        sample_size = 1
        logging_output = {'loss': loss.data, 'nll_loss': nll_loss.data, 'ntokens': ntokens, 'nsentences': nsentences, 'sample_size': sample_size}
        for l in losses:
            # Fixed: was l[['loss']].data in the non-reduced branch.
            logging_output[l['name']] = (utils.item((l['loss'].data / l['factor'])) if reduce else (l['loss'].data / l['factor']))
        return (loss, sample_size, logging_output)

    @staticmethod
    def reduce_metrics(logging_outputs) -> None:
        """Aggregate logging outputs from data-parallel training workers."""
        sample_size = utils.item(sum((log.get('sample_size', 0) for log in logging_outputs)))
        loss = utils.item(sum((log.get('loss', 0) for log in logging_outputs)))
        nll_loss = utils.item(sum((log.get('nll_loss', 0) for log in logging_outputs)))
        # Losses are reported in base-2 (bits).
        metrics.log_scalar('loss', ((loss / sample_size) / math.log(2)), sample_size, round=3)
        metrics.log_scalar('nll_loss', ((nll_loss / sample_size) / math.log(2)), sample_size, round=3)
        metrics.log_derived('ppl', (lambda meters: utils.get_perplexity(meters['loss'].avg)))
        for key in logging_outputs[0]:
            if (key[(- 5):] == '-loss'):
                val = sum((log.get(key, 0) for log in logging_outputs))
                metrics.log_scalar(key[:(- 5)], (((val / sample_size) / math.log(2)) if (sample_size > 0) else 0.0), sample_size, round=3)

    @staticmethod
    def logging_outputs_can_be_summed() -> bool:
        """Logging outputs can be summed across workers (enables fast
        cross-worker reduction)."""
        return True
class SpectralNorm():
    """Forward-pre-hook state for spectral normalization of a weight.

    Keeps the unnormalized weight in ``<name>_orig`` plus power-iteration
    vectors ``<name>_u``/``<name>_v`` and, on every forward pass, re-sets
    ``<name>`` to ``weight / sigma`` where sigma is the estimated largest
    singular value of the weight matrix.
    """
    # Bumped when the state-dict format of the hook changes.
    _version: int = 1
    name: str
    dim: int
    n_power_iterations: int
    eps: float
    def __init__(self, name: str='weight', n_power_iterations: int=1, dim: int=0, eps: float=1e-12) -> None:
        self.name = name
        self.dim = dim
        if (n_power_iterations <= 0):
            raise ValueError('Expected n_power_iterations to be positive, but got n_power_iterations={}'.format(n_power_iterations))
        self.n_power_iterations = n_power_iterations
        self.eps = eps
    def reshape_weight_to_matrix(self, weight: torch.Tensor) -> torch.Tensor:
        """Flatten *weight* to 2-D with ``self.dim`` as the row dimension."""
        weight_mat = weight
        if (self.dim != 0):
            # Move the normalized dim to the front before flattening.
            weight_mat = weight_mat.permute(self.dim, *[d for d in range(weight_mat.dim()) if (d != self.dim)])
        height = weight_mat.size(0)
        return weight_mat.reshape(height, (- 1))
    def compute_weight(self, module: Module, do_power_iteration: bool) -> torch.Tensor:
        """Return the spectrally normalized weight.

        When ``do_power_iteration`` is True, u and v are refined in place
        (they are registered buffers, so updates persist across calls).
        """
        weight = getattr(module, (self.name + '_orig'))
        u = getattr(module, (self.name + '_u'))
        v = getattr(module, (self.name + '_v'))
        weight_mat = self.reshape_weight_to_matrix(weight)
        if do_power_iteration:
            with torch.no_grad():
                for _ in range(self.n_power_iterations):
                    # out=u/out=v writes into the registered buffers.
                    v = normalize(torch.mv(weight_mat.t(), u), dim=0, eps=self.eps, out=v)
                    u = normalize(torch.mv(weight_mat, v), dim=0, eps=self.eps, out=u)
                if (self.n_power_iterations > 0):
                    # Clone so sigma does not alias the buffers updated above.
                    u = u.clone(memory_format=torch.contiguous_format)
                    v = v.clone(memory_format=torch.contiguous_format)
        sigma = torch.dot(u, torch.mv(weight_mat, v))
        weight = (weight / sigma)
        return weight
    def remove(self, module: Module) -> None:
        """Detach the hook's state and restore a plain ``<name>`` parameter."""
        with torch.no_grad():
            weight = self.compute_weight(module, do_power_iteration=False)
        delattr(module, self.name)
        delattr(module, (self.name + '_u'))
        delattr(module, (self.name + '_v'))
        delattr(module, (self.name + '_orig'))
        module.register_parameter(self.name, torch.nn.Parameter(weight.detach()))
    def __call__(self, module: Module, inputs: Any) -> None:
        # Forward pre-hook: refresh the normalized weight (power iteration
        # only while training).
        setattr(module, self.name, self.compute_weight(module, do_power_iteration=module.training))
    def _solve_v_and_rescale(self, weight_mat, u, target_sigma):
        # Least-squares v such that W @ v ~ u, rescaled so that
        # u^T W v == target_sigma (used when loading older state dicts).
        v = torch.chain_matmul(weight_mat.t().mm(weight_mat).pinverse(), weight_mat.t(), u.unsqueeze(1)).squeeze(1)
        return v.mul_((target_sigma / torch.dot(u, torch.mv(weight_mat, v))))
    def apply(module: Module, name: str, n_power_iterations: int, dim: int, eps: float) -> 'SpectralNorm':
        """Attach spectral normalization to ``module.<name>``.

        Replaces the parameter with ``<name>_orig`` plus u/v buffers and
        registers the hook (plus state-dict save/load hooks).
        """
        for (k, hook) in module._forward_pre_hooks.items():
            if (isinstance(hook, SpectralNorm) and (hook.name == name)):
                raise RuntimeError('Cannot register two spectral_norm hooks on the same parameter {}'.format(name))
        fn = SpectralNorm(name, n_power_iterations, dim, eps)
        weight = module._parameters[name]
        with torch.no_grad():
            weight_mat = fn.reshape_weight_to_matrix(weight)
            (h, w) = weight_mat.size()
            # Random unit-norm starting vectors for the power iteration.
            u = normalize(weight.new_empty(h).normal_(0, 1), dim=0, eps=fn.eps)
            v = normalize(weight.new_empty(w).normal_(0, 1), dim=0, eps=fn.eps)
        delattr(module, fn.name)
        module.register_parameter((fn.name + '_orig'), weight)
        # Plain attribute (not a Parameter) so it can be overwritten each
        # forward pass without confusing the optimizer.
        setattr(module, fn.name, weight.data)
        module.register_buffer((fn.name + '_u'), u)
        module.register_buffer((fn.name + '_v'), v)
        module.register_forward_pre_hook(fn)
        module._register_state_dict_hook(SpectralNormStateDictHook(fn))
        module._register_load_state_dict_pre_hook(SpectralNormLoadStateDictPreHook(fn))
        return fn
def thresholding(S: np.ndarray, thresh: Union[(str, float)]) -> np.ndarray:
    """Binarize *S* at *thresh*.

    With ``thresh='auto'`` a robust threshold is derived from the median and
    the normalized median absolute deviation (MAD / 0.675 approximates the
    standard deviation for Gaussian data) at the 1 - 1e-6 quantile.
    Returns a boolean mask of the same shape as *S*.
    """
    if thresh == 'auto':
        center = np.median(S)
        spread = np.median(np.abs(S - center)) / 0.675
        thresh = norm.ppf(1 - 1e-06, loc=center, scale=spread)
    return S >= thresh
def load_model(file_name, gpu=False):
    """Deserialize a model checkpoint and place it on CPU or GPU.

    ``map_location`` forces all tensors onto CPU during unpickling; the model
    is then moved to CUDA only when ``gpu`` is True.
    """
    with open(file_name, 'rb') as f:
        model = torch.load(f, map_location=lambda storage, loc: storage)
    print('gpu:', gpu)
    if gpu:
        model.cuda()
    else:
        model.set_device_id(None)
    return model
def _register_pytree_node(typ: Any, flatten_fn: FlattenFunc, unflatten_fn: UnflattenFunc) -> None:
    """Register *typ* as a pytree node by storing its flatten/unflatten pair
    in the global ``SUPPORTED_NODES`` registry."""
    SUPPORTED_NODES[typ] = NodeDef(flatten_fn, unflatten_fn)
class Subsets_sk(Subsets_s):
    """The set of size-``k`` subsets of an underlying set ``s``.

    Specializes ``Subsets_s`` by restricting membership, cardinality,
    iteration and (un)ranking to subsets of a fixed size.
    """
    def __init__(self, s, k):
        Subsets_s.__init__(self, s)
        self._k = Integer(k)
        if (self._k < 0):
            raise ValueError('the integer k (={}) should be non-negative'.format(k))
    def _repr_(self):
        """String representation: the base repr plus the size constraint."""
        return (Subsets_s._repr_(self) + ' of size {}'.format(self._k))
    def __contains__(self, value):
        """A value belongs here iff it has size k and is a subset of s."""
        return ((len(value) == self._k) and Subsets_s.__contains__(self, value))
    def __eq__(self, other):
        if (self.__class__ != other.__class__):
            return False
        return ((self._s == other._s) and (self._k == other._k))
    def __ne__(self, other):
        return (not (self == other))
    def __hash__(self):
        # Consistent with __eq__: hash on (underlying set, subset size).
        return hash((self._s, self._k))
    def cardinality(self):
        """Number of size-k subsets: binomial(|s|, k); zero when k > |s|."""
        if (self._k > self._s.cardinality()):
            return ZZ_0
        return binomial(self._s.cardinality(), self._k)
    __len__ = cardinality
    def first(self):
        """The first k elements of s in iteration order; raises
        EmptySetError when no size-k subset exists."""
        if ((self._k < 0) or (self._k > self._s.cardinality())):
            raise EmptySetError
        else:
            return self.element_class(list(itertools.islice(self._s, int(self._k))))
    def last(self):
        """The last k elements of s (in reversed iteration order)."""
        if (self._k > self._s.cardinality()):
            raise EmptySetError
        return self.element_class([i for i in itertools.islice(reversed(self._s), int(self._k))])
    def _fast_iterator(self):
        # Raw k-combinations as tuples, without element_class wrapping.
        return itertools.combinations(self._s, self._k)
    def __iter__(self):
        for x in self._fast_iterator():
            (yield self.element_class(x))
    def random_element(self):
        """A uniformly random size-k subset of s."""
        lset = self._ls
        if (self._k > len(lset)):
            raise EmptySetError
        else:
            return self.element_class(rnd.sample(lset, self._k))
    def rank(self, sub):
        """Position of *sub* in the combination ordering; raises ValueError
        when *sub* is not a size-k subset of s."""
        sub = Set(sub)
        n = self._s.cardinality()
        if ((self._k != sub.cardinality()) or (self._k > n)):
            raise ValueError('{} is not a subset of length {} of {}'.format(sub, self._k, self._s))
        try:
            index_list = sorted((self._s.rank(x) for x in sub))
        except ValueError:
            raise ValueError('{} is not a subset of length {} of {}'.format(sub, self._k, self._s))
        return combination.rank(index_list, n)
    def unrank(self, r):
        """The subset at rank *r*; raises IndexError for out-of-range ranks."""
        lset = self._ls
        n = len(lset)
        if ((self._k > n) or (r >= self.cardinality()) or (r < 0)):
            raise IndexError('index out of range')
        else:
            return self.element_class([lset[i] for i in combination.from_rank(r, n, self._k)])
    def an_element(self):
        # An arbitrary but deterministic representative: the middle rank.
        return self.unrank((self.cardinality() // 2))
_model
def ig_resnext101_32x32d(pretrained=True, **kwargs):
    """ResNeXt-101 32x32d, weakly supervised pre-training on Instagram
    hashtags (defaults to the pretrained weights)."""
    model_args = dict(
        block=Bottleneck, layers=[3, 4, 23, 3],
        cardinality=32, base_width=32, **kwargs)
    return _create_resnet('ig_resnext101_32x32d', pretrained, **model_args)
def ndrange(slice_list: Union[(Tuple[slice], slice)]):
    """Yield index tuples covering the cartesian product of the given slices.

    A single (non-sequence) slice yields its scalar indices directly.
    """
    if isinstance(slice_list, (tuple, list)):
        axes = tuple(slicetoxrange(dim) for dim in slice_list)
        yield from itertools.product(*axes)
    else:
        yield from slicetoxrange(slice_list)
def getargvalues(frame):
    """Return (args, varargs, varkw, locals) for *frame*, mirroring
    ``inspect.getargvalues``."""
    spec = getargs(frame.f_code)
    return (spec[0], spec[1], spec[2], frame.f_locals)
class RetinaNetModule(torch.nn.Module):
    """Assembles the RetinaNet head with anchor generation, loss computation
    (training) and box post-processing (inference)."""

    def __init__(self, cfg, in_channels):
        super(RetinaNetModule, self).__init__()
        self.cfg = cfg.clone()
        box_coder = BoxCoder(weights=(10.0, 10.0, 5.0, 5.0))
        self.anchor_generator = make_anchor_generator_retinanet(cfg)
        self.head = RetinaNetHead(cfg, in_channels)
        self.box_selector_test = make_retinanet_postprocessor(cfg, box_coder, is_train=False)
        self.loss_evaluator = make_retinanet_loss_evaluator(cfg, box_coder)

    def forward(self, images, features, targets=None):
        """Run the head and dispatch to the training or inference branch."""
        box_cls, box_regression = self.head(features)
        anchors = self.anchor_generator(images, features)
        if not self.training:
            return self._forward_test(anchors, box_cls, box_regression)
        return self._forward_train(anchors, box_cls, box_regression, targets)

    def _forward_train(self, anchors, box_cls, box_regression, targets):
        # Classification and regression losses against ground-truth targets.
        loss_box_cls, loss_box_reg = self.loss_evaluator(anchors, box_cls, box_regression, targets)
        losses = {'loss_retina_cls': loss_box_cls, 'loss_retina_reg': loss_box_reg}
        return (anchors, losses)

    def _forward_test(self, anchors, box_cls, box_regression):
        # Decode detections from the raw head outputs; no losses at test time.
        boxes = self.box_selector_test(anchors, box_cls, box_regression)
        return (boxes, {})
class BiLSTMp(nn.Module):
    """Stacked bidirectional LSTM encoder with per-layer projections and
    optional temporal subsampling between layers (pyramidal BiLSTM)."""

    def __init__(self, input_size, hidden_size, proj_size, layers, proj_activ='tanh', dropout=0):
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.proj_size = proj_size
        # '1_2_2' -> [1, 2, 2]: subsampling factor applied before each layer.
        self.layers = [int(i) for i in layers.split('_')]
        self.n_layers = len(self.layers)
        self.dropout = dropout
        self.proj_activ = proj_activ
        # Bidirectional output is twice the hidden size.
        self.ctx_size = self.hidden_size * 2
        # Pads one extra timestep at the end of the sequence axis.
        self.pad_tuple = (0, 0, 0, 0, 0, 1)
        self.ffs = nn.ModuleList()
        self.lstms = nn.ModuleList()
        if self.dropout > 0:
            self.do = nn.Dropout(self.dropout)
        for depth in range(self.n_layers):
            # NOTE(review): deeper layers expect hidden_size inputs, which
            # presumes proj_size == hidden_size — confirm with the config.
            in_size = self.input_size if depth == 0 else self.hidden_size
            self.lstms.append(nn.LSTM(in_size, self.hidden_size, bidirectional=True))
            self.ffs.append(FF(self.ctx_size, self.proj_size, activ=self.proj_activ))

    def forward(self, x):
        """Encode *x*; returns (hidden_states, None)."""
        hs = F.pad(x, self.pad_tuple)
        for factor, lstm, ff in zip(self.layers, self.lstms, self.ffs):
            # Subsample the time axis before layers with factor > 1.
            inp = hs[::factor] if factor > 1 else hs
            hs = ff(lstm(inp)[0])
            if self.dropout > 0:
                hs = self.do(hs)
        return (hs, None)
class BucketingSampler(Sampler):
    """Yields fixed-size batches of consecutive sample ids; each batch's
    internal order is shuffled anew on every pass."""

    def __init__(self, data_source, batch_size=1):
        super(BucketingSampler, self).__init__(data_source)
        self.data_source = data_source
        all_ids = list(range(0, len(data_source)))
        # Chunk consecutive ids into buckets (last bucket may be short).
        self.bins = [all_ids[start:start + batch_size]
                     for start in range(0, len(all_ids), batch_size)]

    def __iter__(self):
        for bucket in self.bins:
            # Shuffle in place within the bucket on each iteration pass.
            np.random.shuffle(bucket)
            yield bucket

    def __len__(self):
        return len(self.bins)

    def shuffle(self, epoch):
        # Reorders the buckets themselves; `epoch` is accepted for API
        # compatibility but not used to seed the shuffle.
        np.random.shuffle(self.bins)
class UnpairedImageTest(UnpairedImageBase):
    """Test split for unpaired image-to-image data: wraps UnpairedImagePaths
    and caps the dataset length at 1000 samples."""
    def __init__(self, size=None, random_crop=False, folder1=None, folder2=None, numpy_folder1=None, numpy_folder2=None, wikiart_info1=None, wikiart_key1=None, wikiart_info2=None, wikiart_key2=None):
        super().__init__()
        # All source options (folders, numpy dumps, wikiart metadata) are
        # forwarded unchanged to the paths dataset.
        self.data = UnpairedImagePaths(size=size, random_crop=random_crop, folder1=folder1, folder2=folder2, numpy_folder1=numpy_folder1, numpy_folder2=numpy_folder2, wikiart_info1=wikiart_info1, wikiart_key1=wikiart_key1, wikiart_info2=wikiart_info2, wikiart_key2=wikiart_key2)
        # Limit evaluation to at most 1000 samples.
        self.data._length = min(self.data._length, 1000)
def load_img_future_de_haze_revide(filepath, nFrames, img_id, phase='train'):
    """Load a window of ``nFrames`` hazy frames centered around ``img_id``
    plus the matching ground-truth frames from the REVIDE dataset.

    Returns (target, neigbor): lists of RGB PIL images — the GT frames and
    the corresponding hazy input frames.  Removed: a large dead bare-string
    block of commented-out motion-loading code, and an ``is None`` check
    that could never fire (target is always a list).
    """
    tt = int(nFrames / 2)
    # Shift the center so the window [img_id - tt, img_id + tt] is valid.
    img_id = img_id + tt
    # Sequence directory name, e.g. Dataset/REVIDE/<split>/<seq>/...
    num_dir = filepath.split('/')[3]
    if phase == 'train':
        targetPath = 'Dataset/REVIDE/Train_GT/' + num_dir
    else:
        targetPath = 'Dataset/REVIDE/Test_GT/' + num_dir
    neigbor = []
    target = []
    seq = [x for x in range(img_id - tt, (img_id + 1) + tt)]
    for j in seq:
        frame_name = str(j).zfill(5) + '.jpg'
        neigbor.append(Image.open(filepath + '/' + frame_name).convert('RGB'))
        target.append(Image.open(targetPath + '/' + frame_name).convert('RGB'))
    if not target:
        # Empty window means nFrames < 1; keep the original abort behavior.
        print('read false')
        exit()
    return (target, neigbor)
def load_and_cache_rank_examples(args, tokenizer, evaluate=False):
    """Dispatch ranking-example loading to the dataset-specific loader."""
    loaders = {
        'grail': grail_load_and_cache_rank_examples,
        'webqsp': webqsp_load_and_cache_rank_examples,
    }
    loader = loaders.get(args.dataset)
    if loader is None:
        raise RuntimeError('Unsupported Ranking Dataset')
    return loader(args, tokenizer, evaluate=evaluate)
class CohereTokenCostEstimator(TokenCostEstimator):
    def estimate_tokens(self, request: Request, metric_service: MetricService) -> int:
        """Upper-bound token estimate for a Cohere request: one maximal
        completion per requested sample (prompt tokens are not counted here)."""
        return (request.num_completions * request.max_tokens)
def pytest_addoption(parser):
    """Pytest hook: register the Schemathesis.io service-token CLI option."""
    group = parser.getgroup('schemathesis')
    # Defaults to DEFAULT_SERVICE_TOKEN so CI can supply it without a flag.
    group.addoption('--schemathesis-io-token', action='store', default=DEFAULT_SERVICE_TOKEN, help='A token to access the test Schemathesis.io instance.')
def parse_encoder(parser, arg_str=None):
    """Register encoder/training options on *parser* and set their defaults.

    ``arg_str`` is accepted for interface compatibility but unused here.
    Note that set_defaults also injects optimizer keys (opt, lr, ...) that
    have no matching add_argument call.
    """
    group = parser.add_argument_group()
    # (flag, type, help) triples for the plain typed options, in the same
    # order the original registered them.
    typed_options = [
        ('--conv_type', str, 'type of convolution'),
        ('--method_type', str, 'type of embedding'),
        ('--batch_size', int, 'Training batch size'),
        ('--n_layers', int, 'Number of graph conv layers'),
        ('--hidden_dim', int, 'Training hidden size'),
        ('--skip', str, '"all" or "last"'),
        ('--dropout', float, 'Dropout rate'),
        ('--n_batches', int, 'Number of training minibatches'),
        ('--margin', float, 'margin for loss'),
        ('--dataset', str, 'Dataset'),
        ('--test_set', str, 'test set filename'),
        ('--eval_interval', int, 'how often to eval during training'),
        ('--val_size', int, 'validation set size'),
        ('--model_path', str, 'path to save/load model'),
        ('--opt_scheduler', str, 'scheduler name'),
    ]
    for flag, opt_type, opt_help in typed_options:
        group.add_argument(flag, type=opt_type, help=opt_help)
    group.add_argument('--node_anchored', action='store_true', help='whether to use node anchoring in training')
    group.add_argument('--test', action='store_true')
    group.add_argument('--n_workers', type=int)
    group.add_argument('--tag', type=str, help='tag to identify the run')
    group.set_defaults(conv_type='SAGE', method_type='order', dataset='syn', n_layers=8, batch_size=64, hidden_dim=64, skip='learnable', dropout=0.0, n_batches=1000000, opt='adam', opt_scheduler='none', opt_restart=100, weight_decay=0.0, lr=0.0001, margin=0.1, test_set='', eval_interval=1000, n_workers=4, model_path='ckpt/model.pt', tag='', val_size=4096, node_anchored=True)
def get_render_func(venv):
    """Unwrap nested vec-env wrappers and return the innermost `render`
    callable, or None if no wrapped env exposes one."""
    if hasattr(venv, 'envs'):
        return venv.envs[0].render
    # Recurse through the common wrapper attribute names, in order.
    for attr in ('venv', 'env'):
        if hasattr(venv, attr):
            return get_render_func(getattr(venv, attr))
    return None
def binop_node(pos, operator, operand1, operand2, inplace=False, **kwargs):
    """Construct the AST node for a binary operation by dispatching on
    *operator* to the node class registered in ``binop_node_classes``."""
    return binop_node_classes[operator](pos, operator=operator, operand1=operand1, operand2=operand2, inplace=inplace, **kwargs)
_if_32bit
.parametrize('csr_container', CSR_CONTAINERS)
def test_countvectorizer_sort_features_64bit_sparse_indices(csr_container):
    """`_sort_features` must preserve int64 sparse index dtypes instead of
    silently downcasting them."""
    INDICES_DTYPE = np.int64
    X = csr_container((5, 5), dtype=np.int64)
    # Force 64-bit index arrays on the empty sparse matrix.
    X.indices = X.indices.astype(INDICES_DTYPE)
    X.indptr = X.indptr.astype(INDICES_DTYPE)
    vocabulary = {'scikit-learn': 0, 'is': 1, 'great!': 2}
    Xs = CountVectorizer()._sort_features(X, vocabulary)
    assert Xs.indices.dtype == INDICES_DTYPE
def conv2d_transpose(inputs, num_output_channels, kernel_size, scope, stride=[1, 1], padding='SAME', use_xavier=True, stddev=0.001, weight_decay=None, activation_fn=tf.nn.relu, bn=False, bn_decay=None, is_training=None):
    """2D transposed convolution (deconvolution) layer for TF1 graphs.

    Builds a kernel variable (optionally weight-decayed / Xavier-initialized)
    inside *scope*, computes the static output shape from the input shape,
    stride and padding, then applies conv2d_transpose, bias, optional batch
    norm and optional activation.  Expects NHWC inputs with fully static
    shapes (output shape is derived from .value of each dimension).
    """
    with tf.variable_scope(scope) as sc:
        (kernel_h, kernel_w) = kernel_size
        num_in_channels = inputs.get_shape()[(- 1)].value
        # conv2d_transpose kernels are [h, w, out_channels, in_channels].
        kernel_shape = [kernel_h, kernel_w, num_output_channels, num_in_channels]
        kernel = _variable_with_weight_decay('weights', shape=kernel_shape, use_xavier=use_xavier, stddev=stddev, wd=weight_decay)
        (stride_h, stride_w) = stride
        def get_deconv_dim(dim_size, stride_size, kernel_size, padding):
            # Inverse of the conv output-size formula: upsample by stride,
            # and for VALID padding add back the kernel overhang.
            dim_size *= stride_size
            if ((padding == 'VALID') and (dim_size is not None)):
                dim_size += max((kernel_size - stride_size), 0)
            return dim_size
        batch_size = inputs.get_shape()[0].value
        height = inputs.get_shape()[1].value
        width = inputs.get_shape()[2].value
        out_height = get_deconv_dim(height, stride_h, kernel_h, padding)
        out_width = get_deconv_dim(width, stride_w, kernel_w, padding)
        output_shape = [batch_size, out_height, out_width, num_output_channels]
        outputs = tf.nn.conv2d_transpose(inputs, kernel, output_shape, [1, stride_h, stride_w, 1], padding=padding)
        biases = _variable_on_cpu('biases', [num_output_channels], tf.constant_initializer(0.0))
        outputs = tf.nn.bias_add(outputs, biases)
        if bn:
            outputs = batch_norm_for_conv2d(outputs, is_training, bn_decay=bn_decay, scope='bn')
        if (activation_fn is not None):
            outputs = activation_fn(outputs)
        return outputs
def parse_args():
    """Parse command-line arguments for the dataset-checking script.

    Returns:
        argparse.Namespace: parsed arguments, with ``eval_options``
        populated from the deprecated ``--options`` flag when only that
        flag is given.

    Raises:
        ValueError: if both --options and --eval-options are specified.
    """
    parser = argparse.ArgumentParser(description='MMAction2 check datasets')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('--options', nargs='+', action=DictAction, default={}, help='custom options for evaluation, the key-value pair in xxx=yyy format will be kwargs for dataset.evaluate() function (deprecate), change to --eval-options instead.')
    # Bug fix: args.eval_options is read below but this option was never
    # registered, so parse_args() crashed with AttributeError.
    parser.add_argument('--eval-options', nargs='+', action=DictAction, default={}, help='custom options for evaluation, the key-value pair in xxx=yyy format will be kwargs for dataset.evaluate() function')
    parser.add_argument('--cfg-options', nargs='+', action=DictAction, default={}, help="override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file. For example, '--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'")
    parser.add_argument('--output-file', default='invalid-video.txt', help='Output file path which keeps corrupted/missing video file paths')
    parser.add_argument('--split', default='train', choices=['train', 'val', 'test'], help='Dataset split')
    parser.add_argument('--decoder', default='decord', choices=['decord', 'opencv', 'pyav'], help='Video decoder type, should be one of [decord, opencv, pyav]')
    parser.add_argument('--num-processes', type=int, default=((cpu_count() - 1) or 1), help='Number of processes to check videos')
    parser.add_argument('--remove-corrupted-videos', action='store_true', help='Whether to delete all corrupted videos')
    args = parser.parse_args()
    if (args.options and args.eval_options):
        raise ValueError('--options and --eval-options cannot be both specified, --options is deprecated in favor of --eval-options')
    if args.options:
        # Keep backward compatibility with the deprecated flag.
        warnings.warn('--options is deprecated in favor of --eval-options')
        args.eval_options = args.options
    return args
((not have_sympy), 'SymPy not installed')
def test_conv4():
    """Power expressions must convert to equivalent SymPy expressions."""
    x, y, z = Symbol('x'), Symbol('y'), Symbol('z')
    sx, sy, sz = (sympy.Symbol(name) for name in 'xyz')
    assert (x ** y)._sympy_() == sx ** sy
    assert ((x + y) ** z)._sympy_() == (sx + sy) ** sz
class textSpace(gym.spaces.Space):
    """A Gym space whose members are plain Python strings."""

    def contains(self, x) -> bool:
        # Any str (including subclasses) counts; no length/charset limits.
        return isinstance(x, str)
def read_segmentation(filename):
    """Read a segmentation JSON file and group vertices by segment id.

    Args:
        filename: path to a JSON file containing a 'segIndices' list that
            maps each vertex index to its segment id.

    Returns:
        (seg_to_verts, num_verts): dict mapping segment id -> list of
        vertex indices, and the total number of vertices.

    Raises:
        FileNotFoundError: if *filename* is not an existing file
            (previously an `assert`, which vanishes under `python -O`).
    """
    if not os.path.isfile(filename):
        raise FileNotFoundError(filename)
    with open(filename) as f:
        data = json.load(f)
    seg_indices = data['segIndices']
    num_verts = len(seg_indices)
    seg_to_verts = {}
    # Group vertex indices by segment id (insertion order of first
    # occurrence is preserved, matching the original branching logic).
    for i, seg_id in enumerate(seg_indices):
        seg_to_verts.setdefault(seg_id, []).append(i)
    return (seg_to_verts, num_verts)
class GradMixin():
    """Mixin with helpers for gradient/attribution visualization."""

    def _resize(preprocess_function, image):
        """Rescale the preprocessed image back into `image`'s value range.

        NOTE: deliberately defined without `self`; it is used like a static
        helper, so `preprocess_function` is the first positional argument.

        Args:
            preprocess_function: callable mapping `image` to a numpy array
                or a tensor convertible to one; None means no-op.
            image: project `Image` with a single instance (batch size 1).

        Returns:
            A new channel-last `Image` whose pixel values are affinely
            mapped into the value range of the original `image`.
        """
        assert (image.shape[0] == 1), '`image` can contain one instance only.'
        if (preprocess_function is None):
            return image
        y = image.to_numpy()
        x = preprocess_function(image)
        if (not isinstance(x, np.ndarray)):
            try:
                # Torch-style tensor: detach and move to host first.
                x = x.detach().cpu().numpy()
            except Exception:
                # Fix: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit. Fall back for tensor types
                # exposing only .numpy().
                x = x.numpy()
        x = x.squeeze()
        if (x.shape[0] == 3):
            # Channels-first -> channels-last.
            x = np.transpose(x, (1, 2, 0))
        (min_a, max_a) = (np.min(y), np.max(y))
        (min_b, max_b) = (np.min(x), np.max(x))
        # Affine map from x's range [min_b, max_b] onto y's range
        # [min_a, max_a]; the epsilon guards a zero-width source range.
        r = ((max_a - min_a) / ((max_b - min_b) + 1e-08))
        return Image(data=(((r * x) + min_a) - (r * min_b)).astype(int), batched=False, channel_last=True)
def modify_notebook(path: Path, config: dict) -> None:
    """Apply the configured quickrun line replacements to a notebook file.

    Idempotent: a file already carrying a '# quickrun' marker is skipped.

    Args:
        path: notebook file to rewrite in place.
        config: dict with a 'replace' list of {'from', 'to'} regex pairs.

    Raises:
        ValueError: when a configured pattern matches nothing in the file.
    """
    text = path.read_text(encoding='utf-8')
    if '# quickrun' in text:
        logger.warning('Already modified %s for quickrun', path.name)
        return
    for repl in config['replace']:
        # Match the whole line, capturing its indentation in group 2 and
        # the full original line in group 1 (kept as a trailing comment).
        pattern = '^(( *){})$'.format(repl['from'])
        replacement = '\\2{} # quickrun \\1'.format(repl['to'])
        if not re.search(pattern, text, flags=re.MULTILINE):
            raise ValueError("Quickrun replacement %r doesn't match file %s" % (repl['from'], path))
        text = re.sub(pattern, replacement, text, flags=re.MULTILINE)
    path.write_text(text, encoding='utf-8')
    logger.info('Modified %s', path.name)
def add_toctree_functions(app, pagename, templatename, context, doctree):
    """Inject a `get_nav_object` helper into the Jinja template context."""
    from sphinx.environment.adapters.toctree import TocTree

    def get_nav_object(maxdepth=None, collapse=True, numbered=False, **kwargs):
        """Build a Jinja-friendly navigation structure from the toctree."""
        toctree = TocTree(app.env).get_toctree_for(pagename, app.builder, collapse=collapse, maxdepth=maxdepth, **kwargs)
        if toctree is None:
            # Pages that belong to no toctree get an empty navigation.
            return []
        nav = []
        # Each top-level child is a bullet list; keep only its list items
        # and convert each one to a plain jinja-renderable structure.
        for child in toctree.children:
            for item in child:
                if isinstance(item, docutils.nodes.list_item):
                    nav.append(docutils_node_to_jinja(item, only_pages=True, numbered=numbered))
        return nav

    context['get_nav_object'] = get_nav_object
class IfScope(ControlFlow):
    """Control-flow scope that emits a C++ ``if`` (with optional ``else``)."""
    sdfg: SDFG
    branch_state: SDFGState  # state at which the branch decision happens
    condition: CodeBlock
    body: GeneralBlock
    orelse: Optional[GeneralBlock] = None

    def as_cpp(self, codegen, symbols) -> str:
        """Emit the C++ text for this if/else construct."""
        cond = unparse_interstate_edge(self.condition.code[0], self.sdfg, codegen=codegen)
        parts = ['if (', cond, ') {\n', self.body.as_cpp(codegen, symbols), '\n}']
        if self.orelse:
            parts.extend([' else {\n', self.orelse.as_cpp(codegen, symbols), '\n}'])
        parts.append('\n')
        return ''.join(parts)

    def first_state(self) -> SDFGState:
        """The branching state is where control enters this scope."""
        return self.branch_state

    def children(self) -> List[ControlFlow]:
        """Child blocks: the body, plus the else-branch when present."""
        kids = [self.body]
        if self.orelse:
            kids.append(self.orelse)
        return kids
def test__detrend_signal_no_trend():
    """A trend-free signal must be returned unchanged by _detrend_signal."""
    frame = pd.DataFrame({'timestamp': range(5), 'value': [0.0] * 5})
    # Snapshot before the call so in-place mutation would also be caught.
    untouched = frame.copy()
    result = benchmark._detrend_signal(frame, 'value')
    pd.testing.assert_frame_equal(result, untouched)
class QObserverBenchmark(op_bench.TorchBenchmarkBase):
    """Benchmark an observer's forward pass plus qparam computation."""

    def init(self, C, M, N, dtype, qscheme, op_func, device):
        # Random float input of shape (C, M, N) on the target device.
        self.f_input = torch.rand(C, M, N, device=device)
        # Instantiate the observer with the requested dtype/scheme.
        self.op_func = op_func(dtype=dtype, qscheme=qscheme).to(device)

    def forward(self):
        # The benchmark times both observation and qparam derivation.
        observer = self.op_func
        observer(self.f_input)
        observer.calculate_qparams()
class DetaModel(metaclass=DummyObject):
    """Placeholder standing in for DetaModel when `torch` is unavailable.

    Instantiation delegates to `requires_backends`, which reports the
    missing backend instead of letting the import silently succeed.
    """
    # Backends required by the real implementation.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class TrainerSchool():
    """Trainer wiring for the 'school' model.

    Attributes appear to be injected externally (e.g. by a config/DI
    framework); only `cfg` and `model` are declared here.
    """
    cfg: T.DictConfig  # experiment configuration tree
    model: T.Module  # presumably the model under training — confirm usage

    def init_school(self) -> T.Module:
        # Construct a fresh, untrained school network.
        return stad.models.School()

    def load_pretrained_model(self):
        # NOTE(review): reads `self.school`, which is never assigned in this
        # class (init_school returns without storing its result) — confirm
        # that a caller or framework sets `self.school` before this runs.
        self.school.load_state_dict(torch.load(self.cfg.model.school.pretrained))
def get_ae(**model_cfg):
    """Build an autoencoder model from a flat config dict.

    Expected keys in model_cfg:
        arch: one of 'ae', 'dae', 'wae', 'vae'.
        x_dim, z_dim: input and latent dimensionalities.
        encoder, decoder: kwargs dicts forwarded to get_net.
        Remaining keys are architecture-specific ('sig'/'noise_type' for
        'dae'; extra constructor kwargs for 'wae' and 'vae').

    Returns:
        The constructed AE/DAE/WAE/VAE instance.

    Raises:
        ValueError: for an unknown `arch` (previously this fell through
            and crashed with NameError on the unbound `ae`).
    """
    arch = model_cfg.pop('arch')
    x_dim = model_cfg.pop('x_dim')
    z_dim = model_cfg.pop('z_dim')
    enc_cfg = model_cfg.pop('encoder')
    dec_cfg = model_cfg.pop('decoder')
    if (arch == 'ae'):
        encoder = get_net(in_dim=x_dim, out_dim=z_dim, **enc_cfg)
        decoder = get_net(in_dim=z_dim, out_dim=x_dim, **dec_cfg)
        ae = AE(encoder, decoder)
    elif (arch == 'dae'):
        sig = model_cfg['sig']
        noise_type = model_cfg['noise_type']
        encoder = get_net(in_dim=x_dim, out_dim=z_dim, **enc_cfg)
        decoder = get_net(in_dim=z_dim, out_dim=x_dim, **dec_cfg)
        ae = DAE(encoder, decoder, sig=sig, noise_type=noise_type)
    elif (arch == 'wae'):
        encoder = get_net(in_dim=x_dim, out_dim=z_dim, **enc_cfg)
        decoder = get_net(in_dim=z_dim, out_dim=x_dim, **dec_cfg)
        ae = WAE(encoder, decoder, **model_cfg)
    elif (arch == 'vae'):
        # VAE encoder outputs mean and log-variance, hence 2 * z_dim.
        # (The old unused `sigma_trainable` local was dropped; it still
        # reaches VAE through **model_cfg.)
        encoder = get_net(in_dim=x_dim, out_dim=(z_dim * 2), **enc_cfg)
        decoder = get_net(in_dim=z_dim, out_dim=x_dim, **dec_cfg)
        ae = VAE(encoder, decoder, **model_cfg)
    else:
        raise ValueError(f'Unknown autoencoder arch: {arch!r}')
    return ae
def all_but(train: list[Example], x: Example) -> list[Example]:
    """Return the training examples that share no question with *x*.

    An example conflicts with *x* when any of its questions (current or
    from its history) appears among *x*'s current or historical questions.
    """
    # Hoist x's question set out of the loop; disjointness check replaces
    # the original "empty intersection" test.
    x_questions = set(x.get('history', []) + [x.question])
    return [y for y in train
            if x_questions.isdisjoint(y.get('history', []) + [y.question])]
class Dataset_ETT_hour(Dataset):
    """ETT hourly dataset yielding (seq_x, seq_y, seq_x_mark, seq_y_mark).

    The raw CSV is split into fixed 12/4/4-month train/val/test windows
    (in hours), optionally standardized with statistics fitted on the
    train window only, and iterated as sliding windows.
    """

    def __init__(self, root_path, flag='train', size=None, features='S', data_path='ETTh1.csv', target='OT', scale=True, timeenc=0, freq='h'):
        # size = [seq_len, label_len, pred_len]; defaults correspond to a
        # 4-day input window and 1-day label/prediction windows.
        if size is None:
            self.seq_len = 24 * 4 * 4
            self.label_len = 24 * 4
            self.pred_len = 24 * 4
        else:
            self.seq_len = size[0]
            self.label_len = size[1]
            self.pred_len = size[2]
        assert flag in ['train', 'test', 'val']
        type_map = {'train': 0, 'val': 1, 'test': 2}
        self.set_type = type_map[flag]
        self.features = features  # 'M'/'MS': all columns, 'S': target only
        self.target = target
        self.scale = scale
        self.timeenc = timeenc  # 0: calendar columns, 1: time_features()
        self.freq = freq
        self.root_path = root_path
        self.data_path = data_path
        self.__read_data__()

    def __read_data__(self):
        """Load the CSV, slice the split window, scale, build time marks."""
        self.scaler = StandardScaler()
        df_raw = pd.read_csv(os.path.join(self.root_path, self.data_path))
        # Borders in hours: 12 months train, 4 val, 4 test. val/test start
        # seq_len earlier so their first window is fully usable.
        border1s = [0, (12 * 30 * 24) - self.seq_len, (12 * 30 * 24) + (4 * 30 * 24) - self.seq_len]
        border2s = [12 * 30 * 24, (12 * 30 * 24) + (4 * 30 * 24), (12 * 30 * 24) + (8 * 30 * 24)]
        border1 = border1s[self.set_type]
        border2 = border2s[self.set_type]
        if self.features in ('M', 'MS'):
            cols_data = df_raw.columns[1:]  # drop the date column
            df_data = df_raw[cols_data]
        elif self.features == 'S':
            df_data = df_raw[[self.target]]
        if self.scale:
            # Fit on the train window only to avoid leakage into val/test.
            train_data = df_data[border1s[0]:border2s[0]]
            self.scaler.fit(train_data.values)
            data = self.scaler.transform(df_data.values)
        else:
            data = df_data.values
        df_stamp = df_raw[['date']][border1:border2]
        df_stamp['date'] = pd.to_datetime(df_stamp.date)
        if self.timeenc == 0:
            # Plain calendar features. Fixes: the old code passed a stray
            # positional `1` to Series.apply (the deprecated convert_dtype
            # argument) and used drop's positional `axis` form, which was
            # removed in pandas 2.0.
            df_stamp['month'] = df_stamp.date.apply(lambda row: row.month)
            df_stamp['day'] = df_stamp.date.apply(lambda row: row.day)
            df_stamp['weekday'] = df_stamp.date.apply(lambda row: row.weekday())
            df_stamp['hour'] = df_stamp.date.apply(lambda row: row.hour)
            data_stamp = df_stamp.drop(columns=['date']).values
        elif self.timeenc == 1:
            data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq)
            data_stamp = data_stamp.transpose(1, 0)
        self.data_x = data[border1:border2]
        self.data_y = data[border1:border2]
        self.data_stamp = data_stamp

    def __getitem__(self, index):
        """Return one sliding window (encoder input, decoder target, marks)."""
        s_begin = index
        s_end = s_begin + self.seq_len
        # Decoder input overlaps the last label_len steps of the encoder.
        r_begin = s_end - self.label_len
        r_end = r_begin + self.label_len + self.pred_len
        seq_x = self.data_x[s_begin:s_end]
        seq_y = self.data_y[r_begin:r_end]
        seq_x_mark = self.data_stamp[s_begin:s_end]
        seq_y_mark = self.data_stamp[r_begin:r_end]
        return (seq_x, seq_y, seq_x_mark, seq_y_mark)

    def __len__(self):
        # Number of complete (seq_len + pred_len) windows in the split.
        return len(self.data_x) - self.seq_len - self.pred_len + 1

    def inverse_transform(self, data):
        """Map standardized values back to the original data scale."""
        return self.scaler.inverse_transform(data)
class AmbientSpace(ambient_space.AmbientSpace):
    """Ambient-space realization of this root system.

    Roots are signed sums of coordinate monomials e_i; the parity
    arguments p1/p2 select the sign of each term. The simple roots below
    (e_{i-1} - e_i for i < n, e_{n-2} + e_{n-1} otherwise) and the
    halved fundamental weights are consistent with a type-D style
    realization — confirm against the enclosing Cartan type.
    """

    def dimension(self):
        """Ambient dimension equals the rank of the Cartan type."""
        return self.root_system.cartan_type().rank()

    def root(self, i, j, p1, p2):
        """Return (-1)^p1 e_i + (-1)^p2 e_j, or (-1)^p1 e_i when i == j."""
        if (i != j):
            return ((((- 1) ** p1) * self.monomial(i)) + (((- 1) ** p2) * self.monomial(j)))
        return (((- 1) ** p1) * self.monomial(i))

    def simple_root(self, i):
        """The i-th simple root: e_{i-1} - e_i for i < n, else e_{n-2} + e_{n-1}.

        Raises:
            ValueError: if *i* is not in the index set.
        """
        if (i not in self.index_set()):
            raise ValueError('{} is not in the index set'.format(i))
        return (self.root((i - 1), i, 0, 1) if (i < self.n) else self.root((self.n - 2), (self.n - 1), 0, 0))

    def positive_roots(self):
        """All positive roots: e_i + e_j and e_i - e_j for i < j."""
        res = []
        # p = 0 gives e_i + e_j; p = 1 gives e_i - e_j (i < j in both cases).
        for p in [0, 1]:
            for j in range(self.n):
                res.extend([self.root(i, j, 0, p) for i in range(j)])
        return res

    def negative_roots(self):
        """All negative roots: -e_i + e_j and -e_i - e_j for i < j."""
        res = []
        # p1 = 1 negates the first term; p = 0/1 selects the second sign.
        for p in [0, 1]:
            for j in range(self.n):
                res.extend([self.root(i, j, 1, p) for i in range(j)])
        return res

    def fundamental_weight(self, i):
        """The i-th fundamental weight.

        For i == n: (e_0 + ... + e_{n-1}) / 2; for i == n-1:
        (e_0 + ... + e_{n-2} - e_{n-1}) / 2 (the two half-sum "spin"
        weights); otherwise e_0 + ... + e_{i-1}.

        Raises:
            ValueError: if *i* is not in the index set.
        """
        if (i not in self.index_set()):
            raise ValueError('{} is not in the index set'.format(i))
        n = self.dimension()
        if (i == n):
            return (self.sum((self.monomial(j) for j in range(n))) / 2)
        elif (i == (n - 1)):
            return ((self.sum((self.monomial(j) for j in range((n - 1)))) - self.monomial((n - 1))) / 2)
        else:
            return self.sum((self.monomial(j) for j in range(i)))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.