code stringlengths 101 5.91M |
|---|
class FreeModule_submodule_with_basis_pid(FreeModule_generic_pid):
    """Submodule of an ambient free module over a PID, with a user-chosen basis.

    Keeps both the user basis and an echelonized basis, plus lazily-computed
    change-of-basis matrices between them; coordinate computations route
    through these matrices.

    NOTE(review): relies on many names defined elsewhere in the module
    (``FreeModule_ambient_pid``, ``basis_seq``, ``rich_to_bool``,
    ``richcmp``, ``richcmp_not_equal``, ``latex``, ``free_module_element``,
    ``VectorSpace``, ``FreeModule``, ``PrincipalIdealDomains``, ``sage``).
    """

    def __init__(self, ambient, basis, check=True, echelonize=False,
                 echelonized_basis=None, already_echelonized=False,
                 category=None):
        """Create the submodule of ``ambient`` spanned by ``basis``.

        Basis elements are coerced into ``ambient``; if that fails they are
        coerced into the ambient vector space instead, in which case the
        coordinate ring becomes that vector space's base ring.

        Raises TypeError for a non-ambient ``ambient`` or uncoercible basis
        elements, and ValueError when ``check`` detects linear dependence.
        """
        if (not isinstance(ambient, FreeModule_ambient_pid)):
            raise TypeError(('ambient (=%s) must be ambient.' % ambient))
        self.__ambient_module = ambient
        R = ambient.base_ring()
        # Coordinate ring defaults to the base ring; widened below if the
        # basis only lives in the ambient vector space.
        R_coord = R
        try:
            basis = [ambient(x) for x in basis]
        except TypeError:
            V = ambient.ambient_vector_space()
            R_coord = V.base_ring()
            try:
                basis = [V(x) for x in basis]
            except TypeError:
                raise TypeError('each element of basis must be in the ambient vector space')
        if (echelonize and (not already_echelonized)):
            basis = self._echelonized_basis(ambient, basis)
        from sage.categories.modules_with_basis import ModulesWithBasis
        modules_category = ModulesWithBasis(R.category()).FiniteDimensional()
        try:
            # A finite base ring (or an empty basis) makes the module finite.
            if (R.is_finite() or (len(basis) == 0)):
                modules_category = modules_category.Enumerated().Finite()
        except (ValueError, TypeError, AttributeError, NotImplementedError):
            # Best effort: some rings cannot answer is_finite().
            pass
        modules_category = modules_category.Subobjects()
        category = modules_category.or_subcategory(category, join=True)
        FreeModule_generic_pid.__init__(self, base_ring=R, coordinate_ring=R_coord,
                                        rank=len(basis), degree=ambient.degree(),
                                        sparse=ambient.is_sparse(), category=category)
        C = self.element_class
        w = [C(self, x.list(), coerce=False, copy=False) for x in basis]
        self.__basis = basis_seq(self, w)
        if (echelonize or already_echelonized):
            # The stored basis is already echelonized; share it.
            self.__echelonized_basis = self.__basis
        else:
            if (echelonized_basis is None):
                echelonized_basis = self._echelonized_basis(ambient, basis)
            w = [C(self, x.list(), coerce=False, copy=True) for x in echelonized_basis]
            self.__echelonized_basis = basis_seq(self, w)
        # A rank drop during echelonization means the vectors were dependent.
        if (check and (len(basis) != len(self.__echelonized_basis))):
            raise ValueError('The given basis vectors must be linearly independent.')

    def __hash__(self):
        """Hash on the (immutable) user basis sequence."""
        return hash(self.__basis)

    def _echelon_matrix_richcmp(self, other, op):
        """Rich comparison via ambient space, dimension, base ring, then
        the echelonized basis matrices (in that priority order)."""
        if (self is other):
            return rich_to_bool(op, 0)
        if (not isinstance(other, FreeModule_generic)):
            return NotImplemented
        lx = self.ambient_vector_space()
        rx = other.ambient_vector_space()
        if (lx != rx):
            # Different ambient spaces: delegate the comparison to them.
            return lx._echelon_matrix_richcmp(rx, op)
        lx = self.dimension()
        rx = other.dimension()
        if (lx != rx):
            return richcmp_not_equal(lx, rx, op)
        lx = self.base_ring()
        rx = other.base_ring()
        if (lx != rx):
            return richcmp_not_equal(lx, rx, op)
        return richcmp(self.echelonized_basis_matrix(), other.echelonized_basis_matrix(), op)

    def construction(self):
        """Return the functorial construction (SubspaceFunctor, ambient)."""
        from sage.categories.pushout import SubspaceFunctor
        return (SubspaceFunctor(self.basis()), self.ambient_module())

    def echelonized_basis_matrix(self):
        """Return (computing and caching if needed) the echelonized basis matrix."""
        try:
            return self.__echelonized_basis_matrix
        except AttributeError:
            pass
        # _echelonized_basis stores the matrix as a side effect.
        self._echelonized_basis(self.ambient_module(), self.__basis)
        return self.__echelonized_basis_matrix

    def _echelonized_basis(self, ambient, basis):
        """Echelonize ``basis`` (clearing denominators first), cache the
        echelon matrix, and return its nonzero rows."""
        d = self._denominator(basis)
        MAT = sage.matrix.matrix_space.MatrixSpace(ambient.base_ring(), len(basis), ambient.degree(), sparse=ambient.is_sparse())
        if (d != 1):
            # Scale away denominators so echelonization happens over the ring.
            basis = [(x * d) for x in basis]
        A = MAT(basis)
        E = A.echelon_form()
        if (d != 1):
            # Undo the scaling; this moves the matrix over the fraction field.
            E = (E.matrix_over_field() * (~ d))
        r = E.rank()
        if (r < E.nrows()):
            # Drop zero rows left behind by dependent input vectors.
            E = E.matrix_from_rows(range(r))
        self.__echelonized_basis_matrix = E
        return E.rows()

    def _denominator(self, B):
        """Return the lcm of the denominators of the vectors in ``B`` (1 if empty)."""
        if (not B):
            return 1
        d = B[0].denominator()
        from sage.arith.functions import lcm
        for x in B[1:]:
            d = lcm(d, x.denominator())
        return d

    def _repr_(self):
        """Text representation showing degree, rank, base ring and user basis."""
        if self.is_sparse():
            s = (('Sparse free module of degree %s and rank %s over %s\n' % (self.degree(), self.rank(), self.base_ring())) + ('User basis matrix:\n%r' % self.basis_matrix()))
        else:
            s = (('Free module of degree %s and rank %s over %s\n' % (self.degree(), self.rank(), self.base_ring())) + ('User basis matrix:\n%r' % self.basis_matrix()))
        return s

    def _latex_(self):
        """LaTeX representation as a row span over the base ring."""
        return ('\\mathrm{RowSpan}_{%s}%s' % (latex.latex(self.base_ring()), latex.latex(self.basis_matrix())))

    def ambient_module(self):
        """Return the ambient free module this is a submodule of."""
        return self.__ambient_module

    def ambient(self):
        """Return the natural ambient parent: the ambient module when the
        coordinate ring equals the base ring, else the ambient vector space."""
        if (self.base_ring() == self.coordinate_ring()):
            return self.ambient_module()
        else:
            return self.ambient_vector_space()

    # NOTE(review): bare "_attribute" looks like a stripped decorator
    # (probably "@lazy_attribute") lost in extraction — confirm upstream.
    _attribute
    def lift(self):
        """Morphism embedding this submodule into its ambient parent."""
        ambient = self.ambient()
        return self.module_morphism(function=ambient, codomain=ambient)

    # NOTE(review): same stripped-decorator remnant as above.
    _attribute
    def retract(self):
        """Partial-inverse morphism from the ambient parent back to self."""
        return self.ambient().module_morphism(function=self, codomain=self)

    def relations(self):
        """Return the relations module, delegated to the ambient module."""
        return self.__ambient_module.relations()

    def echelon_coordinates(self, v, check=True):
        """Coordinates of ``v`` with respect to the echelonized basis.

        Reads off the pivot entries of ``v`` and maps them through the
        rref-to-echelon change of basis.  With ``check`` (and ``v`` not an
        element of self), verifies the combination reproduces ``v`` and
        raises ArithmeticError otherwise.
        """
        if (not isinstance(v, free_module_element.FreeModuleElement)):
            v = self.ambient_vector_space()(v)
        elif (v.degree() != self.degree()):
            raise ArithmeticError('vector is not in free module')
        E = self.echelonized_basis_matrix()
        P = E.pivots()
        w = v.list_from_positions(P)
        T = self._rref_to_echelon_matrix()
        x = T.linear_combination_of_rows(w).list(copy=False)
        if (not check):
            return x
        if (v.parent() is self):
            # Elements of self are in the module by construction.
            return x
        lc = E.linear_combination_of_rows(x)
        if (list(lc) != list(v)):
            raise ArithmeticError('vector is not in free module')
        return x

    def user_to_echelon_matrix(self):
        """Cached change-of-basis matrix from the user basis to the
        echelonized basis (over the fraction field in the non-field case)."""
        try:
            return self.__user_to_echelon_matrix
        except AttributeError:
            if self.base_ring().is_field():
                # Over a field the echelon basis is the rref.
                self.__user_to_echelon_matrix = self._user_to_rref_matrix()
            else:
                rows = sum([self.echelon_coordinates(b, check=False) for b in self.basis()], [])
                M = sage.matrix.matrix_space.MatrixSpace(self.base_ring().fraction_field(), self.dimension(), sparse=self.is_sparse())
                self.__user_to_echelon_matrix = M(rows)
        return self.__user_to_echelon_matrix

    def echelon_to_user_matrix(self):
        """Cached inverse of :meth:`user_to_echelon_matrix`."""
        try:
            return self.__echelon_to_user_matrix
        except AttributeError:
            self.__echelon_to_user_matrix = (~ self.user_to_echelon_matrix())
        return self.__echelon_to_user_matrix

    def _user_to_rref_matrix(self):
        """Cached matrix mapping user-basis coordinates to rref coordinates
        (the user basis matrix restricted to pivot columns)."""
        try:
            return self.__user_to_rref_matrix
        except AttributeError:
            A = self.basis_matrix()
            P = self.echelonized_basis_matrix().pivots()
            T = A.matrix_from_columns(P)
            self.__user_to_rref_matrix = T
        return self.__user_to_rref_matrix

    def _rref_to_user_matrix(self):
        """Cached inverse of :meth:`_user_to_rref_matrix`."""
        try:
            return self.__rref_to_user_matrix
        except AttributeError:
            self.__rref_to_user_matrix = (~ self._user_to_rref_matrix())
        return self.__rref_to_user_matrix

    def _echelon_to_rref_matrix(self):
        """Cached matrix mapping echelon coordinates to rref coordinates
        (the echelonized basis matrix restricted to its pivot columns)."""
        try:
            return self.__echelon_to_rref_matrix
        except AttributeError:
            A = self.echelonized_basis_matrix()
            T = A.matrix_from_columns(A.pivots())
            self.__echelon_to_rref_matrix = T
        return self.__echelon_to_rref_matrix

    def _rref_to_echelon_matrix(self):
        """Cached inverse of :meth:`_echelon_to_rref_matrix`."""
        try:
            return self.__rref_to_echelon_matrix
        except AttributeError:
            self.__rref_to_echelon_matrix = (~ self._echelon_to_rref_matrix())
        return self.__rref_to_echelon_matrix

    def vector_space(self, base_field=None):
        """Return the vector space spanned by this module's basis, over the
        default ambient field or over ``base_field`` via change_ring."""
        if (base_field is None):
            V = self.ambient_vector_space()
            return V.submodule_with_basis(self.basis())
        return self.change_ring(base_field)

    def ambient_vector_space(self):
        """Return the ambient module's ambient vector space."""
        return self.ambient_module().ambient_vector_space()

    def basis(self):
        """Return the user basis (an immutable sequence)."""
        return self.__basis

    def change_ring(self, R):
        """Return the corresponding module over the PID ``R``.

        Raises TypeError when ``R`` is not a principal ideal domain.
        Preserves the user basis when one was given.
        """
        if (self.base_ring() is R):
            return self
        if (R not in PrincipalIdealDomains()):
            raise TypeError(('the new ring %r should be a principal ideal domain' % R))
        K = R.fraction_field()
        V = VectorSpace(K, self.degree())
        B = [V(b) for b in self.basis()]
        M = self.ambient_module().change_ring(R)
        if self.has_user_basis():
            return M.span_of_basis(B)
        else:
            return M.span(B)

    def coordinate_vector(self, v, check=True):
        """Coordinates of ``v`` with respect to the *user* basis, obtained
        from echelon coordinates via the echelon-to-user change of basis."""
        w = self.echelon_coordinate_vector(v, check=check)
        T = self.echelon_to_user_matrix()
        return T.linear_combination_of_rows(w)

    def echelonized_basis(self):
        """Return the echelonized basis (an immutable sequence)."""
        return self.__echelonized_basis

    def echelon_coordinate_vector(self, v, check=True):
        """Echelon coordinates of ``v`` packaged as a vector over the
        fraction field."""
        return FreeModule(self.base_ring().fraction_field(), self.rank())(self.echelon_coordinates(v, check=check))

    def has_user_basis(self):
        """Always True: this class exists precisely to carry a user basis."""
        return True

    def linear_combination_of_basis(self, v):
        """Return the element sum(v[i] * basis[i]); only re-checks membership
        when some coefficient lies outside a non-field base ring."""
        R = self.base_ring()
        check = ((not R.is_field()) and any(((a not in R) for a in list(v))))
        return self(self.basis_matrix().linear_combination_of_rows(v), check=check, copy=False, coerce=False)
def test_load_arff_from_gzip_file_error_parser():
    """An unsupported ``parser`` value must raise a descriptive ValueError."""
    expected_message = "Unknown parser: 'xxx'. Should be 'liac-arff' or 'pandas'"
    with pytest.raises(ValueError, match=expected_message):
        load_arff_from_gzip_file('xxx', 'xxx', 'xxx', 'xxx', 'xxx', 'xxx')
def create_instance_layout(state) -> html.Div:
    """Render the query-instance card, or an empty Div when no instances exist."""
    if state.instances is None:
        return html.Div()
    figure = plot_one_instance(state.instances, state.get_display_instance('local'))
    card_children = [
        html.B('Query Instance'),
        html.Hr(),
        html.Center(id='instance_table', children=figure),
    ]
    return html.Div(id='info_card', children=card_children)
def gen_restore_seq_with_ratio(device, cl_setting, abbrs, lm_model, ratio, use_gt=False, with_token=True, tr_perc=None, setting_idx=1, **kwargs):
    """Print mmf_run shell commands for a continual-learning replay sweep.

    For every stage after the first, emits a guarded ("if [ ! -f ... ]")
    training command that resumes from the previous stage's checkpoint and
    restores replay data, then prints one final eval-sweep command.
    Recognised kwargs: ``root_dir`` (default '/Users/stan') and ``log``
    (redirect eval output to a results file).  Output goes to stdout only;
    nothing is returned.
    """
    root_dir = kwargs.get('root_dir', '/Users/stan')
    print(f'ROOT={root_dir}')
    print(f'DEVICE={device}')
    # Shell template: skip training if the final checkpoint already exists.
    tmpl = 'if [ ! -f "{}" ] ; then \n CUDA_VISIBLE_DEVICES=$DEVICE mmf_run config={} \\\n model=unicl \\\n dataset=clvqa \\\n training.CL.use_cl=True \\\n training.CL.use_callback=False \\\n training.CL.use_replay=True \\\n training.CL.replay_method=restore_with_prob \\\n training.CL.task_order={} \\\n training.CL.restore_rate={} \\\n training.CL.restore_dir=$ROOT/exp/clvqa/QAG_seq/{}/QAG_{}_{}/{}_replay/{}_{}_{} \\\n training.CL.restore_paths={} \\\n dataset_config.clvqa.use_mask_img=True \\\n dataset_config.clvqa.mask_img_prob=0.15 \\\n run_type=train_val \\\n checkpoint.resume_file={} \\\n env.save_dir={} \\\n training.checkpoint_interval=4000 \\\n training.callbacks=[] \nfi \n'
    arrvs = abbrs
    stages = ABBR2TASKList(cl_setting=cl_setting, abbr_seq=abbrs)
    # Directory-name fragments encoding the experiment configuration.
    token_append = ('task_token' if with_token else 'wo_token')
    if (tr_perc is not None):
        token_append += f'_tr{tr_perc}'
    use_gt_append = ('use_gt' if use_gt else 'not_use_gt')
    for (idx, stage) in enumerate(stages):
        if (idx == 0):
            # Stage 0 is trained stand-alone elsewhere; replay starts at stage 1.
            continue
        stand_alone_dir = f'$ROOT/exp/clvqa/save/stand_alone/{cl_setting}'
        follow_dir = f'$ROOT/exp/clvqa/save/{cl_setting}'
        # Stage 1 resumes from the stand-alone model; later stages resume
        # from the previous replay-trained checkpoint.
        resume_file = ('{}/unicl_{}/unicl_final.pth'.format(stand_alone_dir, stages[(idx - 1)]) if (idx == 1) else '{}/setting_{}_{}/{}_replay_qag_seq_{}_{}_{}/unicl_{}/unicl_final.pth'.format(follow_dir, setting_idx, arrvs, lm_model, use_gt_append, token_append, ratio, stages[(idx - 1)]))
        # One replay file per already-seen task, replayed at the current stage.
        restore_paths = ','.join(['{}_REPLAY[{}]_AT[{}].npy'.format(arrvs, arrvs[i], arrvs[idx]) for i in range(idx)])
        final_model_pth = '$ROOT/exp/clvqa/save/{}/setting_{}_{}/{}_replay_qag_seq_{}_{}_{}/unicl_{}/unicl_final.pth'.format(cl_setting, setting_idx, arrvs, lm_model, use_gt_append, token_append, ratio, stage)
        config_pth = 'EXP_CONFIG/{}/cl_{}_unicl_standalone.yaml'.format(cl_setting, stage)
        save_dir = '$ROOT/exp/clvqa/save/{}/setting_{}_{}/{}_replay_qag_seq_{}_{}_{}/unicl_{}'.format(cl_setting, setting_idx, arrvs, lm_model, use_gt_append, token_append, ratio, stage)
        run_script = tmpl.format(final_model_pth, config_pth, arrvs, ratio, use_gt_append, cl_setting, token_append, lm_model, lm_model, cl_setting, arrvs, restore_paths, resume_file, save_dir)
        print('{}\n\n'.format(run_script))
    exp_name = '{}_replay_qag_seq_{}_{}_{}'.format(lm_model, use_gt_append, token_append, ratio)
    export_f = None
    if kwargs.get('log', False):
        # Optionally redirect the eval sweep's output to a results file.
        export_f = '> $ROOT/results/{}_run_{}_S{}.txt'.format(cl_setting[0].upper(), exp_name, setting_idx)
    else:
        export_f = ''
    print('python -c \'from eval_os import *; stage_sweep(cl_setting="{}", setting_idx={}, abbr_seq="{}", device="\'${{DEVICE}}\'", model_name="unicl", save_dir="\'${{ROOT}}\'/exp/clvqa", val_exp="{}", test_stand_alone=False, test_reg=False)\' {}'.format(cl_setting, setting_idx, arrvs, exp_name, export_f))
class StepParamScheduler(ParamScheduler):
    """Piecewise-constant parameter schedule.

    The relative-progress interval [0, 1) is split into ``len(values)``
    equal steps and ``__call__(where)`` returns the value of the step that
    ``where`` falls into.
    """

    def __init__(self, num_updates: Union[(int, float)], values: List[float]) -> None:
        """Validate the arguments and store the per-step values.

        Raises ValueError when ``num_updates`` is not positive, or when
        ``values`` is not a non-empty sequence.
        """
        if (num_updates <= 0):
            raise ValueError('Number of updates must be larger than 0')
        if (not (isinstance(values, Sequence) and (len(values) > 0))):
            raise ValueError('Step scheduler requires a list of at least one param value')
        self._param_schedule = values

    def __call__(self, where: float) -> float:
        """Return the scheduled value at relative progress ``where``.

        WHERE_EPSILON (from ParamScheduler) nudges exact step boundaries
        into the next step.  Bug fix: the index is clamped to the last
        step, so ``where`` values at/near 1.0 no longer raise IndexError
        (the original indexed one past the end for where >= 1 - eps).
        """
        ind = int(((where + self.WHERE_EPSILON) * len(self._param_schedule)))
        ind = min(ind, len(self._param_schedule) - 1)
        return self._param_schedule[ind]
def get_global_knowledge(args):
    """Read and parse the shared knowledge file; return '' when none is set."""
    if args.shared_knowledge_file is None:
        return ''
    with open(args.shared_knowledge_file, 'r') as f:
        raw_knowledge = f.read()
    return knowledge_parser(raw_knowledge)
class DeHazeDatasetFromFolderTest(data.Dataset):
    """Test-time video dehazing dataset over per-clip subfolders.

    Each subfolder of ``image_dir`` is one clip; each clip contributes
    ``count - nFrames + 1`` sliding windows of ``nFrames`` frames.
    """

    def __init__(self, image_dir, nFrames, upscale_factor, file_list, other_dataset, future_frame, transform=None):
        """Index the clip folders and precompute cumulative window counts.

        NOTE(review): ``file_list`` and ``upscale_factor`` are stored/ignored
        as-is, and the ``transform`` argument is overridden by a fixed
        ToTensor transform — confirm whether that is intentional.
        """
        super(DeHazeDatasetFromFolderTest, self).__init__()
        self.nFrames = nFrames
        self.upscale_factor = upscale_factor
        # The passed-in transform is ignored; ToTensor is always used.
        self.transform = transforms.Compose([transforms.ToTensor()])
        self.other_dataset = other_dataset
        self.future_frame = future_frame
        self.image_num = 0
        # index_compute[i] = cumulative number of windows through clip i.
        self.index_compute = []
        self.image_filenames = [join(image_dir, x) for x in os.listdir(image_dir)]
        self.image_filenames = sorted(self.image_filenames)
        image_num = 0
        for i in range(len(self.image_filenames)):
            image_list = os.listdir(self.image_filenames[i])
            for img in image_list:
                # Count hazy frames only ('rs' files are excluded).
                if (img.endswith('jpg') and ('rs' not in img)):
                    image_num += 1
            # Running total is deliberately not reset per clip: each entry
            # is a cumulative window count, used for index lookup below.
            image_num = ((image_num - self.nFrames) + 1)
            self.index_compute.append(image_num)
        self.image_num = self.index_compute[(- 1)]

    def __getitem__(self, index):
        """Map a flat index to (clip, window), load the frame window, and
        return (stacked neighbour tensor, output file path).

        Side effect: creates the Results/VIWSNET_REVIDE/<clip> directory.
        NOTE(review): when ``future_frame`` is False no frames are loaded
        and ``neigbor``/``file`` are unbound — likely only the
        future-frame path is ever used; confirm.
        """
        file_id = 0
        index = (index + 1)
        # Find the first clip whose cumulative window count covers `index`.
        for i in range(len(self.index_compute)):
            if (self.index_compute[i] >= index):
                file_id = i
                break
        # Window position inside the clip (offset by prior clips' totals).
        img_id = (index if (file_id == 0) else (index - int(self.index_compute[(file_id - 1)])))
        if (not os.path.exists(('Results/VIWSNET_REVIDE/' + str((int(file_id) + 1)).zfill(2)))):
            os.makedirs(('Results/VIWSNET_REVIDE/' + str((int(file_id) + 1)).zfill(2)))
        if self.future_frame:
            (target, neigbor) = load_img_future_de_haze_revide(self.image_filenames[file_id], self.nFrames, img_id, phase='test')
            # Output name points at the window's centre frame.
            file = (((str((int(file_id) + 1)).zfill(2) + '/recover') + str((img_id + int((self.nFrames / 2))))) + '.png')
        if self.transform:
            neigbor = [self.transform(j) for j in neigbor]
        neigbors = torch.stack(neigbor, 0)
        return (neigbors, file)

    def __len__(self):
        """Total number of sliding windows across all clips."""
        return self.image_num
class LayerNorm(nn.Module):
    """Layer normalisation over the last dimension with learnable gain/bias."""

    def __init__(self, size, eps=1e-06):
        """``size`` is the feature dimension; ``eps`` guards division by zero."""
        super(LayerNorm, self).__init__()
        self.eps = eps
        self.a_2 = nn.Parameter(torch.ones(size))   # per-feature gain
        self.b_2 = nn.Parameter(torch.zeros(size))  # per-feature bias

    def forward(self, x):
        """Normalise ``x`` along its last axis, then scale and shift."""
        mu = x.mean(-1, keepdim=True)
        sigma = x.std(-1, keepdim=True)
        normalised = (x - mu) / (sigma + self.eps)
        return self.a_2 * normalised + self.b_2
# Bug fix: the bare ".parametrize(...)" lines were syntactically invalid
# remnants of stripped "@pytest.mark.parametrize" decorators; restored here.
@pytest.mark.parametrize('ctx, solver_name', ctxs)
@pytest.mark.parametrize('decay', [0.0001])
@pytest.mark.parametrize('lr', [0.1, 0.001])
@pytest.mark.parametrize('momentum', [0.9, 0.5])
@pytest.mark.parametrize('coefficient', [0.001])
@pytest.mark.parametrize('eps', [1e-08])
@pytest.mark.parametrize('seed', [313])
def test_lars(seed, lr, momentum, coefficient, decay, eps, ctx, solver_name):
    """Check the LARS solver against its reference implementation."""
    rng = np.random.RandomState(seed)
    solver_tester(rng, S.Lars, RefLars, [lr, momentum, coefficient, eps],
                  atol=1e-06, decay=decay, ctx=ctx, solver_name=solver_name)
def instance_builder(toposort: List[BaseNode], wrapper: Callable=None) -> Dict[(BaseNode, Layer)]:
    """Build a Keras layer for each non-reused node in topological order.

    ``wrapper``, when given, post-processes each freshly built layer.
    Returns a mapping from node to its built layer.
    """
    built = {}
    for node in toposort:
        if node.reuse:
            # Reused nodes share an already-built layer; skip them.
            continue
        layer = node_builder(node)
        if wrapper is not None:
            layer = wrapper(node, layer)
        built[node] = layer
    return built
class BeatREMI(BaseEventREMI):
    """REMI 'beat' event carrying timing information and a segment tag."""

    def __init__(self, is_bar, bar, position, start_time, duration):
        super().__init__('beat', bar, position)
        self.is_bar = is_bar          # whether this beat opens a bar
        self.start_time = start_time  # onset time of the beat
        self.duration = duration     # beat length (assumed seconds — TODO confirm)
        self.segment_tag = None      # set later by patch_segment_tag()

    def __repr__(self):
        return '[Beat]\n -- is_bar: {}\n -- {}\n -- {}\n'.format(self.is_bar, self.pos_remi, self.segment_tag)

    def get_tempo(self, tempo_cls_bound, tempo_bins):
        """Derive tempo class and tempo bin from this beat's duration.

        ``tempo = 60 / duration`` (BPM).  ``tempo_cls`` becomes the index of
        the last bound (in iteration order) that tempo reaches, clipped to
        [0, len(tempo_cls_bound) - 2]; ``tempo_bin`` is the index of the
        nearest entry in ``tempo_bins`` (a numpy array).
        """
        self.tempo_cls = (- 1)
        tempo = ((1.0 / self.duration) * 60)
        for (i, cl) in enumerate(tempo_cls_bound):
            # No break: keeps overwriting, so the final value is the last
            # index whose bound the tempo meets.
            if (tempo >= cl):
                self.tempo_cls = i
        self.tempo_cls = clip_val(self.tempo_cls, 0, (len(tempo_cls_bound) - 2))
        self.tempo_bin = np.abs((tempo_bins - tempo)).argmin()

    def patch_segment_tag(self, end_seg='', start_seg=''):
        """Attach a SegmentTag parsed from segment strings.

        Each segment string is e.g. "A2" (part letter + repeat count);
        a missing repeat count defaults to '1', quotes are stripped.
        """
        (part_start, part_end, rep_start, rep_end) = ('', '', '', '')
        (end_seg, start_seg) = (end_seg.replace("'", ''), start_seg.replace("'", ''))
        if end_seg:
            rep_end = (end_seg[1] if (len(end_seg) > 1) else '1')
            part_end = end_seg[0]
        if start_seg:
            rep_start = (start_seg[1] if (len(start_seg) > 1) else '1')
            part_start = start_seg[0]
        self.segment_tag = SegmentTag(part_end, rep_end, part_start, rep_start)
class AbsoluteValue(Function):
    """Element-wise absolute value node in the goos computation graph."""

    node_type = 'goos.function.abs'

    def __init__(self, fun: Function) -> None:
        super().__init__(fun)

    def eval(self, input_vals: List[goos.NumericFlow]) -> goos.NumericFlow:
        """Return a deep copy of the input flow with ``array`` -> |array|."""
        val = copy.deepcopy(input_vals[0])
        val.array = np.abs(val.array)
        return val

    def grad(self, input_vals: List[goos.NumericFlow], grad_val: goos.NumericFlow.Grad) -> List[goos.NumericFlow.Grad]:
        """Backpropagate through |z|.

        Multiplies the incoming gradient by conj(z)/|z| (the gradient
        direction of |z|), and halves it for complex inputs — presumably a
        Wirtinger-derivative convention; NOTE(review): confirm against the
        rest of goos.  Zero inputs divide by zero here.
        """
        grad = ((np.conj(input_vals[0].array) / np.abs(input_vals[0].array)) * grad_val.array_grad)
        if np.iscomplexobj(input_vals[0].array):
            grad /= 2
        grad_val = copy.deepcopy(grad_val)
        grad_val.array_grad = grad
        return [grad_val]
class AutoTokenizer():
    """Factory that resolves and instantiates the right tokenizer class.

    Not meant to be instantiated directly; use ``from_pretrained``.
    """

    def __init__(self):
        raise EnvironmentError('AutoTokenizer is designed to be instantiated using the `AutoTokenizer.from_pretrained(pretrained_model_name_or_path)` method.')

    # NOTE(review): this bare call looks like a stripped decorator remnant
    # (upstream uses "@classmethod" plus a docstring-rewriting decorator
    # over TOKENIZER_MAPPING_NAMES) — confirm against the original source.
    _list_option_in_docstrings(TOKENIZER_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
        """Resolve a tokenizer class and call its ``from_pretrained``.

        Resolution order: explicit ``tokenizer_type`` kwarg, then the
        tokenizer config's class / auto_map (possibly remote code gated by
        ``trust_remote_code``), then the model-type -> tokenizer mapping.
        Prefers the fast tokenizer when ``use_fast`` (default True).
        """
        config = kwargs.pop('config', None)
        kwargs['_from_auto'] = True
        use_fast = kwargs.pop('use_fast', True)
        tokenizer_type = kwargs.pop('tokenizer_type', None)
        trust_remote_code = kwargs.pop('trust_remote_code', False)
        # Path 1: the caller named the tokenizer type explicitly.
        if (tokenizer_type is not None):
            tokenizer_class = None
            tokenizer_class_tuple = TOKENIZER_MAPPING_NAMES.get(tokenizer_type, None)
            if (tokenizer_class_tuple is None):
                raise ValueError(f"Passed `tokenizer_type` {tokenizer_type} does not exist. `tokenizer_type` should be one of {', '.join((c for c in TOKENIZER_MAPPING_NAMES.keys()))}.")
            (tokenizer_class_name, tokenizer_fast_class_name) = tokenizer_class_tuple
            if (use_fast and (tokenizer_fast_class_name is not None)):
                tokenizer_class = tokenizer_class_from_name(tokenizer_fast_class_name)
            if (tokenizer_class is None):
                # Fall back to the slow class when no fast one is available.
                tokenizer_class = tokenizer_class_from_name(tokenizer_class_name)
            if (tokenizer_class is None):
                raise ValueError(f'Tokenizer class {tokenizer_class_name} is not currently imported.')
            return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
        # Path 2: consult the saved tokenizer config (and model config).
        tokenizer_config = get_tokenizer_config(pretrained_model_name_or_path, **kwargs)
        config_tokenizer_class = tokenizer_config.get('tokenizer_class')
        tokenizer_auto_map = tokenizer_config.get('auto_map')
        if (config_tokenizer_class is None):
            # Tokenizer config is silent: fall back to the model config.
            if (not isinstance(config, PretrainedConfig)):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs)
            config_tokenizer_class = config.tokenizer_class
            if (hasattr(config, 'auto_map') and ('AutoTokenizer' in config.auto_map)):
                tokenizer_auto_map = config.auto_map['AutoTokenizer']
        if (config_tokenizer_class is not None):
            tokenizer_class = None
            if (tokenizer_auto_map is not None):
                # Remote/custom code path: requires explicit opt-in.
                if (not trust_remote_code):
                    raise ValueError(f'Loading {pretrained_model_name_or_path} requires you to execute the tokenizer file in that repo on your local machine. Make sure you have read the code there to avoid malicious use, then set the option `trust_remote_code=True` to remove this error.')
                if (kwargs.get('revision', None) is None):
                    logger.warn('Explicitly passing a `revision` is encouraged when loading a model with custom code to ensure no malicious code has been contributed in a newer revision.')
                # auto_map is (slow_ref, fast_ref); pick fast when possible.
                if (use_fast and (tokenizer_auto_map[1] is not None)):
                    class_ref = tokenizer_auto_map[1]
                else:
                    class_ref = tokenizer_auto_map[0]
                (module_file, class_name) = class_ref.split('.')
                tokenizer_class = get_class_from_dynamic_module(pretrained_model_name_or_path, (module_file + '.py'), class_name, **kwargs)
            elif (use_fast and (not config_tokenizer_class.endswith('Fast'))):
                # Try the "<Name>Fast" sibling of the configured class first.
                tokenizer_class_candidate = f'{config_tokenizer_class}Fast'
                tokenizer_class = tokenizer_class_from_name(tokenizer_class_candidate)
            if (tokenizer_class is None):
                tokenizer_class_candidate = config_tokenizer_class
                tokenizer_class = tokenizer_class_from_name(tokenizer_class_candidate)
            if (tokenizer_class is None):
                raise ValueError(f'Tokenizer class {tokenizer_class_candidate} does not exist or is not currently imported.')
            return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
        # Path 3: infer from the model type.  Encoder-decoder configs fall
        # back to the encoder side (with a warning when the halves differ).
        if isinstance(config, EncoderDecoderConfig):
            if (type(config.decoder) is not type(config.encoder)):
                logger.warning(f'The encoder model config class: {config.encoder.__class__} is different from the decoder model config class: {config.decoder.__class__}. It is not recommended to use the `AutoTokenizer.from_pretrained()` method in this case. Please use the encoder and decoder specific tokenizer classes.')
            config = config.encoder
        model_type = config_class_to_model_type(type(config).__name__)
        if (model_type is not None):
            (tokenizer_class_py, tokenizer_class_fast) = TOKENIZER_MAPPING[type(config)]
            if (tokenizer_class_fast and (use_fast or (tokenizer_class_py is None))):
                return tokenizer_class_fast.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
            elif (tokenizer_class_py is not None):
                return tokenizer_class_py.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
            else:
                raise ValueError('This tokenizer cannot be instantiated. Please make sure you have `sentencepiece` installed in order to use this tokenizer.')
        raise ValueError(f'''Unrecognized configuration class {config.__class__} to build an AutoTokenizer.
Model type should be one of {', '.join((c.__name__ for c in TOKENIZER_MAPPING.keys()))}.''')

    # NOTE(review): upstream declares this as a @staticmethod; the decorator
    # appears to have been stripped in extraction — confirm.
    def register(config_class, slow_tokenizer_class=None, fast_tokenizer_class=None):
        """Register a (slow, fast) tokenizer pair for ``config_class``.

        Validates that slow/fast classes are of the right kind and mutually
        consistent; merges with any previously registered pair.
        """
        if ((slow_tokenizer_class is None) and (fast_tokenizer_class is None)):
            raise ValueError('You need to pass either a `slow_tokenizer_class` or a `fast_tokenizer_class')
        if ((slow_tokenizer_class is not None) and issubclass(slow_tokenizer_class, PreTrainedTokenizerFast)):
            raise ValueError('You passed a fast tokenizer in the `slow_tokenizer_class`.')
        if ((fast_tokenizer_class is not None) and issubclass(fast_tokenizer_class, PreTrainedTokenizer)):
            raise ValueError('You passed a slow tokenizer in the `fast_tokenizer_class`.')
        if ((slow_tokenizer_class is not None) and (fast_tokenizer_class is not None) and issubclass(fast_tokenizer_class, PreTrainedTokenizerFast) and (fast_tokenizer_class.slow_tokenizer_class != slow_tokenizer_class)):
            raise ValueError(f'The fast tokenizer class you are passing has a `slow_tokenizer_class` attribute that is not consistent with the slow tokenizer class you passed (fast tokenizer has {fast_tokenizer_class.slow_tokenizer_class} and you passed {slow_tokenizer_class}. Fix one of those so they match!')
        # Preserve the half of an existing registration we are not replacing.
        if (config_class in TOKENIZER_MAPPING._extra_content):
            (existing_slow, existing_fast) = TOKENIZER_MAPPING[config_class]
            if (slow_tokenizer_class is None):
                slow_tokenizer_class = existing_slow
            if (fast_tokenizer_class is None):
                fast_tokenizer_class = existing_fast
        TOKENIZER_MAPPING.register(config_class, (slow_tokenizer_class, fast_tokenizer_class))
def main():
    """Print an `infer.py` command for each (filter, strategy, size, step)
    whose checkpoint exists and whose inference output is not yet written."""
    combos = itertools.product(('none', 'contains-hole'),
                               ('cov-xent', 'cov-examples'),
                               (10, 20, 40, 80))
    for filt, st, nt in combos:
        config_args = "{{filt: '{filt}', st: '{st}', nt: {nt}}}".format(filt=filt, st=st, nt=nt)
        logdir = os.path.join(
            'logdirs/-hs-allmatches-anysplit-multimean',
            'filt-{filt}_st-{st}_nt-{nt}'.format(filt=filt, st=st, nt=nt))
        for step in range(100, 2600, 100):
            checkpoint = os.path.join(logdir, 'model_checkpoint-{:08d}'.format(step))
            output = os.path.join(logdir, 'infer-val-step{:05d}-bs1.jsonl'.format(step))
            # Skip steps without a checkpoint, and steps already inferred.
            if (not os.path.exists(checkpoint)) or os.path.exists(output):
                continue
            infer_command = (
                'python infer.py --config configs/hearthstone-idioms/'
                'nl2code-0201-allmatches-anysplit-multimean.jsonnet '
                '--logdir {logdir} --config-args "{args}" '
                '--output __LOGDIR__/infer-val-step{step:05d}-bs1.jsonl '
                '--step {step} --section val --beam-size 1'
            ).format(logdir=logdir, args=config_args, step=step)
            print(infer_command)
class Conv1DWithMasking(Conv1D):
    """Keras Conv1D that participates in masking by passing masks through."""

    def __init__(self, **kwargs):
        # Set before super().__init__ so the base Layer sees the flag
        # during construction.
        self.supports_masking = True
        super(Conv1DWithMasking, self).__init__(**kwargs)

    def compute_mask(self, x, mask):
        # The convolution does not change the time-step mask; forward it.
        return mask
class CartoonGAN(object):
    def __init__(self, sess, args):
        """Store the TF session and all hyper-parameters from ``args``,
        resolve the dataset file lists, and print a config summary.

        NOTE(review): ``self.model_dir`` is referenced here but defined
        outside the visible code (likely a property) — confirm.
        """
        self.model_name = 'CartoonGAN'
        self.sess = sess
        # Output / bookkeeping directories.
        self.checkpoint_dir = args.checkpoint_dir
        self.result_dir = args.result_dir
        self.log_dir = args.log_dir
        self.dataset_name = args.dataset
        self.augment_flag = args.augment_flag
        # Training schedule.
        self.epoch = args.epoch
        self.init_epoch = args.init_epoch  # epochs of VGG-only pretraining
        self.iteration = args.iteration
        self.decay_flag = args.decay_flag
        self.decay_epoch = args.decay_epoch
        self.gan_type = args.gan_type
        self.batch_size = args.batch_size
        self.print_freq = args.print_freq
        self.save_freq = args.save_freq
        self.init_lr = args.lr
        # Network sizing / loss weights.
        self.ch = args.ch
        self.adv_weight = args.adv_weight
        self.vgg_weight = args.vgg_weight
        self.ld = args.ld  # gradient-penalty weight
        self.n_res = args.n_res
        self.n_dis = args.n_dis
        self.n_critic = args.n_critic
        self.sn = args.sn  # spectral normalization toggle
        self.img_size = args.img_size
        self.img_ch = args.img_ch
        self.sample_dir = os.path.join(args.sample_dir, self.model_dir)
        check_folder(self.sample_dir)
        # trainA: photos; trainB: cartoon frames; trainB_smooth: edge-smoothed cartoons.
        self.trainA_dataset = glob('./dataset/{}/*.*'.format((self.dataset_name + '/trainA')))
        self.trainB_dataset = glob('./dataset/{}/*.*'.format((self.dataset_name + '/trainB')))
        self.trainB_smooth_dataset = glob('./dataset/{}/*.*'.format((self.dataset_name + '/trainB_smooth')))
        self.dataset_num = max(len(self.trainA_dataset), len(self.trainB_dataset))
        print()
        print('##### Information #####')
        print('# gan type : ', self.gan_type)
        print('# dataset : ', self.dataset_name)
        print('# max dataset number : ', self.dataset_num)
        print('# batch_size : ', self.batch_size)
        print('# epoch : ', self.epoch)
        print('# init_epoch : ', self.init_epoch)
        print('# iteration per epoch : ', self.iteration)
        print()
        print('##### Generator #####')
        print('# residual blocks : ', self.n_res)
        print()
        print('##### Discriminator #####')
        print('# the number of discriminator layer : ', self.n_dis)
        print('# the number of critic : ', self.n_critic)
        print('# spectral normalization : ', self.sn)
        print()
    def generator(self, x_init, reuse=False, scope='generator'):
        """Build the generator graph: 7x7 stem, two stride-2 downsampling
        stages, ``n_res`` residual blocks, two upsampling stages, and a
        7x7 tanh output head.  Returns the generated image tensor."""
        channel = self.ch
        with tf.variable_scope(scope, reuse=reuse):
            x = conv(x_init, channel, kernel=7, stride=1, pad=3, pad_type='reflect', use_bias=False, scope='conv')
            x = instance_norm(x, scope='ins_norm')
            x = relu(x)
            # Downsample twice, doubling channels each time.
            for i in range(2):
                x = conv(x, (channel * 2), kernel=3, stride=2, pad=1, use_bias=False, scope=('conv_s2_' + str(i)))
                x = conv(x, (channel * 2), kernel=3, stride=1, pad=1, use_bias=False, scope=('conv_s1_' + str(i)))
                x = instance_norm(x, scope=('ins_norm_' + str(i)))
                x = relu(x)
                channel = (channel * 2)
            # Bottleneck residual blocks.
            for i in range(self.n_res):
                x = resblock(x, channel, use_bias=False, scope=('resblock_' + str(i)))
            # Upsample twice, halving channels each time.
            for i in range(2):
                x = deconv(x, (channel // 2), kernel=3, stride=2, use_bias=False, scope=('deconv_' + str(i)))
                x = conv(x, (channel // 2), kernel=3, stride=1, pad=1, use_bias=False, scope=('up_conv_' + str(i)))
                x = instance_norm(x, scope=('up_ins_norm_' + str(i)))
                x = relu(x)
                channel = (channel // 2)
            x = conv(x, channels=self.img_ch, kernel=7, stride=1, pad=3, pad_type='reflect', use_bias=False, scope='G_logit')
            x = tanh(x)
            return x
    def discriminator(self, x_init, reuse=False, scope='discriminator'):
        """Build the patch discriminator graph and return its logit map.

        Starts at ``ch // 2`` channels and alternates strided/non-strided
        convs for ``n_dis - 1`` stages; spectral norm controlled by
        ``self.sn``."""
        channel = (self.ch // 2)
        with tf.variable_scope(scope, reuse=reuse):
            x = conv(x_init, channel, kernel=3, stride=1, pad=1, use_bias=False, sn=self.sn, scope='conv_0')
            x = lrelu(x, 0.2)
            for i in range(1, self.n_dis):
                # Stride-2 downsample followed by a stride-1 refinement conv.
                x = conv(x, (channel * 2), kernel=3, stride=2, pad=1, use_bias=False, sn=self.sn, scope=('conv_s2_' + str(i)))
                x = lrelu(x, 0.2)
                x = conv(x, (channel * 4), kernel=3, stride=1, pad=1, use_bias=False, sn=self.sn, scope=('conv_s1_' + str(i)))
                x = instance_norm(x, scope=('ins_norm_' + str(i)))
                x = lrelu(x, 0.2)
                channel = (channel * 2)
            x = conv(x, (channel * 2), kernel=3, stride=1, pad=1, use_bias=False, sn=self.sn, scope='last_conv')
            x = instance_norm(x, scope='last_ins_norm')
            x = lrelu(x, 0.2)
            # 1-channel logit map (PatchGAN-style output).
            x = conv(x, channels=1, kernel=3, stride=1, pad=1, use_bias=False, sn=self.sn, scope='D_logit')
            return x
    def gradient_panalty(self, real, fake, scope='discriminator'):
        """Gradient penalty term for WGAN-GP / LP / DRAGAN variants.

        (Name keeps the original 'panalty' misspelling — callers use it.)
        Returns 0 when ``gan_type`` matches none of the penalty variants.
        """
        if self.gan_type.__contains__('dragan'):
            # DRAGAN: perturb the real samples instead of using generator fakes.
            eps = tf.random_uniform(shape=tf.shape(real), minval=0.0, maxval=1.0)
            (_, x_var) = tf.nn.moments(real, axes=[0, 1, 2, 3])
            x_std = tf.sqrt(x_var)
            fake = (real + ((0.5 * x_std) * eps))
        # Random interpolation between real and fake samples.
        alpha = tf.random_uniform(shape=[self.batch_size, 1, 1, 1], minval=0.0, maxval=1.0)
        interpolated = (real + (alpha * (fake - real)))
        logit = self.discriminator(interpolated, reuse=True, scope=scope)
        grad = tf.gradients(logit, interpolated)[0]
        grad_norm = tf.norm(flatten(grad), axis=1)
        GP = 0
        if self.gan_type.__contains__('lp'):
            # One-sided penalty: only norms above 1 are penalized.
            GP = (self.ld * tf.reduce_mean(tf.square(tf.maximum(0.0, (grad_norm - 1.0)))))
        elif (self.gan_type.__contains__('gp') or (self.gan_type == 'dragan')):
            # Two-sided penalty towards unit gradient norm.
            GP = (self.ld * tf.reduce_mean(tf.square((grad_norm - 1.0))))
        return GP
    def build_model(self):
        """Assemble the full training graph: input pipelines, generator and
        discriminator outputs, losses, optimizers and summaries."""
        self.lr = tf.placeholder(tf.float32, name='learning_rate')
        Image_Data_Class = ImageData(self.img_size, self.img_ch, self.augment_flag)
        trainA = tf.data.Dataset.from_tensor_slices(self.trainA_dataset)
        trainB = tf.data.Dataset.from_tensor_slices(self.trainB_dataset)
        trainB_smooth = tf.data.Dataset.from_tensor_slices(self.trainB_smooth_dataset)
        gpu_device = '/gpu:0'
        # Identical shuffle/decode/batch/prefetch pipeline for each source.
        trainA = trainA.apply(shuffle_and_repeat(self.dataset_num)).apply(map_and_batch(Image_Data_Class.image_processing, self.batch_size, num_parallel_batches=16, drop_remainder=True)).apply(prefetch_to_device(gpu_device, self.batch_size))
        trainB = trainB.apply(shuffle_and_repeat(self.dataset_num)).apply(map_and_batch(Image_Data_Class.image_processing, self.batch_size, num_parallel_batches=16, drop_remainder=True)).apply(prefetch_to_device(gpu_device, self.batch_size))
        trainB_smooth = trainB_smooth.apply(shuffle_and_repeat(self.dataset_num)).apply(map_and_batch(Image_Data_Class.image_processing, self.batch_size, num_parallel_batches=16, drop_remainder=True)).apply(prefetch_to_device(gpu_device, self.batch_size))
        trainA_iterator = trainA.make_one_shot_iterator()
        trainB_iterator = trainB.make_one_shot_iterator()
        trainB_smooth_iterator = trainB_smooth.make_one_shot_iterator()
        self.real_A = trainA_iterator.get_next()
        self.real_B = trainB_iterator.get_next()
        self.real_B_smooth = trainB_smooth_iterator.get_next()
        self.test_real_A = tf.placeholder(tf.float32, [1, self.img_size, self.img_size, self.img_ch], name='test_real_A')
        # Forward passes: one generator call, three discriminator calls
        # (weights shared via reuse).
        self.fake_B = self.generator(self.real_A)
        real_B_logit = self.discriminator(self.real_B)
        fake_B_logit = self.discriminator(self.fake_B, reuse=True)
        real_B_smooth_logit = self.discriminator(self.real_B_smooth, reuse=True)
        if (self.gan_type.__contains__('gp') or self.gan_type.__contains__('lp') or self.gan_type.__contains__('dragan')):
            # Penalize gradients on both fake and edge-smoothed samples.
            GP = (self.gradient_panalty(real=self.real_B, fake=self.fake_B) + self.gradient_panalty(self.real_B, fake=self.real_B_smooth))
        else:
            GP = 0.0
        # Content (VGG) loss keeps the generated image close to the input photo.
        v_loss = (self.vgg_weight * vgg_loss(self.real_A, self.fake_B))
        g_loss = (self.adv_weight * generator_loss(self.gan_type, fake_B_logit))
        d_loss = ((self.adv_weight * discriminator_loss(self.gan_type, real_B_logit, fake_B_logit, real_B_smooth_logit)) + GP)
        self.Vgg_loss = v_loss
        self.Generator_loss = (g_loss + v_loss)
        self.Discriminator_loss = d_loss
        self.test_fake_B = self.generator(self.test_real_A, reuse=True)
        t_vars = tf.trainable_variables()
        G_vars = [var for var in t_vars if ('generator' in var.name)]
        D_vars = [var for var in t_vars if ('discriminator' in var.name)]
        # init_optim pretrains the generator on the VGG loss alone.
        self.init_optim = tf.train.AdamOptimizer(self.lr, beta1=0.5, beta2=0.999).minimize(self.Vgg_loss, var_list=G_vars)
        self.G_optim = tf.train.AdamOptimizer(self.lr, beta1=0.5, beta2=0.999).minimize(self.Generator_loss, var_list=G_vars)
        self.D_optim = tf.train.AdamOptimizer(self.lr, beta1=0.5, beta2=0.999).minimize(self.Discriminator_loss, var_list=D_vars)
        # TensorBoard summaries.
        self.G_loss = tf.summary.scalar('Generator_loss', self.Generator_loss)
        self.D_loss = tf.summary.scalar('Discriminator_loss', self.Discriminator_loss)
        self.G_gan = tf.summary.scalar('G_gan', g_loss)
        self.G_vgg = tf.summary.scalar('G_vgg', v_loss)
        self.V_loss_merge = tf.summary.merge([self.G_vgg])
        self.G_loss_merge = tf.summary.merge([self.G_loss, self.G_gan, self.G_vgg])
        self.D_loss_merge = tf.summary.merge([self.D_loss])
def train(self):
    """Run the full training loop: a VGG-only initialization phase for the
    generator, then adversarial GAN training.

    Restores from the latest checkpoint when one exists, logs scalar
    summaries each step, and periodically saves sample images and
    checkpoints.
    """
    tf.global_variables_initializer().run()
    self.saver = tf.train.Saver()
    self.writer = tf.summary.FileWriter(((self.log_dir + '/') + self.model_dir), self.sess.graph)
    # Resume bookkeeping: `counter` is the global step across all epochs.
    (could_load, checkpoint_counter) = self.load(self.checkpoint_dir)
    if could_load:
        start_epoch = int((checkpoint_counter / self.iteration))
        start_batch_id = (checkpoint_counter - (start_epoch * self.iteration))
        counter = checkpoint_counter
        print(' [*] Load SUCCESS')
    else:
        start_epoch = 0
        start_batch_id = 0
        counter = 1
        print(' [!] Load failed...')
    start_time = time.time()
    # Last generator loss, reused for logging on steps where G is skipped.
    past_g_loss = (- 1.0)
    lr = self.init_lr
    for epoch in range(start_epoch, self.epoch):
        if self.decay_flag:
            # Halve the learning rate every `decay_epoch` epochs.
            lr = (self.init_lr * pow(0.5, (epoch // self.decay_epoch)))
        for idx in range(start_batch_id, self.iteration):
            train_feed_dict = {self.lr: lr}
            if (epoch < self.init_epoch):
                # Initialization phase: train the generator on VGG content loss only.
                (real_A_images, fake_B_images, _, v_loss, summary_str) = self.sess.run([self.real_A, self.fake_B, self.init_optim, self.Vgg_loss, self.V_loss_merge], feed_dict=train_feed_dict)
                self.writer.add_summary(summary_str, counter)
                print(('Epoch: [%3d] [%5d/%5d] time: %4.4f v_loss: %.8f' % (epoch, idx, self.iteration, (time.time() - start_time), v_loss)))
            else:
                # Adversarial phase: update the discriminator every step...
                (_, d_loss, summary_str) = self.sess.run([self.D_optim, self.Discriminator_loss, self.D_loss_merge], feed_dict=train_feed_dict)
                self.writer.add_summary(summary_str, counter)
                g_loss = None
                # ...and the generator only every `n_critic` steps.
                if (((counter - 1) % self.n_critic) == 0):
                    (real_A_images, fake_B_images, _, g_loss, summary_str) = self.sess.run([self.real_A, self.fake_B, self.G_optim, self.Generator_loss, self.G_loss_merge], feed_dict=train_feed_dict)
                    self.writer.add_summary(summary_str, counter)
                    past_g_loss = g_loss
                if (g_loss == None):
                    g_loss = past_g_loss
                print(('Epoch: [%3d] [%5d/%5d] time: %4.4f d_loss: %.8f, g_loss: %.8f' % (epoch, idx, self.iteration, (time.time() - start_time), d_loss, g_loss)))
            counter += 1
            if (np.mod((idx + 1), self.print_freq) == 0):
                save_images(real_A_images, [self.batch_size, 1], './{}/real_A_{:03d}_{:05d}.png'.format(self.sample_dir, epoch, (idx + 1)))
                save_images(fake_B_images, [self.batch_size, 1], './{}/fake_B_{:03d}_{:05d}.png'.format(self.sample_dir, epoch, (idx + 1)))
            if (np.mod((idx + 1), self.save_freq) == 0):
                self.save(self.checkpoint_dir, counter)
        # Only the resumed epoch starts mid-iteration; later epochs start at 0.
        start_batch_id = 0
        self.save(self.checkpoint_dir, counter)
@property
def model_dir(self):
    """Checkpoint/log sub-directory name encoding the hyper-parameter setup.

    Exposed as a property because every caller in this file accesses
    `self.model_dir` as an attribute (e.g. in save()/load()/test() and the
    FileWriter path); without @property those call sites would receive the
    bound method and the string concatenation / os.path.join would fail.
    """
    n_res = (str(self.n_res) + 'resblock')
    n_dis = (str(self.n_dis) + 'dis')
    return '{}_{}_{}_{}_{}_{}_{}_{}_{}'.format(self.model_name, self.dataset_name, self.gan_type, n_res, n_dis, self.n_critic, self.sn, int(self.adv_weight), int(self.vgg_weight))
def save(self, checkpoint_dir, step):
    """Persist the session's variables as a checkpoint tagged with `step`."""
    target_dir = os.path.join(checkpoint_dir, self.model_dir)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    ckpt_path = os.path.join(target_dir, self.model_name + '.model')
    self.saver.save(self.sess, ckpt_path, global_step=step)
def load(self, checkpoint_dir):
    """Restore the newest checkpoint; return (loaded?, step counter)."""
    print(' [*] Reading checkpoints...')
    ckpt_dir = os.path.join(checkpoint_dir, self.model_dir)
    state = tf.train.get_checkpoint_state(ckpt_dir)
    if not (state and state.model_checkpoint_path):
        print(' [*] Failed to find a checkpoint')
        return (False, 0)
    ckpt_name = os.path.basename(state.model_checkpoint_path)
    self.saver.restore(self.sess, os.path.join(ckpt_dir, ckpt_name))
    # Checkpoint files are named "<name>.model-<step>"; recover the step.
    counter = int(ckpt_name.split('-')[-1])
    print(' [*] Success to read {}'.format(ckpt_name))
    return (True, counter)
def test(self):
    """Run inference on every image in the dataset's testA folder and write
    the translated outputs plus an HTML index page into result_dir."""
    tf.global_variables_initializer().run()
    test_A_files = glob('./dataset/{}/*.*'.format((self.dataset_name + '/testA')))
    self.saver = tf.train.Saver()
    (could_load, checkpoint_counter) = self.load(self.checkpoint_dir)
    self.result_dir = os.path.join(self.result_dir, self.model_dir)
    check_folder(self.result_dir)
    if could_load:
        print(' [*] Load SUCCESS')
    else:
        print(' [!] Load failed...')
    # Build an HTML table linking each input image to its generated output.
    index_path = os.path.join(self.result_dir, 'index.html')
    index = open(index_path, 'w')
    index.write('<html><body><table><tr>')
    index.write('<th>name</th><th>input</th><th>output</th></tr>')
    for sample_file in test_A_files:
        print(('Processing A image: ' + sample_file))
        sample_image = np.asarray(load_test_data(sample_file))
        image_path = os.path.join(self.result_dir, '{0}'.format(os.path.basename(sample_file)))
        fake_img = self.sess.run(self.test_fake_B, feed_dict={self.test_real_A: sample_image})
        save_images(fake_img, [1, 1], image_path)
        index.write(('<td>%s</td>' % os.path.basename(image_path)))
        # Relative paths are prefixed so they resolve from inside result_dir.
        index.write(("<td><img src='%s' width='%d' height='%d'></td>" % ((sample_file if os.path.isabs(sample_file) else (('../..' + os.path.sep) + sample_file)), self.img_size, self.img_size)))
        index.write(("<td><img src='%s' width='%d' height='%d'></td>" % ((image_path if os.path.isabs(image_path) else (('../..' + os.path.sep) + image_path)), self.img_size, self.img_size)))
        index.write('</tr>')
    index.close()
def assert_and_infer_cfg(cache_urls=True, make_immutable=True):
    """Derive dependent config flags, optionally cache URLs and freeze cfg."""
    uses_rpn = __C.MODEL.RPN_ONLY or __C.MODEL.FASTER_RCNN
    if uses_rpn:
        __C.RPN.RPN_ON = True
    if __C.RPN.RPN_ON or __C.RETINANET.RETINANET_ON:
        # Proposals are generated on the fly, so precomputed ones are unused.
        __C.TEST.PRECOMPUTED_PROPOSALS = False
    if cache_urls:
        cache_cfg_urls()
    if make_immutable:
        cfg.immutable(True)
def TAGMUtil_ConnectCmtyVV(CmtyVV, CIDSzPrV, NIDMemPrV, Rnd):
    """Thin wrapper forwarding to the native SWIG binding in `_snap`."""
    result = _snap.TAGMUtil_ConnectCmtyVV(CmtyVV, CIDSzPrV, NIDMemPrV, Rnd)
    return result
class SequencialIterator(object):
    """Iterate over the configured files in order, yielding each configured
    text column preprocessed according to the file's text type."""

    def __init__(self, files):
        self.files = files
        # One preprocessor configured with the text type of every file.
        text_types = map(lambda entry: entry['text_type'], files)
        self.preprocessor = Preprocessing(text_types)

    def __iter__(self):
        for spec in self.files:
            limit = spec['max_sentences']
            handle = open_file(spec['file_name'], spec['file_type'])
            ttype = spec['text_type']
            columns = spec['tags'].values()
            expected_len = spec['max_index'] + 1
            for lineno, raw in enumerate(handle):
                if lineno >= limit:
                    break
                fields = raw.replace('\r', '').replace('\n', '').split('\t')
                # Skip malformed rows that do not have every expected column.
                if len(fields) != expected_len:
                    continue
                for col in columns:
                    yield self.preprocessor.preprocess_text(fields[col], ttype)
class Parameter():
    """Annotation-only container describing training hyper-parameters.

    NOTE(review): only class-level annotations are declared, so a plain
    instance has none of these attributes — presumably a decorator such as
    @dataclass was stripped from this copy; confirm against the original.
    """
    seed: int             # RNG seed
    use_ema: bool         # whether to keep an exponential moving average of weights
    ema_decay: float      # EMA decay rate
    max_epochs: int       # training epoch budget
    tensorboard_dir: str  # directory for TensorBoard logs
    RANK: int             # distributed process rank
def run(argv=None):
    """CLI entry point: take/release an fcntl lock, then exec a command under it."""
    parser = argparse.ArgumentParser(description=__doc__)
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-s', '--shared', action='store_true', help='create a shared lock')
    group.add_argument('-x', '--exclusive', action='store_true', help='create an exclusive lock (the default)')
    group.add_argument('-u', '--unlock', action='store_true', help='remove an existing lock')
    parser.add_argument('lock', metavar='LOCK', type=IntOrFileType('w+', makedirs=True), help='filename of the lock an integer file descriptor')
    parser.add_argument('command', metavar='COMMAND', nargs=argparse.REMAINDER, help='command to run with the lock including any arguments to that command')
    args = parser.parse_args(argv)

    # Map the mutually-exclusive flags onto the fcntl operation.
    if args.shared:
        locktype = fcntl.LOCK_SH
    elif args.unlock:
        locktype = fcntl.LOCK_UN
    else:
        locktype = fcntl.LOCK_EX

    lock, command = args.lock, args.command
    if isinstance(lock, int) and command:
        parser.error('sage-flock does not accept a command when passed a file descriptor number')

    # First try non-blocking so we can tell the user we are waiting.
    try:
        fcntl.flock(lock, locktype | fcntl.LOCK_NB)
    except IOError as exc:
        if locktype == fcntl.LOCK_UN:
            sys.stderr.write('Unexpected error trying to unlock fd: {0}\n'.format(exc))
            return 1
        kind = 'shared' if locktype == fcntl.LOCK_SH else 'exclusive'
        sys.stderr.write('Waiting for {0} lock to run {1} ... '.format(kind, ' '.join((pipes.quote(arg) for arg in command))))
        fcntl.flock(lock, locktype)
        sys.stderr.write('ok\n')

    if not (args.unlock or isinstance(lock, int)):
        os.execvp(command[0], command)
def test_case86():
    """Batch-upsert entities via NGSI-LD, then verify the device registration."""
    # NOTE(review): the Link header value below looks garbled (unbalanced
    # quotes around rel=) — verify against the upstream test suite.
    headers = {'Content-Type': 'application/json', 'Accept': 'application/ld+json', 'Link': '<{{link}}>; rel=" type="application/ld+json"'}
    upsert_url = (brokerIp + '/ngsi-ld/v1/entityOperations/upsert')
    requests.post(upsert_url, data=json.dumps(ld_data.upsertCommand), headers=headers)
    registration_url = (discoveryIp + '/ngsi9/registration/urn:ngsi-ld:Device:water001')
    r = requests.get(registration_url, headers=headers)
    assert r.status_code == 200
def resnet101v6(pthfile, device=None):
    """Build a ResNet-101 encoder and load its weights from `pthfile`.

    When `device` is None, picks CUDA if available (with a warning), else CPU.
    """
    if device is None:
        if torch.cuda.is_available():
            warnings.warn('device not defined in resnet101v6, assigning to first CUDA visible device.')
            device = torch.device('cuda')
        else:
            device = torch.device('cpu')
    model = ResNet(Bottleneck, [3, 4, 23, 3], mode='encode', num_classes=65359)
    state_dict = torch.load(pthfile, map_location=device)
    model.load_state_dict(state_dict)
    return model
def recalculate_moving_avgs(flow, train_loader, device):
    """Recompute batch-norm moving averages with one pass over the training set."""
    with torch.no_grad():
        flow.eval()
        print('Recalculating moving averages for batch norm layers.')
        flow.start_averaging()
        for batch_h, batch_x in train_loader:
            if device is not None:
                batch_h = batch_h.to(device, non_blocking=True)
                batch_x = batch_x.to(device, non_blocking=True)
            # Same noise injection as training: y = h + standard normal noise.
            noisy_h = batch_h + torch.randn_like(batch_h)
            (_, _) = flow(batch_x, noisy_h)
        flow.end_averaging()
def main():
    """Filter stdin: drop non letter/number/mark characters, collapse whitespace."""
    filter_r = regex.compile("[^\\p{L}\\p{N}\\p{M}\\' \\-]")
    for raw in sys.stdin:
        cleaned = filter_r.sub(' ', raw.strip())
        print(' '.join(cleaned.split()))
def init_property_of_dataset():
    """Populate the global lookup tables from the training and auxiliary
    triple files (tab-separated `head relation tail` integer lines)."""
    global gold_heads, gold_tails, gold_relations
    global candidate_heads, candidate_tails
    global train_link, aux_link
    trace('load train')
    for line in open(args.train_file):
        items = line.strip().split('\t')
        items = list(map(int, items))
        (h, r, t) = items
        candidate_heads[r].add(h)
        candidate_tails[r].add(t)
        gold_heads[(r, t)].add(h)
        gold_tails[(h, r)].add(t)
        tail_per_head[h].add(t)
        head_per_tail[t].add(h)
        train_link[t].add(h)
        train_link[h].add(t)
        gold_relations[(h, t)] = r
    # Freeze the link/candidate sets as lists (e.g. for later sampling/indexing).
    for e in train_link:
        train_link[e] = list(train_link[e])
    for r in candidate_heads:
        candidate_heads[r] = list(candidate_heads[r])
    for r in candidate_tails:
        candidate_tails[r] = list(candidate_tails[r])
    # Replace the per-entity neighbour sets by their counts as floats.
    for h in tail_per_head:
        tail_per_head[h] = (len(tail_per_head[h]) + 0.0)
    for t in head_per_tail:
        head_per_tail[t] = (len(head_per_tail[t]) + 0.0)
    trace('set axiaulity')
    trace('OOKB esetting, use: different edges')
    # Auxiliary (out-of-KB) edges live in their own adjacency map.
    aux_link = defaultdict(set)
    for line in open(args.auxiliary_file):
        items = line.strip().split('\t')
        items = list(map(int, items))
        (h, r, t) = items
        gold_relations[(h, t)] = r
        aux_link[t].add(h)
        aux_link[h].add(t)
    for e in aux_link:
        aux_link[e] = list(aux_link[e])
def test_scimodel_keras_optimizers(variable_x, variable_y, functional_fx, functional_gx):
    """SciModel should construct cleanly with Keras Adam and RMSprop optimizers."""
    inputs = [variable_x, variable_y]
    targets = [functional_fx, functional_gx]
    for optimizer in (tf_optimizers.Adam(), tf_optimizers.RMSprop()):
        assert isinstance(sn.SciModel(inputs, targets, 'mse', optimizer), sn.SciModel)
_method
class HeckeSubmodule(module.HeckeModule_free_module):
    """Submodule of an ambient Hecke module, backed by an embedded free module.

    NOTE(review): two bare `_method` statements appear below, before
    `complement` and `dual_free_module`; together with the calls to
    `self.dual_free_module.set_cache(...)` / `.is_in_cache()` they look like
    mangled `@cached_method` decorators from the original Sage source —
    confirm before relying on the caching behavior.
    """

    def __init__(self, ambient, submodule, dual_free_module=None, check=True):
        """Wrap `submodule` (a free module inside `ambient`'s free module).

        When `check` is true, verify that the submodule is invariant under
        all Hecke operators. An optional embedded dual of matching rank may
        be supplied and is cached.
        """
        from . import ambient_module
        if (not isinstance(ambient, ambient_module.AmbientHeckeModule)):
            raise TypeError('ambient must be an ambient Hecke module')
        if (not is_FreeModule(submodule)):
            raise TypeError('submodule must be a free module')
        if (not submodule.is_submodule(ambient.free_module())):
            raise ValueError('submodule must be a submodule of the ambient free module')
        if check:
            if (not ambient._is_hecke_equivariant_free_module(submodule)):
                raise ValueError('The submodule must be invariant under all Hecke operators.')
        self.__ambient = ambient
        self.__submodule = submodule
        module.HeckeModule_free_module.__init__(self, ambient.base_ring(), ambient.level(), ambient.weight())
        if (not (dual_free_module is None)):
            if (not is_FreeModule(dual_free_module)):
                raise TypeError('dual_free_module must be a free module')
            if (dual_free_module.rank() != submodule.rank()):
                raise ArithmeticError('dual_free_module must have the same rank as submodule')
            self.dual_free_module.set_cache(dual_free_module)

    def _repr_(self):
        """Short description used by Sage's repr machinery."""
        return ('Rank %s submodule of a Hecke module of level %s' % (self.rank(), self.level()))

    def __add__(self, other):
        """Sum of two submodules of a common ambient Hecke module."""
        if (not isinstance(other, module.HeckeModule_free_module)):
            raise TypeError(('other (=%s) must be a Hecke module.' % other))
        if (self.ambient() != other.ambient()):
            raise ArithmeticError('sum only defined for submodules of a common ambient space')
        if other.is_ambient():
            return other
        M = (self.free_module() + other.free_module())
        return self.ambient().submodule(M, check=False)

    def _element_constructor_(self, x, check=True):
        """Coerce `x` into this submodule via the ambient Hecke module."""
        z = self.ambient_hecke_module()(x).element()
        if (check and (z not in self.__submodule)):
            raise TypeError('x does not coerce to an element of this Hecke module')
        return self.element_class(self, z)

    def __richcmp__(self, other, op):
        """Compare ambient modules first, then the echelonized free modules."""
        if (not isinstance(other, module.HeckeModule_free_module)):
            return NotImplemented
        lx = self.ambient()
        rx = other.ambient()
        if (lx != rx):
            return richcmp_not_equal(lx, rx, op)
        return self.free_module()._echelon_matrix_richcmp(other.free_module(), op)

    def _compute_dual_hecke_matrix(self, n):
        """Matrix of the n-th dual Hecke operator restricted to the dual module.

        Restriction is verified only when gcd(level, n) != 1 (otherwise the
        operator is known to preserve the subspace).
        """
        A = self.ambient_hecke_module().dual_hecke_matrix(n)
        check = (arith.gcd(self.level(), n) != 1)
        return A.restrict(self.dual_free_module(), check=check)

    def _compute_hecke_matrix(self, n):
        """Matrix of the Hecke operator T_n restricted to this submodule."""
        A = self.ambient_hecke_module().hecke_matrix(n)
        check = (arith.gcd(self.level(), n) != 1)
        return A.restrict(self.free_module(), check=check)

    def _compute_diamond_matrix(self, d):
        """Matrix of the diamond-bracket operator <d> on this submodule."""
        return self.ambient_hecke_module().diamond_bracket_matrix(d).restrict(self.free_module(), check=False)

    def _compute_atkin_lehner_matrix(self, d):
        """Matrix of the Atkin-Lehner operator w_d restricted to this submodule."""
        A = self.ambient_hecke_module()._compute_atkin_lehner_matrix(d)
        return A.restrict(self.free_module(), check=True)

    def _set_dual_free_module(self, V):
        """Cache `V` as the embedded dual after checking its degree and rank."""
        if (V.degree() != self.ambient_hecke_module().rank()):
            raise ArithmeticError('The degree of V must equal the rank of the ambient space.')
        if (V.rank() != self.rank()):
            raise ArithmeticError('The rank of V must equal the rank of self.')
        self.dual_free_module.set_cache(V)

    def ambient_hecke_module(self):
        """The ambient Hecke module this is a submodule of."""
        return self.__ambient

    def ambient(self):
        """Alias for ambient_hecke_module()."""
        return self.__ambient

    # NOTE(review): presumably a mangled @cached_method decorator — confirm.
    _method
    def complement(self, bound=None):
        """A Hecke-stable complement of self in the ambient module.

        First tries cutting the ambient free module down with kernels of
        Hecke operators T_p (skipping p | level when self is not a full
        Hecke module); if that stalls, falls back to summing the pieces of
        the ambient decomposition that intersect self trivially.
        """
        if self.dual_free_module.is_in_cache():
            D = self.dual_free_module()
            V = D.basis_matrix().right_kernel()
            return self.submodule(V, check=False)
        if self.is_ambient():
            return self.ambient_hecke_module().zero_submodule()
        if self.is_zero():
            return self.ambient_hecke_module()
        if self.is_full_hecke_module():
            anemic = False
        else:
            anemic = True
        verbose('computing')
        N = self.level()
        A = self.ambient_hecke_module()
        V = A.free_module()
        p = 2
        if (bound is None):
            bound = A.hecke_bound()
        while True:
            if anemic:
                # Skip primes dividing the level in the anemic Hecke algebra.
                while ((N % p) == 0):
                    p = arith.next_prime(p)
            verbose(('using T_%s' % p))
            f = self.hecke_polynomial(p)
            T = A.hecke_matrix(p)
            g = T.charpoly('x')
            # Kill the part of V corresponding to self's factor of charpoly.
            V = T.kernel_on(V, poly=(g // f), check=False)
            if ((V.rank() + self.rank()) <= A.rank()):
                break
            p = arith.next_prime(p)
            if (p > bound):
                break
        if ((V.rank() + self.rank()) == A.rank()):
            C = A.submodule(V, check=False)
            return C
        verbose('falling back on naive algorithm')
        D = A.decomposition()
        C = A.zero_submodule()
        for X in D:
            if (self.intersection(X).dimension() == 0):
                C = (C + X)
        if ((C.rank() + self.rank()) == A.rank()):
            return C
        raise RuntimeError(('Computation of complementary space failed (cut down to rank %s, but should have cut down to rank %s).' % (V.rank(), (A.rank() - self.rank()))))

    def degeneracy_map(self, level, t=1):
        """Degeneracy map to (or from) the given level, restricted to self."""
        d = self.ambient_hecke_module().degeneracy_map(level, t)
        return d.restrict_domain(self)

    # NOTE(review): presumably a mangled @cached_method decorator — confirm.
    _method
    def dual_free_module(self, bound=None, anemic=True, use_star=True):
        """The embedded dual of self inside the ambient dual free module.

        Uses the cached complement when available; otherwise cuts down the
        ambient dual with dual Hecke operators (optionally splitting by the
        star involution first when the module supports it).
        """
        if self.complement.is_in_cache():
            verbose('This module knows its complement already -- cheating in dual_free_module')
            C = self.complement()
            V = C.basis_matrix().right_kernel()
            return V
        verbose('computing dual')
        A = self.ambient_hecke_module()
        if (self.dimension() == 0):
            return A.zero_submodule()
        if (A.dimension() == self.dimension()):
            return A.free_module()
        if (not hasattr(self, 'star_eigenvalues')):
            use_star = False
        if use_star:
            if (len(self.star_eigenvalues()) == 2):
                # Both star eigenvalues occur: handle each sign part separately.
                V = (self.plus_submodule(compute_dual=False).dual_free_module() + self.minus_submodule(compute_dual=False).dual_free_module())
                return V
            V = A.sign_submodule(self.sign()).dual_free_module()
        else:
            V = A.free_module()
        N = self.level()
        p = 2
        if (bound is None):
            bound = A.hecke_bound()
        while True:
            if anemic:
                while ((N % p) == 0):
                    p = arith.next_prime(p)
            verbose(('using T_%s' % p))
            f = self.hecke_polynomial(p)
            T = A.dual_hecke_matrix(p)
            V = T.kernel_on(V, poly=f, check=False)
            if (V.dimension() <= self.dimension()):
                break
            p = arith.next_prime(p)
            if (p > bound):
                break
        if (V.rank() == self.rank()):
            return V
        # Last resort: compute via the complement's kernel.
        W = self.complement()
        V2 = W.basis_matrix().right_kernel()
        if (V2.rank() == self.rank()):
            return V2
        raise RuntimeError(('Computation of embedded dual vector space failed (cut down to rank %s, but should have cut down to rank %s).' % (V.rank(), self.rank())))

    def free_module(self):
        """The underlying embedded free module."""
        return self.__submodule

    def module(self):
        """Alias for free_module()."""
        return self.free_module()

    def intersection(self, other):
        """Intersection with another submodule of the same ambient module.

        Propagates a common sign to the result when one is available.
        """
        if (self.ambient_hecke_module() != other.ambient_hecke_module()):
            raise ArithmeticError('intersection only defined for subspaces of a common ambient modular symbols space')
        if other.is_ambient():
            return self
        if self.is_ambient():
            return other
        V = self.free_module().intersection(other.free_module())
        M = self.ambient_hecke_module().submodule(V, check=False)
        try:
            if self.sign():
                M._set_sign(self.sign())
            elif other.sign():
                M._set_sign(other.sign())
        except AttributeError:
            # Not all Hecke modules carry a sign; best-effort only.
            pass
        return M

    def is_ambient(self):
        """True if this submodule is the whole ambient free module."""
        return (self.free_module() == self.ambient_hecke_module().free_module())

    def is_new(self, p=None):
        """True if self lies in the p-new subspace (cached per p)."""
        try:
            return self.__is_new[p]
        except AttributeError:
            self.__is_new = {}
        except KeyError:
            pass
        N = self.ambient_hecke_module().new_submodule(p)
        self.__is_new[p] = self.is_submodule(N)
        return self.__is_new[p]

    def is_old(self, p=None):
        """True if self lies in the p-old subspace (cached per p)."""
        try:
            return self.__is_old[p]
        except AttributeError:
            self.__is_old = {}
        except KeyError:
            pass
        O = self.ambient_hecke_module().old_submodule(p)
        self.__is_old[p] = self.is_submodule(O)
        return self.__is_old[p]

    def is_submodule(self, V):
        """True if self is contained in the Hecke module `V`."""
        if (not isinstance(V, module.HeckeModule_free_module)):
            return False
        return ((self.ambient_hecke_module() == V.ambient_hecke_module()) and self.free_module().is_subspace(V.free_module()))

    def linear_combination_of_basis(self, v):
        """Element of self given by coordinates `v` w.r.t. the basis."""
        x = self.free_module().linear_combination_of_basis(v)
        return self(x)

    def new_submodule(self, p=None):
        """The p-new part of self (cached per p)."""
        try:
            if self.__is_new[p]:
                return self
        except AttributeError:
            self.__is_new = {}
        except KeyError:
            pass
        if (self.rank() == 0):
            self.__is_new[p] = True
            return self
        try:
            return self.__new_submodule[p]
        except AttributeError:
            self.__new_submodule = {}
        except KeyError:
            pass
        S = self.ambient_hecke_module().new_submodule(p)
        ns = S.intersection(self)
        if (ns.rank() == self.rank()):
            self.__is_new[p] = True
        ns.__is_new = {p: True}
        self.__new_submodule[p] = ns
        return ns

    def nonembedded_free_module(self):
        """Abstract (non-embedded) free module of the same rank."""
        return self.free_module().nonembedded_free_module()

    def old_submodule(self, p=None):
        """The p-old part of self (cached per p)."""
        try:
            if self.__is_old[p]:
                return self
        except AttributeError:
            self.__is_old = {}
        except KeyError:
            pass
        if (self.rank() == 0):
            self.__is_old[p] = True
            return self
        try:
            return self.__old_submodule[p]
        except AttributeError:
            self.__old_submodule = {}
        except KeyError:
            pass
        S = self.ambient_hecke_module().old_submodule(p)
        os = S.intersection(self)
        if (os.rank() == self.rank()):
            self.__is_old[p] = True
        os.__is_old = {p: True}
        self.__old_submodule[p] = os
        return os

    def rank(self):
        """Rank of the underlying free module."""
        return self.__submodule.rank()

    def submodule(self, M, Mdual=None, check=True):
        """Submodule of self spanned by `M` (a free module or list of elements)."""
        if (not is_FreeModule(M)):
            V = self.ambient_module().free_module()
            if isinstance(M, (list, tuple)):
                M = V.span([V(x.element()) for x in M])
            else:
                M = V.span(M)
        if check:
            if (not M.is_submodule(self.free_module())):
                raise TypeError(('M (=%s) must be a submodule of the free module (=%s) associated to this module.' % (M, self.free_module())))
        return self.ambient().submodule(M, Mdual, check=check)

    def submodule_from_nonembedded_module(self, V, Vdual=None, check=True):
        """Re-embed an abstract submodule `V` (and optional dual) into ambient
        coordinates and return the corresponding submodule."""
        E = self.free_module()
        M_V = V.matrix()
        M_E = E.matrix()
        A = (M_V * M_E)
        V = A.row_space()
        if (not (Vdual is None)):
            E = self.dual_free_module()
            M_Vdual = Vdual.matrix()
            M_E = E.matrix()
            A = (M_Vdual * M_E)
            Vdual = A.row_space()
        return self.ambient_hecke_module().submodule(V, Vdual, check=check)

    def hecke_bound(self):
        """Bound on Hecke operators needed to generate the Hecke algebra;
        the Sturm bound for cuspidal modules, else the ambient bound."""
        if self.is_cuspidal():
            return self.sturm_bound()
        else:
            return self.ambient_hecke_module().hecke_bound()
# NOTE(review): the line below looks like a mangled decorator — presumably
# @pytest.mark.parametrize('family', 'CLUQJ'); confirm against the original.
.parametrize('family', 'CLUQJ')
def test_constant_inner(family):
    """inner(1, x**2) should equal 2/3 for the given function space.

    NOTE(review): the loop variable `quad` is never used in the body —
    presumably the space should be rebuilt per quadrature scheme; confirm.
    """
    D = FunctionSpace(6, family, alpha=1, beta=2)
    for quad in quads[D.family()]:
        q = inner(1, Array(D, buffer=(x ** 2)))
        assert (abs((q - (2 / 3))) < 1e-08)
class GNN(torch.nn.Module):
    """Graph property predictor: GNN node encoder, graph pooling, linear head."""

    def __init__(self, num_tasks=1, num_layers=5, emb_dim=300, gnn_type='gin', virtual_node=True, residual=False, drop_ratio=0, JK='last', graph_pooling='sum'):
        super(GNN, self).__init__()
        self.num_layers = num_layers
        self.drop_ratio = drop_ratio
        self.JK = JK
        self.emb_dim = emb_dim
        self.num_tasks = num_tasks
        self.graph_pooling = graph_pooling
        if self.num_layers < 2:
            raise ValueError('Number of GNN layers must be greater than 1.')
        # Node encoder, with or without a virtual node linked to every node.
        node_encoder_cls = GNN_node_Virtualnode if virtual_node else GNN_node
        self.gnn_node = node_encoder_cls(num_layers, emb_dim, JK=JK, drop_ratio=drop_ratio, residual=residual, gnn_type=gnn_type)
        # Graph-level readout.
        if self.graph_pooling == 'sum':
            self.pool = global_add_pool
        elif self.graph_pooling == 'mean':
            self.pool = global_mean_pool
        elif self.graph_pooling == 'max':
            self.pool = global_max_pool
        elif self.graph_pooling == 'attention':
            gate = torch.nn.Sequential(torch.nn.Linear(emb_dim, emb_dim), torch.nn.BatchNorm1d(emb_dim), torch.nn.ReLU(), torch.nn.Linear(emb_dim, 1))
            self.pool = GlobalAttention(gate_nn=gate)
        elif self.graph_pooling == 'set2set':
            self.pool = Set2Set(emb_dim, processing_steps=2)
        else:
            raise ValueError('Invalid graph pooling type.')
        # Set2Set doubles the embedding dimension fed to the prediction head.
        head_in_dim = (2 * self.emb_dim) if graph_pooling == 'set2set' else self.emb_dim
        self.graph_pred_linear = torch.nn.Linear(head_in_dim, self.num_tasks)

    def forward(self, batched_data):
        h_node = self.gnn_node(batched_data)
        h_graph = self.pool(h_node, batched_data.batch)
        output = self.graph_pred_linear(h_graph)
        # At evaluation time, clamp predictions into the valid target range.
        if self.training:
            return output
        return torch.clamp(output, min=0, max=50)
def _bench(args):
    """Benchmark the fused voxel-hash query op against its composite version.

    Builds a random query batch and a flat feature table, times
    forward+backward over `args.n_iters` iterations for each implementation,
    and returns both wall-clock times as (et_fused, et_composite).
    """
    rng = np.random.RandomState(412)
    (m, M) = ((- 1), 1)  # query coordinates drawn uniformly from [m, M)
    B = args.batch_size
    G0 = args.grid_size_base
    gf = args.growth_factor
    T0 = args.table_size_base
    L = args.n_levels
    D = args.feature_size
    query_data = (m + (rng.rand(B, 3) * (M - m)))
    query = nn.Variable.from_numpy_array(query_data).apply(need_grad=True)
    n_params = compute_num_params(G0, gf, T0, D, L)
    initializer_data = (rng.randn(n_params) * 0.01)
    feature = nn.parameter.get_parameter_or_create('F0', (n_params,), initializer_data)
    out0 = F.query_on_voxel_hash(query, feature, G0, gf, T0, L, D)
    out1 = query_on_voxel_hash_composite(query, feature, G0, gf, T0, L, D)
    def _bench_one(out):
        # Synchronize around the timed region so pending GPU work is counted.
        nnabla_ext.cuda.synchronize(device_id=args.device_id)
        st = time.perf_counter()
        for i in range(args.n_iters):
            out.forward()
            out.backward(clear_buffer=True)
        nnabla_ext.cuda.synchronize(device_id=args.device_id)
        et = (time.perf_counter() - st)
        return et
    et0 = _bench_one(out0)
    et1 = _bench_one(out1)
    return (et0, et1)
def get_parent_sentence(doc, char_start, char_end):
    """Return the sentence of `doc` whose character span contains
    [char_start, char_end].

    Scans consecutive sentence start offsets; a span that does not fall
    between two consecutive offsets is attributed to the last sentence.

    Fixes the original's fallback `doc.sentences[i + 1]`, which relied on
    the leaked loop variable and raised NameError for single-sentence
    documents (and indexed one-past-the-match otherwise).
    """
    offsets = [s.abs_char_offsets[0] for s in doc.sentences]
    for i in range(len(offsets) - 1):
        if offsets[i] <= char_start and char_end <= offsets[i + 1]:
            return doc.sentences[i]
    # No interior match: the span starts in (or beyond) the final sentence.
    return doc.sentences[-1]
def svd(A, eps_or_k, rand=True):
    """Interpolative-decomposition SVD of `A` (dense ndarray or LinearOperator).

    `eps_or_k` < 1 is treated as a relative precision eps; otherwise it is
    the approximation rank k. `rand` selects the randomized algorithms for
    dense input (LinearOperator input always uses the randomized path).
    Returns (U, S, V). Raises ValueError when k exceeds min(A.shape), and
    `_TYPE_ERROR` for unsupported input types.
    """
    from scipy.sparse.linalg import LinearOperator
    real = _is_real(A)
    if isinstance(A, np.ndarray):
        if (eps_or_k < 1):
            # Precision-driven dense decomposition.
            eps = eps_or_k
            if rand:
                if real:
                    (U, V, S) = backend.iddp_asvd(eps, A)
                else:
                    (U, V, S) = backend.idzp_asvd(eps, A)
            elif real:
                (U, V, S) = backend.iddp_svd(eps, A)
            else:
                (U, V, S) = backend.idzp_svd(eps, A)
        else:
            # Rank-driven dense decomposition.
            k = int(eps_or_k)
            if (k > min(A.shape)):
                raise ValueError(('Approximation rank %s exceeds min(A.shape) = %s ' % (k, min(A.shape))))
            if rand:
                if real:
                    (U, V, S) = backend.iddr_asvd(A, k)
                else:
                    (U, V, S) = backend.idzr_asvd(A, k)
            elif real:
                (U, V, S) = backend.iddr_svd(A, k)
            else:
                (U, V, S) = backend.idzr_svd(A, k)
    elif isinstance(A, LinearOperator):
        # Matrix-free path: only matvec/rmatvec access to A is needed.
        (m, n) = A.shape
        matvec = (lambda x: A.matvec(x))
        matveca = (lambda x: A.rmatvec(x))
        if (eps_or_k < 1):
            eps = eps_or_k
            if real:
                (U, V, S) = backend.iddp_rsvd(eps, m, n, matveca, matvec)
            else:
                (U, V, S) = backend.idzp_rsvd(eps, m, n, matveca, matvec)
        else:
            k = int(eps_or_k)
            if real:
                (U, V, S) = backend.iddr_rsvd(m, n, matveca, matvec, k)
            else:
                (U, V, S) = backend.idzr_rsvd(m, n, matveca, matvec, k)
    else:
        # `_TYPE_ERROR` is a module-level exception instance.
        raise _TYPE_ERROR
    return (U, S, V)
def parse_if_range_header(value):
    """Parse an If-Range header value into an IfRange object.

    An If-Range carries either an HTTP date or an entity tag, never both;
    an empty/missing value yields an empty IfRange.
    """
    if not value:
        return IfRange()
    date = parse_date(value)
    if date is None:
        return IfRange(unquote_etag(value)[0])
    return IfRange(date=date)
class Result():
    """Model-step result bundle: an outputs tensor plus its loss.

    NOTE(review): annotation-only fields and a `merge` defined without
    `self` suggest decorators (e.g. @dataclass on the class and
    @staticmethod on merge) were stripped from this copy — confirm against
    the original before instantiating directly.
    """
    outputs: torch.Tensor  # model outputs, batched along `batch_dim`
    loss: torch.Tensor     # loss for this batch
    batch_dim = 0          # axis holding the batch dimension

    def plot(self) -> Dict[(str, Any)]:
        """Figures to log for this result; none by default."""
        return {}

    def batch_size(self) -> int:
        """Number of examples in this result's batch."""
        return self.outputs.shape[self.batch_dim]

    def merge(l: List, batch_weights: Optional[List[float]]=None):
        """Merge several Results: weighted-average the losses and stack the
        outputs along the batch dimension; a singleton list is returned as-is."""
        if (len(l) == 1):
            return l[0]
        batch_weights = (batch_weights if (batch_weights is not None) else ([1] * len(l)))
        loss = (sum([(r.loss * w) for (r, w) in zip(l, batch_weights)]) / sum(batch_weights))
        out = torch.stack([r.outputs for r in l], l[0].batch_dim)
        return l[0].__class__(out, loss)
class DauphinTransform(object):
    """Base class for probabilistic text/label transforms.

    The transform fires with probability governed by `get_prob()`;
    subclasses override `transform` to do the actual work.
    """

    def __init__(self, name=None, prob=1.0, level=0):
        self.name = name if name is not None else type(self).__name__
        self.prob = prob
        assert 0 <= level <= 1.0, 'Invalid level, level must be in [0, 1.0].'
        self.level = level

    def transform(self, text, label, **kwargs):
        """Identity by default; subclasses implement the real transformation."""
        return (text, label)

    def __call__(self, text, label, **kwargs):
        # Apply the transform stochastically; otherwise pass through unchanged.
        should_apply = random.random() <= self.get_prob()
        return self.transform(text, label, **kwargs) if should_apply else (text, label)

    def __repr__(self):
        return f'<Transform ({self.name}), prob={self.prob}, level={self.level}>'

    def get_prob(self):
        # prob == 1 means "always fire"; otherwise draw a random threshold.
        return self.prob if self.prob == 1 else random.random()

    def get_level(self):
        return random.randint(0, 10 ** PRECISION) / float(10 ** PRECISION)
def conv3x3(in_planes, out_planes, stride=1, atrous=1):
    """3x3 convolution, no bias, with padding matched to the dilation rate
    so the spatial size is preserved at stride 1."""
    padding = 1 * atrous
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=atrous,
        bias=False,
    )
class SimModelTestCase(unittest.TestCase):
    """Batch similarity-model regression tests: compute Spearman correlation
    on several Chinese STS datasets for different embedding backends."""

    def test_w2v_sim_batch(self):
        """Word2Vec cosine similarity across the standard STS test sets."""
        model_name = 'w2v-light-tencent-chinese'
        print(model_name)
        m = Similarity(model_name, similarity_type=SimilarityType.COSINE, embedding_type=EmbeddingType.WORD2VEC)
        test_path = os.path.join(pwd_path, '../examples/data/STS-B/STS-B.test.data')
        get_corr(m, test_path)
        test_path = os.path.join(pwd_path, '../examples/data/ATEC/ATEC.test.data')
        get_corr(m, test_path)
        test_path = os.path.join(pwd_path, '../examples/data/BQ/BQ.test.data')
        get_corr(m, test_path)
        test_path = os.path.join(pwd_path, '../examples/data/LCQMC/LCQMC.test.data')
        get_corr(m, test_path)
        test_path = os.path.join(pwd_path, '../examples/data/PAWSX/PAWSX.test.data')
        get_corr(m, test_path)

    def test_sbert_sim_stsb_batch(self):
        """Sentence-BERT (MiniLM) cosine similarity across the same test sets."""
        model_name = 'sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2'
        print(model_name)
        m = Similarity(model_name, similarity_type=SimilarityType.COSINE, embedding_type=EmbeddingType.BERT, encoder_type='MEAN')
        test_path = os.path.join(pwd_path, '../examples/data/STS-B/STS-B.test.data')
        get_corr(m, test_path)
        test_path = os.path.join(pwd_path, '../examples/data/ATEC/ATEC.test.data')
        get_corr(m, test_path)
        test_path = os.path.join(pwd_path, '../examples/data/BQ/BQ.test.data')
        get_corr(m, test_path)
        test_path = os.path.join(pwd_path, '../examples/data/LCQMC/LCQMC.test.data')
        get_corr(m, test_path)
        test_path = os.path.join(pwd_path, '../examples/data/PAWSX/PAWSX.test.data')
        get_corr(m, test_path)

    def test_set_sim_model_batch(self):
        """text2vec-base model: correlations on five STS sets plus the two
        SOHU jsonl sets, reporting the average Spearman correlation."""
        m = Similarity('shibing624/text2vec-base-chinese', similarity_type=SimilarityType.COSINE, embedding_type=EmbeddingType.BERT, encoder_type='MEAN')
        print(m)
        test_path = os.path.join(pwd_path, '../examples/data/STS-B/STS-B.test.data')
        c1 = get_corr(m, test_path)
        test_path = os.path.join(pwd_path, '../examples/data/ATEC/ATEC.test.data')
        c2 = get_corr(m, test_path)
        test_path = os.path.join(pwd_path, '../examples/data/BQ/BQ.test.data')
        c3 = get_corr(m, test_path)
        test_path = os.path.join(pwd_path, '../examples/data/LCQMC/LCQMC.test.data')
        c4 = get_corr(m, test_path)
        test_path = os.path.join(pwd_path, '../examples/data/PAWSX/PAWSX.test.data')
        c5 = get_corr(m, test_path)
        test_path = os.path.join(pwd_path, '../examples/data/SOHU/dd-test.jsonl')
        data = load_jsonl(test_path)
        (sents1, sents2, labels) = ([], [], [])
        for item in data:
            sents1.append(item['sentence1'])
            sents2.append(item['sentence2'])
            labels.append(item['label'])
        t1 = time()
        # get_scores returns a pairwise matrix; the diagonal holds the
        # similarity of each aligned (sentence1, sentence2) pair.
        scores = m.get_scores(sents1, sents2)
        sims = []
        for i in range(len(sents1)):
            sims.append(scores[i][i])
        spend_time = max((time() - t1), 1e-09)
        corr = compute_spearmanr(sims, labels)
        print('scores:', sims[:10])
        print('labels:', labels[:10])
        print(f'{test_path} spearman corr:', corr)
        print('spend time:', spend_time, ' seconds count:', (len(sents1) * 2), 'qps:', ((len(sents1) * 2) / spend_time))
        c6 = corr
        test_path = os.path.join(pwd_path, '../examples/data/SOHU/dc-test.jsonl')
        data = load_jsonl(test_path)
        (sents1, sents2, labels) = ([], [], [])
        for item in data:
            sents1.append(item['sentence1'])
            sents2.append(item['sentence2'])
            labels.append(item['label'])
        t1 = time()
        scores = m.get_scores(sents1, sents2)
        sims = []
        for i in range(len(sents1)):
            sims.append(scores[i][i])
        spend_time = max((time() - t1), 1e-09)
        corr = compute_spearmanr(sims, labels)
        print('scores:', sims[:10])
        print('labels:', labels[:10])
        print(f'{test_path} spearman corr:', corr)
        print('spend time:', spend_time, ' seconds count:', (len(sents1) * 2), 'qps:', ((len(sents1) * 2) / spend_time))
        c7 = corr
        print('average spearman corr:', (((((((c1 + c2) + c3) + c4) + c5) + c6) + c7) / 7))

    # The following tests are placeholders for additional model backends.
    def test_uer_sbert_nli_model(self):
        pass

    def test_ernie3_0_nano_model(self):
        pass

    def test_ernie3_0_base_model(self):
        pass

    def test_ernie3_0_xbase_model(self):
        pass

    def test_hfl_chinese_bert_wwm_ext_model(self):
        pass

    def test_hfl_chinese_roberta_wwm_ext_model(self):
        pass

    def test_hfl_chinese_macbert_large_model(self):
        pass

    def test_m3e_base_model(self):
        pass

    def test_bge_large_zh_noinstruct_model(self):
        pass

    def test_bge_large_zh_noinstruct_cosent_model(self):
        pass

    def test_bge_large_zh_noinstruct_cosent_passage_model(self):
        pass

    def test_bge_large_zh_noinstruct_bge_model(self):
        pass
def write_model_card(hf_model_name: str, repo_root=DEFAULT_REPO, save_dir=Path('marian_converted'), dry_run=False, extra_metadata={}) -> str:
    """Assemble a model card (README.md + metadata.json) for a converted
    Marian model from the corresponding OPUS readme.

    Parameters:
        hf_model_name: HF model name, optionally prefixed with the org name.
        repo_root: 'OPUS-MT-train' or 'Tatoeba-Challenge' checkout directory.
        save_dir: destination directory for the generated card files.
        dry_run: if True, return (content, metadata) without writing files.
        extra_metadata: extra key/value pairs merged into the metadata dict
            (kept as a mutable default for interface compatibility; it is
            only read, never mutated, here).

    Returns (content, metadata); note the `-> str` annotation understates
    this. Raises ValueError for an unknown repo_root or a missing readme.
    """
    import pandas as pd
    hf_model_name = remove_prefix(hf_model_name, ORG_NAME)
    opus_name: str = convert_hf_name_to_opus_name(hf_model_name)
    if (repo_root not in ('OPUS-MT-train', 'Tatoeba-Challenge')):
        raise ValueError(f'Repos root is {repo_root}. Expected either OPUS-MT-train or Tatoeba-Challenge')
    opus_readme_path = Path(repo_root).joinpath('models', opus_name, 'README.md')
    if (not opus_readme_path.exists()):
        raise ValueError(f'Readme file {opus_readme_path} not found')
    (opus_src, opus_tgt) = [x.split('+') for x in opus_name.split('-')]
    # Fix: this URL literal was truncated in this copy (`readme_url = f'`);
    # restored from the upstream Helsinki-NLP repository layout.
    readme_url = f'https://github.com/Helsinki-NLP/{repo_root}/tree/master/models/{opus_name}/README.md'
    (s, t) = (','.join(opus_src), ','.join(opus_tgt))
    metadata = {'hf_name': hf_model_name, 'source_languages': s, 'target_languages': t, 'opus_readme_url': readme_url, 'original_repo': repo_root, 'tags': ['translation']}
    metadata.update(extra_metadata)
    metadata.update(get_system_metadata(repo_root))
    # src_name/tgt_name are expected from extra_metadata / system metadata.
    extra_markdown = f'''### {hf_model_name}
* source group: {metadata['src_name']}
* target group: {metadata['tgt_name']}
* OPUS readme: [{opus_name}]({readme_url})
'''
    content = opus_readme_path.open().read()
    # Keep only the section for the most recent model in the OPUS readme.
    content = content.split('\n# ')[(- 1)]
    splat = content.split('*')[2:]
    print(splat[3])
    content = '*'.join(splat)
    content = (((FRONT_MATTER_TEMPLATE.format(metadata['src_alpha2']) + extra_markdown) + '\n* ') + content.replace('download', 'download original weights'))
    items = '\n\n'.join([f'- {k}: {v}' for (k, v) in metadata.items()])
    sec3 = ('\n### System Info: \n' + items)
    content += sec3
    if dry_run:
        return (content, metadata)
    sub_dir = (save_dir / f'opus-mt-{hf_model_name}')
    sub_dir.mkdir(exist_ok=True)
    dest = (sub_dir / 'README.md')
    dest.open('w').write(content)
    pd.Series(metadata).to_json((sub_dir / 'metadata.json'))
    return (content, metadata)
def test_fails_on_negative_limit():
    """`publish --limit -1` must be rejected by the CLI parser with SystemExit."""
    parser = _get_command_line_parser(['DemoDetector'], [], [])
    argv = ['publish', 'ex2', 'DemoDetector', '-s', 'site', '--limit', '-1']
    assert_raises(SystemExit, parser.parse_args, argv)
def find_thres(cm, percentage):
    """Return the connectivity value at the (1 - percentage) rank of the sorted
    neighboring-connectivity values of *cm*."""
    rank = int(len(cm) * (1.0 - percentage)) - 1
    ordered = sorted(get_neighboring_connectivity(cm))
    return ordered[rank]
def slice_data_grad_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes, start=None, stop=None, step=None):
    """Backward of slice-data: apply the same slice spec to the upstream gradient."""
    upstream = grad_inputs[0]
    return F.slice(upstream, start, stop, step)
def prepare_data(config, model, test_run):
    """Build train/dev DataLoaders for a seisbench-style waveform dataset.

    Args:
        config: dict with at least 'data' (dataset name); optional
            'batch_size', 'num_workers', 'restrict_to_phase',
            'training_fraction'.
        model: provides get_train_augmentations()/get_val_augmentations().
        test_run: if True, restrict train/dev to 500 traces each.

    Returns:
        (train_loader, dev_loader) tuple of DataLoaders.
    """
    batch_size = config.get('batch_size', 1024)
    num_workers = config.get('num_workers', default_workers)
    dataset = data.get_dataset_by_name(config['data'])(sampling_rate=100, component_order='ZNE', dimension_order='NCW', cache='full')
    # optionally keep only picks of the requested phase(s)
    restrict_to_phase = config.get('restrict_to_phase', None)
    if (restrict_to_phase is not None):
        mask = generate_phase_mask(dataset, restrict_to_phase)
        dataset.filter(mask, inplace=True)
    if ('split' not in dataset.metadata.columns):
        # dataset ships no split: fall back to a 60/10/30 positional split
        logging.warning('No split defined, adding auxiliary split.')
        split = np.array((['train'] * len(dataset)))
        split[int((0.6 * len(dataset))):int((0.7 * len(dataset)))] = 'dev'
        split[int((0.7 * len(dataset))):] = 'test'
        dataset._metadata['split'] = split
    train_data = dataset.train()
    dev_data = dataset.dev()
    if test_run:
        # tiny subsets for smoke tests
        train_mask = np.zeros(len(train_data), dtype=bool)
        train_mask[:500] = True
        train_data.filter(train_mask, inplace=True)
        dev_mask = np.zeros(len(dev_data), dtype=bool)
        dev_mask[:500] = True
        dev_data.filter(dev_mask, inplace=True)
    # sub-sample the training split if requested (dev is left untouched)
    training_fraction = config.get('training_fraction', 1.0)
    apply_training_fraction(training_fraction, train_data)
    # cache waveforms in memory before building generators
    train_data.preload_waveforms(pbar=True)
    dev_data.preload_waveforms(pbar=True)
    train_generator = sbg.GenericGenerator(train_data)
    dev_generator = sbg.GenericGenerator(dev_data)
    train_generator.add_augmentations(model.get_train_augmentations())
    dev_generator.add_augmentations(model.get_val_augmentations())
    # drop_last avoids a ragged final training batch; dev keeps all samples
    train_loader = DataLoader(train_generator, batch_size=batch_size, shuffle=True, num_workers=num_workers, worker_init_fn=worker_seeding, drop_last=True)
    dev_loader = DataLoader(dev_generator, batch_size=batch_size, num_workers=num_workers, worker_init_fn=worker_seeding)
    return (train_loader, dev_loader)
def p_simple_statement(s, first_statement=0):
    """Parse one simple statement, dispatching on the current token s.sy.

    'from' is handled separately because its parser needs first_statement;
    any unrecognised token falls through to expression/assignment parsing.
    """
    if s.sy == 'from':
        return p_from_import_statement(s, first_statement=first_statement)
    handlers = {
        'global': p_global_statement,
        'nonlocal': p_nonlocal_statement,
        'print': p_print_statement,
        'exec': p_exec_statement,
        'del': p_del_statement,
        'break': p_break_statement,
        'continue': p_continue_statement,
        'return': p_return_statement,
        'raise': p_raise_statement,
        'import': p_import_statement,
        'cimport': p_import_statement,
        'yield': p_yield_statement,
        'assert': p_assert_statement,
        'pass': p_pass_statement,
    }
    return handlers.get(s.sy, p_expression_or_assignment)(s)
def t5_3b_tied_lmheads_512_4_8p_bw12_async_squad1_pipedream():
    """Configuration dict for the t5-3b stateless/tied-LM-heads pipeline run."""
    cfg = {
        'model_type': 't5_stateless',
        'model_name_or_path': 't5-3b',
        'do_lower_case': False,
        'output_past': False,
        'output_attentions': False,
        'output_hidden_states': False,
        'do_resize_token_embedding': True,
        'explicitly_set_dict': {'output_only': True, 'output_attentions': False, 'precomputed_masks': True, 'output_hidden_states': False},
        'stateless_tied': True,
    }
    return cfg
def add_prefix(name, prefix=None, split='.'):
    """Return *name* prefixed with '<prefix><split>'; unchanged when prefix is None."""
    if prefix is None:
        return name
    return '{}{}{}'.format(prefix, split, name)
def dictConvert(inDict):
    """Convert values of the form '<a>_<f1>_<f2>_<b>' into
    [a, int(100*f1), int(100*f2), b], keyed as in the input dict.

    Only the first four '_'-separated fields are used; extras are ignored.
    """
    converted = {}
    for key, raw in inDict.items():
        fields = raw.split('_')
        converted[key] = [fields[0], int(100 * float(fields[1])), int(100 * float(fields[2])), fields[3]]
    return converted
def has_onnx(model_type):
    """Return True iff the configuration module of *model_type* defines a
    matching `...OnnxConfig` class next to its `...Config` class."""
    config_mapping = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING
    if model_type not in config_mapping:
        return False
    config = config_mapping[model_type]
    # walk from the top-level transformers module down to the config's module
    module = transformers_module
    for attr in config.__module__.split('.')[1:]:
        module = getattr(module, attr)
    expected_name = config.__name__.replace('Config', 'OnnxConfig')
    return hasattr(module, expected_name)
def _reload_instrumentation_loader(coverage_metrics: set[config.CoverageMetric], dynamic_constant_provider: (DynamicConstantProvider | None), tracer: ExecutionTracer):
    """Update the InstrumentationFinder on sys.meta_path and reload the module
    under test so it is re-instrumented with the new coverage metrics."""
    module = importlib.import_module(config.configuration.module_name)
    tracer.current_thread_identifier = threading.current_thread().ident
    # the first InstrumentationFinder on the meta path owns instrumentation
    finder = next((f for f in sys.meta_path if isinstance(f, InstrumentationFinder)), None)
    assert finder is not None
    finder.update_instrumentation_metrics(tracer=tracer, coverage_metrics=coverage_metrics, dynamic_constant_provider=dynamic_constant_provider)
    importlib.reload(module)
class EllipticCurvePoint_finite_field(EllipticCurvePoint_field):
    """A point on an elliptic curve defined over a finite field."""

    def _magma_init_(self, magma):
        """Return a Magma input string for this point as E![x,y]."""
        E = self.curve()._magma_init_(magma)
        (x, y) = self.xy()
        return ('%s![%s,%s]' % (E, x, y))

    def _acted_upon_(self, other, side):
        """Scalar multiplication k*P, delegated to PARI's ellmul when possible.

        Falls back to the generic IntegerMulAction when PARI fails; a failed
        modular inversion inside PARI is turned into a ZeroDivisionError
        exposing the induced factorisation of the characteristic.
        """
        k = ZZ(other)
        E = self.curve()
        try:
            pariQ = pari.ellmul(E, self, k)
        except PariError as err:
            # PARI reports a non-invertible element mod N; extract it and
            # report the factor split N = N1 * N2 it reveals
            if (str(err.errdata().component(1)) == 'Fp_inv'):
                val = err.errdata().component(2)
                a = val.lift()
                N = val.mod()
                N1 = N.gcd(a)
                N2 = (N // N1)
                raise ZeroDivisionError(f'Inverse of {a} does not exist (characteristic = {N} = {N1}*{N2})')
            pariQ = None  # any other PARI error: fall back to the generic action
        if (pariQ is not None):
            if (pariQ == [0]):
                vQ = 0  # PARI's encoding of the point at infinity
            else:
                assert (len(pariQ) == 2)
                # append z=1 to build affine coordinates over the base ring
                vQ = Sequence((tuple(pariQ) + (1,)), E.base_ring())
            Q = EllipticCurvePoint_finite_field(E, vQ, check=False)
        else:
            Q = IntegerMulAction(ZZ, self.parent())._act_(k, self)
        # propagate a cached order: ord(k*P) = n / gcd(n, k)
        n = getattr(self, '_order', None)
        if (n is not None):
            Q._order = (n // n.gcd(k))
        return Q

    def discrete_log(self, Q):
        """Return k with k*self == Q (elliptic-curve discrete log), via PARI.

        Raises ValueError if Q is not on the same curve or no solution exists.
        """
        if (Q not in self.parent()):
            raise ValueError('not a point on the same curve')
        n = self.order()
        if (n * Q):  # nonzero point => ord(Q) does not divide ord(self)
            raise ValueError('ECDLog problem has no solution (order of Q does not divide order of P)')
        E = self.curve()
        F = E.base_ring()
        p = F.cardinality()
        if (F.is_prime_field() and (n == p)):
            # anomalous case |E(F_p)| = p: PARI's elllog does not apply,
            # use the p-adic lift method instead
            return self.padic_elliptic_logarithm(Q, p)
        elif (hasattr(E, '_order') and (E._order.gcd((n ** 2)) == n)):
            pass  # solvability already guaranteed; skip the Weil-pairing check
        elif (self.weil_pairing(Q, n) != 1):
            raise ValueError('ECDLog problem has no solution (non-trivial Weil pairing)')
        return ZZ(pari.elllog(self.curve(), Q, self, n))

    def padic_elliptic_logarithm(self, Q, p):
        """Discrete log on an anomalous curve via a p-adic (precision-2) lift.

        NOTE(review): presumably assumes self has order p over the prime
        field F_p — discrete_log only calls it in that case; confirm before
        using directly.
        """
        E = self.curve()
        F = E.base()
        if Q.is_zero():
            k = 0
        else:
            for k in range(0, p):
                # lift the curve to Q_p, shifting a-invariants by k*p until
                # both lifted points survive multiplication by p
                Eqp = EllipticCurve(Qp(p, 2), [(ZZ(t) + (k * p)) for t in E.a_invariants()])
                P_Qps = Eqp.lift_x(ZZ(self.xy()[0]), all=True)
                for P_Qp in P_Qps:
                    if (F(P_Qp.xy()[1]) == self.xy()[1]):
                        break
                Q_Qps = Eqp.lift_x(ZZ(Q.xy()[0]), all=True)
                for Q_Qp in Q_Qps:
                    if (F(Q_Qp.xy()[1]) == Q.xy()[1]):
                        break
                pP = (p * P_Qp)
                pQ = (p * Q_Qp)
                if (pP.is_zero() or pQ.is_zero()):
                    continue  # degenerate lift; try the next coefficient shift
                else:
                    break
            (x_P, y_P) = pP.xy()
            (x_Q, y_Q) = pQ.xy()
            # phi = -x/y (formal-group parameter); the DLP reduces to division
            phi_P = (- (x_P / y_P))
            phi_Q = (- (x_Q / y_Q))
            k = (phi_Q / phi_P)
        return ZZ((k % p))

    def has_finite_order(self):
        """Always True: every point over a finite field has finite order."""
        return True

    def order(self):
        """Return (and cache) the order of this point, via PARI's ellorder."""
        try:
            return self._order
        except AttributeError:
            pass
        E = self.curve()
        if (getattr(E, '_order', None) is None):
            # cache the group order on the curve; ellorder can use it as a hint
            E._order = Integer(E.pari_curve().ellcard())
        self._order = Integer(E.pari_curve().ellorder(self, E._order))
        return self._order

    additive_order = order  # alias: the additive-group order of the point
def adjust_learning_rate(optimizer, epoch, lr=0.01, step1=30, step2=60, step3=90):
    """Step-decay schedule: scale *lr* by 0.1/0.01/0.001 past step1/step2/step3.

    Args:
        optimizer: any object with a `param_groups` list of dicts ('lr' key).
        epoch: current epoch number.
        lr: base learning rate.
        step1, step2, step3: epoch thresholds for each decay step.

    Returns:
        The learning rate that was applied (also written into every param group).
        Returning it is new but backward-compatible (previously None).
    """
    if epoch >= step3:
        lr = lr * 0.001
    elif epoch >= step2:
        lr = lr * 0.01
    elif epoch >= step1:
        lr = lr * 0.1
    # else: keep the base lr (removed the redundant `else: lr = lr`)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr
def h_maxima(image, h, footprint=None):
    """Determine all maxima of *image* with height >= h (uint8 mask output).

    Works by subtracting h from the image, grayscale-reconstructing by
    dilation under the original, and thresholding the residue at h.

    Args:
        image: input array (integer or floating dtype).
        h: minimum height of a maximum; must be nonzero.
        footprint: neighborhood passed through to the reconstruction.

    Returns:
        uint8 array, 1 at h-maxima and 0 elsewhere.

    Raises:
        ValueError: if h == 0 (ambiguous; local_maxima() covers that case).
    """
    # no maximum can have height > the image's total dynamic range
    if (h > np.ptp(image)):
        return np.zeros(image.shape, dtype=np.uint8)
    if (np.issubdtype(type(h), np.floating) and np.issubdtype(image.dtype, np.integer)):
        if ((h % 1) != 0):
            # fractional h on an integer image forces a float conversion
            warn('possible precision loss converting image to floating point. To silence this warning, ensure image and h have same data type.', stacklevel=2)
            image = image.astype(float)
        else:
            h = image.dtype.type(h)
    if (h == 0):
        raise ValueError('h = 0 is ambiguous, use local_maxima() instead?')
    if np.issubdtype(image.dtype, np.floating):
        # shift by a little more than h so float rounding cannot re-merge
        # maxima during reconstruction (2 ulps of headroom per pixel)
        resolution = ((2 * np.finfo(image.dtype).resolution) * np.abs(image))
        shifted_img = ((image - h) - resolution)
    else:
        # integer path: clipped subtraction avoids wrap-around/underflow
        shifted_img = _subtract_constant_clip(image, h)
    rec_img = grayreconstruct.reconstruction(shifted_img, image, method='dilation', footprint=footprint)
    residue_img = (image - rec_img)
    return (residue_img >= h).astype(np.uint8)
def test_exclusive_policy_negative_examples_1(digraph, features_1d, labels):
    """Negative examples for node '1' under the exclusive policy."""
    policy = ExclusivePolicy(digraph, features_1d, labels)
    expected = [False, False, True, True, True, True, True, True]
    assert_array_equal(expected, policy.negative_examples('1'))
# NOTE(review): the decorator below lost its `@pytest.mark` prefix during
# extraction (the line began with `.parametrize`); restored — confirm the file
# imports pytest.
@pytest.mark.parametrize('name, location, exists', (('X-Key', 'header', True), ('X-Key2', 'header', False), ('X-Key', 'cookie', False), ('X-Key', 'query', False), ('key', 'query', True), ('bla', 'body', False), ('body', 'body', True), ('unknown', 'unknown', False)))
def test_get_parameter(empty_open_api_3_schema, name, location, exists):
    """get_parameter must find declared parameters/bodies and return None otherwise."""
    empty_open_api_3_schema['paths'] = {'/data/': {'get': {'parameters': [{'name': name, 'in': location, 'required': True, 'schema': {'type': 'string'}} for (name, location) in (('X-Key', 'header'), ('key', 'query'))], 'requestBody': {'required': True, 'content': {'text/plain': {'schema': {'type': 'string'}}, 'application/json': {'schema': {'type': 'array'}}}}, 'responses': {'200': {'description': 'OK'}}}}}
    empty_open_api_3_schema['components'] = {'securitySchemes': {'ApiKeyAuth': {'type': 'apiKey', 'name': 'X-Key', 'in': 'header'}}}
    empty_open_api_3_schema['security'] = [{'ApiKeyAuth': []}]
    schema = schemathesis.from_dict(empty_open_api_3_schema, validate_schema=True)
    parameter = schema['/data/']['GET'].get_parameter(name, location)
    assert (parameter is not None) is exists
    if exists:
        assert parameter.name == name
        assert parameter.location == location
def clear_class_registry():
    """Reset TorchScript's global class/type registries (typically between tests).

    NOTE(review): relies on private torch.jit/_C internals; verify these
    attributes still exist for the installed torch version.
    """
    torch._C._jit_clear_class_registry()
    torch.jit._recursive.concrete_type_store = torch.jit._recursive.ConcreteTypeStore()
    torch.jit._state._clear_class_state()
class TileDescription():
    """Describes a kernel tiling: threadblock shape, pipeline stages, warp
    layout, math instruction, supported compute-capability range, and (for
    SM90+) the cluster shape."""

    def __init__(self, threadblock_shape, stages, warp_count, math_instruction, min_compute, max_compute, cluster_shape=None):
        """
        Args:
            threadblock_shape: [M, N, K] tile computed by one threadblock.
            stages: number of software-pipeline stages.
            warp_count: warps per threadblock along each dimension.
            math_instruction: math instruction descriptor used by the tile.
            min_compute, max_compute: supported compute-capability range.
            cluster_shape: threadblock cluster shape; defaults to [1, 1, 1].
                (Was a mutable default argument `[1, 1, 1]` shared across
                instances; now a fresh list per instance.)
        """
        self.threadblock_shape = threadblock_shape
        self.stages = stages
        self.warp_count = warp_count
        self.math_instruction = math_instruction
        self.minimum_compute_capability = min_compute
        self.maximum_compute_capability = max_compute
        self.cluster_shape = cluster_shape if cluster_shape is not None else [1, 1, 1]

    def procedural_name(self):
        """Return the tile's name component for generated kernel names.

        SM90+ includes the cluster shape; older architectures use the legacy
        '%dx%d_%dx%d' form over (M, N, K, stages).
        """
        if (self.minimum_compute_capability >= 90):
            return '{tbm}x{tbn}x{tbk}_{cm}x{cn}x{ck}_{s}'.format(tbm=self.threadblock_shape[0], tbn=self.threadblock_shape[1], tbk=self.threadblock_shape[2], cm=self.cluster_shape[0], cn=self.cluster_shape[1], ck=self.cluster_shape[2], s=self.stages)
        else:
            return ('%dx%d_%dx%d' % (self.threadblock_shape[0], self.threadblock_shape[1], self.threadblock_shape[2], self.stages))
def test__minimize_assertions():
    """_minimize_assertions must visit the result exactly once with an
    AssertionMinimization visitor."""
    config.configuration.test_case_output.assertion_generation = config.AssertionGenerator.CHECKED_MINIMIZING
    result = MagicMock()
    with mock.patch.object(result, 'accept') as accept_mock:
        gen._minimize_assertions(result)
    accept_mock.assert_called_once()
    visitor = accept_mock.call_args.args[0]
    assert isinstance(visitor, pp.AssertionMinimization)
def order_sim(im, s):
    """Order-violation similarity between image and sentence embeddings.

    For each (sentence, image) pair, penalises the positive part of
    (s - im) with an L2 norm; returns the negated penalty, transposed to
    shape (n_images, n_sentences).
    """
    n_cap, n_img, dim = s.size(0), im.size(0), s.size(1)
    diff = s.unsqueeze(1).expand(n_cap, n_img, dim) - im.unsqueeze(0).expand(n_cap, n_img, dim)
    penalty = diff.clamp(min=0).pow(2).sum(2).sqrt()
    return -penalty.t()
def main():
    """Demo: enumerate maximal satisfiable subsets of a small soft-constraint set."""
    x, y = Reals('x y')
    soft_constraints = [
        (x > 2),
        (x < 1),
        (x < 0),
        Or(((x + y) > 0), (y < 0)),
        Or((y >= 0), (x >= 0)),
        Or((y < 0), (x < 0)),
        Or((y > 0), (x < 0)),
    ]
    # no hard constraints: everything is soft
    solver = MSSSolver(BoolVal(True), soft_constraints)
    for lits in enumerate_sets(solver):
        print('%s' % lits)
class AudioPlayer():
    """Callback-driven WAV playback built on PyAudio.

    The stream is opened paused; use play()/pause() to control it, seek() to
    jump to a time offset, and close() to release all resources.
    """

    def __init__(self, wav):
        # wav: path (or file-like object) accepted by wave.open
        self.p = pyaudio.PyAudio()
        self.pos = 0  # playback position, in frames
        self.stream = None
        self._open(wav)

    def callback(self, in_data, frame_count, time_info, status):
        """PyAudio stream callback: feed the next frames and track position."""
        data = self.wf.readframes(frame_count)
        self.pos += frame_count
        return (data, pyaudio.paContinue)

    def _open(self, wav):
        """Open the wave file and create a (paused) output stream matching it."""
        self.wf = wave.open(wav, 'rb')
        self.stream = self.p.open(format=self.p.get_format_from_width(self.wf.getsampwidth()), channels=self.wf.getnchannels(), rate=self.wf.getframerate(), output=True, stream_callback=self.callback)
        self.pause()  # start paused; caller decides when to play

    def play(self):
        """Resume playback."""
        self.stream.start_stream()

    def pause(self):
        """Pause playback (stream stays open)."""
        self.stream.stop_stream()

    def seek(self, seconds=0.0):
        """Jump to *seconds* from the start of the file."""
        sec = (seconds * self.wf.getframerate())
        self.pos = int(sec)
        self.wf.setpos(int(sec))

    def time(self):
        """Current playback position in seconds."""
        return (float(self.pos) / self.wf.getframerate())

    def playing(self):
        """True while the stream is actively playing."""
        return self.stream.is_active()

    def close(self):
        """Release the stream, the wave file, and the PyAudio instance."""
        self.stream.close()
        self.wf.close()
        self.p.terminate()
def _configure_logging(args):
    """Configure root logging from parsed CLI args.

    DEBUG level when args.debug is set, INFO otherwise; logs go to
    args.log_file when given, else to stderr.
    """
    level = logging.DEBUG if args.debug else logging.INFO
    kwargs = {
        'format': '%(asctime)s %(levelname)-8s %(message)s',
        'datefmt': '%Y-%m-%d %H:%M',
        'level': level,
    }
    if args.log_file is not None:
        kwargs['filename'] = args.log_file
    logging.basicConfig(**kwargs)
def get_sentences_html(doc, language):
    """Render every sentence of *doc* as a displaCy dependency-parse HTML string.

    For right-to-left languages the word order is reversed and head indices
    are mirrored accordingly; roots (head == 0) point at themselves.
    """
    html_strings = []
    nlp = spacy.blank('en')
    rtl = is_right_to_left(language)
    parsed_docs = []
    for sentence in doc.sentences:
        words, lemmas, heads, deps, tags = [], [], [], [], []
        sent_len = len(sentence.words)
        word_iter = reversed(sentence.words) if rtl else sentence.words
        for word in word_iter:
            words.append(word.text)
            lemmas.append(word.lemma)
            deps.append(word.deprel)
            tags.append(word.upos)
            # head == 0 marks the root: anchor the arc on the word itself
            anchor = word.id if word.head == 0 else word.head
            # RTL mirrors indices (sent_len - anchor); LTR is 0-based (anchor - 1)
            heads.append(sent_len - anchor if rtl else anchor - 1)
        parsed_docs.append(Doc(nlp.vocab, words=words, lemmas=lemmas, heads=heads, deps=deps, pos=tags))
    for parsed in parsed_docs:
        html_strings.append(displacy.render(parsed, style='dep', options={'compact': True, 'word_spacing': 30, 'distance': 100, 'arrow_spacing': 20}, jupyter=False))
    return html_strings
class AllToAllOp(torch.autograd.Function):
    """Autograd wrapper around torch.distributed.all_to_all_single.

    Forward scatters/gathers rows according to the split sizes; backward runs
    the inverse all-to-all (splits swapped) on the incoming gradient.
    """

    # NOTE(review): torch.autograd.Function requires forward/backward to be
    # static methods; the decorators were missing in the original source.
    @staticmethod
    def forward(ctx, x, output_split_sizes, input_split_sizes, group, async_op):
        out = torch.empty((sum(output_split_sizes),) + x.shape[1:], device=x.device, dtype=x.dtype)
        # stash what backward needs to invert the exchange
        ctx.input_shape = x.shape
        ctx.output_split_sizes = output_split_sizes
        ctx.input_split_sizes = input_split_sizes
        ctx.group = group
        handle = torch.distributed.all_to_all_single(out, x, output_split_sizes=output_split_sizes, input_split_sizes=input_split_sizes, group=group, async_op=async_op)
        # handle is None for sync calls, a Work object for async ones
        return (out, handle)

    @staticmethod
    def backward(ctx, grad, _):
        if ctx.needs_input_grad[0]:
            out = torch.empty(ctx.input_shape, device=grad.device, dtype=grad.dtype)
            # inverse exchange: input/output split sizes are swapped
            torch.distributed.all_to_all_single(out, grad, output_split_sizes=ctx.input_split_sizes, input_split_sizes=ctx.output_split_sizes, group=ctx.group)
            return (out, None, None, None, None)
        return (None, None, None, None, None)
def getSubsetCore(num_samples, seed, embeddings_file, labels_file, balanced):
    """Load a random subset of embeddings with their aligned labels.

    Args:
        num_samples: total number of rows to sample.
        seed: numpy RNG seed, for reproducible sampling.
        embeddings_file: gzip CSV of embeddings (header line first).
        labels_file: CSV with 'btype' and 'rtype' integer columns.
        balanced: None for uniform sampling, or 'btype'/'rtype' to sample an
            equal number of rows per class of that column.

    Returns:
        (data, labels): float32 embeddings DataFrame and the label rows
        aligned to its index.

    Raises:
        ValueError: if balanced is neither None, 'btype' nor 'rtype'.
    """
    labels_raw = pd.read_csv(labels_file)
    labels_raw = labels_raw.astype('int32')
    labels_raw['btype'] = labels_raw['btype'].values.astype('int8')
    labels_raw['rtype'] = labels_raw['rtype'].values.astype('int8')
    # classes of interest; everything else is collapsed to -1 ("ignore")
    btype_targets = [1, 2, 4]
    rtype_targets = [3, 4, 5]
    labels_raw.loc[~labels_raw['btype'].isin(btype_targets), 'btype'] = -1
    labels_raw.loc[~labels_raw['rtype'].isin(rtype_targets), 'rtype'] = -1
    # remap the surviving classes to contiguous ids 0..k-1
    for new_class, old_class in enumerate(btype_targets):
        labels_raw.loc[labels_raw['btype'] == old_class, 'btype'] = new_class
    for new_class, old_class in enumerate(rtype_targets):
        labels_raw.loc[labels_raw['rtype'] == old_class, 'rtype'] = new_class
    np.random.seed(seed)  # single seed call (the original reseeded twice, a no-op)
    if balanced is not None:  # fixed: was `balanced != None`
        if balanced not in ('btype', 'rtype'):
            # was a bare assert, which vanishes under `python -O`
            raise ValueError(f'balanced must be btype or rtype, got {balanced!r}')
        n_samples_class = num_samples // len(labels_raw[balanced].unique())
        tosample = []
        for clazz in labels_raw[balanced].unique():
            if clazz != -1:
                urn = np.where(labels_raw[balanced] == clazz)[0]
                tosample.append(np.random.choice(urn, n_samples_class, replace=False))
        tosample = np.concatenate(tosample)
    else:
        tosample = np.random.choice(len(labels_raw), num_samples, replace=False)
    # set for O(1) membership while scanning the (potentially huge) file
    sampled_rows = set(tosample.tolist())
    subset = []
    with gzip.open(embeddings_file, 'rb') as f:
        header = f.readline().decode('ascii').replace('\n', '')
        for i, line in enumerate(f):
            if i in sampled_rows:
                values = line.decode('ascii').replace('\n', '').split(',')
                subset.append([float(v) for v in values])
    aheader = header.replace(' ', '').split(',')
    aheader = aheader[:len(subset[0])]
    data = pd.DataFrame(subset, columns=aheader)
    create_index(data, remove_meta=True)
    data = data.astype('float32')
    create_index(labels_raw)
    labels = labels_raw.loc[data.index]
    return (data, labels)
def complete_text(prompt, log_file, model, **kwargs):
    """Dispatch a text-completion request to the backend implied by *model*:
    Claude models, CRFM models (contain '/'), or OpenAI otherwise."""
    if model.startswith('claude'):
        return complete_text_claude(prompt, stop_sequences=[anthropic.HUMAN_PROMPT, 'Observation:'], log_file=log_file, model=model, **kwargs)
    if '/' in model:
        return complete_text_crfm(prompt, stop_sequences=['Observation:'], log_file=log_file, model=model, **kwargs)
    return complete_text_openai(prompt, stop_sequences=['Observation:'], log_file=log_file, model=model, **kwargs)
def load_audio_input(elem: Dict[(str, Any)], model_cfg=CLAP_MODEL_CFG, enable_fusion=False, target_sr=48000) -> Dict[(str, Any)]:
    """Read the wav file referenced by elem['file'] and attach CLAP audio features.

    Args:
        elem: sample dict containing at least a 'file' path.
        model_cfg: CLAP audio config passed through to get_audio_features.
        enable_fusion: use 'fusion' truncation instead of 'rand_trunc'.
        target_sr: resample target in Hz.

    Returns:
        The same dict with elem['audio_features'] set to a one-element list.
    """
    f = elem['file']
    (audio_waveform, _) = read_wav(f, target_sr=target_sr)
    # int16 round-trip quantises the waveform to 16-bit precision
    audio_waveform = int16_to_float32(float32_to_int16(audio_waveform))
    audio_waveform = torch.from_numpy(audio_waveform).float()
    audio_features_dict = {}
    audio_features_dict = get_audio_features(audio_features_dict, audio_waveform, target_sr, data_truncating=('fusion' if enable_fusion else 'rand_trunc'), data_filling='repeatpad', audio_cfg=model_cfg, require_grad=audio_waveform.requires_grad)
    elem['audio_features'] = [audio_features_dict]
    return elem
def get_full_output_dir(output_dir):
    """Ensure *output_dir* exists (creating parents as needed) and return it."""
    os.makedirs(output_dir, exist_ok=True)
    return output_dir
def check_output_types(self, func, ref_outputs, args, kwargs):
    """Assert the traced graph of *func* has exactly one output and that
    ref_outputs is an instance of its type."""
    graph = getattr(func, 'last_graph', None)
    output_types = [out.type() for out in graph.outputs()]
    self.assertTrue(len(output_types) == 1)
    only_type = output_types[0]
    torch._C._jit_assert_is_instance(ref_outputs, only_type)
def CalculateDistributionSecondaryStr(ProteinSequence):
    """Distribution descriptor of *ProteinSequence* for the secondary-structure
    property grouping."""
    return CalculateDistribution(ProteinSequence, _SecondaryStr, '_SecondaryStr')
def _get_compute_cap(device):
caps_str = device.physical_device_desc
m = re.search('compute capability: (\\d+).(\\d+)', caps_str)
major = m.group(1)
minor = m.group(2)
return (major, minor) |
class LayerNormLinearFn(torch.autograd.Function):
    """Fused (layer|RMS)-norm + linear autograd function.

    Forward normalises x (optionally adding a residual first), then applies
    F.linear; backward recomputes the normalised output to form the linear
    weight gradient and delegates the norm gradients to _layer_norm_bwd.
    """

    # NOTE(review): the original source had bare `_fwd` / `_bwd` tokens here,
    # clearly the remnants of stripped decorators (likely torch.cuda.amp's
    # custom_fwd/custom_bwd). @staticmethod is required by
    # torch.autograd.Function and restores a working class; confirm whether
    # the amp decorators should also be reinstated.
    @staticmethod
    def forward(ctx, x, norm_weight, norm_bias, linear_weight, linear_bias, residual=None, eps=1e-06, prenorm=False, residual_in_fp32=False, is_rms_norm=False):
        x_shape_og = x.shape
        # flatten to 2D (rows, features); kernels expect contiguous last dim
        x = x.reshape(-1, x.shape[-1])
        if x.stride(-1) != 1:
            x = x.contiguous()
        if residual is not None:
            assert residual.shape == x_shape_og
            residual = residual.reshape(-1, residual.shape[-1])
            if residual.stride(-1) != 1:
                residual = residual.contiguous()
        norm_weight = norm_weight.contiguous()
        if norm_bias is not None:
            norm_bias = norm_bias.contiguous()
        residual_dtype = (residual.dtype if residual is not None else (torch.float32 if residual_in_fp32 else None))
        y, mean, rstd, residual_out = _layer_norm_fwd(x, norm_weight, norm_bias, eps, residual, out_dtype=(None if not torch.is_autocast_enabled() else torch.get_autocast_gpu_dtype()), residual_dtype=residual_dtype, is_rms_norm=is_rms_norm)
        y = y.reshape(x_shape_og)
        # under autocast, run the linear in the autocast dtype
        dtype = torch.get_autocast_gpu_dtype() if torch.is_autocast_enabled() else y.dtype
        linear_weight = linear_weight.to(dtype)
        linear_bias = linear_bias.to(dtype) if linear_bias is not None else None
        out = F.linear(y.to(linear_weight.dtype), linear_weight, linear_bias)
        # y is recomputed in backward, so only the norm inputs are saved
        ctx.save_for_backward(residual_out, norm_weight, norm_bias, linear_weight, mean, rstd)
        ctx.x_shape_og = x_shape_og
        ctx.eps = eps
        ctx.is_rms_norm = is_rms_norm
        ctx.has_residual = residual is not None
        ctx.prenorm = prenorm
        ctx.x_dtype = x.dtype
        ctx.linear_bias_is_none = linear_bias is None
        return out if not prenorm else (out, residual_out.reshape(x_shape_og))

    @staticmethod
    def backward(ctx, dout, *args):
        x, norm_weight, norm_bias, linear_weight, mean, rstd = ctx.saved_tensors
        dout = dout.reshape(-1, dout.shape[-1])
        # gradient through the linear layer back into the normalised output
        dy = F.linear(dout, linear_weight.t())
        dlinear_bias = None if ctx.linear_bias_is_none else dout.sum(0)
        if dy.stride(-1) != 1:
            dy = dy.contiguous()
        assert dy.shape == x.shape
        if ctx.prenorm:
            # second output's gradient (the residual passthrough)
            dresidual = args[0]
            dresidual = dresidual.reshape(-1, dresidual.shape[-1])
            if dresidual.stride(-1) != 1:
                dresidual = dresidual.contiguous()
            assert dresidual.shape == x.shape
        else:
            dresidual = None
        dx, dnorm_weight, dnorm_bias, dresidual_in, y = _layer_norm_bwd(dy, x, norm_weight, norm_bias, ctx.eps, mean, rstd, dresidual, ctx.has_residual, ctx.is_rms_norm, x_dtype=ctx.x_dtype, recompute_output=True)
        # recomputed y gives the linear weight gradient without storing y
        dlinear_weight = torch.einsum('bo,bi->oi', dout, y)
        return (dx.reshape(ctx.x_shape_og), dnorm_weight, dnorm_bias, dlinear_weight, dlinear_bias, (dresidual_in.reshape(ctx.x_shape_og) if ctx.has_residual else None), None, None, None, None)
class JasperEncoder(nn.Module):
    """Jasper dense-residual acoustic encoder.

    A preprocessing sub-block followed by config.num_blocks Jasper blocks,
    wired densely: every earlier block output feeds every later block through
    a 1x1 masked conv + batch-norm projection pair.
    """

    def __init__(self, config: JasperEncoderConfig, device: torch.device) -> None:
        super(JasperEncoder, self).__init__()
        self.config = config
        self.device = device
        self.layers = nn.ModuleList()
        # preprocessing sub-block (stem) before the Jasper blocks
        self.layers.append(JasperSubBlock(in_channels=self.config.preprocess_block['in_channels'], out_channels=self.config.preprocess_block['out_channels'], kernel_size=self.config.preprocess_block['kernel_size'], stride=self.config.preprocess_block['stride'], dilation=self.config.preprocess_block['dilation'], dropout_p=self.config.preprocess_block['dropout_p'], activation='relu', bias=False).to(self.device))
        self.layers.extend([JasperBlock(num_sub_blocks=self.config.num_sub_blocks, in_channels=self.config.block['in_channels'][i], out_channels=self.config.block['out_channels'][i], kernel_size=self.config.block['kernel_size'][i], dilation=self.config.block['dilation'][i], dropout_p=self.config.block['dropout_p'][i], activation='relu', bias=False).to(self.device) for i in range(config.num_blocks)])
        self.residual_connections = self._create_jasper_dense_residual_connections(self.config.num_blocks)

    def forward(self, inputs: Tensor, input_lengths: Tensor) -> Tuple[(Tensor, Tensor)]:
        """Run the encoder.

        Args:
            inputs: input feature tensor.
            input_lengths: valid lengths per batch element.

        Returns:
            (output, output_lengths) from the final block.
        """
        prev_outputs, prev_output_lengths = list(), list()
        residual = None
        for i, layer in enumerate(self.layers[:-1]):
            inputs, input_lengths = layer(inputs, input_lengths, residual)
            prev_outputs.append(inputs)
            prev_output_lengths.append(input_lengths)
            # dense residual: sum of projections of ALL previous block outputs
            residual = self._get_jasper_dense_residual(prev_outputs, prev_output_lengths, i)
        output, output_lengths = self.layers[-1](inputs, input_lengths, residual)
        del prev_outputs, prev_output_lengths, residual, inputs, input_lengths
        return (output, output_lengths)

    def _get_jasper_dense_residual(self, prev_outputs: list, prev_output_lengths: list, index: int):
        """Sum 1x1-conv projections of all previous block outputs, then batch-norm."""
        residual = None
        for item in zip(prev_outputs, prev_output_lengths, self.residual_connections[index]):
            prev_output, prev_output_length, residual_modules = item
            conv1x1, batch_norm = residual_modules
            if residual is None:
                residual = conv1x1(prev_output, prev_output_length)[0]
            else:
                residual += conv1x1(prev_output, prev_output_length)[0]
            residual = batch_norm(residual)
        return residual

    # backward-compatible alias for the old, misspelled private name
    _get_jasper_dencse_residual = _get_jasper_dense_residual

    def _create_jasper_dense_residual_connections(self, num_blocks: int) -> nn.ModuleList:
        """Build the (conv1x1, batch_norm) projection pairs: block i receives
        one pair for each of blocks 0..i."""
        residual_connections = nn.ModuleList()
        for i in range(num_blocks):
            residual_modules = nn.ModuleList()
            for j in range(i + 1):
                residual_modules.append(nn.ModuleList([MaskConv1d(self.config.block['in_channels'][j], self.config.block['out_channels'][i], kernel_size=1), nn.BatchNorm1d(self.config.block['out_channels'][i], eps=0.001, momentum=0.1)]))
            residual_connections.append(residual_modules)
        return residual_connections
# NOTE(review): the two lines above this test were bare `(data=st.data())` and
# `(deadline=..., ...)` — stripped hypothesis decorators. Restored as
# @given/@settings; confirm the file imports them from hypothesis.
@given(data=st.data())
@settings(deadline=None, suppress_health_check=SUPPRESSED_HEALTH_CHECKS, max_examples=MAX_EXAMPLES)
def test_no_unsatisfiable_schemas(data):
    """A mutated schema must never canonicalise to the unsatisfiable schema."""
    schema = {'type': 'object', 'required': ['foo']}
    mutated_schema = data.draw(mutated(schema, {}, location='body', media_type='application/json'))
    assert canonicalish(mutated_schema) != FALSEY
class FindNgrams():
    """Collect n-gram statistics over whitespace-tokenised sentences.

    Supports plain frequency counting (count_ngram) and PMI-guided segment
    discovery (find_ngrams_pmi) followed by a frequency-based refresh
    (renew_ngram_by_freq).
    """

    def __init__(self, min_count=0, min_pmi=0, language='en'):
        self.min_count = min_count  # frequency threshold for keeping items
        self.min_pmi = min_pmi  # PMI threshold for "strong" adjacent pairs
        self.words = defaultdict(int)  # unigram counts
        (self.ngrams, self.pairs) = (defaultdict(int), defaultdict(int))
        self.total = 0.0  # number of adjacent word pairs observed
        self.language = language

    def text_filter(self, sentence):
        """Split a token list on punctuation-like tokens, lower-casing chunks.

        NOTE(review): the character class in u'[^\u0600---0-9a-zA-Z]+' looks
        garbled (possibly an Arabic Unicode range originally) — confirm
        against the upstream source before relying on it.
        """
        cleaned_text = []
        index = 0
        for (i, w) in enumerate(sentence):
            if re.match(u'[^\u0600---0-9a-zA-Z]+', w):
                if (i > index):
                    cleaned_text.append([w.lower() for w in sentence[index:i]])
                index = (1 + i)
        if (index < len(sentence)):
            cleaned_text.append([w.lower() for w in sentence[index:]])
        return cleaned_text

    def count_ngram(self, texts, n):
        """Count all 1..n-grams over *texts*, keeping those above min_count."""
        self.ngrams = defaultdict(int)
        for sentence in texts:
            sub_sentence = sentence.split()
            for i in range(n):
                n_len = (i + 1)
                for j in range((len(sub_sentence) - i)):
                    ngram = tuple([w for w in sub_sentence[j:(j + n_len)]])
                    self.ngrams[ngram] += 1
        self.ngrams = {i: j for (i, j) in self.ngrams.items() if (j > self.min_count)}

    def find_ngrams_pmi(self, texts, n, freq_threshold):
        """Discover n-grams by chaining adjacent pairs whose PMI >= min_pmi,
        then refresh the counts with renew_ngram_by_freq."""
        # first pass: unigram and adjacent-pair counts
        for sentence in texts:
            sub_sentence = sentence.split()
            self.words[sub_sentence[0]] += 1
            for i in range((len(sub_sentence) - 1)):
                self.words[sub_sentence[(i + 1)]] += 1
                self.pairs[(sub_sentence[i], sub_sentence[(i + 1)])] += 1
                self.total += 1
        self.words = {i: j for (i, j) in self.words.items() if (j > self.min_count)}
        self.pairs = {i: j for (i, j) in self.pairs.items() if (j > self.min_count)}
        # PMI(a, b) = log( total * count(a,b) / (count(a) * count(b)) )
        min_mi = math.inf
        max_mi = (- math.inf)
        self.strong_segments = set()
        for (i, j) in self.pairs.items():
            if ((i[0] in self.words) and (i[1] in self.words)):
                mi = math.log(((self.total * j) / (self.words[i[0]] * self.words[i[1]])))
                if (mi > max_mi):
                    max_mi = mi
                if (mi < min_mi):
                    min_mi = mi
                if (mi >= self.min_pmi):
                    self.strong_segments.add(i)
        # second pass: greedily extend segments across strong pairs
        self.ngrams = defaultdict(int)
        for sentence in texts:
            sub_sentence = sentence.split()
            s = [sub_sentence[0]]
            for i in range((len(sub_sentence) - 1)):
                if ((sub_sentence[i], sub_sentence[(i + 1)]) in self.strong_segments):
                    s.append(sub_sentence[(i + 1)])
                else:
                    self.ngrams[tuple(s)] += 1
                    s = [sub_sentence[(i + 1)]]
        self.ngrams = {i: j for (i, j) in self.ngrams.items() if ((j > self.min_count) and (len(i) <= n))}
        self.renew_ngram_by_freq(texts, freq_threshold, n)

    def renew_ngram_by_freq(self, all_sentences, min_feq, ngram_len=10):
        """Recount the currently-kept n-grams over *all_sentences* and drop
        those occurring at most min_feq times."""
        new_ngram2count = {}
        new_all_sentences = []
        for sentence in all_sentences:
            sentence = sentence.split()
            sen = sentence
            for i in range(len(sen)):
                for n in range(1, (ngram_len + 1)):
                    if ((i + n) > len(sentence)):
                        break
                    n_gram = tuple(sentence[i:(i + n)])
                    if (n_gram not in self.ngrams):
                        continue
                    if (n_gram not in new_ngram2count):
                        new_ngram2count[n_gram] = 1
                    else:
                        new_ngram2count[n_gram] += 1
        self.ngrams = {gram: c for (gram, c) in new_ngram2count.items() if (c > min_feq)}
# NOTE(review): the line above this function was the bare remnant `(base=10)`,
# i.e. a stripped decorator. Restored as Sage's @options(base=10), which
# provides the default logarithm base keyword — confirm `options` is imported.
@options(base=10)
def plot_loglog(funcs, *args, **kwds):
    """Plot *funcs* with logarithmic scaling on both the x and y axes.

    Accepts the same arguments as plot(); `base` (default 10) sets the
    logarithm base for both axes.
    """
    return plot(funcs, *args, scale='loglog', **kwds)
def test_z():
    """A Pauli-Z gate on one qubit must produce the unitary diag(1, -1)."""
    circuit = Circuit(1)
    circuit.z(0)
    expected = array([[1, 0], [0, -1]])
    assert array_equal(expected, circuit.get_unitary_matrix())
class OpenAIGPTForSequenceClassification(metaclass=DummyObject):
    """Dummy placeholder: raises a helpful error via requires_backends when
    instantiated without the 'torch' backend installed."""
    # backends this dummy stands in for
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_policy_without_pscore_item_position():
    """Skipping the marginal pscore must leave it None while sampling stays
    reproducible across datasets built with the same seed."""
    reward_type = 'binary'
    dataset_kwargs = dict(
        n_unique_action=80,
        len_list=3,
        dim_context=2,
        reward_type=reward_type,
        random_state=12345,
        behavior_policy_function=linear_behavior_policy_logit,
    )
    n_rounds = 100
    dataset = SyntheticSlateBanditDataset(**dataset_kwargs)
    bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds, return_pscore_item_position=False)
    check_slate_bandit_feedback(bandit_feedback=bandit_feedback)
    assert bandit_feedback['pscore_item_position'] is None, f"pscore marginal must be None, but {bandit_feedback['pscore_item_position']}"
    # a second dataset with identical settings must reproduce the rewards
    dataset2 = SyntheticSlateBanditDataset(**dataset_kwargs)
    bandit_feedback2 = dataset2.obtain_batch_bandit_feedback(n_rounds=n_rounds, return_pscore_item_position=False)
    check_slate_bandit_feedback(bandit_feedback=bandit_feedback2)
    assert np.allclose(bandit_feedback['expected_reward_factual'], bandit_feedback2['expected_reward_factual'])
    if reward_type == 'binary':
        assert set(np.unique(bandit_feedback['reward'])) == set([0, 1])
class ResNeSt(nn.Module):
def __init__(self, last_stride, block, layers, radix=1, groups=1, bottleneck_width=64, dilated=False, dilation=1, deep_stem=False, stem_width=64, avg_down=False, rectified_conv=False, rectify_avg=False, avd=False, avd_first=False, final_drop=0.0, dropblock_prob=0, last_gamma=False, norm_layer='BN'):
if (last_stride == 1):
dilation = 2
self.cardinality = groups
self.bottleneck_width = bottleneck_width
self.inplanes = ((stem_width * 2) if deep_stem else 64)
self.avg_down = avg_down
self.last_gamma = last_gamma
self.radix = radix
self.avd = avd
self.avd_first = avd_first
super().__init__()
self.rectified_conv = rectified_conv
self.rectify_avg = rectify_avg
if rectified_conv:
from rfconv import RFConv2d
conv_layer = RFConv2d
else:
conv_layer = nn.Conv2d
conv_kwargs = ({'average_mode': rectify_avg} if rectified_conv else {})
if deep_stem:
self.conv1 = nn.Sequential(conv_layer(3, stem_width, kernel_size=3, stride=2, padding=1, bias=False, **conv_kwargs), get_norm(norm_layer, stem_width), nn.ReLU(inplace=True), conv_layer(stem_width, stem_width, kernel_size=3, stride=1, padding=1, bias=False, **conv_kwargs), get_norm(norm_layer, stem_width), nn.ReLU(inplace=True), conv_layer(stem_width, (stem_width * 2), kernel_size=3, stride=1, padding=1, bias=False, **conv_kwargs))
else:
self.conv1 = conv_layer(3, 64, kernel_size=7, stride=2, padding=3, bias=False, **conv_kwargs)
self.bn1 = get_norm(norm_layer, self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], norm_layer=norm_layer, is_first=False)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2, norm_layer=norm_layer)
if (dilated or (dilation == 4)):
self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2, norm_layer=norm_layer, dropblock_prob=dropblock_prob)
self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4, norm_layer=norm_layer, dropblock_prob=dropblock_prob)
elif (dilation == 2):
self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilation=1, norm_layer=norm_layer, dropblock_prob=dropblock_prob)
self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=2, norm_layer=norm_layer, dropblock_prob=dropblock_prob)
else:
self.layer3 = self._make_layer(block, 256, layers[2], stride=2, norm_layer=norm_layer, dropblock_prob=dropblock_prob)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2, norm_layer=norm_layer, dropblock_prob=dropblock_prob)
self.drop = (nn.Dropout(final_drop) if (final_drop > 0.0) else None)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
m.weight.data.normal_(0, math.sqrt((2.0 / n)))
def _make_layer(self, block, planes, blocks, stride=1, dilation=1, norm_layer=None, dropblock_prob=0.0, is_first=True):
    """Build one residual stage of ``blocks`` blocks.

    The first block carries the stride and (if needed) the projection
    shortcut; the remaining blocks run at stride 1 with the requested
    dilation.  ``self.inplanes`` is advanced to the stage's output width.
    """
    downsample = None
    # A projection shortcut is needed whenever the spatial size or the
    # channel count changes across the stage.
    if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
        down_layers = []
        if self.avg_down:
            # ResNet-D style shortcut: average-pool first, then a
            # stride-1 1x1 conv, instead of a strided 1x1 conv.
            if (dilation == 1):
                down_layers.append(nn.AvgPool2d(kernel_size=stride, stride=stride, ceil_mode=True, count_include_pad=False))
            else:
                # Dilated stages keep full resolution, so the pool is a no-op.
                down_layers.append(nn.AvgPool2d(kernel_size=1, stride=1, ceil_mode=True, count_include_pad=False))
            down_layers.append(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=1, bias=False))
        else:
            down_layers.append(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False))
        down_layers.append(get_norm(norm_layer, (planes * block.expansion)))
        downsample = nn.Sequential(*down_layers)
    layers = []
    # First block: dilation 4 is realised as dilation 2 here (the
    # remaining blocks then use the full requested dilation below).
    if ((dilation == 1) or (dilation == 2)):
        layers.append(block(self.inplanes, planes, stride, downsample=downsample, radix=self.radix, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, avd_first=self.avd_first, dilation=1, is_first=is_first, rectified_conv=self.rectified_conv, rectify_avg=self.rectify_avg, norm_layer=norm_layer, dropblock_prob=dropblock_prob, last_gamma=self.last_gamma))
    elif (dilation == 4):
        layers.append(block(self.inplanes, planes, stride, downsample=downsample, radix=self.radix, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, avd_first=self.avd_first, dilation=2, is_first=is_first, rectified_conv=self.rectified_conv, rectify_avg=self.rectify_avg, norm_layer=norm_layer, dropblock_prob=dropblock_prob, last_gamma=self.last_gamma))
    else:
        raise RuntimeError('=> unknown dilation size: {}'.format(dilation))
    self.inplanes = (planes * block.expansion)
    for i in range(1, blocks):
        layers.append(block(self.inplanes, planes, radix=self.radix, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, avd_first=self.avd_first, dilation=dilation, rectified_conv=self.rectified_conv, rectify_avg=self.rectify_avg, norm_layer=norm_layer, dropblock_prob=dropblock_prob, last_gamma=self.last_gamma))
    return nn.Sequential(*layers)
def forward(self, x):
    """Run the backbone: stem (conv/bn/relu/pool) followed by the four
    residual stages; returns the stage-4 feature map."""
    out = self.maxpool(self.relu(self.bn1(self.conv1(x))))
    for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
        out = stage(out)
    return out
def main():
    """Turn a txt list of config files into a slurm batch-training script.

    Each non-empty line of ``args.txt_path`` is a config path; for each
    one an ``echo`` line and a backgrounded ``slurm_train.sh`` command are
    emitted.  The result is written to ``args.out`` (must end in ``.sh``)
    and/or executed directly with ``os.system`` when ``args.run`` is set.
    """
    args = parse_args()
    if args.out:
        out_suffix = args.out.split('.')[(- 1)]
        assert args.out.endswith('.sh'), f'Expected out file path suffix is .sh, but get .{out_suffix}'
    assert (args.out or args.run), 'Please specify at least one operation (save/run/ the script) with the argument "--out" or "--run"'
    partition = args.partition
    root_name = './tools'
    train_script_name = osp.join(root_name, 'slurm_train.sh')
    # Training stdout is discarded; only the echo lines reach the console.
    stdout_cfg = '>/dev/null'
    max_keep_ckpts = args.max_keep_ckpts
    commands = []
    with open(args.txt_path, 'r') as f:
        model_cfgs = f.readlines()
        for (i, cfg) in enumerate(model_cfgs):
            cfg = cfg.strip()
            if (len(cfg) == 0):
                continue
            echo_info = f"echo '{cfg}' &"
            commands.append(echo_info)
            commands.append('\n')
            (fname, _) = osp.splitext(osp.basename(cfg))
            out_fname = osp.join(root_name, 'work_dir', fname)
            # GPU allocation is inferred from the config file name.
            if (cfg.find('16x') >= 0):
                command_info = f'GPUS=16 GPUS_PER_NODE=8 CPUS_PER_TASK=2 {train_script_name} '
            elif ((cfg.find('gn-head_4x4_1x_coco.py') >= 0) or (cfg.find('gn-head_4x4_2x_coco.py') >= 0)):
                command_info = f'GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 {train_script_name} '
            else:
                command_info = f'GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 {train_script_name} '
            command_info += f'{partition} '
            command_info += f'{fname} '
            command_info += f'{cfg} '
            command_info += f'{out_fname} '
            if max_keep_ckpts:
                command_info += (f'--cfg-options checkpoint_config.max_keep_ckpts={max_keep_ckpts}' + ' ')
            command_info += f'{stdout_cfg} &'
            commands.append(command_info)
            # NOTE(review): `i < len(model_cfgs)` is always true inside this
            # loop, so a newline is appended after every command; presumably
            # `i < len(model_cfgs) - 1` was intended — confirm before changing.
            if (i < len(model_cfgs)):
                commands.append('\n')
    command_str = ''.join(commands)
    if args.out:
        with open(args.out, 'w') as f:
            f.write(command_str)
    if args.run:
        os.system(command_str)
class AverageMeter(object):
    """Tracks the most recent value and the running average of a metric.

    ``update(val, n)`` folds in ``n`` observations of ``val``;
    ``synchronize()`` all-reduces the running sum and count across
    distributed workers so every rank reports the same average.
    """

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt  # format spec used by __str__, e.g. ':.4f'
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record ``n`` observations with value ``val``."""
        self.val = val
        self.sum += (val * n)
        self.count += n
        self.avg = (self.sum / self.count)

    def synchronize(self):
        """All-reduce sum and count across ranks (no-op outside DDP)."""
        if (not utils.is_dist_avail_and_initialized()):
            return
        # Pack [sum, count] into one tensor so a single all_reduce suffices.
        t = torch.tensor([self.sum, self.count], dtype=torch.float64, device='cuda')
        dist.barrier()
        dist.all_reduce(t)
        t = t.tolist()
        # BUG FIX: the original assigned int(t[0]) to sum and t[1] to count,
        # truncating fractional sums and leaving count a float. t[0] is the
        # reduced sum (keep as float); t[1] is the reduced count (an integer).
        self.sum = t[0]
        self.count = int(t[1])
        self.avg = (self.sum / self.count)

    def __str__(self):
        fmtstr = (((('{name} {val' + self.fmt) + '} ({avg') + self.fmt) + '})')
        return fmtstr.format(**self.__dict__)
def synset2idx(path_to_yaml='data/index_synset.yaml'):
    """Load the index->synset YAML mapping and return its inverse.

    The file maps class indices to WordNet synset ids; the returned dict
    maps synset id -> class index.

    FIX: use ``yaml.safe_load`` — the bare ``yaml.load(f)`` call is unsafe
    on untrusted input and raises TypeError (missing ``Loader``) under
    PyYAML >= 6.
    """
    with open(path_to_yaml) as f:
        di2s = yaml.safe_load(f)
    return {v: k for (k, v) in di2s.items()}
class MagicPoint(BaseModel):
    """Keypoint-detector model (VGG backbone + detector head), TF1 graph style.

    Produces a per-pixel keypoint probability map; optionally applies
    homography adaptation at prediction time and box NMS on the heatmap.
    """

    input_spec = {'image': {'shape': [None, None, None, 1], 'type': tf.float32}}
    required_config_keys = []
    default_config = {'data_format': 'channels_first', 'kernel_reg': 0.0, 'grid_size': 8, 'detection_threshold': 0.4, 'homography_adaptation': {'num': 0}, 'nms': 0, 'top_k': 0}

    def _model(self, inputs, mode, **config):
        """Build the forward graph; returns a dict with 'prob', 'pred', etc."""
        config['training'] = (mode == Mode.TRAIN)
        image = inputs['image']

        def net(image):
            # Inputs arrive NHWC; transpose when the backbone expects NCHW.
            if (config['data_format'] == 'channels_first'):
                image = tf.transpose(image, [0, 3, 1, 2])
            features = vgg_backbone(image, **config)
            outputs = detector_head(features, **config)
            return outputs
        # Homography adaptation averages detections over random warps
        # (prediction only, and only when enabled in the config).
        if ((mode == Mode.PRED) and config['homography_adaptation']['num']):
            outputs = homography_adaptation(image, net, config['homography_adaptation'])
        else:
            outputs = net(image)
        prob = outputs['prob']
        if config['nms']:
            prob = tf.map_fn((lambda p: box_nms(p, config['nms'], min_prob=config['detection_threshold'], keep_top_k=config['top_k'])), prob)
            outputs['prob_nms'] = prob
        # Binary keypoint map: threshold the (possibly NMS'd) probabilities.
        pred = tf.to_int32(tf.greater_equal(prob, config['detection_threshold']))
        outputs['pred'] = pred
        return outputs

    def _loss(self, outputs, inputs, **config):
        """Cross-entropy detector loss on the keypoint map (masked)."""
        if (config['data_format'] == 'channels_first'):
            outputs['logits'] = tf.transpose(outputs['logits'], [0, 2, 3, 1])
        return detector_loss(inputs['keypoint_map'], outputs['logits'], valid_mask=inputs['valid_mask'], **config)

    def _metrics(self, outputs, inputs, **config):
        """Pixelwise precision/recall of predicted keypoints vs ground truth."""
        pred = (inputs['valid_mask'] * outputs['pred'])
        labels = inputs['keypoint_map']
        precision = (tf.reduce_sum((pred * labels)) / tf.reduce_sum(pred))
        recall = (tf.reduce_sum((pred * labels)) / tf.reduce_sum(labels))
        return {'precision': precision, 'recall': recall}
def print_mem(info=None):
    """Print current CUDA memory usage (allocated and cached) in MB.

    When ``info`` is truthy it is printed first on the same line as a
    prefix label.
    """
    if info:
        print(info, end=' ')
    allocated_mb = round(torch.cuda.memory_allocated() / 1048576)
    cached_mb = round(torch.cuda.memory_cached() / 1048576)
    print(f'Mem allocated: {allocated_mb}MB, Mem cached: {cached_mb}MB')
class ValueFnTests(tf.test.TestCase):
    """TF1-style tests for ``value_fns`` (runs ops inside a test session)."""

    def test_label_attention_fn(self):
        """With near-one-hot scores, label_attention should return (close to)
        the embedding of the highest-scoring label at each position.

        NOTE(review): the same ``label_scores`` tensor is passed for both
        score arguments — presumably query and key scores; confirm against
        ``value_fns.label_attention``'s signature.
        """
        with self.test_session():
            mode = tf.estimator.ModeKeys.TRAIN
            # 5 labels, embedding dim 4.
            label_embeddings = tf.constant([[0.1, 0.1, 0.1, 0.1], [0.3, 0.3, 0.3, 0.3], [0.5, 0.5, 0.5, 0.5], [10, 10, 10, 10], [1.0, 1.0, 1.0, 1.0]])
            # batch 2, sequence length 3: +-10 logits make the weighting
            # effectively a hard argmax per position.
            label_scores = tf.constant([[[(- 10.0), 10.0, (- 10.0), (- 10.0), (- 10.0)], [(- 10.0), (- 10.0), (- 10.0), 10.0, (- 10.0)], [10.0, 10.0, 10.0, (- 10.0), (- 10.0)]], [[(- 10.0), (- 10.0), 10.0, (- 10.0), (- 10.0)], [10.0, (- 10.0), (- 10.0), (- 10.0), (- 10.0)], [(- 10.0), 10.0, (- 10.0), (- 10.0), (- 10.0)]]])
            expected = tf.constant([[[0.3, 0.3, 0.3, 0.3], [10.0, 10.0, 10.0, 10.0], [0.3, 0.3, 0.3, 0.3]], [[0.5, 0.5, 0.5, 0.5], [0.1, 0.1, 0.1, 0.1], [0.3, 0.3, 0.3, 0.3]]])
            result = value_fns.label_attention(mode, label_scores, label_scores, label_embeddings)
            self.assertAllCloseAccordingToType(result.eval(), expected)
def conv_block(input_mat, num_filters, kernel_size, batch_norm):
    """Two stacked Conv2D -> (optional BatchNorm) -> ReLU stages.

    Both convolutions use a square ``kernel_size`` kernel, stride 1 and
    'same' padding, so the spatial dimensions are preserved.
    """
    ksize = (kernel_size, kernel_size)
    X = input_mat
    for _ in range(2):
        X = Conv2D(num_filters, kernel_size=ksize, strides=(1, 1), padding='same')(X)
        if batch_norm:
            X = BatchNormalization()(X)
        X = Activation('relu')(X)
    return X
class TestLinalg(TestCase):
    """Tests for torch.linalg / torch.outer / torch.ger / torch.addr / norms.

    NOTE(review): the bare tuple / starred expressions that precede most test
    methods (e.g. ``((not TEST_NUMPY), 'NumPy not found')``,
    ``({torch.bfloat16: 0.1})``, ``(*torch.testing.get_all_dtypes())``) look
    like decorator arguments whose ``@unittest.skipIf`` /
    ``@precisionOverride`` / ``@dtypes`` heads were stripped — as written they
    are no-op (and in the starred case, syntactically invalid) statements.
    Confirm against the upstream test suite before running.
    """
    exact_dtype = True
    ((not TEST_NUMPY), 'NumPy not found')
    ({torch.bfloat16: 0.1})
    (*torch.testing.get_all_dtypes())
    def test_outer(self, device, dtype):
        """torch.outer/ger should match np.outer, including out= variants."""
        def run_test_case(a, b):
            # bfloat16 has no numpy equivalent; compare via double.
            if (dtype == torch.bfloat16):
                a_np = a.to(torch.double).cpu().numpy()
                b_np = b.to(torch.double).cpu().numpy()
            else:
                a_np = a.cpu().numpy()
                b_np = b.cpu().numpy()
            expected = np.outer(a_np, b_np)
            self.assertEqual(torch.outer(a, b), expected)
            self.assertEqual(torch.Tensor.outer(a, b), expected)
            self.assertEqual(torch.ger(a, b), expected)
            self.assertEqual(torch.Tensor.ger(a, b), expected)
            out = torch.empty(a.size(0), b.size(0), device=device, dtype=dtype)
            torch.outer(a, b, out=out)
            self.assertEqual(out, expected)
            out = torch.empty(a.size(0), b.size(0), device=device, dtype=dtype)
            torch.ger(a, b, out=out)
            self.assertEqual(out, expected)
        a = torch.randn(50).to(device=device, dtype=dtype)
        b = torch.randn(50).to(device=device, dtype=dtype)
        run_test_case(a, b)
        zero_strided = torch.randn(1).to(device=device, dtype=dtype).expand(50)
        run_test_case(zero_strided, b)
        run_test_case(a, zero_strided)
    ((not TEST_NUMPY), 'NumPy not found')
    ({torch.bfloat16: 0.1})
    (*torch.testing.get_all_dtypes())
    def test_addr(self, device, dtype):
        """torch.addr should match beta*m + alpha*np.outer(a, b)."""
        def run_test_case(m, a, b, beta=1, alpha=1):
            if (dtype == torch.bfloat16):
                a_np = a.to(torch.double).cpu().numpy()
                b_np = b.to(torch.double).cpu().numpy()
                m_np = m.to(torch.double).cpu().numpy()
            else:
                a_np = a.cpu().numpy()
                b_np = b.cpu().numpy()
                m_np = m.cpu().numpy()
            # beta == 0 must ignore m entirely (even if m contains nan/inf).
            if (beta == 0):
                expected = (alpha * np.outer(a_np, b_np))
            else:
                expected = ((beta * m_np) + (alpha * np.outer(a_np, b_np)))
            self.assertEqual(torch.addr(m, a, b, beta=beta, alpha=alpha), expected)
            self.assertEqual(torch.Tensor.addr(m, a, b, beta=beta, alpha=alpha), expected)
            result_dtype = torch.addr(m, a, b, beta=beta, alpha=alpha).dtype
            out = torch.empty_like(m, dtype=result_dtype)
            torch.addr(m, a, b, beta=beta, alpha=alpha, out=out)
            self.assertEqual(out, expected)
        a = torch.randn(50).to(device=device, dtype=dtype)
        b = torch.randn(50).to(device=device, dtype=dtype)
        m = torch.randn(50, 50).to(device=device, dtype=dtype)
        run_test_case(m, a, b, beta=0.0, alpha=2)
        run_test_case(m, a, b, beta=0.5, alpha=2)
        m_transpose = torch.transpose(m, 0, 1)
        run_test_case(m_transpose, a, b, beta=0.5, alpha=2)
        zero_strided = torch.randn(1).to(device=device, dtype=dtype).expand(50)
        run_test_case(m, zero_strided, b, beta=0.5, alpha=2)
        m_scalar = torch.tensor(1, device=device, dtype=dtype)
        run_test_case(m_scalar, a, b)
    (*itertools.product(torch.testing.get_all_dtypes(), torch.testing.get_all_dtypes()))
    def test_outer_type_promotion(self, device, dtypes):
        """outer/ger result dtype must follow torch.result_type promotion."""
        a = torch.randn(5).to(device=device, dtype=dtypes[0])
        b = torch.randn(5).to(device=device, dtype=dtypes[1])
        for op in (torch.outer, torch.Tensor.outer, torch.ger, torch.Tensor.ger):
            result = op(a, b)
            self.assertEqual(result.dtype, torch.result_type(a, b))
    (*itertools.product(torch.testing.get_all_dtypes(), torch.testing.get_all_dtypes()))
    def test_addr_type_promotion(self, device, dtypes):
        """addr result dtype must promote with the scalar beta/alpha types."""
        a = torch.randn(5).to(device=device, dtype=dtypes[0])
        b = torch.randn(5).to(device=device, dtype=dtypes[1])
        m = torch.randn(5, 5).to(device=device, dtype=torch.result_type(a, b))
        for op in (torch.addr, torch.Tensor.addr):
            desired_dtype = torch.result_type(m, 1)
            result = op(m, a, b)
            self.assertEqual(result.dtype, desired_dtype)
            desired_dtype = torch.result_type(m, 2.0)
            result = op(m, a, b, beta=0, alpha=2.0)
            self.assertEqual(result.dtype, desired_dtype)
    def test_outer_ger_addr_legacy_tests(self, device):
        """Legacy shape/error checks: empty inputs and non-1D operands."""
        for size in ((0, 0), (0, 5), (5, 0)):
            a = torch.rand(size[0], device=device)
            b = torch.rand(size[1], device=device)
            self.assertEqual(torch.outer(a, b).shape, size)
            self.assertEqual(torch.ger(a, b).shape, size)
            m = torch.empty(size, device=device)
            self.assertEqual(torch.addr(m, a, b).shape, size)
        # 0-dim b must be rejected by all three ops.
        m = torch.randn(5, 6, device=device)
        a = torch.randn(5, device=device)
        b = torch.tensor(6, device=device)
        self.assertRaises(RuntimeError, (lambda : torch.outer(a, b)))
        self.assertRaises(RuntimeError, (lambda : torch.outer(b, a)))
        self.assertRaises(RuntimeError, (lambda : torch.ger(a, b)))
        self.assertRaises(RuntimeError, (lambda : torch.ger(b, a)))
        self.assertRaises(RuntimeError, (lambda : torch.addr(m, a, b)))
        self.assertRaises(RuntimeError, (lambda : torch.addr(m, b, a)))
    ((not TEST_NUMPY), 'NumPy not found')
    (torch.double)
    def test_det(self, device, dtype):
        """det should match np.linalg.det, including batched inputs."""
        tensors = (torch.randn((2, 2), device=device, dtype=dtype), torch.randn((129, 129), device=device, dtype=dtype), torch.randn((3, 52, 52), device=device, dtype=dtype), torch.randn((4, 2, 26, 26), device=device, dtype=dtype))
        ops = (torch.det, torch.Tensor.det, torch.linalg.det)
        for t in tensors:
            expected = np.linalg.det(t.cpu().numpy())
            for op in ops:
                actual = op(t)
                self.assertEqual(actual, expected)
        # 1-D input must raise.
        # NOTE(review): `op` here is the loop variable leaking from the loop
        # above (torch.linalg.det); only that one op is error-checked —
        # confirm whether all three were intended.
        t = torch.randn(1, device=device, dtype=dtype)
        with self.assertRaises(RuntimeError):
            op(t)
    def test_norm_dtype(self, device):
        """linalg.norm dtype= argument: result dtype, out= consistency, errors."""
        def run_test_case(input_size, ord, keepdim, from_dtype, to_dtype, compare_dtype):
            msg = f'input_size={input_size}, ord={ord}, keepdim={keepdim}, from_dtype={from_dtype}, to_dtype={to_dtype}'
            input = torch.randn(*input_size, dtype=from_dtype, device=device)
            result = torch.linalg.norm(input, ord, keepdim=keepdim, dtype=from_dtype)
            self.assertEqual(result.dtype, from_dtype, msg=msg)
            result_converted = torch.linalg.norm(input, ord, keepdim=keepdim, dtype=to_dtype)
            self.assertEqual(result_converted.dtype, to_dtype, msg=msg)
            self.assertEqual(result.to(compare_dtype), result_converted.to(compare_dtype), msg=msg)
            result_out_converted = torch.empty_like(result_converted)
            torch.linalg.norm(input, ord, keepdim=keepdim, dtype=to_dtype, out=result_out_converted)
            self.assertEqual(result_out_converted.dtype, to_dtype, msg=msg)
            self.assertEqual(result_converted, result_out_converted, msg=msg)
        ord_vector = [0, 1, (- 1), 2, (- 2), 3, (- 3), 4.5, (- 4.5), inf, (- inf), None]
        ord_matrix = [1, (- 1), 2, (- 2), inf, (- inf), None]
        S = 10
        test_cases = [((S,), ord_vector), ((S, S), ord_matrix)]
        for keepdim in [True, False]:
            for (input_size, ord_settings) in test_cases:
                for ord in ord_settings:
                    run_test_case(input_size, ord, keepdim, torch.float, torch.double, torch.float)
                    run_test_case(input_size, ord, keepdim, torch.double, torch.double, torch.float)
        # dtype= must match the dtype of an explicit out= tensor.
        dtype_pairs = [(torch.float, torch.double), (torch.double, torch.float)]
        for keepdim in [True, False]:
            for (input_size, ord_settings) in test_cases:
                for ord in ord_settings:
                    for (dtype, out_dtype) in dtype_pairs:
                        input = torch.rand(*input_size)
                        result = torch.Tensor().to(out_dtype)
                        with self.assertRaisesRegex(RuntimeError, 'provided dtype must match dtype of result'):
                            torch.linalg.norm(input, ord=ord, keepdim=keepdim, dtype=dtype, out=result)
        # 'nuc'/'fro' do not support dtype= yet.
        for ord in ['nuc', 'fro']:
            input = torch.randn(10, 10, device=device)
            with self.assertRaisesRegex(RuntimeError, f"ord='{ord}' does not yet support the dtype argument"):
                torch.linalg.norm(input, ord, dtype=torch.float)
    ((not TEST_NUMPY), 'NumPy not found')
    (torch.float, torch.double)
    def test_norm_vector(self, device, dtype):
        """Vector norms vs np.linalg.norm over ords/dims/keepdim."""
        # NOTE(review): parameter `p` is unused — the body closes over the
        # loop variable `ord`, which happens to equal the passed value;
        # presumably `p` was meant to be used. Behavior is unaffected.
        def run_test_case(input, p, dim, keepdim):
            result = torch.linalg.norm(input, ord, dim, keepdim)
            input_numpy = input.cpu().numpy()
            result_numpy = np.linalg.norm(input_numpy, ord, dim, keepdim)
            msg = f'input.size()={input.size()}, ord={ord}, dim={dim}, keepdim={keepdim}, dtype={dtype}'
            self.assertEqual(result, result_numpy, msg=msg)
            result_out = torch.empty_like(result)
            torch.linalg.norm(input, ord, dim, keepdim, out=result_out)
            self.assertEqual(result, result_out, msg=msg)
        ord_vector = [0, 1, (- 1), 2, (- 2), 3, (- 3), 4.5, (- 4.5), inf, (- inf), None]
        S = 10
        test_cases = [((S,), ord_vector, None), ((S,), ord_vector, 0), ((S, S, S), ord_vector, 0), ((S, S, S), ord_vector, 1), ((S, S, S), ord_vector, 2), ((S, S, S), ord_vector, (- 1)), ((S, S, S), ord_vector, (- 2))]
        L = 1000000
        if (dtype == torch.double):
            test_cases.append(((L,), ord_vector, None))
        for keepdim in [True, False]:
            for (input_size, ord_settings, dim) in test_cases:
                input = torch.randn(*input_size, dtype=dtype, device=device)
                for ord in ord_settings:
                    run_test_case(input, ord, dim, keepdim)
    ((not TEST_NUMPY), 'NumPy not found')
    (torch.float, torch.double)
    def test_norm_matrix(self, device, dtype):
        """Matrix norms vs np.linalg.norm over ords/dims/keepdim."""
        # NOTE(review): same unused-`p`/closure-over-`ord` pattern as
        # test_norm_vector above.
        def run_test_case(input, p, dim, keepdim):
            result = torch.linalg.norm(input, ord, dim, keepdim)
            input_numpy = input.cpu().numpy()
            result_numpy = np.linalg.norm(input_numpy, ord, dim, keepdim)
            msg = f'input.size()={input.size()}, ord={ord}, dim={dim}, keepdim={keepdim}, dtype={dtype}'
            self.assertEqual(result, result_numpy, msg=msg)
            result_out = torch.empty_like(result)
            torch.linalg.norm(input, ord, dim, keepdim, out=result_out)
            self.assertEqual(result, result_out, msg=msg)
        ord_matrix = [1, (- 1), 2, (- 2), inf, (- inf), 'nuc', 'fro', None]
        S = 10
        test_cases = [((S, S), ord_matrix, None), ((S, S), ord_matrix, (0, 1)), ((S, S), ord_matrix, (1, 0)), ((S, S, S, S), ord_matrix, (2, 0)), ((S, S, S, S), ord_matrix, ((- 1), (- 2))), ((S, S, S, S), ord_matrix, ((- 1), (- 3))), ((S, S, S, S), ord_matrix, ((- 3), 2))]
        L = 1000
        if (dtype == torch.double):
            test_cases.append(((L, L), ord_matrix, None))
        for keepdim in [True, False]:
            for (input_size, ord_settings, dim) in test_cases:
                input = torch.randn(*input_size, dtype=dtype, device=device)
                for ord in ord_settings:
                    run_test_case(input, ord, dim, keepdim)
    (torch.float, torch.double)
    def test_autograd_and_jit(self, device, dtype):
        """linalg.norm: scripted result matches eager; gradcheck for double."""
        torch.manual_seed(0)
        S = 10
        NO_ARGS = None
        test_cases = [('norm', (S,), (), 'default_1d'), ('norm', (S, S), (), 'default_2d'), ('norm', (S, S, S), (), 'default_3d'), ('norm', (S,), (inf,), 'vector_inf'), ('norm', (S,), (3.5,), 'vector_3_5'), ('norm', (S,), (2,), 'vector_2'), ('norm', (S,), (1,), 'vector_1'), ('norm', (S,), (0,), 'vector_0'), ('norm', (S,), ((- inf),), 'vector_neg_inf'), ('norm', (S,), ((- 3.5),), 'vector_neg_3_5'), ('norm', (S,), (2,), 'vector_neg_2'), ('norm', (S,), (1,), 'vector_neg_1'), ('norm', (S, S), (inf,), 'matrix_inf'), ('norm', (S, S), (2,), 'matrix_2', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]), ('norm', (S, S), (1,), 'matrix_1'), ('norm', (S, S), ((- inf),), 'matrix_neg_inf'), ('norm', (S, S), ((- 2),), 'matrix_neg_2', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]), ('norm', (S, S), ((- 1),), 'matrix_neg_1'), ('norm', (S, S), ('fro',), 'fro'), ('norm', (S, S), ('fro', [0, 1]), 'fro_dim'), ('norm', (S, S), ('nuc',), 'nuc', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]), ('norm', (S, S), ('nuc', [0, 1]), 'nuc_dim', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma])]
        for test_case in test_cases:
            func_name = test_case[0]
            func = getattr(torch.linalg, func_name)
            input_size = test_case[1]
            args = list(test_case[2])
            test_case_name = (test_case[3] if (len(test_case) >= 4) else None)
            mapping_funcs = (list(test_case[6]) if (len(test_case) >= 7) else None)
            # Apply the per-case skip decorators to a dummy function; if any
            # of them raises SkipTest this case is skipped.
            if (mapping_funcs is not None):
                def decorated_func(self, device, dtype):
                    pass
                for mapping_func in mapping_funcs:
                    decorated_func = mapping_func(decorated_func)
                try:
                    decorated_func(self, device, dtype)
                except unittest.SkipTest:
                    continue
            msg = f'function name: {func_name}, case name: {test_case_name}'
            input = torch.randn(*input_size, dtype=dtype, device=device)
            input_script = input.clone().detach()
            (script_method, tensors) = gen_script_fn_and_args('linalg.norm', 'functional', input_script, *args)
            self.assertEqual(func(input, *args), script_method(input_script), msg=msg)
            # gradcheck only at double precision.
            if (dtype == torch.double):
                input = torch.randn(*input_size, dtype=dtype, device=device, requires_grad=True)
                def run_func(input):
                    return func(input, *args)
                self.assertTrue(gradcheck(run_func, input), msg=msg)
    ((not TEST_NUMPY), 'NumPy not found')
    (torch.float, torch.double)
    def test_norm_errors(self, device, dtype):
        """Invalid ord/dim combinations must raise in torch and numpy alike."""
        def run_error_test_case(input, ord, dim, keepdim, error_type, error_regex):
            test_case_info = f'test case input.size()={input.size()}, ord={ord}, dim={dim}, keepdim={keepdim}, dtype={dtype}'
            with self.assertRaisesRegex(error_type, error_regex, msg=test_case_info):
                torch.linalg.norm(input, ord, dim, keepdim)
            input_numpy = input.cpu().numpy()
            msg = f'numpy does not raise error but pytorch does, for case "{test_case_info}"'
            with self.assertRaises(Exception, msg=test_case_info):
                np.linalg.norm(input_numpy, ord, dim, keepdim)
        S = 10
        error_test_cases = [((S,), ['fro'], None, RuntimeError, 'order "fro" can only be used if either len\\(dim\\) == 2'), ((S,), ['nuc'], None, RuntimeError, 'order "nuc" can only be used if either len\\(dim\\) == 2'), ((S, S), [3.5], None, RuntimeError, 'Order 3.5 not supported for matrix norm'), ((S, S), [0], None, RuntimeError, 'Order 0 not supported for matrix norm'), ((S, S), ['nuc'], 0, RuntimeError, 'order "nuc" can only be used if either len\\(dim\\) == 2'), ((S, S), ['fro'], 0, RuntimeError, 'order "fro" can only be used if either len\\(dim\\) == 2'), ((S, S), ['nuc'], (0, 0), RuntimeError, 'duplicate or invalid dimensions'), ((S, S), ['fro', 0], (0, 0), RuntimeError, 'Expected dims to be different'), ((S, S), ['fro', 'nuc', 0], (0, 4), IndexError, 'Dimension out of range'), ((S,), [0], (4,), IndexError, 'Dimension out of range'), ((S,), [None], (0, 0), RuntimeError, 'Expected dims to be different, got this instead'), ((S, S, S), [1], (0, 1, 2), RuntimeError, "'dim' must specify 1 or 2 dimensions"), ((S, S, S), [1], None, RuntimeError, "'dim' must specify 1 or 2 dimensions"), ((S, S), ['garbage'], (0, 1), RuntimeError, 'Invalid norm order: garbage')]
        for keepdim in [True, False]:
            for (input_size, ord_settings, dim, error_type, error_regex) in error_test_cases:
                input = torch.randn(*input_size, dtype=dtype, device=device)
                for ord in ord_settings:
                    run_error_test_case(input, ord, dim, keepdim, error_type, error_regex)
    ((not TEST_NUMPY), 'Numpy not found')
    (torch.cfloat, torch.cdouble)
    def test_norm_complex(self, device, dtype):
        """Complex norms: supported ords match numpy; unsupported ones raise."""
        def gen_error_message(input_size, ord, keepdim, dim=None):
            return ('complex norm failed for input size %s, ord=%s, keepdim=%s, dim=%s' % (input_size, ord, keepdim, dim))
        # Supported/unsupported ords differ between CPU and CUDA backends.
        if (self.device_type == 'cpu'):
            supported_vector_ords = [0, 1, 3, inf, (- 1), (- 2), (- 3), (- inf)]
            supported_matrix_ords = ['nuc', 1, 2, inf, (- 1), (- 2), (- inf)]
            unsupported_vector_ords = [(2, 'norm with p=2 not supported for complex tensors'), (None, 'norm with p=2 not supported for complex tensors')]
            unsupported_matrix_ords = [('fro', 'frobenius norm not supported for complex tensors'), (None, 'norm with p=2 not supported for complex tensors')]
        elif (self.device_type == 'cuda'):
            supported_vector_ords = [inf, (- inf)]
            supported_matrix_ords = [1, inf, (- 1), (- inf)]
            unsupported_vector_ords = [(0, 'norm_cuda" not implemented for \\\'Complex'), (1, 'norm_cuda" not implemented for \\\'Complex'), (2, 'norm with p=2 not supported for complex tensors'), ((- 1), 'norm_cuda" not implemented for \\\'Complex'), ((- 2), 'norm_cuda" not implemented for \\\'Complex'), (None, 'norm with p=2 not supported for complex tensors')]
            unsupported_matrix_ords = [(None, 'norm with p=2 not supported for complex tensors'), ('fro', 'frobenius norm not supported for complex tensors'), (2, '"svd_cuda" not implemented for \\\'Complex'), ((- 2), '"svd_cuda" not implemented for \\\'Complex'), ('nuc', '"svd_cuda" not implemented for \\\'Complex')]
        for keepdim in [False, True]:
            # vector norm
            x = torch.randn(25, device=device, dtype=dtype)
            xn = x.cpu().numpy()
            for ord in supported_vector_ords:
                res = torch.linalg.norm(x, ord, keepdim=keepdim).cpu()
                expected = np.linalg.norm(xn, ord, keepdims=keepdim)
                msg = gen_error_message(x.size(), ord, keepdim)
                self.assertEqual(res.shape, expected.shape, msg=msg)
                self.assertEqual(res, expected, msg=msg)
            # matrix norm
            x = torch.randn(25, 25, device=device, dtype=dtype)
            xn = x.cpu().numpy()
            for ord in supported_matrix_ords:
                # NOTE(review): 'nuc' is skipped for cdouble here — presumably
                # a known-broken combination; confirm against the tracking issue.
                if ((ord == 'nuc') and (dtype == torch.cdouble)):
                    continue
                res = torch.linalg.norm(x, ord, keepdim=keepdim).cpu()
                expected = np.linalg.norm(xn, ord, keepdims=keepdim)
                msg = gen_error_message(x.size(), ord, keepdim)
                self.assertEqual(res.shape, expected.shape, msg=msg)
                self.assertEqual(res, expected, msg=msg)
        # unsupported ords must raise with the backend-specific message
        x = torch.randn(25, device=device, dtype=dtype)
        for (ord, error_msg) in unsupported_vector_ords:
            with self.assertRaisesRegex(RuntimeError, error_msg):
                torch.linalg.norm(x, ord)
        x = torch.randn(25, 25, device=device, dtype=dtype)
        for (ord, error_msg) in unsupported_matrix_ords:
            with self.assertRaisesRegex(RuntimeError, error_msg):
                torch.linalg.norm(x, ord)
    (IS_WINDOWS, 'Skipped on Windows!')
    (IS_MACOS, 'Skipped on MacOS!')
    ((not TEST_NUMPY), 'Numpy not found')
    def test_norm_extreme_values(self, device):
        """Norms over inputs containing inf/-inf/0/nan must match numpy."""
        vector_ords = [0, 1, 2, 3, inf, (- 1), (- 2), (- 3), (- inf)]
        matrix_ords = ['fro', 'nuc', 1, 2, inf, (- 1), (- 2), (- inf)]
        vectors = []
        matrices = []
        for pair in itertools.product([inf, (- inf), 0.0, nan, 1.0], repeat=2):
            vectors.append(list(pair))
            matrices.append([[pair[0], pair[1]]])
            matrices.append([[pair[0]], [pair[1]]])
        for vector in vectors:
            x = torch.tensor(vector).to(device)
            x_n = x.cpu().numpy()
            for ord in vector_ords:
                msg = f'ord={ord}, vector={vector}'
                result = torch.linalg.norm(x, ord=ord)
                result_n = np.linalg.norm(x_n, ord=ord)
                self.assertEqual(result, result_n, msg=msg)
        # Known-broken CUDA SVD-based cases with nan inputs are skipped below.
        def is_broken_matrix_norm_case(ord, x):
            if (self.device_type == 'cuda'):
                if (x.size() == torch.Size([1, 2])):
                    if ((ord in ['nuc', 2, (- 2)]) and isnan(x[0][0]) and (x[0][1] == 1)):
                        return True
            return False
        for matrix in matrices:
            x = torch.tensor(matrix).to(device)
            x_n = x.cpu().numpy()
            for ord in matrix_ords:
                msg = f'ord={ord}, matrix={matrix}'
                result = torch.linalg.norm(x, ord=ord)
                result_n = np.linalg.norm(x_n, ord=ord)
                if is_broken_matrix_norm_case(ord, x):
                    continue
                else:
                    self.assertEqual(result, result_n, msg=msg)
    (TEST_WITH_ASAN, 'Skipped on ASAN since it checks for undefined behavior.')
    (torch.float, torch.double, torch.cfloat, torch.cdouble)
    def test_norm_vector_degenerate_shapes(self, device, dtype):
        """Zero-sized vector inputs: match numpy, or raise where numpy does."""
        def run_test_case(input, ord, dim, keepdim, should_error):
            msg = f'input.size()={input.size()}, ord={ord}, dim={dim}, keepdim={keepdim}, dtype={dtype}'
            input_numpy = input.cpu().numpy()
            if should_error:
                with self.assertRaises(ValueError):
                    np.linalg.norm(input_numpy, ord, dim, keepdim)
                with self.assertRaises(RuntimeError):
                    torch.linalg.norm(input, ord, dim, keepdim)
            else:
                # Complex p=2/default norm is unsupported (see test_norm_complex).
                if ((dtype in [torch.cfloat, torch.cdouble]) and (ord in [2, None])):
                    with self.assertRaises(RuntimeError):
                        torch.linalg.norm(input, ord, dim, keepdim)
                    return
                result_numpy = np.linalg.norm(input_numpy, ord, dim, keepdim)
                result = torch.linalg.norm(input, ord, dim, keepdim)
                self.assertEqual(result, result_numpy, msg=msg)
        ord_vector = [0, 0.5, 1, 2, 3, inf, (- 0.5), (- 1), (- 2), (- 3), (- inf), None]
        S = 10
        test_cases = [((0,), [inf, (- inf)], None), ((0, S), [inf, (- inf)], 0), ((0, S), [], 1), ((S, 0), [], 0), ((S, 0), [inf, (- inf)], 1)]
        for keepdim in [True, False]:
            for (input_size, error_ords, dim) in test_cases:
                input = torch.randn(*input_size, dtype=dtype, device=device)
                for ord in ord_vector:
                    run_test_case(input, ord, dim, keepdim, (ord in error_ords))
    ((not TEST_NUMPY), 'Numpy not found')
    (torch.float, torch.double, torch.cfloat, torch.cdouble)
    def test_norm_matrix_degenerate_shapes(self, device, dtype):
        """Zero-sized matrix inputs: match numpy, or raise where numpy does."""
        def run_test_case(input, ord, dim, keepdim, should_error):
            # Complex frobenius/default norm is unsupported (see test_norm_complex).
            if ((dtype in [torch.cfloat, torch.cdouble]) and (ord in ['fro', None])):
                with self.assertRaises(RuntimeError):
                    torch.linalg.norm(input, ord, dim, keepdim)
                return
            msg = f'input.size()={input.size()}, ord={ord}, dim={dim}, keepdim={keepdim}, dtype={dtype}'
            input_numpy = input.cpu().numpy()
            if should_error:
                with self.assertRaises(ValueError):
                    np.linalg.norm(input_numpy, ord, dim, keepdim)
                with self.assertRaises(RuntimeError):
                    torch.linalg.norm(input, ord, dim, keepdim)
            else:
                result_numpy = np.linalg.norm(input_numpy, ord, dim, keepdim)
                result = torch.linalg.norm(input, ord, dim, keepdim)
                self.assertEqual(result, result_numpy, msg=msg)
        ord_matrix = ['fro', 'nuc', 1, 2, inf, (- 1), (- 2), (- inf), None]
        S = 10
        test_cases = [((0, 0), [1, 2, inf, (- 1), (- 2), (- inf)], None), ((0, S), [2, inf, (- 2), (- inf)], None), ((S, 0), [1, 2, (- 1), (- 2)], None), ((S, S, 0), [], (0, 1)), ((1, S, 0), [], (0, 1)), ((0, 0, S), [1, 2, inf, (- 1), (- 2), (- inf)], (0, 1)), ((0, 0, S), [1, 2, inf, (- 1), (- 2), (- inf)], (1, 0))]
        for keepdim in [True, False]:
            for (input_size, error_ords, dim) in test_cases:
                input = torch.randn(*input_size, dtype=dtype, device=device)
                for ord in ord_matrix:
                    run_test_case(input, ord, dim, keepdim, (ord in error_ords))
    def test_norm_fastpaths(self, device):
        """Specialized vector-norm code paths (p=0,1,2,3 and fractional p)."""
        x = torch.randn(3, 5, device=device)
        # slow path
        result = torch.linalg.norm(x, 4.5, 1)
        expected = torch.pow(x.abs().pow(4.5).sum(1), (1.0 / 4.5))
        self.assertEqual(result, expected)
        # fast 0-norm: count of nonzeros
        result = torch.linalg.norm(x, 0, 1)
        expected = (x != 0).type_as(x).sum(1)
        self.assertEqual(result, expected)
        # fast 1-norm
        result = torch.linalg.norm(x, 1, 1)
        expected = x.abs().sum(1)
        self.assertEqual(result, expected)
        # fast 2-norm
        result = torch.linalg.norm(x, 2, 1)
        expected = torch.sqrt(x.pow(2).sum(1))
        self.assertEqual(result, expected)
        # fast 3-norm
        result = torch.linalg.norm(x, 3, 1)
        expected = torch.pow(x.pow(3).abs().sum(1), (1.0 / 3.0))
        self.assertEqual(result, expected)
class DummyOntology_Generic():
    """Minimal stand-in ontology exposing a fixed parent -> children table."""

    def get_children(self, parent_code: str) -> List[str]:
        """Return the immediate children of ``parent_code`` ([] if none)."""
        children_by_parent = {
            'OMOP_CONCEPT_A': ['OMOP_CONCEPT_A_CHILD', 'OMOP_CONCEPT_A_CHILD2'],
            'OMOP_CONCEPT_B': ['OMOP_CONCEPT_B_CHILD'],
            'OMOP_CONCEPT_A_CHILD': ['OMOP_CONCEPT_A_CHILD_CHILD'],
        }
        return children_by_parent.get(parent_code, [])
def test_eval_chebyt():
    """Check eval_chebyt against the closed form T_n(x) = cos(n*arccos(x)).

    Evaluates at one random x in (-1, 1) for n = 0, 7, 14, ... < 10000.
    """
    # NOTE(review): the 'long' dtype alias is platform-dependent and was
    # removed in NumPy 2.0 — confirm the supported NumPy range.
    n = np.arange(0, 10000, 7, dtype=np.dtype('long'))
    x = ((2 * np.random.rand()) - 1)
    v1 = np.cos((n * np.arccos(x)))
    v2 = _ufuncs.eval_chebyt(n, x)
    assert_(np.allclose(v1, v2, rtol=1e-15))
def ref_grad_clip_grad_by_value(x, min_, max_, dy, **kw):
dx = dy
idx_min = np.where((dy < min_))
idx_max = np.where((dy > max_))
dx[idx_min] = min_[idx_min]
dx[idx_max] = max_[idx_max]
return dx.flatten() |
class TorchSequentialValidationBatch(NamedTuple):
    """One validation batch for the sequential model.

    NOTE(review): tensor shapes are not visible here — presumably
    per-query ids plus padded per-position tensors; confirm against the
    collate function that constructs this tuple.
    """
    query_id: torch.LongTensor      # id of each query in the batch
    padding_mask: torch.BoolTensor  # mask over padded positions (polarity: confirm)
    features: TensorMap             # named input feature tensors
    ground_truth: torch.LongTensor  # target items to be predicted
    train: torch.LongTensor         # items from the training split (assumption - confirm)
class MNRParaphraseTrainer():
    """Trains a SentenceTransformer paraphrase encoder with a
    MultipleNegativesRanking loss.

    Data is read from JSONL files of sentence pairs; pairs are filtered
    by Levenshtein score and (when labelled) kept only if ENTAILMENT.
    """

    def __init__(self, args: MNRParaphraseArgs):
        self.args = args
        self.base_model = models.Transformer(self.args.model)
        self.pooler = self._create_pooler()

    def train(self):
        """Assemble the model, data and evaluator, then run model.fit."""
        model = SentenceTransformer(modules=[self.base_model, self.pooler])
        loss = losses.MultipleNegativesRankingLoss(model)
        loader: Any = self._load_data()
        evaluator = self._load_evaluator()
        # Linear warmup over the configured fraction of total steps.
        warmup_steps = int(((len(loader) * self.args.num_train_epochs) * self.args.warmup_ratio))
        logging.info('Beginning training')
        model.fit(train_objectives=[(loader, loss)], epochs=self.args.num_train_epochs, warmup_steps=warmup_steps, output_path=self.args.output_path, show_progress_bar=False, use_amp=self.args.fp16, evaluation_steps=(self.args.eval_steps if (self.args.eval_path is not None) else 0), evaluator=evaluator, checkpoint_path=self.args.output_path, checkpoint_save_steps=self.args.eval_steps, checkpoint_save_total_limit=5, optimizer_params={'lr': self.args.lr})

    def _load_data(self):
        """Load all training pairs from the comma-separated input paths."""
        input_paths = [input_path.strip() for input_path in self.args.input_paths.split(',')]
        samples = []
        for input_path in input_paths:
            self._load_file(input_path, samples)
        logging.info('Loaded %d examples', len(samples))
        # NoDuplicatesDataLoader avoids duplicate sentences within a batch,
        # which MultipleNegativesRankingLoss treats as false negatives.
        return NoDuplicatesDataLoader(samples, self.args.batch_size)

    def _load_evaluator(self):
        """Build a similarity evaluator from eval_path (None if unset)."""
        if (self.args.eval_path is None):
            return None
        (sentences1, sentences2, scores) = ([], [], [])
        with open(self.args.eval_path, 'r', encoding='utf-8') as input_file:
            for line in input_file:
                value = json.loads(line.strip())
                (sent1, sent2, label) = (value['sent1'], value['sent2'], value['score']['similarity'])
                sentences1.append(sent1)
                sentences2.append(sent2)
                scores.append(label)
        dataset_name = os.path.basename(self.args.eval_path)
        return EmbeddingSimilarityEvaluator(sentences1, sentences2, scores, write_csv=False, name=dataset_name)

    def _load_file(self, input_path: str, samples: List[InputExample]):
        """Append filtered sentence pairs from one JSONL file to samples."""
        logging.info('Loading examples from %s', input_path)
        with open(input_path, 'r', encoding='utf-8') as input_file:
            for line in tqdm(input_file):
                value = json.loads(line.strip())
                levenshtein = value['score']['levenshtein']
                # Skip near-identical pairs (too easy / not paraphrases).
                if (levenshtein > self.args.max_levenshtein):
                    continue
                (sent1, sent2) = (value['sent1'], value['sent2'])
                label = value['score']['label']
                # Keep unlabelled pairs, or labelled pairs only if ENTAILMENT.
                if ((label is not None) and (label != 'ENTAILMENT')):
                    continue
                samples.append(InputExample(texts=[sent1, sent2]))

    def _create_pooler(self):
        """Build the pooling module selected by args.pooling_type."""
        pooler = self.args.pooling_type
        emb_dim = self.base_model.get_word_embedding_dimension()
        if (pooler in ('mean', 'max', 'cls')):
            return models.Pooling(emb_dim, pooling_mode=pooler)
        elif (pooler == 'lstm'):
            return LSTMPooling(emb_dim, self.args.pooler_output_dim)
        elif (pooler == 'bilstm'):
            # Half the output dim per direction so concat matches the target.
            return LSTMPooling(emb_dim, int((self.args.pooler_output_dim / 2)), bidirectional=True)
        else:
            raise ValueError(f'Unsupported pooler {pooler}')
# Fix: the leading '@' was missing, turning the decorator into a bare call that
# was evaluated once and discarded, leaving the test function undecorated.
# NOTE(review): confirm '_numpy_output' is the intended decorator name — it may
# have been truncated from something like 'compare_numpy_output'.
@_numpy_output(non_zero=True, check_dtype=True)
def test_ufunc_tan_u(A: dace.uint32[10]):
    """Elementwise tangent over a uint32 array (np.tan upcasts to float)."""
    return np.tan(A)
def _lookup_app_object(name):
    """Return attribute *name* from the current application context.

    Raises RuntimeError (with the standard app-context message) when no
    application context has been pushed.
    """
    ctx = _app_ctx_stack.top
    if ctx is None:
        raise RuntimeError(_app_ctx_err_msg)
    return getattr(ctx, name)
def register_scheduler(key: str, module: Any=None):
    """Register *module* under *key* in the global scheduler registry."""
    registry = scheduler_dict
    return register_base(registry, key, module)
def build_backbone(in_channels, backbone, output_stride, BatchNorm, Fusion=False):
    """Construct the feature-extraction backbone network.

    NOTE(review): the ``backbone`` argument is currently ignored — a
    ResNet-50 is always built regardless of its value; confirm this is
    intentional.
    """
    net = resnet.ResNet50(in_channels, output_stride, BatchNorm,
                          pretrained=False, Fusion=Fusion)
    return net
def get_reduction_schedule(in_array: Array, axes: List[int], use_vectorization=True, use_mini_warps=True, warp_size=32, wide_load_bytes=16):
    """Derive a GPU launch schedule for reducing ``in_array`` over ``axes``.

    Returns a ``ReductionSchedule`` describing grid/block dimensions, the
    per-thread sequential loops, shared-memory size, and which optimizations
    (vectorized wide loads, mini-warps) apply.  When the computed schedule is
    not launchable, ``schedule.error`` is set to a non-empty message and the
    caller is expected to fall back to a pure expansion.

    NOTE(review): ``axes`` is adjusted in place while compensating for removed
    size-1 dimensions, so the caller's list is mutated — pass a private copy
    if that is undesirable.
    """
    # Local import keeps this block self-contained; harmless if the module
    # also imports it at top level.
    from dataclasses import dataclass

    # Fix: the original lacked @dataclass, so the positional construction
    # below (21 arguments) would raise TypeError — bare annotations alone
    # generate no __init__.
    @dataclass
    class ReductionSchedule():
        grid: List[Size]                  # GPU grid dimensions
        block: List[Size]                 # threads per block
        sequential: List[Size]            # per-thread sequential loop bounds
        shared_mem_size: int              # shared memory elements needed
        in_shape: List[Size]              # simplified input shape
        in_strides: List[Size]            # simplified input strides
        out_shape: List[Size]
        out_strides: List[Size]
        axes: List[int]                   # reduction axes after simplification
        contiguous_dim: bool              # reducing along the unit-stride dim?
        vectorize: bool                   # use wide (vectorized) loads?
        vec_len: int                      # elements per wide load
        mini_warps: bool                  # pack several short rows per warp?
        num_mini_warps: int
        one_d_reduction: bool             # full reduction of a 1-D array
        multi_axes: bool                  # more than one reduction axis
        additional_grid: List[Size]       # extra grid dims for leading axes
        changed_in_shape: List[Size]      # shape after folding leading axes
        changed_in_strides: List[Size]
        changed_axes: List[int]
        error: str                        # non-empty => schedule invalid

    schedule = ReductionSchedule([], [], [], 0, [], [], [], [], [], False, False, 1, False, 1, False, False, [], [], [], [], '')
    initial_shape = in_array.shape
    initial_strides = in_array.strides
    dtype = in_array.dtype
    # Renamed from 'bytes' to avoid shadowing the builtin.
    elem_bytes = dtype.bytes
    shape = []
    strides = []
    num_loaded_elements = 1
    if use_vectorization:
        # How many elements a single wide (e.g. 128-bit) load fetches.
        num_loaded_elements = (wide_load_bytes // elem_bytes)
        schedule.vec_len = num_loaded_elements
    # Drop size-1 (degenerate) dimensions and shift reduction axes to match.
    degenerate_dims_indices = [i for (i, s) in enumerate(initial_shape) if (s == 1)]
    shape = [s for (i, s) in enumerate(initial_shape) if (i not in degenerate_dims_indices)]
    strides = [s for (i, s) in enumerate(initial_strides) if (i not in degenerate_dims_indices)]
    iteration = 0
    for i in degenerate_dims_indices:
        # Each earlier removal shifts the remaining indices down by one.
        i -= iteration
        iteration += 1
        for j in range(len(axes)):
            if (axes[j] > i):
                axes[j] -= 1
    (shape, strides, axes, out_shape, out_strides) = simplify_input(shape, strides, axes)
    schedule.in_shape = shape
    schedule.in_strides = strides
    schedule.axes = axes
    schedule.out_shape = out_shape
    schedule.out_strides = out_strides
    # NOTE(review): if no dimension has stride 1, 'contiguous_dimension' stays
    # unbound and the code below raises NameError — presumably inputs always
    # have a unit-stride dimension; confirm at the call sites.
    for (i, s) in enumerate(strides):
        if (s == 1):
            contiguous_dimension = i
    if (len(axes) > 1):
        # Multi-axis reduction: all but the innermost axis become extra grid
        # dimensions, and the shape/strides are re-expressed without them.
        schedule.multi_axes = True
        schedule.additional_grid = [shape[i] for i in axes[:(- 1)]]
        schedule.changed_in_shape = [s for (i, s) in enumerate(schedule.in_shape) if (i not in axes[:(- 1)])]
        schedule.changed_axes = [((axes[(- 1)] - len(axes)) + 1)]
        schedule.changed_in_strides = [s for (i, s) in enumerate(schedule.in_strides) if (i not in axes[:(- 1)])]
        axes = schedule.changed_axes
        shape = schedule.changed_in_shape
        for (i, s) in enumerate(schedule.changed_in_strides):
            if (s == 1):
                contiguous_dimension = i
    # The '== True' comparisons below are deliberate: sizes may be symbolic,
    # so a relational only counts when it is *provably* True.
    if (contiguous_dimension in axes):
        # Case 1: reducing along the unit-stride dimension.
        schedule.contiguous_dim = True
        if (((shape[contiguous_dimension] > 32) == True) and (((shape[contiguous_dimension] % num_loaded_elements) == 0) == True) and use_vectorization):
            schedule.vectorize = True
        for (i, s) in enumerate(shape):
            if (i != contiguous_dimension):
                schedule.grid.append(s)
        if (schedule.grid == []):
            schedule.grid = [1]
        threads_per_block = (shape[contiguous_dimension] if ((shape[contiguous_dimension] < warp_size) == True) else warp_size)
        schedule.block = [threads_per_block]
        # Each thread strides over the row by one warp's worth of (possibly
        # vectorized) loads.
        stride = ((warp_size * num_loaded_elements) if schedule.vectorize else warp_size)
        schedule.sequential.append([0, shape[contiguous_dimension], stride])
        if ((len(schedule.in_shape) == 1) and (schedule.out_shape == [1])):
            # Full 1-D reduction: one block per 1024 elements (ceil division).
            schedule.one_d_reduction = True
            schedule.grid = [(((schedule.in_shape[0] + 1024) - 1) // 1024)]
    else:
        # Case 2: reducing along a strided dimension; the contiguous dim is
        # tiled across the grid in chunks of 32.
        schedule.grid = shape[:axes[0]]
        grid_dim = symbolic.int_ceil(shape[contiguous_dimension], 32)
        schedule.grid.append(grid_dim)
        schedule.block = [16, 32]
        schedule.shared_mem_size = 32
        schedule.sequential = [shape[axes[0]]]
        if (use_mini_warps and ((shape[contiguous_dimension] <= 16) == True)):
            # Short contiguous rows: pack several of them into one physical warp.
            schedule.mini_warps = True
            schedule.num_mini_warps = (warp_size // shape[contiguous_dimension])
            schedule.block = [16, shape[contiguous_dimension]]
            schedule.shared_mem_size = shape[contiguous_dimension]
    # Validate the launch configuration against hardware limits.
    num_threads = 1
    for t in schedule.block:
        num_threads *= t
    if ((num_threads > 1024) == True):
        schedule.error = 'Schedule is invalid (more than 1024 threads per block). Falling back to pure expansion.'
    whole_grid = (schedule.additional_grid + schedule.grid)
    last_grid_dim = 1
    for b in whole_grid[:(- 2)]:
        last_grid_dim *= b
    # CUDA limits: gridDim.x <= 2**31 - 1; gridDim.y and gridDim.z <= 65535.
    # Fix: the original line was missing the x-limit operand entirely (syntax
    # error); restored as 2147483647.  TODO(review): confirm the intended
    # constant and which list position maps to gridDim.x.
    if (((whole_grid[(- 1)] > 2147483647) == True) or (((len(whole_grid) > 1) and (whole_grid[(- 2)] > 65535)) == True) or ((last_grid_dim > 65535) == True)):
        schedule.error = 'Schedule is invalid (some grid dimension too large). Falling back to pure expansion.'
    return schedule
def show_im_bboxes(k, coordinates):
    """Display image ``k`` with normalized [x0, y0, x1, y1] boxes drawn on it.

    Coordinates are fractions of the image size; each box gets a randomly
    chosen edge color.
    """
    image = np.array(Image.open('../images/{}.jpg'.format(k)), dtype=np.uint8)
    img_h = image.shape[0]
    img_w = image.shape[1]
    fig, axis = plt.subplots(1)
    axis.imshow(image)
    palette = ['red', 'yellow', 'black', 'blue', 'orange', 'grey', 'cyan', 'green', 'purple']
    for box in coordinates:
        # Scale normalized corners back to pixel space.
        x0 = box[0] * img_w
        y0 = box[1] * img_h
        box_w = (box[2] - box[0]) * img_w
        box_h = (box[3] - box[1]) * img_h
        rect = patches.Rectangle((x0, y0), box_w, box_h, linewidth=1,
                                 edgecolor=random.choice(palette), facecolor='none')
        axis.add_patch(rect)
    plt.show()
class FNetTokenizerFast(metaclass=DummyObject):
    """Placeholder standing in for the real FNetTokenizerFast when the
    'tokenizers' backend is unavailable; instantiation raises via
    ``requires_backends``."""
    # Backends required for the real class to be importable.
    _backends = ['tokenizers']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tokenizers'])
class SSIM(torch.nn.Module):
    """Structural-similarity (SSIM) module with a cached Gaussian window.

    The window is rebuilt lazily in :meth:`forward` whenever the incoming
    tensors' channel count or dtype differs from the cached one.
    """

    def __init__(self, window_size=11, size_average=True):
        super(SSIM, self).__init__()
        self.window_size = window_size
        self.size_average = size_average
        # Start with a single-channel window; forward() adapts it on demand.
        self.channel = 1
        self.window = create_window(window_size, self.channel)

    def forward(self, img1, img2):
        _, channel, _, _ = img1.size()
        cache_valid = (channel == self.channel) and (self.window.data.type() == img1.data.type())
        if cache_valid:
            window = self.window
        else:
            # Rebuild the window for the new channel count / dtype / device
            # and cache it for subsequent calls.
            window = create_window(self.window_size, channel)
            if img1.is_cuda:
                window = window.cuda(img1.get_device())
            window = window.type_as(img1)
            self.window = window
            self.channel = channel
        return _ssim(img1, img2, window, self.window_size, channel, self.size_average)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.