code stringlengths 101 5.91M |
|---|
class LabelEncoder(object):
    """Callable wrapper that encodes a text label into ids via a dictionary."""

    def __init__(self, dictionary):
        # Dictionary-like object exposing ``encode_line`` (e.g. fairseq Dictionary).
        self.dictionary = dictionary

    def __call__(self, label):
        # Encode without appending EOS and without growing the vocabulary.
        return self.dictionary.encode_line(
            label, append_eos=False, add_if_not_exist=False
        )
def test_lang_setting(corenlp_client):
    """Annotating with the 'german' properties key should reproduce the gold text."""
    annotated = corenlp_client.annotate(
        GERMAN_DOC, properties_key='german', output_format='text'
    )
    assert annotated.strip() == GERMAN_DOC_GOLD.strip()
def _worker_populate_task(G, env, policy, scope=None):
    """Deserialize env/policy payloads into the (possibly scoped) globals container."""
    scoped = _get_scoped_G(G, scope)
    # NOTE: pickle.loads assumes the payloads come from a trusted parent process.
    scoped.env = pickle.loads(env)
    scoped.policy = pickle.loads(policy)
class TestFrenchPipeline():
    """End-to-end tests for the French tokenize/mwt/pos/lemma/depparse pipeline."""

    # NOTE(review): the decorator was mangled to a bare "(scope='class')" line
    # (a SyntaxError); restored as a class-scoped pytest fixture.
    @pytest.fixture(scope='class')
    def pipeline(self):
        pipeline = stanza.Pipeline(processors='tokenize,mwt,pos,lemma,depparse', dir=TEST_MODELS_DIR, lang='fr')
        return pipeline

    def test_single(self, pipeline):
        doc = pipeline(FR_MWT_SENTENCE)
        compare_ignoring_whitespace(str(doc), EXPECTED_RESULT)

    def test_bulk(self, pipeline):
        """Processing the same sentence 10x in bulk matches single-document output."""
        NUM_DOCS = 10
        raw_text = [FR_MWT_SENTENCE] * NUM_DOCS
        raw_doc = [Document([], text=doccontent) for doccontent in raw_text]
        result = pipeline(raw_doc)
        assert len(result) == NUM_DOCS
        for doc in result:
            compare_ignoring_whitespace(str(doc), EXPECTED_RESULT)
            # One French sentence: 24 surface tokens expand to 26 words via MWT.
            assert len(doc.sentences) == 1
            assert doc.num_words == 26
            assert doc.num_tokens == 24

    def test_on_gpu(self, pipeline):
        check_on_gpu(pipeline)

    def test_on_cpu(self):
        pipeline = stanza.Pipeline('fr', dir=TEST_MODELS_DIR, use_gpu=False)
        check_on_cpu(pipeline)
def _get_neighbors(adj, nodes):
    """Return the set of column indices adjacent to ``nodes`` in sparse matrix ``adj``."""
    # One-hot row over the node set, then a single sparse mat-vec: nonzero
    # columns of the product are exactly the neighbors.
    indicator = _sp_row_vec_from_idx_list(list(nodes), adj.shape[1])
    product = indicator.dot(adj)
    return set(ssp.find(product)[1])
class Kernel(Module, metaclass=abc.ABCMeta):
    """Abstract base for kernels; ``forward`` evaluates the Gram map K(x, y)."""

    def __init__(self):
        super().__init__()

    def K(self, x: Tensor, y: Tensor) -> Tensor:
        """Kernel matrix between ``x`` and ``y`` (to be implemented by subclasses)."""
        pass

    def trK(self, x: Tensor) -> Tensor:
        """Trace of the kernel matrix of ``x`` (to be implemented by subclasses)."""
        pass

    def diagK(self, x: Tensor) -> Tensor:
        """Diagonal of the kernel matrix of ``x`` (to be implemented by subclasses)."""
        pass

    def forward(self, x: Tensor, y: Tensor) -> Tensor:
        # nn.Module entry point delegates straight to K.
        return self.K(x, y)
class TestLMContextWindow(unittest.TestCase):
    # Checks that eval_lm_dataloader stitches `context_window` tokens of left
    # context onto each batch and pads targets accordingly (pad id is 1).

    def test_eval_dataloader(self):
        """Batches carry left context from the previous sample; context target positions are padded."""
        dictionary = test_utils.dummy_dictionary(10)
        assert (len(dictionary) == 14)
        assert (dictionary.pad() == 1)
        dataset = test_utils.TestDataset([torch.tensor([4, 5, 6, 7], dtype=torch.long), torch.tensor([8, 9, 10, 11], dtype=torch.long), torch.tensor([12, 13], dtype=torch.long)])
        dataset = MonolingualDataset(dataset, sizes=[4, 4, 2], src_vocab=dictionary)
        config = LanguageModelingConfig(tokens_per_sample=4)
        task = LanguageModelingTask(config, dictionary)
        eval_dataloader = task.eval_lm_dataloader(dataset=dataset, batch_size=1, context_window=2)
        # First batch: no previous sample exists, so context slots are pad (1).
        batch = next(eval_dataloader)
        assert (batch['net_input']['src_tokens'][0].tolist() == [4, 5, 6, 7, 1, 1])
        assert (batch['target'][0].tolist() == [4, 5, 6, 7, 1, 1])
        # Second batch: the last 2 tokens of sample 1 serve as context; those
        # positions are excluded from the target via pad.
        batch = next(eval_dataloader)
        assert (batch['net_input']['src_tokens'][0].tolist() == [6, 7, 8, 9, 10, 11])
        assert (batch['target'][0].tolist() == [1, 1, 8, 9, 10, 11])
        # Third batch: shorter final sample, same context mechanics.
        batch = next(eval_dataloader)
        assert (batch['net_input']['src_tokens'][0].tolist() == [10, 11, 12, 13])
        assert (batch['target'][0].tolist() == [1, 1, 12, 13])
def _ensure_html_header(response):
    """Raise _NotHTML unless the response declares a text/html content type."""
    declared = response.headers.get('Content-Type', '')
    if not declared.lower().startswith('text/html'):
        raise _NotHTML(declared, response.request.method)
def build_model(cfg, gpu_id=None):
    """Construct the configured model, move it to GPU, and wrap with DDP if multi-GPU.

    gpu_id: explicit CUDA device index; when None the current device is used.
    """
    if torch.cuda.is_available():
        assert (cfg.NUM_GPUS <= torch.cuda.device_count()), 'Cannot use more GPU devices than available'
    else:
        assert (cfg.NUM_GPUS == 0), 'Cuda is not available. Please set `NUM_GPUS: 0 for running on CPUs.'
    model = MODEL_REGISTRY.get(cfg.MODEL.MODEL_NAME)(cfg)
    if cfg.NUM_GPUS:
        cur_device = torch.cuda.current_device() if gpu_id is None else gpu_id
        model = model.cuda(device=cur_device)
        # Multiple GPUs: wrap for distributed data-parallel on this device.
        if cfg.NUM_GPUS > 1:
            model = torch.nn.parallel.DistributedDataParallel(module=model, device_ids=[cur_device], output_device=cur_device)
    return model
def test_invalid_given_usage(testdir):
    # Calling .given() with no arguments should fail collection with a clear error.
    # NOTE(review): the embedded source contains literal "\_schema" sequences —
    # these look like mangled "@lazy_schema" decorator lines; confirm against
    # the original schemathesis test suite before relying on this fixture text.
    testdir.make_test('\nlazy_schema = schemathesis.from_pytest_fixture("simple_schema")\n\_schema.parametrize()\_schema.given()\ndef test(case):\n pass\n ')
    result = testdir.runpytest()
    # One failed test, carrying the "at least one argument" message.
    result.assert_outcomes(failed=1)
    result.stdout.re_match_lines(['.+given must be called with at least one argument'])
class OptionParser(object):
    """Minimal optparse-style parser (Click-internal).

    Options and positional arguments are registered up front; ``parse_args``
    then turns an argv-style list into ``(opts, largs, order)``.
    """

    def __init__(self, ctx=None):
        # ctx: optional context object; parsing behaviour flags are inherited
        # from it when provided.
        self.ctx = ctx
        self.allow_interspersed_args = True
        self.ignore_unknown_options = False
        if (ctx is not None):
            self.allow_interspersed_args = ctx.allow_interspersed_args
            self.ignore_unknown_options = ctx.ignore_unknown_options
        self._short_opt = {}  # '-x'  -> Option
        self._long_opt = {}   # '--xx' -> Option
        self._opt_prefixes = {'-', '--'}
        self._args = []       # positional Argument declarations, in order

    def add_option(self, opts, dest, action=None, nargs=1, const=None, obj=None):
        """Register a named option under every one of its spellings."""
        if (obj is None):
            obj = dest
        opts = [normalize_opt(opt, self.ctx) for opt in opts]
        option = Option(opts, dest, action=action, nargs=nargs, const=const, obj=obj)
        self._opt_prefixes.update(option.prefixes)
        for opt in option._short_opts:
            self._short_opt[opt] = option
        for opt in option._long_opts:
            self._long_opt[opt] = option

    def add_argument(self, dest, nargs=1, obj=None):
        """Register a positional argument (processed after all options)."""
        if (obj is None):
            obj = dest
        self._args.append(Argument(dest=dest, nargs=nargs, obj=obj))

    def parse_args(self, args):
        """Parse ``args``; UsageError is swallowed under resilient parsing."""
        state = ParsingState(args)
        try:
            self._process_args_for_options(state)
            self._process_args_for_args(state)
        except UsageError:
            if ((self.ctx is None) or (not self.ctx.resilient_parsing)):
                raise
        return (state.opts, state.largs, state.order)

    def _process_args_for_args(self, state):
        # Distribute all remaining (leftover + unconsumed) values across the
        # declared positional arguments according to their nargs counts.
        (pargs, args) = _unpack_args((state.largs + state.rargs), [x.nargs for x in self._args])
        for (idx, arg) in enumerate(self._args):
            arg.process(pargs[idx], state)
        state.largs = args
        state.rargs = []

    def _process_args_for_options(self, state):
        # Consume tokens left-to-right, dispatching option-looking tokens and
        # collecting everything else as leftover positional values.
        while state.rargs:
            arg = state.rargs.pop(0)
            arglen = len(arg)
            if (arg == '--'):
                # Explicit end-of-options marker.
                return
            elif ((arg[:1] in self._opt_prefixes) and (arglen > 1)):
                self._process_opts(arg, state)
            elif self.allow_interspersed_args:
                state.largs.append(arg)
            else:
                # First positional stops option parsing when interspersing is off.
                state.rargs.insert(0, arg)
                return

    def _match_long_opt(self, opt, explicit_value, state):
        """Handle one long option; unknown options raise with prefix suggestions."""
        if (opt not in self._long_opt):
            possibilities = [word for word in self._long_opt if word.startswith(opt)]
            raise NoSuchOption(opt, possibilities=possibilities, ctx=self.ctx)
        option = self._long_opt[opt]
        if option.takes_value:
            # '--opt=value': push the value back so the shared nargs handling
            # below can consume it like any other stream token.
            if (explicit_value is not None):
                state.rargs.insert(0, explicit_value)
            nargs = option.nargs
            if (len(state.rargs) < nargs):
                _error_opt_args(nargs, opt)
            elif (nargs == 1):
                value = state.rargs.pop(0)
            else:
                value = tuple(state.rargs[:nargs])
                del state.rargs[:nargs]
        elif (explicit_value is not None):
            raise BadOptionUsage(opt, '{} option does not take a value'.format(opt))
        else:
            value = None
        option.process(value, state)

    def _match_short_opt(self, arg, state):
        """Handle a short-option bundle like '-abc' (values may be inline, '-ovalue')."""
        stop = False
        i = 1
        prefix = arg[0]
        unknown_options = []
        for ch in arg[1:]:
            opt = normalize_opt((prefix + ch), self.ctx)
            option = self._short_opt.get(opt)
            i += 1
            if (not option):
                if self.ignore_unknown_options:
                    # Collect unknown flags and re-emit them below instead of failing.
                    unknown_options.append(ch)
                    continue
                raise NoSuchOption(opt, ctx=self.ctx)
            if option.takes_value:
                # Remainder of the bundle is this option's value.
                if (i < len(arg)):
                    state.rargs.insert(0, arg[i:])
                    stop = True
                nargs = option.nargs
                if (len(state.rargs) < nargs):
                    _error_opt_args(nargs, opt)
                elif (nargs == 1):
                    value = state.rargs.pop(0)
                else:
                    value = tuple(state.rargs[:nargs])
                    del state.rargs[:nargs]
            else:
                value = None
            option.process(value, state)
            if stop:
                break
        if (self.ignore_unknown_options and unknown_options):
            # Re-emit the unknown flags (with their prefix) as one leftover arg.
            state.largs.append('{}{}'.format(prefix, ''.join(unknown_options)))

    def _process_opts(self, arg, state):
        """Dispatch a token to long- or short-option handling."""
        explicit_value = None
        if ('=' in arg):
            (long_opt, explicit_value) = arg.split('=', 1)
        else:
            long_opt = arg
        norm_long_opt = normalize_opt(long_opt, self.ctx)
        try:
            self._match_long_opt(norm_long_opt, explicit_value, state)
        except NoSuchOption:
            # Not a known long option: try it as a short-option bundle,
            # otherwise re-raise unless unknown options are tolerated.
            if (arg[:2] not in self._opt_prefixes):
                return self._match_short_opt(arg, state)
            if (not self.ignore_unknown_options):
                raise
            state.largs.append(arg)
def extract_video(vid_filename, output_folder):
    """Dump every frame of a video into numbered JPEGs using ffmpeg."""
    cmd = ['ffmpeg', '-i', vid_filename, f'{output_folder}/%06d.jpg', '-threads', '16']
    print(' '.join(cmd))
    try:
        subprocess.call(cmd)
    except OSError:
        # Typically: ffmpeg is not installed or not on PATH.
        print('OSError')
def build_feature_connector(t_channel, s_channel):
C = [nn.Conv2d(s_channel, t_channel, kernel_size=1, stride=1, padding=0, bias=False), nn.BatchNorm2d(t_channel)]
for m in C:
if isinstance(m, nn.Conv2d):
n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
m.weight.data.normal_(0, math.sqrt((2.0 / n)))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return nn.Sequential(*C) |
def __dblquad(f, lims, args=(), epsrel=1e-11):
    """Double integral of f(x, y, *args): x over lims[0:2], y over lims[2:4].

    The inner (x) integral uses a 100x tighter tolerance so its error stays
    negligible relative to the outer quadrature.
    """
    def inner(y, *inner_args):
        return quad(f, lims[0], lims[1], args=(y, *inner_args), epsrel=(0.01 * epsrel))[0]
    return quad(inner, lims[2], lims[3], args=args, epsrel=epsrel)[0]
def _read_input(filename_queue):
    """Read one JPEG from the queue; return a record with a cropped, resized image.

    The image is center-cropped at (55, 35) to MODEL_IMAGE_SIZE, then resized
    bilinearly to IMAGE_SIZE x IMAGE_SIZE.
    """

    class DataRecord(object):
        pass

    reader = tf.WholeFileReader()
    (key, value) = reader.read(filename_queue)
    record = DataRecord()
    decoded_image = tf.image.decode_jpeg(value, channels=NUM_OF_CHANNELS)
    # BUG FIX(review): the original first resized the full image into
    # record.input_image and then immediately overwrote it with the cropped
    # version below — that dead first resize has been removed.
    cropped_image = tf.cast(tf.image.crop_to_bounding_box(decoded_image, 55, 35, MODEL_IMAGE_SIZE, MODEL_IMAGE_SIZE), tf.float32)
    decoded_image_4d = tf.expand_dims(cropped_image, 0)
    resized_image = tf.image.resize_bilinear(decoded_image_4d, [IMAGE_SIZE, IMAGE_SIZE])
    record.input_image = tf.squeeze(resized_image, squeeze_dims=[0])
    return record
def _run_on_dask(jobs, verbose):
    """Fan the job list out to a dask cluster and gather all results."""
    try:
        import dask
    except ImportError as ie:
        # Extend the import error with install instructions before re-raising.
        ie.msg += '\n\nIt seems like `dask` is not installed.\nPlease install `dask` and `distributed` using:\n\n pip install dask distributed'
        raise
    delayed_runner = dask.delayed(_run_job)
    persisted = dask.persist(*[delayed_runner(args) for args in jobs])
    if verbose:
        # Progress bar needs a distributed client; ignore when unavailable.
        try:
            progress(persisted)
        except ValueError:
            pass
    return dask.compute(*persisted)
def get_default_config(dataset, algorithm='ERM', data_fraction=1.0):
    """Build a config Namespace for ``dataset``/``algorithm`` and fill in defaults."""
    config = Namespace(
        dataset=dataset,
        algorithm=algorithm,
        model_kwargs={},
        optimizer_kwargs={},
        loader_kwargs={},
        dataset_kwargs={},
        scheduler_kwargs={},
        train_transform=None,
        eval_transform=None,
        no_group_logging=True,
        distinct_groups=True,
        frac=data_fraction,
        scheduler=None,
    )
    return populate_defaults(config)
class Metric(ABC):
    """Base class for recommendation metrics computed over Spark DataFrames.

    Concrete metrics implement ``_get_metric_value_by_user``; aggregation to
    mean / median / confidence-interval values is provided here.
    """

    _logger: Optional[logging.Logger] = None
    _scala_udf_name: Optional[str] = None

    def __init__(self, use_scala_udf: bool=False) -> None:
        # When True, per-user values come from a JVM-side Scala UDF instead of
        # a Python RDD flatMap (faster, but requires the JVM helper jar).
        self._use_scala_udf = use_scala_udf

    # NOTE(review): `logger` and `scala_udf_name` are read without call
    # parentheses (see _get_metric_distribution), so they must be properties;
    # the stripped @property decorators are restored here.
    @property
    def logger(self) -> logging.Logger:
        """Lazily-created 'replay' logger."""
        if self._logger is None:
            self._logger = logging.getLogger('replay')
        return self._logger

    @property
    def scala_udf_name(self) -> str:
        """Name of the backing Scala UDF; raises when the metric has none."""
        if self._scala_udf_name:
            return self._scala_udf_name
        raise NotImplementedError(f'Scala UDF not implemented for {type(self).__name__} class!')

    def __str__(self):
        return type(self).__name__

    def __call__(self, recommendations: DataFrameLike, ground_truth: DataFrameLike, k: IntOrList, ground_truth_users: Optional[DataFrameLike]=None) -> Union[(Dict[(int, NumType)], NumType)]:
        """Mean metric value at cutoff(s) ``k``."""
        recs = get_enriched_recommendations(recommendations, ground_truth, max_k=(k if isinstance(k, int) else max(k)), ground_truth_users=ground_truth_users)
        return self._mean(recs, k)

    @staticmethod
    def _as_k_list(k):
        # NOTE(review): stray "_k" tokens in this source suggest a stripped
        # decorator that normalized an int `k` into a list for the aggregators
        # below; that normalization is reinstated here.
        return [k] if isinstance(k, int) else list(k)

    def _conf_interval(self, recs: SparkDataFrame, k_list: list, alpha: float):
        """Half-width of the normal-approximation CI of the metric per cutoff."""
        res = {}
        quantile = norm.ppf((1 + alpha) / 2)
        for k in self._as_k_list(k_list):
            distribution = self._get_metric_distribution(recs, k)
            # Guard against NaN/NULL stddev when only one user contributes.
            value = distribution.agg(sf.stddev('value').alias('std'), sf.count('value').alias('count')).select(sf.when((sf.isnan(sf.col('std')) | sf.col('std').isNull()), sf.lit(0.0)).otherwise(sf.col('std')).cast('float').alias('std'), 'count').first()
            res[k] = (quantile * value['std']) / (value['count'] ** 0.5)
        return res

    def _median(self, recs: SparkDataFrame, k_list: list):
        """Median per-user metric value per cutoff."""
        res = {}
        for k in self._as_k_list(k_list):
            distribution = self._get_metric_distribution(recs, k)
            res[k] = distribution.agg(sf.expr('percentile_approx(value, 0.5)').alias('value')).first()['value']
        return res

    def _mean(self, recs: SparkDataFrame, k_list: list):
        """Mean per-user metric value per cutoff."""
        res = {}
        for k in self._as_k_list(k_list):
            distribution = self._get_metric_distribution(recs, k)
            res[k] = distribution.agg(sf.avg('value').alias('value')).first()['value']
        return res

    def _get_metric_distribution(self, recs: SparkDataFrame, k: int) -> SparkDataFrame:
        """Per-user metric values at cutoff ``k`` as a (user_idx, value) DataFrame."""
        if self._use_scala_udf:
            metric_value_col = self.get_scala_udf(self.scala_udf_name, [sf.lit(k).alias('k'), *recs.columns[1:]]).alias('value')
            return recs.select('user_idx', metric_value_col)
        cur_class = self.__class__
        distribution = recs.rdd.flatMap((lambda x: [(x[0], float(cur_class._get_metric_value_by_user(k, *x[1:])))])).toDF(f"user_idx {recs.schema['user_idx'].dataType.typeName()}, value double")
        return distribution

    @staticmethod
    def _get_metric_value_by_user(k, pred, ground_truth) -> float:
        """Metric value for a single user; must be overridden by concrete metrics."""
        # NOTE(review): the original had no body here (likely a stripped
        # @staticmethod/@abstractmethod pair — a SyntaxError as published).
        raise NotImplementedError

    def user_distribution(self, log: DataFrameLike, recommendations: DataFrameLike, ground_truth: DataFrameLike, k: IntOrList, ground_truth_users: Optional[DataFrameLike]=None) -> PandasDataFrame:
        """Mean metric value grouped by users' interaction count in ``log``."""
        log = convert2spark(log)
        count = log.groupBy('user_idx').count()
        # Subclasses may override enrichment; fall back to the shared helper.
        if hasattr(self, '_get_enriched_recommendations'):
            recs = self._get_enriched_recommendations(recommendations, ground_truth, max_k=(k if isinstance(k, int) else max(k)), ground_truth_users=ground_truth_users)
        else:
            recs = get_enriched_recommendations(recommendations, ground_truth, max_k=(k if isinstance(k, int) else max(k)), ground_truth_users=ground_truth_users)
        k_list = self._as_k_list(k)
        res = PandasDataFrame()
        for cut_off in k_list:
            dist = self._get_metric_distribution(recs, cut_off)
            val = count.join(dist, on='user_idx', how='right').fillna(0, subset='count')
            val = val.groupBy('count').agg(sf.avg('value').alias('value')).orderBy(['count']).select('count', 'value').toPandas()
            # NOTE(review): DataFrame.append was removed in pandas 2.0 — switch
            # to pandas.concat if this must run on modern pandas.
            res = res.append(val, ignore_index=True)
        return res

    @staticmethod
    def get_scala_udf(udf_name: str, params: List) -> Column:
        """Resolve a registered Scala UDF by name and apply it to ``params``.

        NOTE(review): called above as ``self.get_scala_udf(name, params)``, so
        it must be a @staticmethod — the stripped decorator is restored.
        """
        sc = State().session.sparkContext
        scala_udf = getattr(sc._jvm.org.apache.spark.replay.utils.ScalaPySparkUDFs, udf_name)()
        return Column(scala_udf.apply(_to_seq(sc, params, _to_java_column)))
def failed_files_in_labels(labels_replaced, failed_files):
    """Remove values that refer to unreadable files from each label entry.

    BUG FIX(review): the original removed items from the very list it was
    iterating (``val_new`` aliased ``values``), which silently skips the
    element after each removal. We now build a filtered copy instead.
    The pruned mapping is also pickled under ``output_dir`` and returned.
    """
    for (key, values) in labels_replaced.items():
        kept = []
        for value in values:
            if (value in failed_files):
                print('Attention we couldnt read in the relevant file {}, therefore we now remove it from the labels'.format(value))
            else:
                kept.append(value)
        labels_replaced.update({key: kept})
        print('updated dictionary to new pair {} for key {}'.format(kept, key))
    with open(os.path.join(output_dir, 'labels_duplicates_removed_failed_files.pkl'), 'wb') as f:
        pickle.dump(labels_replaced, f)
    return labels_replaced
def conll2004_demo():
    """Load the CoNLL-2004 demo split with the joint entity/relation JSON reader."""
    reader = JsonIO(
        text_key='tokens',
        chunk_key='entities',
        chunk_type_key='type',
        chunk_start_key='start',
        chunk_end_key='end',
        relation_key='relations',
        relation_type_key='type',
        relation_head_key='head',
        relation_tail_key='tail',
        verbose=False,
    )
    return reader.read('data/conll2004/demo.conll04_train.json')
def ComputeRHS(rhs, ur_hat, solver, work, K, K2, K_over_K2, P_hat, T, Tp, VM, VMp, ur_dealias, mask, **context):
    """Assemble the spectral RHS: convection, optional Nyquist masking, then pressure/diffusion."""
    rhs = solver.conv(rhs, ur_hat, work, T, Tp, VM, VMp, K, ur_dealias)
    if mask is not None:
        # Zero out Nyquist modes to suppress aliasing artifacts.
        rhs.mask_nyquist(mask)
    return solver.add_pressure_diffusion(rhs, ur_hat, P_hat, K_over_K2, K, K2, params.nu, params.Ri, params.Pr)
def create_pool_all_agree():
    """Pool of 100 references to one stub classifier predicting class 0 (p=[0.61, 0.39])."""
    base = create_base_classifier(return_value=np.zeros(1), return_prob=np.array([[0.61, 0.39]]))
    return [base] * 100
# NOTE(review): attr.ib() fields only take effect on a class decorated with
# @attr.s; the decorator appears to have been stripped from this source and
# is restored here.
@attr.s
class Schema():
    """One database schema: tables, columns, foreign-key graph, original spec."""
    db_id = attr.ib()
    tables = attr.ib()
    columns = attr.ib()
    foreign_key_graph = attr.ib()
    orig = attr.ib()
class PoseSegmentsDataset(Dataset):
    """Dataset of pose sequences with per-frame sign/sentence BIO segmentation labels."""

    def __init__(self, data: List[PoseSegmentsDatum], hand_normalization=False, optical_flow=False, only_optical_flow=False, classes='bio'):
        # classes: 'bio' emits distinct B tags; any other value collapses B into I.
        self.data = data
        # Processed items are cached on first access (see __getitem__).
        self.cached_data: List[Any] = ([None] * len(data))
        self.hand_normalization = hand_normalization
        self.optical_flow = optical_flow
        self.only_optical_flow = only_optical_flow
        self.classes = classes

    def __len__(self):
        return len(self.data)

    def build_classes_vectors(self, datum) -> Tuple[(SegmentsDict, BIODict)]:
        """Build sign- and sentence-level segment lists plus their BIO tag vectors."""
        pose = datum['pose']
        pose_length = len(pose.body.data)
        # Per-frame timestamps in seconds.
        timestamps = torch.div(torch.arange(0, pose_length), pose.body.fps)
        sign_segments = [segment for sentence_segments in datum['segments'] for segment in sentence_segments]
        # A sentence spans from its first segment's start to its last segment's end.
        sentence_segments = [{'start_time': segments[0]['start_time'], 'end_time': segments[(- 1)]['end_time']} for segments in datum['segments']]
        segments = {'sign': sign_segments, 'sentence': sentence_segments}
        b_tag = ('B' if (self.classes == 'bio') else 'I')
        bio = {kind: build_bio(datum['id'], timestamps, s, b_tag=b_tag) for (kind, s) in segments.items()}
        return (segments, bio)

    def add_optical_flow(self, pose):
        """Append (or substitute) per-frame optical-flow features on the pose data."""
        calculator = OpticalFlowCalculator(fps=pose.body.fps, distance=DistanceRepresentation())
        flow = calculator(pose.body.data)
        flow = np.expand_dims(flow, axis=(- 1))
        # Prepend a zero frame so flow stays aligned with the frame count.
        flow = np.concatenate([np.zeros((1, *flow.shape[1:]), dtype=flow.dtype), flow], axis=0)
        if self.only_optical_flow:
            pose.body.data = ma.array(flow, dtype=np.float32)
        else:
            pose.body.data = np.concatenate([pose.body.data, flow], axis=(- 1)).astype(np.float32)

    def process_datum(self, datum: PoseSegmentsDatum):
        """Produce one training item: pose tensor, segments, BIO tags and mask."""
        pose = datum['pose']
        if self.hand_normalization:
            normalize_hands_3d(pose)
        if self.optical_flow:
            self.add_optical_flow(pose)
        pose_data = pose.body.torch().data.zero_filled().squeeze(1)
        (segments, bio) = self.build_classes_vectors(datum)
        return {'id': datum['id'], 'segments': segments, 'bio': bio, 'mask': torch.ones(len(bio['sign']), dtype=torch.float), 'pose': {'obj': pose, 'data': pose_data}}

    def __getitem__(self, index):
        # Lazy per-item cache: process each datum once, reuse afterwards.
        if (self.cached_data[index] is None):
            datum = self.data[index]
            self.cached_data[index] = self.process_datum(datum)
        return self.cached_data[index]

    def inverse_classes_ratio(self, kind: str) -> List[float]:
        """Inverse frequency of each BIO class over the dataset (e.g. for loss weights)."""
        print(f'Calculating inverse classes ratio for {kind}...')
        counter = Counter()
        for item in tqdm(iter(self), total=len(self), desc='Calculating inverse classes ratio'):
            counter += Counter(item['bio'][kind].numpy().tolist())
        sum_counter = sum(counter.values())
        # NOTE(review): raises ZeroDivisionError if some class never occurs in the data.
        return [(sum_counter / counter[i]) for (c, i) in BIO.items()]
def configuration(parent_package='', top_path=None):
    """numpy.distutils configuration for building the _pocketfft extension."""
    from numpy.distutils.misc_util import Configuration
    import pybind11
    # pybind11 headers: both the user and system include variants.
    include_dirs = [pybind11.get_include(True), pybind11.get_include(False)]
    config = Configuration('_pocketfft', parent_package, top_path)
    ext = config.add_extension('pypocketfft', sources=['pypocketfft.cxx'], depends=['pocketfft_hdronly.h'], include_dirs=include_dirs, language='c++')
    # Compiler-specific flags are injected just before build by this hook.
    ext._pre_build_hook = pre_build_hook
    config.add_data_files('LICENSE.md')
    config.add_data_dir('tests')
    return config
def load_tf_mixed7a(weights, layer):
    """Copy 28 TF weight arrays into the seven BasicConv2d units of a Mixed_7a block."""
    if len(weights) != 28:
        raise ValueError(f'Number of weight arrays ({len(weights)}) not equal to 28')
    # Each BasicConv2d consumes 4 consecutive arrays; branches hold 2, 2 and 3 units.
    units = [
        layer.branch0[0], layer.branch0[1],
        layer.branch1[0], layer.branch1[1],
        layer.branch2[0], layer.branch2[1], layer.branch2[2],
    ]
    for idx, unit in enumerate(units):
        load_tf_basicConv2d(weights[4 * idx:4 * (idx + 1)], unit)
def remote_exec(bash_script, remote_machine, stdout=None, stderr=None, env=None, python_venv=None, port=22):
    """Run ``bash_script`` on ``remote_machine`` over ssh and return the Popen handle.

    env: mapping of environment variables exported before the script runs
         (FIX: the original used a mutable default ``{}``).
    python_venv: optional virtualenv root to activate first.
    port: ssh port.
    """
    if env is None:
        env = {}
    full_cmd = ' '.join(map((lambda k: ('export %s=%s;' % (k[0], k[1]))), env.items()))
    if (python_venv is not None):
        full_cmd += (' source %s/bin/activate; ' % python_venv)
    full_cmd += bash_script
    # NOTE(review): shell=True with string interpolation — values in `env` and
    # `bash_script` are not escaped, so callers must supply trusted input only.
    remote_cmd = ('ssh -tt -p %d %s \'bash -c "%s"\' </dev/null' % (port, remote_machine, full_cmd))
    parallax_log.warning(colored(('\n$ %s' % remote_cmd), 'red'))
    # setsid puts the remote shell in its own process group so it can be
    # signalled independently of the parent.
    proc = subprocess.Popen(args=remote_cmd, shell=True, stdout=stdout, stderr=stderr, preexec_fn=os.setsid)
    return proc
class TestOneHotEncoding():
    """Unit tests for the OneHotEncoding constraint."""

    def test__validate_inputs(self):
        # Missing `column_names` plus unexpected kwargs should be reported in
        # one aggregated error whose message names both groups.
        with pytest.raises(AggregateConstraintsError) as error:
            OneHotEncoding._validate_inputs(not_column_names=None, something_else=None)
        err_msg = 'Missing required values {(.*)} in a OneHotEncoding constraint.\\n\\nInvalid values {(.*)} are present in a OneHotEncoding constraint.'
        groups = re.search(err_msg, str(error.value))
        assert (groups is not None)
        assert (str(eval(groups.group(1))) == 'column_names')
        assert (set(eval(groups.group(2))) == {'not_column_names', 'something_else'})

    def test__validate_metadata_columns(self):
        # All referenced columns exist in the metadata -> no exception.
        metadata = Mock()
        metadata.columns = {'a': 1, 'b': 2}
        OneHotEncoding._validate_metadata_columns(metadata, column_names=['a', 'b'])

    def test__validate_metadata_columns_raises_error(self):
        # Referencing a column absent from the metadata must raise.
        metadata = Mock()
        metadata.columns = {'a': 1, 'b': 2}
        error_message = re.escape("A OneHotEncoding constraint is being applied to invalid column names {'c'}. The columns must exist in the table.")
        with pytest.raises(ConstraintMetadataError, match=error_message):
            OneHotEncoding._validate_metadata_columns(metadata, column_names=['a', 'c'])

    def test_reverse_transform(self):
        # Row-wise argmax over the constrained columns becomes 1.0, the rest
        # 0.0; unrelated columns pass through untouched.
        instance = OneHotEncoding(column_names=['a', 'b'])
        table_data = pd.DataFrame({'a': [0.1, 0.5, 0.8], 'b': [0.8, 0.1, 0.9], 'c': [1, 2, 3]})
        out = instance.reverse_transform(table_data)
        expected_out = pd.DataFrame({'a': [0.0, 1.0, 0.0], 'b': [1.0, 0.0, 1.0], 'c': [1, 2, 3]})
        pd.testing.assert_frame_equal(expected_out, out)

    def test_is_valid(self):
        # A row is valid only when exactly one constrained column is 1.0 and
        # the others are 0.0 (NaN invalidates the row).
        instance = OneHotEncoding(column_names=['a', 'b', 'c'])
        table_data = pd.DataFrame({'a': [1.0, 1.0, 0.0, 0.5, 1.0], 'b': [0.0, 1.0, 0.0, 0.5, 0.0], 'c': [0.0, 2.0, 0.0, 0.0, np.nan], 'd': [1, 2, 3, 4, 5]})
        out = instance.is_valid(table_data)
        expected_out = pd.Series([True, False, False, False, False])
        pd.testing.assert_series_equal(expected_out, out)
class EMA():
    """Exponential moving average where ``weighting`` weights the NEW sample."""

    def __init__(self, weighting=0.9):
        self.weighting = weighting
        self.val = None  # None until the first update seeds the average

    def update(self, val):
        # First observation seeds the average; afterwards blend new vs. old.
        if self.val is None:
            self.val = val
        else:
            self.val = self.weighting * val + (1 - self.weighting) * self.val

    def value(self):
        return self.val

    def __str__(self):
        return f'{self.val:.2e}'
def get_note_density(mid):
    """Average number of notes per second across all instruments of a MIDI object."""
    duration = mid.get_end_time()
    n_notes = sum(1 for instrument in mid.instruments for note in instrument.notes)
    return n_notes / duration
def _calculate_record_field_size_b(data_schema: Dict[(str, SizeData)], field_name: str) -> int:
    """Size in bytes of one record field: product of its shape times the dtype item size."""
    schema = data_schema[field_name]
    element_count = reduce(mul, schema.shape)
    return element_count * np.dtype(schema.dtype).itemsize
def test_clipgroups():
    """Every dataset with a custom multi-clip id must expose a valid ClipGroup."""
    data_home_dir = 'tests/resources/sound_datasets'
    for dataset_name in DATASETS:
        module = importlib.import_module('soundata.datasets.{}'.format(dataset_name))
        dataset = module.Dataset(os.path.join(TEST_DATA_HOME, dataset_name))
        if (dataset_name in CUSTOM_TEST_MCLIPS):
            clipgroup_id = CUSTOM_TEST_MCLIPS[dataset_name]
        else:
            # Datasets without clip groups are skipped.
            continue
        # The bare `except` blocks below deliberately turn any construction
        # error into a test failure that names the offending dataset.
        try:
            clipgroup_default = dataset.ClipGroup(clipgroup_id)
        except:
            assert False, '{}: {}'.format(dataset_name, sys.exc_info()[0])
        data_home = os.path.join(data_home_dir, dataset_name)
        dataset_specific = soundata.initialize(dataset_name, data_home=data_home)
        try:
            clipgroup_test = dataset_specific.ClipGroup(clipgroup_id, data_home=data_home)
        except:
            assert False, '{}: {}'.format(dataset_name, sys.exc_info()[0])
        assert isinstance(clipgroup_test, core.ClipGroup), '{}.ClipGroup must be an instance of type core.ClipGroup'.format(dataset_name)
        assert hasattr(clipgroup_test, 'to_jams'), '{}.ClipGroup must have a to_jams method'.format(dataset_name)
        # The JAMS export must both build and validate.
        try:
            jam = clipgroup_test.to_jams()
        except:
            assert False, '{}: {}'.format(dataset_name, sys.exc_info()[0])
        assert jam.validate(), 'Jams validation failed for {}.ClipGroup({})'.format(dataset_name, clipgroup_id)
def save_model(model, optimizer, save_variable_list, args):
    """Checkpoint model/optimizer state and embeddings under ``args.save_path``.

    Writes: config.json (the argparse namespace), 'checkpoint' (torch state
    dicts merged with ``save_variable_list``), and .npy dumps of the entity
    and relation embedding tensors.
    """
    with open(os.path.join(args.save_path, 'config.json'), 'w') as fjson:
        json.dump(vars(args), fjson)
    checkpoint = dict(save_variable_list)
    checkpoint['model_state_dict'] = model.state_dict()
    checkpoint['optimizer_state_dict'] = optimizer.state_dict()
    torch.save(checkpoint, os.path.join(args.save_path, 'checkpoint'))
    for attr_name in ('entity_embedding', 'relation_embedding'):
        embedding = getattr(model, attr_name).detach().cpu().numpy()
        np.save(os.path.join(args.save_path, attr_name), embedding)
class Encoder(nn.Module):
    """Embedding -> LiSHT activation -> dropout -> multi-layer LSTM encoder."""

    def __init__(self, input_size, embedding_size, hidden_size, num_layers, p):
        super(Encoder, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.dropout = nn.Dropout(p)
        self.embedding = nn.Embedding(input_size, embedding_size)
        self.rnn = nn.LSTM(embedding_size, hidden_size, num_layers, dropout=p)
        self.lisht = LiSHT()

    def forward(self, x):
        # Activated, dropped-out embeddings feed the LSTM; only the final
        # (hidden, cell) states are returned.
        embedded = self.dropout(self.lisht(self.embedding(x)))
        (_, (hidden, cell)) = self.rnn(embedded)
        return (hidden, cell)
class DeiTFeatureExtractor(metaclass=DummyObject):
    """Placeholder that raises a helpful error unless the 'vision' backend is installed."""
    _backends = ['vision']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['vision'])
class MocBackbone(object):
    """Factory that builds an HRNet backbone for the configured MOC architecture."""

    def __init__(self, configer):
        self.configer = configer

    def __call__(self):
        arch = self.configer.sub_arch
        from lib.models.backbones.hrnet.moc_config import MODEL_CONFIGS
        # Guard clause: only the known MOC variants are supported.
        if arch not in ('moc_small', 'moc_base', 'moct_small'):
            raise Exception('Architecture undefined!')
        net = HighResolutionNet(MODEL_CONFIGS[arch], bn_type='torchbn', bn_momentum=0.1)
        net.init_weights(pretrained=self.configer.pretrained_backbone)
        return net
class ScaledSetBBreakoutWorld(RandomScaledBreakoutWorld):
    # NOTE(review): this warning fires once at import time (class-body
    # statement), not on instantiation — confirm that is the intent.
    warnings.warn('This env. parameter was dropped and should no longer be used.', DeprecationWarning)
    # Scale factors are drawn from [0.95, 1.0].
    scale_range_start = 0.95
    scale_range_end = 1.0
def apply_taggers(documents, taggers, ngrams=6, stopwords=None):
    """Run every tagger over every document; collect tags per (document, layer).

    Returns a nested defaultdict: ``markup[doc.name][layer] -> list of tags``.
    stopwords: optional list passed through to each tagger
               (FIX: the original used a mutable default ``[]``).
    """
    if stopwords is None:
        stopwords = []
    markup = defaultdict(lambda: defaultdict(list))
    for doc in documents:
        for name in taggers:
            tags = taggers[name].tag(doc, ngrams=ngrams, stopwords=stopwords)
            for layer in tags:
                markup[doc.name][layer] = tags[layer]
    return markup
class So3Block(nn.Module):
    """SO(3) convolution followed by BatchNorm3d and the shared nonlinearity."""

    def __init__(self, b_in, b_out, f_in, f_out):
        super(So3Block, self).__init__()
        # Near-identity grid: dense in alpha, coarse in beta/gamma.
        self.grid_so3 = so3_near_identity_grid(n_alpha=(2 * b_in), n_beta=2, n_gamma=2)
        self.cnn = SO3Convolution(nfeature_in=f_in, nfeature_out=f_out, b_in=b_in, b_out=b_out, grid=self.grid_so3)
        self.bn = nn.BatchNorm3d(f_out, affine=AFFINE)

    def forward(self, x):
        return nonlinearity(self.bn(self.cnn(x)))
def predict(args, model, data, device, tokenizer, executor):
    """Generate programs with the model, execute them, write answers to predict.txt.

    Each decoded output is a '<func>'-separated sequence of calls whose
    arguments are '<arg>'-separated; ``executor.forward`` runs the program.
    """
    model.eval()
    # NOTE(review): (count, correct) are never updated below — dead accounting?
    (count, correct) = (0, 0)
    with torch.no_grad():
        all_outputs = []
        for batch in tqdm(data, total=len(data)):
            source_ids = batch[0].to(device)
            outputs = model.generate(input_ids=source_ids, max_length=500)
            all_outputs.extend(outputs.cpu().numpy())
        outputs = [tokenizer.decode(output_id, skip_special_tokens=True, clean_up_tokenization_spaces=True) for output_id in all_outputs]
    with open(os.path.join(args.save_dir, 'predict.txt'), 'w') as f:
        for output in tqdm(outputs):
            # Parse "<func> f <arg> a1 <arg> a2 <func> g ..." into parallel lists.
            chunks = output.split('<func>')
            func_list = []
            inputs_list = []
            for chunk in chunks:
                chunk = chunk.strip()
                res = chunk.split('<arg>')
                res = [_.strip() for _ in res]
                # str.split always returns at least one element, so this branch
                # (and the inner `else`) is effectively always taken.
                if (len(res) > 0):
                    func = res[0]
                    inputs = []
                    if (len(res) > 1):
                        for x in res[1:]:
                            inputs.append(x)
                    else:
                        inputs = []
                func_list.append(func)
                inputs_list.append(inputs)
            ans = executor.forward(func_list, inputs_list, ignore_error=True)
            # Normalize executor results to a single printable answer line.
            if (ans is None):
                ans = 'no'
            if (isinstance(ans, list) and (len(ans) > 0)):
                ans = ans[0]
            if (isinstance(ans, list) and (len(ans) == 0)):
                ans = 'None'
            f.write((ans + '\n'))
def display_hypothesis_output(hypothesis_output: list[str]) -> None:
    """Print collected Hypothesis output, if any, under its own section header."""
    if not hypothesis_output:
        return
    display_section_name('HYPOTHESIS OUTPUT')
    click.secho('\n'.join(hypothesis_output), fg='red')
class BigBirdForSequenceClassification(metaclass=DummyObject):
    """Placeholder that raises a helpful error unless the 'torch' backend is installed."""
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def get_default(arg, default, msg_if_none=None):
    """Return ``arg`` unless it is None, in which case return ``default``.

    If the chosen value is still None and ``msg_if_none`` is given, raise
    ValueError(msg_if_none) instead of returning.
    """
    out = default if arg is None else arg
    if out is None and msg_if_none is not None:
        raise ValueError(msg_if_none)
    return out
class Encoder(nn.Module):
    """Conv2d -> BatchNorm -> LeakyReLU block, with an optional complex-valued variant."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding=None, complex=False, padding_mode='zeros'):
        super().__init__()
        if padding is None:
            # 'same'-style padding for odd kernel sizes.
            padding = [(k - 1) // 2 for k in kernel_size]
        if complex:
            conv_cls, bn_cls = complex_nn.ComplexConv2d, complex_nn.ComplexBatchNorm2d
        else:
            conv_cls, bn_cls = nn.Conv2d, nn.BatchNorm2d
        self.conv = conv_cls(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding, padding_mode=padding_mode)
        self.bn = bn_cls(out_channels)
        self.relu = nn.LeakyReLU(inplace=True)

    def forward(self, x):
        return self.relu(self.bn(self.conv(x)))
class AttachmentMetric(Metric):
    """Dependency-attachment metric: UAS/LAS plus unlabeled/labeled complete-match rates.

    Counts accumulate across calls; ``eps`` guards divisions before any batch
    has been scored.
    """

    def __init__(self, eps=1e-12):
        super().__init__()
        self.eps = eps
        self.n = 0.0             # sentences seen
        self.n_ucm = 0.0         # sentences with every arc correct
        self.n_lcm = 0.0         # sentences with every arc AND label correct
        self.total = 0.0         # scored tokens
        self.correct_arcs = 0.0
        self.correct_rels = 0.0

    def __repr__(self):
        s = f'UCM: {self.ucm:6.2%} LCM: {self.lcm:6.2%} '
        s += f'UAS: {self.uas:6.2%} LAS: {self.las:6.2%}'
        return s

    def __call__(self, arc_preds, rel_preds, arc_golds, rel_golds, mask):
        """Accumulate counts from one batch; returns self for chaining."""
        lens = mask.sum(1)
        arc_mask = arc_preds.eq(arc_golds) & mask
        # A relation only counts as correct when its arc is also correct.
        rel_mask = rel_preds.eq(rel_golds) & arc_mask
        (arc_mask_seq, rel_mask_seq) = (arc_mask[mask], rel_mask[mask])
        self.n += len(mask)
        self.n_ucm += arc_mask.sum(1).eq(lens).sum().item()
        self.n_lcm += rel_mask.sum(1).eq(lens).sum().item()
        self.total += len(arc_mask_seq)
        self.correct_arcs += arc_mask_seq.sum().item()
        self.correct_rels += rel_mask_seq.sum().item()
        return self

    # NOTE(review): __repr__ formats self.ucm etc. with a '%' format spec,
    # which only works on numbers — these accessors must be properties. The
    # stripped @property decorators are restored here.
    @property
    def score(self):
        """Primary score used for model selection (LAS)."""
        return self.las

    @property
    def ucm(self):
        return self.n_ucm / (self.n + self.eps)

    @property
    def lcm(self):
        return self.n_lcm / (self.n + self.eps)

    @property
    def uas(self):
        return self.correct_arcs / (self.total + self.eps)

    @property
    def las(self):
        return self.correct_rels / (self.total + self.eps)
def load_image(path):
    """Read a JPEG from disk and return it as a 224x224 uint8 tensor."""
    raw = tf.io.read_file(path)
    image = tf.image.decode_jpeg(raw)
    image = tf.image.resize(image, (224, 224))
    # resize yields floats; cast back to uint8 pixel values.
    return tf.cast(image, tf.uint8)
def quote_args(args):
    """Return ``args`` as a list, double-quoting any item that contains a space
    and is not already quoted."""
    quoted = []
    for a in args:
        needs_quoting = ' ' in a and a[0] not in '"\''
        quoted.append('"%s"' % a if needs_quoting else a)
    return quoted
# NOTE(review): the pytest.mark.parametrize decorators were stripped to bare
# ".parametrize(...)" lines (a SyntaxError) in this source; restored here.
@pytest.mark.parametrize('observation_shape', [(8,), (3, 84, 84)])
@pytest.mark.parametrize('action_size', [2])
@pytest.mark.parametrize('length', [100])
@pytest.mark.parametrize('size', [10])
@pytest.mark.parametrize('terminated', [True, False])
@pytest.mark.parametrize('n_frames', [1, 4])
def test_frame_stack_trajectory_slicer(observation_shape: Sequence[int], action_size: int, length: int, size: int, terminated: bool, n_frames: int) -> None:
    """FrameStackTrajectorySlicer must left-pad short windows and stack n_frames per step."""
    episode = create_episode(observation_shape, action_size, length, terminated=terminated)
    returns_to_go = np.reshape((np.sum(episode.rewards) - np.cumsum(np.reshape(episode.rewards, [(- 1)]))), [(- 1), 1])
    slicer = FrameStackTrajectorySlicer(n_frames)
    stacked_shape = list(observation_shape)
    stacked_shape[0] *= n_frames
    picker = FrameStackTransitionPicker(n_frames)
    for i in range(size):
        end_index = i
        traj = slicer(episode, end_index, size)
        assert isinstance(traj.observations, np.ndarray)
        assert (traj.observations.shape == (size, *stacked_shape))
        assert (traj.actions.shape == (size, action_size))
        assert (traj.rewards.shape == (size, 1))
        assert (traj.returns_to_go.shape == (size, 1))
        assert (traj.terminals.shape == (size, 1))
        assert (traj.timesteps.shape == (size,))
        assert (traj.masks.shape == (size,))
        assert (traj.length == size)
        pad_size = ((size - i) - 1)
        end = (end_index + 1)
        start = max((end - size), 0)
        # Reference: stack observations transition-by-transition with the picker.
        observations = []
        for j in range((end - start)):
            transition = picker(episode, (j + start))
            observations.append(transition.observation)
        ref_stacked_observations = np.array(observations)
        assert np.all((traj.observations[pad_size:] == ref_stacked_observations))
        assert np.all((traj.observations[:pad_size] == 0.0))
        assert np.all((traj.actions[pad_size:] == episode.actions[start:end]))
        assert np.all((traj.actions[:pad_size] == 0.0))
        assert np.all((traj.rewards[pad_size:] == episode.rewards[start:end]))
        assert np.all((traj.rewards[:pad_size] == 0.0))
        assert np.all((traj.returns_to_go[pad_size:] == returns_to_go[start:end]))
        assert np.all((traj.returns_to_go[:pad_size] == 0.0))
        assert np.all((traj.terminals == 0.0))
        # BUG FIX(review): the original read `assert np.all(...) + 1`, which is
        # always truthy; timesteps are 1-based, so the +1 belongs inside.
        assert np.all((traj.timesteps[pad_size:] == (np.arange(start, end) + 1)))
        assert np.all((traj.timesteps[:pad_size] == 0.0))
        assert np.all((traj.masks[pad_size:] == 1.0))
        assert np.all((traj.masks[:pad_size] == 0.0))
    # Final-step slice: the terminal flag must appear iff the episode terminated.
    traj = slicer(episode, (episode.size() - 1), size)
    if terminated:
        assert (traj.terminals[(- 1)][0] == 1.0)
        assert np.all((traj.terminals[:(- 1)] == 0.0))
    else:
        assert np.all((traj.terminals == 0.0))
def extract_instances_for_current_subtask(task_instances, sub_task):
    """Look up and return the instances registered under *sub_task*.

    Raises KeyError if *sub_task* is not present in *task_instances*.
    """
    selected_instances = task_instances[sub_task]
    return selected_instances
def inputInt():
    """Prompt the user until a valid integer is entered; echo and return it.

    The original had unreachable code: a second ``return`` and a ``break``
    after the in-loop ``return`` could never execute and have been removed.
    """
    while True:
        try:
            user_input = int(input('Enter a number: '))
        except ValueError:
            # Non-numeric text: report and re-prompt.
            print('Invalid input')
            continue
        print('The number is: ', user_input)
        return user_input
def normal_kl(mean1, logvar1, mean2, logvar2):
    """KL divergence between two diagonal Gaussians, elementwise.

    Arguments are means and log-variances; each may be a tensor or a python
    scalar, but at least one must be a ``th.Tensor`` (it is used to infer the
    device/dtype onto which scalar log-variances are promoted).
    """
    reference = next(
        (arg for arg in (mean1, logvar1, mean2, logvar2) if isinstance(arg, th.Tensor)),
        None,
    )
    assert reference is not None, 'at least one argument must be a Tensor'
    # Promote scalar log-variances to tensors on the reference device.
    logvar1, logvar2 = (
        x if isinstance(x, th.Tensor) else th.tensor(x).to(reference)
        for x in (logvar1, logvar2)
    )
    return 0.5 * (
        -1.0
        + logvar2
        - logvar1
        + th.exp(logvar1 - logvar2)
        + ((mean1 - mean2) ** 2) * th.exp(-logvar2)
    )
class FractionSpecializationMorphism(Morphism):
    """Specialization morphism lifted to a fraction field.

    Wraps a ``SpecializationMorphism`` on the base ring and applies it to
    numerator and denominator separately, dividing in the codomain's
    fraction field.
    """
    def __init__(self, domain, D):
        # domain: a fraction field whose base ring supports specialization.
        # D: specialization data forwarded verbatim to SpecializationMorphism.
        if (not is_FractionField(domain)):
            raise TypeError('domain must be a fraction field')
        self._specialization = SpecializationMorphism(domain.base(), D)
        self._repr_type_str = 'Fraction Specialization'
        # Codomain is the fraction field of the specialized base ring.
        Morphism.__init__(self, domain, self._specialization.codomain().fraction_field())
    def _call_(self, p):
        """Apply the specialization to p = numerator/denominator componentwise."""
        if (not isinstance(p, FractionFieldElement)):
            raise TypeError('p must be a fraction field element')
        numerator = self._specialization._call_(p.numerator())
        denominator = self._specialization._call_(p.denominator())
        return (numerator / denominator)
def outer_sqrt_with_intermediate(Y: dace.float32[(3, 3)]):
    """DaCe program: elementwise sqrt of Y into a transient, a second pass
    through ``middle_sqrt_no_sum`` (nested SDFG), then a full reduction.

    NOTE(review): written in DaCe's restricted Python subset — the explicit
    transients and slice-assignments are significant to the generated SDFG,
    so the exact statement form is intentional.
    """
    # Transient (local) arrays holding the two intermediate stages.
    intermediate = dace.define_local([3, 3], dace.float32)
    W = dace.define_local([3, 3], dace.float32)
    intermediate[:] = dace.elementwise((lambda x: sqrt(x)), Y)
    W[:] = middle_sqrt_no_sum(intermediate)
    # Reduce to a scalar result.
    Z = np.sum(W)
    return Z
class TransferNet(nn.Module):
    """TransferNet-style multi-hop KBQA model.

    At each reasoning step the model attends over the question words,
    predicts a relation distribution, and propagates an entity-probability
    vector through the knowledge graph's sparse triple matrices; a hop
    selector finally mixes the per-step entity distributions.

    Fix: the original line ``ctx_h = (q_dist q_word_h).squeeze(1)`` was a
    syntax error (missing matrix-multiplication operator).
    """

    def __init__(self, args, dim_word, dim_hidden, vocab):
        super().__init__()
        self.args = args
        self.vocab = vocab
        self.kg = KnowledgeGraph(args, vocab)
        num_words = len(vocab['word2id'])
        num_entities = len(vocab['entity2id'])
        num_relations = len(vocab['relation2id'])
        self.num_steps = args.num_steps
        self.aux_hop = args.aux_hop
        self.question_encoder = BiGRU(dim_word, dim_hidden, num_layers=1, dropout=0.2)
        self.word_embeddings = nn.Embedding(num_words, dim_word)
        self.word_dropout = nn.Dropout(0.2)
        # One small projection per reasoning step; registered individually so
        # their parameters are tracked despite living in a plain list.
        self.step_encoders = []
        for i in range(self.num_steps):
            m = nn.Sequential(nn.Linear(dim_hidden, dim_hidden), nn.Tanh())
            self.step_encoders.append(m)
            self.add_module('step_encoders_{}'.format(i), m)
        self.rel_classifier = nn.Linear(dim_hidden, num_relations)
        self.hop_selector = nn.Linear(dim_hidden, self.num_steps)

    def follow(self, e, r):
        """Propagate entity scores ``e`` along relation scores ``r`` using the
        sparse subject/relation/object incidence matrices of the KG."""
        x = (torch.sparse.mm(self.kg.Msubj, e.t()) * torch.sparse.mm(self.kg.Mrel, r.t()))
        return torch.sparse.mm(self.kg.Mobj.t(), x).t()

    def forward(self, questions, e_s, answers=None, hop=None):
        """Run multi-step reasoning.

        Args:
            questions: (bsz, seq_len) padded word-id matrix; 0 is padding.
            e_s: (bsz, num_entities) start-entity probability vector.
            answers: gold answer vector, required in training mode.
            hop: gold hop count (1-based), used when ``aux_hop`` is set.
        """
        question_lens = (questions.size(1) - questions.eq(0).long().sum(dim=1))
        q_word_emb = self.word_dropout(self.word_embeddings(questions))
        (q_word_h, q_embeddings, q_hn) = self.question_encoder(q_word_emb, question_lens)
        device = q_word_h.device
        bsz = q_word_h.size(0)
        dim_h = q_word_h.size((- 1))
        last_e = e_s
        word_attns = []
        rel_probs = []
        ent_probs = []
        for t in range(self.num_steps):
            cq_t = self.step_encoders[t](q_embeddings)
            q_logits = torch.sum((cq_t.unsqueeze(1) * q_word_h), dim=2)
            q_dist = torch.softmax(q_logits, 1).unsqueeze(1)
            word_attns.append(q_dist.squeeze(1))
            # Attention-weighted question context: (bsz,1,L) @ (bsz,L,h) -> (bsz,h).
            # FIX: original was missing the matmul operator here.
            ctx_h = (q_dist @ q_word_h).squeeze(1)
            rel_dist = torch.softmax(self.rel_classifier(ctx_h), 1)
            rel_probs.append(rel_dist)
            last_e = self.follow(last_e, rel_dist)
            # Renormalize any entity scores that exceeded 1 after propagation.
            m = last_e.gt(1).float()
            z = ((m * last_e) + (1 - m)).detach()
            last_e = (last_e / z)
            if (t > 0):
                # Suppress immediate backtracking: if this step's top relation
                # is the inverse of the previous step's (adjacent ids, even
                # smaller id), mask out entities revisited from two steps ago.
                prev_rel = torch.argmax(rel_probs[(- 2)], dim=1)
                curr_rel = torch.argmax(rel_probs[(- 1)], dim=1)
                prev_prev_ent_prob = (ent_probs[(- 2)] if (len(ent_probs) >= 2) else e_s)
                m = torch.zeros((bsz, 1)).to(device)
                m[((torch.abs((prev_rel - curr_rel)) == 1) & (torch.remainder(torch.min(prev_rel, curr_rel), 2) == 0))] = 1
                ent_m = (m.float() * prev_prev_ent_prob.gt(0.9).float())
                last_e = ((1 - ent_m) * last_e)
            ent_probs.append(last_e)
        # Mix the per-hop entity distributions by the predicted hop attention.
        hop_res = torch.stack(ent_probs, dim=1)
        hop_logit = self.hop_selector(q_embeddings)
        hop_attn = torch.softmax(hop_logit, dim=1)
        last_e = torch.sum((hop_res * hop_attn.unsqueeze(2)), dim=1)
        # Remove the start entity from the answer when a 2-hop path is chosen.
        m = (hop_attn.argmax(dim=1).eq(1).float().unsqueeze(1) * e_s)
        last_e = ((1 - m) * last_e)
        if (not self.training):
            return {'e_score': last_e, 'word_attns': word_attns, 'rel_probs': rel_probs, 'ent_probs': ent_probs}
        else:
            # Weighted MSE: gold answers weighted 10x relative to non-answers.
            weight = ((answers * 9) + 1)
            loss_score = torch.mean((weight * torch.pow((last_e - answers), 2)))
            loss = {'loss_score': loss_score}
            if self.aux_hop:
                # Auxiliary hop-count supervision (hop is 1-based).
                loss_hop = nn.CrossEntropyLoss()(hop_logit, (hop - 1))
                loss['loss_hop'] = (0.01 * loss_hop)
            return loss
class KLConcrete(nn.Module):
    """KL term between a (relaxed) categorical posterior q and a prior p over
    K categories for each of M latent variables.

    kl_type 'categorical' computes the exact categorical KL from logits;
    'relaxed' uses a single-sample Monte-Carlo estimate under Concrete
    (RelaxedOneHotCategorical) distributions.
    """
    def __init__(self, K, M, kl_type='categorical', logits_p='train', tau_p=1.0):
        # K: number of categories; M: number of latent variables.
        # logits_p: 'uniform' for a fixed flat prior, 'train' for a learned one.
        # tau_p: prior temperature (only used when kl_type == 'relaxed');
        #        pass 'train' to learn it.
        super().__init__()
        l = torch.ones(M, K)
        if (logits_p == 'uniform'):
            # Fixed (non-trainable) uniform prior logits.
            self.logits_p = move_to_device(l, cuda_device)
        elif (logits_p == 'train'):
            self.logits_p = nn.Parameter(l)
        self.kl_type = kl_type
        t = torch.FloatTensor(M, 1)
        if (kl_type == 'relaxed'):
            if (tau_p == 'train'):
                t.fill_(1.0)
                self.tau_p = nn.Parameter(t)
            else:
                assert ((type(tau_p) in [int, float]) and (tau_p > 0))
                self.tau_p = t.fill_(tau_p)
                self.tau_p = move_to_device(self.tau_p, cuda_device)
    def forward(self, q, z, logits_q):
        """Dispatch to the configured KL estimator.

        q: posterior distribution object (used by the relaxed estimator);
        z: a sample from q; logits_q: posterior logits (categorical case).
        """
        if (self.kl_type == 'categorical'):
            kl = self.kl_categorical(logits_q)
        elif (self.kl_type == 'relaxed'):
            kl = self.kl_relaxed(q, z)
        return kl
    def kl_relaxed(self, q, z):
        """Monte-Carlo KL estimate log q(z) - log p(z) under Concrete dists."""
        # Keep the prior temperature in a numerically safe range.
        self.tau_p.data.clamp_(min=0.01, max=10.0)
        p = RelaxedOneHotCategorical(logits=self.logits_p.expand_as(z), temperature=self.tau_p)
        KL_qp = (q.log_prob(z) - p.log_prob(z))
        if torch.isnan(KL_qp).any():
            # Log diagnostics (temperatures, nan count) before aborting.
            log.info(('q tau:' + str(q.temperature.squeeze())))
            log.info(('p tau:' + str(p.temperature.squeeze())))
            log.info('Error: nan log probibility for relaxed kl')
            nan_mask = torch.isnan(KL_qp)
            KL_qp = torch.where(torch.isnan(KL_qp), torch.zeros_like(KL_qp), KL_qp)
            log.info('nan length %d out of %d\n', nan_mask.sum().item(), torch.tensor(nan_mask.shape).prod(dim=0).item())
            raise RuntimeError('nan log probibility for relaxed kl')
        return KL_qp
    def kl_categorical(self, logits_q):
        """Exact KL(q || p) between one-hot categorical distributions."""
        p_cat = OneHotCategorical(logits=self.logits_p.expand_as(logits_q))
        q_cat = OneHotCategorical(logits=logits_q)
        KL_qp = kl_divergence(q_cat, p_cat)
        return KL_qp
def ctcdc(xs, y, k=3, base=2, warning=True):
    """Conditional total correlation of the columns of *xs* given *y*
    (discrete-continuous): sum of per-column conditional entropies minus
    the joint conditional entropy, via k-NN estimators."""
    per_column = [
        centropydc(column(xs, col), y, k, base, warning)
        for col in range(len(xs[0]))
    ]
    return np.sum(per_column) - centropydc(xs, y, k, base, warning)
class TestClipReward():
    """Checks that the ClipReward wrapper squashes raw rewards into [-1, 1]."""

    def test_clip_reward(self):
        env = DummyRewardBoxEnv(random=True)
        env_wrap = ClipReward(env)
        env.reset()
        env_wrap.reset()
        # Action 0 yields a raw reward of +10 (clipped to +1);
        # action 1 yields -10 (clipped to -1).
        for action, raw_expected, clipped_expected in ((0, 10, 1), (1, -10, -1)):
            _, reward, _, _ = env.step(action)
            _, reward_wrap, _, _ = env_wrap.step(action)
            assert reward == raw_expected
            assert reward_wrap == clipped_expected
def test_build_optimizer_constructor():
    """build_optimizer_constructor must resolve the default constructor, a
    user-registered one, and raise KeyError for an unknown type.

    Fix: ``_BUILDERS.register_module()`` was a bare (discarded) call; it must
    decorate the class, otherwise MyOptimizerConstructor is never registered
    and the lookup below raises KeyError.
    """
    optimizer_cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
    optim_constructor_cfg = dict(type='DefaultOptimizerConstructor', optimizer_cfg=optimizer_cfg)
    optim_constructor = build_optimizer_constructor(optim_constructor_cfg)
    assert (type(optim_constructor) is DefaultOptimizerConstructor)

    @_BUILDERS.register_module()
    class MyOptimizerConstructor(DefaultOptimizerConstructor):
        pass

    optim_constructor_cfg = dict(type='MyOptimizerConstructor', optimizer_cfg=optimizer_cfg)
    optim_constructor = build_optimizer_constructor(optim_constructor_cfg)
    assert (type(optim_constructor) is MyOptimizerConstructor)
    # Unregistered names must fail loudly.
    with pytest.raises(KeyError):
        build_optimizer_constructor(dict(type='A'))
def convert_percentiles(idx):
    """Map percentile values in *idx* to scores by linear interpolation over
    a piecewise density table of (score, probability-mass) buckets.

    Returns a numpy array; values beyond the total mass map to None.
    """
    pdf = [(300, 2.1), (350, 4.2), (400, 5.4), (450, 6.5), (500, 7.9), (550, 9.6), (600, 12.0), (650, 13.8), (700, 17.0), (750, 15.8), (800, 5.7), (850, 0)]

    def convert_one(x):
        cumulative = 0
        for (score, mass), (next_score, _) in zip(pdf, pdf[1:]):
            if cumulative + mass >= x:
                # x falls inside this bucket: interpolate linearly.
                return score + (next_score - score) * (x - cumulative) / mass
            cumulative += mass

    return np.array(list(map(convert_one, idx)))
def configure_logger(level: (int | str)=logging.INFO) -> None:
if isinstance(level, str):
level = logging.getLevelName(level)
format_string = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(format=format_string, level=level)
for handler in logging.getLogger('pytorch_lightning').handlers:
handler.setFormatter(logging.Formatter(format_string))
handler.setLevel(level) |
def get_dataloader(args):
    """Build train/validation/test DataLoaders for the selected toy dataset.

    Returns a 6-tuple: (train, vali, test) loaders over the noisy targets
    followed by (train, vali, test) loaders over the clean ("cl") targets.
    Raises RuntimeError for an unknown ``args.dataset``.
    """
    if (args.dataset == 'gaussian'):
        # NOTE(review): covariances are hard-coded to identity of size 10
        # while the means use args.dimension -- confirm dimension == 10 here.
        dataset = GaussianDataNumpy(mu_pos=(np.ones(args.dimension) * 0), mu_neg=(np.ones(args.dimension) * args.negative_gaussian_mean), cov_pos=np.identity(10), cov_neg=np.identity(10), n_pos_tr=args.training_samples, n_neg_tr=args.training_samples, n_pos_va=args.validation_samples, n_neg_va=args.validation_samples, label_noise=args.label_noise)
    elif (args.dataset == 'spiral'):
        dataset = SpiralDataNumpy(label_noise=args.label_noise, noise_level=args.noise_level, n_pos_tr=args.training_samples, n_neg_tr=args.training_samples, n_pos_va=args.validation_samples, n_neg_va=args.validation_samples)
    elif (args.dataset == 'sinusoid'):
        dataset = SinusoidDataNumpy(dim=args.dimension, label_noise=args.label_noise, n_tr=args.training_samples, n_va=args.validation_samples)
    elif (args.dataset == 'sinusoid2d'):
        dataset = Sinusoid2dimDataNumpy(label_noise=args.label_noise, n_tr=args.training_samples, n_va=args.validation_samples)
    else:
        raise RuntimeError('Dataset name is invalid.')
    # makeData returns inputs (x), auxiliary values (r), noisy labels (y)
    # and clean labels (y_*_cl) for train/validation/test splits.
    (x_tr, r_tr, y_tr, y_tr_cl, x_va, r_va, y_va, y_va_cl, x_te, r_te, y_te, y_te_cl) = dataset.makeData()
    (tr_dataset, va_dataset, te_dataset) = (MyDataset(x_tr, y_tr, r_tr), MyDataset(x_va, y_va, r_va), MyDataset(x_te, y_te, r_te))
    (tr_cl_dataset, va_cl_dataset, te_cl_dataset) = (MyDataset(x_tr, y_tr_cl, r_tr), MyDataset(x_va, y_va_cl, r_va), MyDataset(x_te, y_te_cl, r_te))
    # Only the training loaders are shuffled.
    train_loader = torch.utils.data.DataLoader(tr_dataset, batch_size=args.batch_size_tr, shuffle=True, pin_memory=True)
    vali_loader = torch.utils.data.DataLoader(va_dataset, batch_size=args.batch_size_te, pin_memory=True)
    test_loader = torch.utils.data.DataLoader(te_dataset, batch_size=args.batch_size_te, pin_memory=True)
    train_cl_loader = torch.utils.data.DataLoader(tr_cl_dataset, batch_size=args.batch_size_tr, shuffle=True, pin_memory=True)
    vali_cl_loader = torch.utils.data.DataLoader(va_cl_dataset, batch_size=args.batch_size_te, pin_memory=True)
    test_cl_loader = torch.utils.data.DataLoader(te_cl_dataset, batch_size=args.batch_size_te, pin_memory=True)
    return (train_loader, vali_loader, test_loader, train_cl_loader, vali_cl_loader, test_cl_loader)
def format_trace_inputs(declaration):
    """Generate the C++ statements that record an op's inputs on a JIT trace
    node, for one declaration from the op-codegen metadata.

    Expands 'options' (TensorOptions) arguments into their four components,
    and for out-overloads emits a force_outplace-conditional block that
    records either the ``out`` argument (in-place form) or, for factory
    functions, the ``out`` tensor's options (out-of-place form).
    """
    gather_tensor_options = 'TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory)'
    def dispatch_trace_input(arg_spec):
        # One addInputs call (or a group of them) per argument.
        (name, value, simple_type, nullable) = arg_spec
        if ((simple_type == 'TensorList') and nullable):
            # Optional tensor lists need the allow_undefined flag set.
            return 'jit::tracer::addInputs(node, "{}", {}, {});'.format(name, value, 'true')
        elif (value == 'options'):
            # TensorOptions is traced as four separate scalar inputs.
            result = ''
            result += (ADD_TRACE_INPUT.substitute(name=name, input='optTypeMetaToScalarType(options.dtype_opt())') + '\n')
            result += (ADD_TRACE_INPUT.substitute(name=name, input='options.layout()') + '\n')
            result += (ADD_TRACE_INPUT.substitute(name=name, input='options.device()') + '\n')
            result += ADD_TRACE_INPUT.substitute(name=name, input='options.pinned_memory()')
            return result
        else:
            return ADD_TRACE_INPUT.substitute(name=name, input=value)
    # c10-full ops use the schema-order argument list.
    if (declaration['use_c10_dispatcher'] == 'full'):
        trace_inputs = declaration['schema_order_arguments']
    else:
        trace_inputs = declaration['arguments']
    if is_out_overload(declaration):
        # The leading 'out' argument is handled separately below.
        out_input = trace_inputs[0]
        trace_inputs = trace_inputs[1:]
    if (declaration['use_c10_dispatcher'] == 'full'):
        trace_input_spec = [(i['name'], i['name'], i['type'], i.get('is_nullable')) for i in trace_inputs]
    else:
        trace_input_spec = [(i['name'], i['name'], i['simple_type'], i.get('is_nullable')) for i in trace_inputs]
    trace_inputs = '\n'.join((dispatch_trace_input(arg_spec) for arg_spec in trace_input_spec))
    if is_out_overload(declaration):
        value = out_input['name']
        # In-place form: record 'out' itself as an input.
        inplace = ADD_TRACE_INPUT.substitute(name=out_input['name'], input=value)
        trace_name = uninplace_api_name(declaration['api_name'])
        has_factory_name = (trace_name in FACTORY_FUNCTION_NAMES)
        if has_factory_name:
            # Out-of-place factory form: record out's TensorOptions instead.
            outplace = ''
            outplace += (ADD_TRACE_INPUT.substitute(name='out', input='optTypeMetaToScalarType(out.options().dtype_opt())') + '\n')
            outplace += (ADD_TRACE_INPUT.substitute(name='out', input='out.options().layout()') + '\n')
            outplace += (ADD_TRACE_INPUT.substitute(name='out', input='out.options().device()') + '\n')
            outplace += ADD_TRACE_INPUT.substitute(name='out', input='out.options().pinned_memory()')
        else:
            outplace = ''
        trace_inputs += '\n'
        # Choose at trace time between the two recordings.
        trace_inputs += SELECT.substitute(cond='tracer_state->force_outplace', true=outplace, false=inplace)
    return trace_inputs
# Fix: `_test(run_synthesis=False)` was a bare, discarded call; it is a
# decorator factory and must be applied to the test function.
@_test(run_synthesis=False)
def test_axpy_unroll_3():
    """HBM-transform an axpy SDFG with x/y/z split across three HBM banks
    each, then validate the resulting compiled SDFG."""
    (csdfg, sdfg) = _exec_hbmtransform((lambda : create_vadd_sdfg('axpy_unroll_3')), [('x', 'HBM', '3:6'), ('y', 'HBM', '0:3'), ('z', 'HBM', '6:9')])
    validate_vadd_sdfg(csdfg, [3, 20])
    return sdfg
def dep_bigram(corpus, dep, lemma=True, lower=True, pron=False, dep_upos=None, head_upos=None, dep_text=None, head_text=None):
    """Collect dependent_head bigram frequencies for one dependency relation.

    Iterates *corpus* (an iterable of raw texts) through the module-level
    spaCy pipeline ``nlp`` and, for every token whose dependency label equals
    *dep*, records the (dependent, head) pair — optionally lemmatized,
    lowercased, and filtered by UPOS tag or exact surface form.

    Returns a dict with:
      - 'bi_freq': bigram -> corpus frequency
      - 'dep_freq' / 'head_freq': individual word frequencies
      - 'range': bigram -> number of distinct texts it occurs in
      - 'samples': matched sentences with dep/head tokens annotated, each
        ending with the 1-based text id.
    """
    (bi_freq, dep_freq, head_freq, range_freq) = ({}, {}, {}, {})
    match_sentences = []
    def dicter(item, d):
        # Increment d[item], initializing to 1 on first sight.
        if (item not in d):
            d[item] = 1
        else:
            d[item] += 1
    textid = 0
    for text in corpus:
        textid += 1
        # Bigrams seen in this text (deduplicated later for range counts).
        range_list = []
        doc = nlp(text)
        for sentence in doc.sents:
            index_start = 0
            sent_text = []
            dep_headi = []
            first_token = True
            for token in sentence:
                if first_token:
                    # Offset to convert doc-level indices to sentence-level.
                    index_start = token.i
                    first_token = False
                sent_text.append(token.text)
                if (token.dep_ == dep):
                    dep_tg = token.pos_
                    head_tg = token.head.pos_
                    # Choose surface vs lemma form (spaCy v2 lemmatizes
                    # pronouns to the placeholder '-PRON-').
                    if lemma:
                        if (not pron):
                            if (token.lemma_ == '-PRON-'):
                                dependent = token.text.lower()
                                headt = token.head.text.lower()
                            elif lower:
                                dependent = token.lemma_.lower()
                                headt = token.head.lemma_.lower()
                            else:
                                dependent = token.lemma_
                                headt = token.head.lemma_
                        else:
                            dependent = token.lemma_
                            headt = token.head.lemma_
                    if (not lemma):
                        if lower:
                            dependent = token.text.lower()
                            headt = token.head.text.lower()
                        else:
                            dependent = token.text
                            headt = token.head.text
                    # Optional POS / exact-text filters: skip non-matches.
                    if ((dep_upos is not None) and (dep_upos != dep_tg)):
                        continue
                    if ((head_upos is not None) and (head_upos != head_tg)):
                        continue
                    if ((dep_text is not None) and (dep_text != dependent)):
                        continue
                    if ((head_text is not None) and (head_text != headt)):
                        continue
                    dep_headi.append([(token.i - index_start), (token.head.i - index_start)])
                    # NOTE: local name shadows the enclosing function name.
                    dep_bigram = ((dependent + '_') + headt)
                    range_list.append(dep_bigram)
                    dicter(dep_bigram, bi_freq)
                    dicter(dependent, dep_freq)
                    dicter(headt, head_freq)
            # Build an annotated copy of the sentence per match.
            for x in dep_headi:
                temp_sent = sent_text.copy()
                depi = (((sent_text[x[0]] + '_') + dep) + '_dep')
                headi = (((sent_text[x[1]] + '_') + dep) + '_head')
                temp_sent[x[0]] = depi
                temp_sent[x[1]] = headi
                temp_sent.append(str(textid))
                match_sentences.append(temp_sent)
        # Range counts: one increment per text a bigram appears in.
        for x in list(set(range_list)):
            dicter(x, range_freq)
    bigram_dict = {'bi_freq': bi_freq, 'dep_freq': dep_freq, 'head_freq': head_freq, 'range': range_freq, 'samples': match_sentences}
    return bigram_dict
def main(argv):
    """CLI entry point: compile GLSL sources into embedded SPIR-V C++ files.

    Fix: the original aliased and then mutated the module-level DEFAULT_ENV
    dict, so repeated invocations leaked --env overrides across calls; we
    now work on a copy.

    NOTE(review): *argv* is accepted but parse_args() still reads sys.argv,
    matching the original behavior.
    """
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('-i', '--glsl-path', help='', default='.')
    parser.add_argument('-c', '--glslc-path', required=True, help='')
    parser.add_argument('-t', '--tmp-dir-path', required=True, help='/tmp')
    parser.add_argument('-o', '--output-path', required=True, help='')
    parser.add_argument('--env', metavar='KEY=VALUE', nargs='*', help='Set a number of key-value pairs')
    options = parser.parse_args()
    # Copy so command-line overrides never mutate the shared default.
    env = dict(DEFAULT_ENV)
    env.update(parse_arg_env(options.env))
    os.makedirs(options.output_path, exist_ok=True)
    os.makedirs(options.tmp_dir_path, exist_ok=True)
    genCppH(hFilePath=(options.output_path + '/spv.h'), cppFilePath=(options.output_path + '/spv.cpp'), srcDirPath=options.glsl_path, glslcPath=options.glslc_path, tmpDirPath=options.tmp_dir_path, env=env)
class SemanticMatcher():
    """Decides whether two s-expression logical forms are semantically
    equivalent by converting each to a query graph and testing isomorphism.
    """
    def __init__(self, reverse_properties, relation_dr, relations, upper_types, types):
        # reverse_properties: relation -> its inverse relation.
        # relation_dr: relation -> (domain class, range class).
        # relations / types: known relation and class vocabularies.
        # upper_types: class -> set of its superclasses.
        self.reverse_properties = reverse_properties
        self.relation_dr = relation_dr
        self.relations = relations
        self.upper_types = upper_types
        self.types = types
    def same_logical_form(self, form1, form2):
        """Return True iff both forms parse and their graphs are isomorphic."""
        # Forms containing the empty-string placeholder are never equal.
        if (form1.__contains__('') or form2.__contains__('')):
            return False
        try:
            G1 = self.logical_form_to_graph(lisp_to_nested_expression(form1))
        except Exception:
            return False
        try:
            G2 = self.logical_form_to_graph(lisp_to_nested_expression(form2))
        except Exception:
            return False
        def node_match(n1, n2):
            # Nodes match when id, type, aggregation function and time
            # constraint all agree. NOTE(review): pop() mutates the node
            # attribute dicts during matching — appears intentional so the
            # optional keys do not affect later comparisons; confirm.
            if ((n1['id'] == n2['id']) and (n1['type'] == n2['type'])):
                func1 = n1.pop('function', 'none')
                func2 = n2.pop('function', 'none')
                tc1 = n1.pop('tc', 'none')
                tc2 = n2.pop('tc', 'none')
                if ((func1 == func2) and (tc1 == tc2)):
                    return True
                else:
                    return False
            else:
                return False
        def multi_edge_match(e1, e2):
            # Parallel edges match as multisets of relation labels.
            if (len(e1) != len(e2)):
                return False
            values1 = []
            values2 = []
            for v in e1.values():
                values1.append(v['relation'])
            for v in e2.values():
                values2.append(v['relation'])
            return (sorted(values1) == sorted(values2))
        return nx.is_isomorphic(G1, G2, node_match=node_match, edge_match=multi_edge_match)
    def get_symbol_type(self, symbol: str) -> int:
        """Classify a symbol: 2=literal, 3=class, 4=relation, 1=entity."""
        if symbol.__contains__('^^'):
            return 2
        elif (symbol in self.types):
            return 3
        elif (symbol in self.relations):
            return 4
        else:
            return 1
    def logical_form_to_graph(self, expression: List) -> nx.MultiGraph:
        """Build the query graph and flag its last node as the question node."""
        G = self._get_graph(expression)
        G.nodes[len(G.nodes())]['question_node'] = 1
        return G
    def _get_graph(self, expression: List) -> nx.MultiGraph:
        """Recursively translate a nested s-expression into a MultiDiGraph.

        Invariant: nodes are numbered 1..n and the highest-numbered node is
        the current answer/question node of the sub-expression.
        """
        if isinstance(expression, str):
            # Leaf symbol: a single node (entity/literal/class) or, for a
            # relation, a two-node edge between its domain and range classes.
            G = nx.MultiDiGraph()
            if (self.get_symbol_type(expression) == 1):
                G.add_node(1, id=expression, type='entity')
            elif (self.get_symbol_type(expression) == 2):
                G.add_node(1, id=expression, type='literal')
            elif (self.get_symbol_type(expression) == 3):
                G.add_node(1, id=expression, type='class')
            elif (self.get_symbol_type(expression) == 4):
                (domain, rang) = self.relation_dr[expression]
                G.add_node(1, id=rang, type='class')
                G.add_node(2, id=domain, type='class')
                G.add_edge(2, 1, relation=expression)
                if (expression in self.reverse_properties):
                    G.add_edge(1, 2, relation=self.reverse_properties[expression])
            return G
        if (expression[0] == 'R'):
            # Reverse: flip which end is the answer node by reversing the
            # node numbering.
            G = self._get_graph(expression[1])
            size = len(G.nodes())
            mapping = {}
            for n in G.nodes():
                mapping[n] = ((size - n) + 1)
            G = nx.relabel_nodes(G, mapping)
            return G
        elif (expression[0] in ['JOIN', 'le', 'ge', 'lt', 'gt']):
            # Join sub-graph G1's answer node onto G2's node 1; comparison
            # operators additionally tag the joined node with a function.
            G1 = self._get_graph(expression=expression[1])
            G2 = self._get_graph(expression=expression[2])
            size = len(G2.nodes())
            qn_id = size
            if (G1.nodes[1]['type'] == G2.nodes[qn_id]['type'] == 'class'):
                # Prefer the more specific class when one subsumes the other.
                if (G2.nodes[qn_id]['id'] in self.upper_types[G1.nodes[1]['id']]):
                    G2.nodes[qn_id]['id'] = G1.nodes[1]['id']
            mapping = {}
            for n in G1.nodes():
                mapping[n] = ((n + size) - 1)
            G1 = nx.relabel_nodes(G1, mapping)
            G = nx.compose(G1, G2)
            if (expression[0] != 'JOIN'):
                G.nodes[1]['function'] = function_map[expression[0]]
            return G
        elif (expression[0] == 'AND'):
            # Intersect: merge the two answer nodes into one.
            G1 = self._get_graph(expression[1])
            G2 = self._get_graph(expression[2])
            size1 = len(G1.nodes())
            size2 = len(G2.nodes())
            if (G1.nodes[size1]['type'] == G2.nodes[size2]['type'] == 'class'):
                G2.nodes[size2]['id'] = G1.nodes[size1]['id']
            mapping = {}
            for n in G1.nodes():
                mapping[n] = ((n + size2) - 1)
            G1 = nx.relabel_nodes(G1, mapping)
            G2 = nx.relabel_nodes(G2, {size2: ((size1 + size2) - 1)})
            G = nx.compose(G1, G2)
            return G
        elif (expression[0] == 'COUNT'):
            # Aggregate: mark the answer node with the count function.
            G = self._get_graph(expression[1])
            size = len(G.nodes())
            G.nodes[size]['function'] = 'count'
            return G
        elif expression[0].__contains__('ARG'):
            # ARGMAX/ARGMIN: the attribute node becomes a placeholder literal
            # carrying the superlative function.
            G1 = self._get_graph(expression[1])
            size1 = len(G1.nodes())
            G2 = self._get_graph(expression[2])
            size2 = len(G2.nodes())
            G2.nodes[1]['id'] = 0
            G2.nodes[1]['type'] = 'literal'
            G2.nodes[1]['function'] = expression[0].lower()
            if (G1.nodes[size1]['type'] == G2.nodes[size2]['type'] == 'class'):
                G2.nodes[size2]['id'] = G1.nodes[size1]['id']
            mapping = {}
            for n in G1.nodes():
                mapping[n] = ((n + size2) - 1)
            G1 = nx.relabel_nodes(G1, mapping)
            G2 = nx.relabel_nodes(G2, {size2: ((size1 + size2) - 1)})
            G = nx.compose(G1, G2)
            return G
        elif (expression[0] == 'TC'):
            # Time constraint: attach (relation, time) to the answer node.
            G = self._get_graph(expression[1])
            size = len(G.nodes())
            G.nodes[size]['tc'] = (expression[2], expression[3])
            return G
class SegDataParameter(_message.Message):
    """Protocol-buffer message class (generated code style; Python-2-era
    protobuf uses __metaclass__ to synthesize fields from the descriptor).
    Do not edit by hand."""
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _SEGDATAPARAMETER
def create_h5(path):
    """Create (truncating) an HDF5 file at *path*, creating parent
    directories first, and return the open ``h5py.File`` handle."""
    import h5py
    resolved = get_absolute_path(path)
    make_parent_dir(resolved)
    return h5py.File(resolved, 'w')
class UnidirectionalRNNEncoder(Encoder):
    """Encoder that runs a single forward RNN over the inputs.

    Fix: ``default_params`` took no ``self``/``cls`` yet was defined as an
    instance method, so calling it on an instance raised TypeError; it is
    now a @staticmethod, matching the parameterless signature.
    """

    def __init__(self, params, mode, name='forward_rnn_encoder'):
        super(UnidirectionalRNNEncoder, self).__init__(params, mode, name)
        # Disable dropout outside of training mode.
        self.params['rnn_cell'] = _toggle_dropout(self.params['rnn_cell'], mode)

    @staticmethod
    def default_params():
        """Default hyperparameters for this encoder."""
        return {'rnn_cell': _default_rnn_cell_params(), 'init_scale': 0.04}

    def encode(self, inputs, sequence_length, **kwargs):
        """Run dynamic_rnn over *inputs* and package the encoder output.

        The RNN outputs double as the attention values.
        """
        scope = tf.get_variable_scope()
        scope.set_initializer(tf.random_uniform_initializer((- self.params['init_scale']), self.params['init_scale']))
        cell = training_utils.get_rnn_cell(**self.params['rnn_cell'])
        (outputs, state) = tf.nn.dynamic_rnn(cell=cell, inputs=inputs, sequence_length=sequence_length, dtype=tf.float32, **kwargs)
        return EncoderOutput(outputs=outputs, final_state=state, attention_values=outputs, attention_values_length=sequence_length)
def preprocessor(output_directory, filepath, stats, hip_clang_launch, is_pytorch_extension, clean_ctx):
    """Hipify one CUDA source file: apply the CUDA->HIP token maps, rewrite
    include paths, and write the result next to the original.

    Returns 'ok' when the output file was (re)written, 'skipped' when the
    existing output was already identical.
    """
    fin_path = os.path.join(output_directory, filepath)
    with open(fin_path, 'r', encoding='utf-8') as fin:
        output_source = fin.read()
    fout_path = os.path.join(output_directory, get_hip_file_path(filepath))
    if (not os.path.exists(os.path.dirname(fout_path))):
        clean_ctx.makedirs(os.path.dirname(fout_path))
    def pt_repl(m):
        # Substitute a matched CUDA token with its PyTorch HIP equivalent.
        return PYTORCH_MAP[m.group(0)]
    # Choose the PyTorch or Caffe2 substitution table.
    if is_pytorch_extension:
        output_source = RE_PYTORCH_PREPROCESSOR.sub(pt_repl, output_source)
    elif is_pytorch_file(filepath):
        output_source = RE_PYTORCH_PREPROCESSOR.sub(pt_repl, output_source)
    else:
        def c2_repl(m):
            return CAFFE2_MAP[m.group(0)]
        output_source = RE_CAFFE2_PREPROCESSOR.sub(c2_repl, output_source)
    def mk_repl(templ):
        # Build a re.sub callback that rewrites CUDA header paths to their
        # hipified locations, leaving all other includes untouched.
        def repl(m):
            f = m.group(1)
            if (f.startswith('ATen/cuda') or f.startswith('ATen/native/cuda') or f.startswith('ATen/native/quantized/cuda') or f.startswith('ATen/native/sparse/cuda') or f.startswith('THC/') or f.startswith('THCUNN/') or (f.startswith('THC') and (not f.startswith('THCP')))):
                return templ.format(get_hip_file_path(m.group(1)))
            return m.group(0)
        return repl
    output_source = RE_QUOTE_HEADER.sub(mk_repl('#include "{0}"'), output_source)
    output_source = RE_ANGLE_HEADER.sub(mk_repl('#include <{0}>'), output_source)
    output_source = RE_THC_GENERIC_FILE.sub(mk_repl('#define THC_GENERIC_FILE "{0}"'), output_source)
    if filepath.endswith('CMakeLists.txt'):
        # Build files: rename CUDA targets/variables and .cu sources.
        output_source = output_source.replace('CUDA', 'HIP')
        output_source = output_source.replace('THC', 'THH')
        output_source = RE_CU_SUFFIX.sub('.hip', output_source)
    if (not hip_clang_launch):
        # Rewrite <<<...>>> kernel launches into hipLaunchKernelGGL form.
        output_source = processKernelLaunches(output_source, stats)
    if ((filepath.endswith('.cu') or filepath.endswith('.cuh')) and ('PowKernel' not in filepath)):
        output_source = replace_math_functions(output_source)
    output_source = hip_header_magic(output_source)
    output_source = replace_extern_shared(output_source)
    # Only rewrite the output when content changed, to preserve mtimes for
    # incremental builds.
    do_write = True
    if os.path.exists(fout_path):
        with open(fout_path, 'r', encoding='utf-8') as fout_old:
            do_write = (fout_old.read() != output_source)
    if do_write:
        with clean_ctx.open(fout_path, 'w', encoding='utf-8') as fout:
            fout.write(output_source)
        return 'ok'
    else:
        return 'skipped'
class TestSampling(unittest.TestCase):
    """Few-shot sampling must be label-balanced, seed-dependent, and
    reproducible for a given seed."""

    def test_sampling(self):
        n_trials = 5
        train = load_dataset('gnad10')['train']
        for n_examples_per_label in [2, 8, 16]:
            sampled_texts = []
            for seed in range(n_trials):
                try:
                    train_sample = sample(train, seed=seed, n_examples_per_label=n_examples_per_label)
                except KeyError as k_error:
                    raise ValueError(seed) from k_error
                # Every class must be present with exactly the requested count.
                label_counts = Counter(train_sample['label'])
                self.assertEqual(len(label_counts), train.features['label'].num_classes)
                for _label, count in label_counts.most_common():
                    self.assertEqual(count, n_examples_per_label)
                sampled_texts.append(train_sample['text'])
            # Different seeds should produce different samples...
            for j in range(1, len(sampled_texts)):
                self.assertNotEqual(sampled_texts[0], sampled_texts[j])
            # ...while the same seed reproduces the same sample.
            for seed in range(5):
                train_sample = sample(train, seed=seed, n_examples_per_label=n_examples_per_label)
                self.assertEqual(sampled_texts[seed], train_sample['text'])
class Discriminator(nn.Module):
    """Global patch discriminator for 128x128 RGB images: a stack of
    stride-2 conv + LeakyReLU stages followed by a 1-channel conv head."""

    def __init__(self, conv_dim=64, repeat_num=6):
        super(Discriminator, self).__init__()
        self._name = 'global_d'
        blocks = [
            nn.Conv2d(3, conv_dim, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(0.01, inplace=True),
        ]
        channels = conv_dim
        # Each additional stage halves spatial size and doubles channels.
        for _ in range(1, repeat_num):
            blocks.append(nn.Conv2d(channels, (channels * 2), kernel_size=4, stride=2, padding=1))
            blocks.append(nn.LeakyReLU(0.01, inplace=True))
            channels = (channels * 2)
        self.main = nn.Sequential(*blocks)
        self.conv1 = nn.Conv2d(channels, 1, kernel_size=3, stride=1, padding=1, bias=False)

    def forward(self, x):
        assert (x.shape[3] == 128), 'error'
        features = self.main(x)
        out_real = self.conv1(features)
        return out_real.squeeze()
class CleanObjectAction(BaseAction):
    """Reward shaping for the 'clean object' subgoal."""

    # Actions that can legitimately advance the cleaning subgoal.
    valid_actions = {'PutObject', 'PickupObject', 'ToggleObjectOn', 'ToggleObjectOff'}

    def get_reward(self, state, prev_state, expert_plan, goal_idx):
        """Return (reward, done) for the latest environment step."""
        last_action = state.metadata['lastAction']
        if last_action not in self.valid_actions:
            return (self.rewards['invalid_action'], False)
        subgoal = expert_plan[goal_idx]['planner_action']
        reward, done = self.rewards['neutral'], False
        target = get_object(subgoal['cleanObjectId'], state.metadata)
        if target is not None:
            # Positive + terminal once the target has actually been cleaned.
            if target['objectId'] in self.env.cleaned_objects:
                reward, done = self.rewards['positive'], True
            else:
                reward, done = self.rewards['negative'], False
        return (reward, done)
# Fix: `_REGISTRY.register()` was a bare, discarded call; it returns a
# decorator and must be applied so the class is actually registered.
@_REGISTRY.register()
class ResNet(nn.Module):
    """Single-pathway video ResNet backbone (stem + 4 residual stages + head)."""

    def __init__(self, cfg):
        super(ResNet, self).__init__()
        self.num_pathways = 1
        self._construct_network(cfg)

    def _compute_dim_in(self, idx, trans_func, width_per_group):
        """Input channel count for stage *idx*, depending on the block type."""
        if (trans_func == 'basic_transform'):
            factor = (1 if (idx == 0) else (2 ** (idx - 1)))
        elif (trans_func == 'bottleneck_transform'):
            # Bottleneck stages expand outputs 4x, so inputs follow suit.
            factor = (1 if (idx == 0) else (2 * (2 ** idx)))
        else:
            raise NotImplementedError('Does not support {} transfomration'.format(trans_func))
        dim_in = [(width_per_group * factor)]
        return dim_in

    def _compute_dim_out(self, idx, trans_func, width_per_group):
        """Output channel count for stage *idx*, depending on the block type."""
        if (trans_func == 'basic_transform'):
            factor = (2 ** idx)
        elif (trans_func == 'bottleneck_transform'):
            factor = (4 * (2 ** idx))
        else:
            raise NotImplementedError('Does not support {} transfomration'.format(trans_func))
        dim_out = [(width_per_group * factor)]
        return dim_out

    def _construct_network(self, cfg):
        """Build stem s1, stages s2-s5, the inter-stage pool, and the head."""
        assert (cfg.VIS.ARCH in _POOL1.keys())
        pool_size = _POOL1[cfg.VIS.ARCH]
        assert (len({len(pool_size), self.num_pathways}) == 1)
        assert (cfg.RESNET.DEPTH in _MODEL_STAGE_DEPTH.keys())
        (d2, d3, d4, d5) = _MODEL_STAGE_DEPTH[cfg.RESNET.DEPTH]
        num_groups = cfg.RESNET.NUM_GROUPS
        width_per_group = cfg.RESNET.WIDTH_PER_GROUP
        dim_inner = (num_groups * width_per_group)
        temp_kernel = _TEMPORAL_KERNEL_BASIS[cfg.VIS.ARCH]
        self.s1 = stem_helper.VideoModelStem(dim_in=cfg.DATA.INPUT_CHANNEL_NUM, dim_out=[width_per_group], kernel=[(temp_kernel[0][0] + [7, 7])], stride=[[2, 2, 2]], padding=[[(temp_kernel[0][0][0] // 2), 3, 3]], eps=cfg.MODEL.EPSILON, bn_mmt=cfg.MODEL.MOMENTUM)
        dim_in_l = [self._compute_dim_in(i, cfg.RESNET.TRANS_FUNC, width_per_group) for i in range(4)]
        dim_out_l = [self._compute_dim_out(i, cfg.RESNET.TRANS_FUNC, width_per_group) for i in range(4)]
        self.s2 = resnet_helper.ResStage(dim_in=dim_in_l[0], dim_out=dim_out_l[0], dim_inner=[dim_inner], temp_kernel_sizes=temp_kernel[1], stride=cfg.RESNET.SPATIAL_STRIDES[0], num_blocks=[d2], num_groups=[num_groups], num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[0], trans_func_name=cfg.RESNET.TRANS_FUNC, stride_1x1=cfg.RESNET.STRIDE_1X1, inplace_relu=cfg.RESNET.INPLACE_RELU, dilation=cfg.RESNET.SPATIAL_DILATIONS[0], eps=cfg.MODEL.EPSILON, bn_mmt=cfg.MODEL.MOMENTUM)
        # One max-pool per pathway, applied between s2 and s3.
        for pathway in range(self.num_pathways):
            pool = nn.MaxPool3d(kernel_size=pool_size[pathway], stride=pool_size[pathway], padding=[0, 0, 0])
            self.add_module('pathway{}_pool'.format(pathway), pool)
        self.s3 = resnet_helper.ResStage(dim_in=dim_in_l[1], dim_out=dim_out_l[1], dim_inner=[(dim_inner * 2)], temp_kernel_sizes=temp_kernel[2], stride=cfg.RESNET.SPATIAL_STRIDES[1], num_blocks=[d3], num_groups=[num_groups], num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[1], trans_func_name=cfg.RESNET.TRANS_FUNC, stride_1x1=cfg.RESNET.STRIDE_1X1, inplace_relu=cfg.RESNET.INPLACE_RELU, dilation=cfg.RESNET.SPATIAL_DILATIONS[1], eps=cfg.MODEL.EPSILON, bn_mmt=cfg.MODEL.MOMENTUM)
        self.s4 = resnet_helper.ResStage(dim_in=dim_in_l[2], dim_out=dim_out_l[2], dim_inner=[(dim_inner * 4)], temp_kernel_sizes=temp_kernel[3], stride=cfg.RESNET.SPATIAL_STRIDES[2], num_blocks=[d4], num_groups=[num_groups], num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[2], trans_func_name=cfg.RESNET.TRANS_FUNC, stride_1x1=cfg.RESNET.STRIDE_1X1, inplace_relu=cfg.RESNET.INPLACE_RELU, dilation=cfg.RESNET.SPATIAL_DILATIONS[2], eps=cfg.MODEL.EPSILON, bn_mmt=cfg.MODEL.MOMENTUM)
        self.s5 = resnet_helper.ResStage(dim_in=dim_in_l[3], dim_out=dim_out_l[3], dim_inner=[(dim_inner * 8)], temp_kernel_sizes=temp_kernel[4], stride=cfg.RESNET.SPATIAL_STRIDES[3], num_blocks=[d5], num_groups=[num_groups], num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[3], trans_func_name=cfg.RESNET.TRANS_FUNC, stride_1x1=cfg.RESNET.STRIDE_1X1, inplace_relu=cfg.RESNET.INPLACE_RELU, dilation=cfg.RESNET.SPATIAL_DILATIONS[3], eps=cfg.MODEL.EPSILON, bn_mmt=cfg.MODEL.MOMENTUM)
        _num_frames = (cfg.DATA.NUM_FRAMES // 2)
        self.head = head_helper.ResNetPoolingHead(dim_in=dim_out_l[3], pool_size=[[(_num_frames // pool_size[0][0]), (math.ceil((cfg.DATA.CROP_SIZE / 32)) // pool_size[0][1]), (math.ceil((cfg.DATA.CROP_SIZE / 32)) // pool_size[0][2])]])
        self.output_size = sum(dim_out_l[3])

    def get_feature_map(self, x):
        """Run stem and all residual stages, returning the s5 feature map."""
        x = self.s1(x)
        x = self.s2(x)
        for pathway in range(self.num_pathways):
            pool = getattr(self, 'pathway{}_pool'.format(pathway))
            x[pathway] = pool(x[pathway])
        x = self.s3(x)
        x = self.s4(x)
        x = self.s5(x)
        return x

    def get_logit(self, feature_map):
        """Apply the pooling head to an s5 feature map."""
        return self.head(feature_map)

    def forward(self, x):
        x = self.s1(x)
        x = self.s2(x)
        for pathway in range(self.num_pathways):
            pool = getattr(self, 'pathway{}_pool'.format(pathway))
            x[pathway] = pool(x[pathway])
        x = self.s3(x)
        x = self.s4(x)
        x = self.s5(x)
        x = self.head(x)
        return x
def build_mphf(ksize, records_iter_fn):
    """Two-pass construction of a minimal perfect hash table mapping k-mer
    hashes to cDBG contig ids.

    records_iter_fn is a zero-argument callable returning a fresh iterator
    over contig records (called twice: once to collect k-mers, once to fill
    the table). Returns (table, sizes) where sizes maps cdbg_id -> k-mer
    count of that contig.
    """
    # Pass 1: collect unique k-mer hashes; hashes seen in more than one
    # place (collisions/duplicates) are excluded from the MPHF.
    all_kmers = set()
    sum_kmers = 0
    multicounts = set()
    records_iter = records_iter_fn()
    for (n, record) in enumerate(records_iter):
        if (((n % 50000) == 0) and n):
            print('... contig', n, end='\r')
        kmers = hash_sequence(record.sequence, ksize)
        sum_kmers += len(kmers)
        these_kmers = set(kmers)
        seen_before = all_kmers.intersection(these_kmers)
        if seen_before:
            multicounts.update(seen_before)
        all_kmers.update(these_kmers)
    n_contigs = (n + 1)
    print(f'''loaded {n_contigs} contigs.
''')
    if multicounts:
        print('NOTE: likely hash collisions (or duplicate k-mers?) in input cDBG')
        print(f'NOTE: {len(multicounts)} k-mer hash values are present more than once.')
        print('NOTE: these k-mers are being removed from consideration.')
        all_kmers -= multicounts
    else:
        print('NOTE: no multicount hashvals detected.')
    print(f'building MPHF for {len(all_kmers)} k-mers in {n_contigs} nodes.')
    table = BBHashTable()
    table.initialize(list(all_kmers))
    # Free the k-mer set before the second pass to reduce peak memory.
    del all_kmers
    print('second pass.')
    # Pass 2: assign each retained k-mer hash its contig id, and record
    # per-contig k-mer counts.
    records_iter = records_iter_fn()
    sizes = {}
    max_cdbg_id = 0
    for (n, record) in enumerate(records_iter):
        if (((n % 50000) == 0) and n):
            print('... contig {} of {}'.format(n, n_contigs), end='\r')
        cdbg_id = int(record.name)
        kmers = hash_sequence(record.sequence, ksize)
        for kmer in (set(kmers) - multicounts):
            table[kmer] = cdbg_id
        sizes[cdbg_id] = len(kmers)
        if (cdbg_id > max_cdbg_id):
            max_cdbg_id = cdbg_id
    print(f'''loaded {n_contigs} contigs in pass2.
''')
    # Sanity checks: contig ids must be dense 0..n and fit below the
    # table's sentinel UNSET_VALUE.
    assert (n <= UNSET_VALUE)
    max_table_value = max(table.mphf_to_value)
    assert (max_table_value != UNSET_VALUE)
    assert (n == max_cdbg_id)
    assert (n == max_table_value), (n, max_table_value)
    return (table, sizes)
class SkipQuantModel(torch.nn.Module):
    """Model whose submodule is meant to be skipped during quantization:
    an InnerModule feeding a float Linear(5, 5)."""

    def __init__(self):
        super().__init__()
        self.sub = InnerModule()
        self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float)

    def forward(self, x):
        hidden = self.sub(x)
        return self.fc(hidden)

    def fuse_modules(self):
        # Fusion is delegated entirely to the inner module.
        self.sub.fuse_modules()
class AtrousPyramid3D(nn.Module):
    """Parallel 3-D atrous (dilated) convolutions concatenated along the
    channel axis, optionally followed by a ReLU + 1x1x1 projection."""

    def __init__(self, in_channels, pyramid_channels, dilation_rates, out_channels=None, include_1x1_conv=True):
        super().__init__()
        # One branch per dilation rate, all with the same channel count.
        branch_channels = [pyramid_channels] * len(dilation_rates)
        branches = []
        for width, rate in zip(branch_channels, dilation_rates):
            # padding == dilation keeps the spatial size for 3x3x3 kernels.
            branches.append(nn.Conv3d(in_channels, width, 3, padding=rate, dilation=rate, bias=False))
        total_channels = sum(branch_channels)
        if include_1x1_conv:
            branches.append(nn.Conv3d(in_channels, branch_channels[0], 1, bias=False))
            total_channels += branch_channels[0]
        self.atrous_convs = nn.ModuleList(branches)
        if out_channels:
            self.conv_out = nn.Sequential(nn.ReLU(inplace=True), nn.Conv3d(total_channels, out_channels, 1, bias=False))
        else:
            self.conv_out = nn.Identity()

    def forward(self, x):
        features = [branch(x) for branch in self.atrous_convs]
        return self.conv_out(torch.cat(features, dim=1))
def ring_env(render='drgb'):
    """Build the flow_params dict for a ring-road RL experiment: 21 IDM
    human vehicles plus one RL vehicle on a variable-length ring."""
    vehicles = VehicleParams()
    vehicles.add('human', acceleration_controller=(IDMController, {}), routing_controller=(ContinuousRouter, {}), num_vehicles=21)
    vehicles.add(veh_id='rl', acceleration_controller=(RLController, {}), routing_controller=(ContinuousRouter, {}), num_vehicles=1)
    horizon = 100
    return dict(
        exp_tag='ring',
        env_name=WaveAttenuationPOEnv,
        network=RingNetwork,
        simulator='traci',
        sim=SumoParams(sim_step=0.5, render=render, save_render=True),
        env=EnvParams(horizon=horizon, additional_params={'max_accel': 1, 'max_decel': 1, 'ring_length': [220, 270]}),
        net=NetParams(additional_params=ADDITIONAL_NET_PARAMS),
        veh=vehicles,
        initial=InitialConfig(spacing='uniform', shuffle=False),
    )
class PixelDiscriminator(BaseNetwork):
    """1x1-convolution (per-pixel) PatchGAN discriminator: three 1x1 convs
    mapping input_nc -> ndf -> 2*ndf -> 1."""

    def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d, active_fn='nn.ReLU'):
        super(PixelDiscriminator, self).__init__()
        # InstanceNorm has no affine params by default, so convs keep a bias.
        if (type(norm_layer) == functools.partial):
            use_bias = (norm_layer.func == nn.InstanceNorm2d)
        else:
            use_bias = (norm_layer == nn.InstanceNorm2d)
        active_fn = get_active_fn(active_fn)
        layers = [nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0)]
        layers.append(active_fn(0.2))
        layers.append(nn.Conv2d(ndf, (ndf * 2), kernel_size=1, stride=1, padding=0, bias=use_bias))
        layers.append(norm_layer((ndf * 2)))
        layers.append(active_fn(0.2))
        layers.append(nn.Conv2d((ndf * 2), 1, kernel_size=1, stride=1, padding=0, bias=use_bias))
        self.net = nn.Sequential(*layers)

    def forward(self, input):
        return self.net(input)
def _initialize(module, cfg, wholemodule=False):
    """Build an initializer from ``cfg`` via the INITIALIZERS registry and
    apply it to ``module``."""
    initializer = build_from_cfg(cfg, INITIALIZERS)
    # Flag tells the initializer to treat ``module`` as a whole model.
    initializer.wholemodule = wholemodule
    initializer(module)
# NOTE(review): the line below looks like the argument list of a stripped
# decorator (presumably something like @function.Defun(func_name=...,
# noinline=...) from the TF codebase) -- confirm against the original source.
(func_name='attn_add_fun', noinline=True)
def _attn_add_fun(v, keys, query):
    # Bahdanau-style additive attention score: sum over the depth axis
    # (axis 2) of v * tanh(keys + query).
    return math_ops.reduce_sum((v * math_ops.tanh((keys + query))), [2])
def metrics_for_verification(prediction, gold):
    """Score agreement between predicted and gold verification outputs.

    Each string is mapped onto one of the labels SUPPORTS / REFUTES /
    NOT ENOUGH INFO by substring matching; the result is a 1-tuple holding
    100 when the two labels agree and 0 otherwise.
    """
    def extract_label(o):
        text = o.lower()
        supports_markers = (
            'is "supports"',
            'no fact-checking is needed for this claim',
            'the fact-checking result is not applicable to this response',
        )
        if any(marker in text for marker in supports_markers):
            return 'SUPPORTS'
        if 'the fact-checking result is "not enough info"' in text:
            return 'NOT ENOUGH INFO'
        # Default when no marker matches.
        return 'REFUTES'
    p = extract_label(prediction)
    g = extract_label(gold)
    return ((p == g) * 100,)
def random_sample(batch_size, input_shape, device):
    """Return a float32 tensor of shape (batch_size, *input_shape) filled
    with standard-normal noise, moved to ``device``.

    Fix: the original had an unreachable numpy/pdb code path after the
    ``return`` -- leftover debugging (including ``pdb.set_trace()``) that
    could never run; it has been removed.
    """
    return torch.randn(batch_size, *input_shape, dtype=torch.float).to(device)
def get_default_config_path():
    """Return the path to fb_defaults.yaml under ../configs when it exists,
    falling back to defaults.yaml otherwise."""
    configs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'configs')
    fb_defaults = os.path.join(configs_dir, 'fb_defaults.yaml')
    if PathManager.exists(fb_defaults):
        return fb_defaults
    return os.path.join(configs_dir, 'defaults.yaml')
def _internal_eval(model, global_step, sess, iterator, iterator_feed_dict, summary_writer, label):
    """Re-initialize ``iterator``, compute perplexity over it, log the value
    to the summary writer under "<label>_ppl", and return it."""
    sess.run(iterator.initializer, feed_dict=iterator_feed_dict)
    perplexity = model_helper.compute_perplexity(model, sess, label)
    utils.add_summary(summary_writer, global_step, ('%s_ppl' % label), perplexity)
    return perplexity
class RobertaConverter(Converter):
    """Converts a slow RoBERTa tokenizer into a fast ByteLevel-BPE Tokenizer."""

    def converted(self) -> Tokenizer:
        slow = self.original_tokenizer
        bpe_model = BPE(
            vocab=slow.encoder,
            merges=list(slow.bpe_ranks.keys()),
            dropout=None,
            continuing_subword_prefix='',
            end_of_word_suffix='',
            fuse_unk=False,
        )
        fast = Tokenizer(bpe_model)
        fast.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=slow.add_prefix_space)
        fast.decoder = decoders.ByteLevel()
        # Mirror RoBERTa's <s>/</s> single- and pair-sequence formatting.
        fast.post_processor = processors.RobertaProcessing(
            sep=(slow.sep_token, slow.sep_token_id),
            cls=(slow.cls_token, slow.cls_token_id),
            add_prefix_space=slow.add_prefix_space,
            trim_offsets=True,
        )
        return fast
class LipsDataset(dataset.Dataset):
    """Lip-reading dataset: fixed-length frame sequences paired with their
    alignment (sentence) labels.

    Each item is a folder containing exactly ``seq_len`` frames;
    ``__getitem__`` returns (video, label) with video shaped (C, T, H, W).
    Presumably targets the GRID corpus layout (s1..s34 speaker folders) --
    TODO confirm against the data on disk.
    """

    def __init__(self, root, align_root, flag=1, mode='train', transform=None, seq_len=75):
        assert mode in ['train', 'valid']
        self._root = os.path.expanduser(root)
        self._align_root = align_root
        self._flag = flag
        self._transform = transform
        self._exts = ['.jpg', '.jpeg', '.png']
        self._seq_len = seq_len
        self._mode = mode
        self._list_images(self._root)

    def _list_images(self, root):
        """Collect (sorted frame paths, folder label) pairs for the split."""
        self.labels = []
        self.items = []
        # Speakers held out for unseen-speaker validation, plus one skipped.
        valid_unseen_sub_idx = [1, 2, 20, 22]
        skip_sub_idx = [21]
        if self._mode == 'train':
            subjects = ['s' + str(i) for i in range(1, 35)
                        if i not in (valid_unseen_sub_idx + skip_sub_idx)]
        elif self._mode == 'valid':
            subjects = ['s' + str(i) for i in valid_unseen_sub_idx]
        folders = []
        for subject in subjects:
            folders.extend(glob.glob(os.path.join(root, subject, '*')))
        for folder in folders:
            frames = glob.glob(os.path.join(folder, '*'))
            # Drop clips without exactly the expected number of frames.
            if len(frames) != self._seq_len:
                continue
            frames.sort()
            label = os.path.split(folder)[-1]
            self.items.append((frames, label))

    def align_generation(self, file_nm, padding=75):
        """Load the alignment for ``file_nm`` and return the padded sentence."""
        align = Align(self._align_root + '/' + file_nm + '.align')
        return nd.array(align.sentence(padding))

    def __getitem__(self, idx):
        frame_paths, clip_name = self.items[idx]
        frames = []
        for path in frame_paths:
            frame = image.imread(path, self._flag)
            if self._transform is not None:
                frame = self._transform(frame)
            frames.append(frame)
        # Stack to (T, C, H, W) then transpose to (C, T, H, W).
        video = nd.transpose(nd.stack(*frames), (1, 0, 2, 3))
        label = self.align_generation(clip_name, padding=self._seq_len)
        return (video, label)

    def __len__(self):
        return len(self.items)
class YelpReviewFull(XiangZhangDataset):
    # Yelp Review Full dataset from the Xiang Zhang et al. text-classification
    # collection; the CSV has two columns: the star rating and the review text.
    dirname = 'yelp_review_full_csv'
    columns = ['rating', 'review']
def nested_symbol_dynamic(A: dace.float64[N]):
    # Calls `nested` on growing prefixes A[0:i] for i = 0..4, exercising
    # dynamically-sized nested calls.  Presumably a @dace.program whose
    # decorator lives outside this view -- TODO confirm in the full source.
    for i in range(5):
        nested(A[0:i], A[0:i], i)
def test_determine_files_to_download_raies_file_not_found(tmp_path):
    # NOTE(review): "raies" looks like a typo for "raises"; the name is kept
    # as-is so the pytest test ID stays stable.
    resource = files_resources.FilesResource(
        url=MOCK_URL,
        download_path=pathlib.Path('foo', 'bar.zip'),
        file_name='bar.txt',
        data_dir=str(tmp_path),
    )
    # A missing download path with downloading disabled must raise.
    with pytest.raises(FileNotFoundError):
        download_utils.determine_files_to_download(files_resources=[resource], download=False)
class ConvBlock(nn.Module):
    """Conv2d -> BatchNorm2d, optionally followed by LeakyReLU(0.2)."""

    def __init__(self, ni, no, ks, stride=1, pad=1, use_act=True):
        super(ConvBlock, self).__init__()
        self.use_act = use_act
        self.conv = nn.Conv2d(ni, no, ks, stride=stride, padding=pad)
        self.bn = nn.BatchNorm2d(no)
        self.act = nn.LeakyReLU(0.2, inplace=True)

    def forward(self, x):
        normed = self.bn(self.conv(x))
        if self.use_act:
            return self.act(normed)
        return normed
# NOTE(review): the call below looks like a stripped decorator (presumably
# @add_end_docstrings(INIT_TOKENIZER_DOCSTRING)) -- confirm against the original.
_end_docstrings(INIT_TOKENIZER_DOCSTRING)
class PreTrainedTokenizerBase(SpecialTokensMixin):
    """Base class gathering the shared load/save/encode machinery of slow and
    fast tokenizers.  Subclasses override the class attributes below."""
    # Mapping: file identifier -> filename expected in a vocabulary directory.
    vocab_files_names: Dict[(str, str)] = {}
    # Mapping: file identifier -> {model shortcut name -> remote file URL}.
    pretrained_vocab_files_map: Dict[(str, Dict[(str, str)])] = {}
    # Per-shortcut-name kwargs to pass to __init__ when loading.
    pretrained_init_configuration: Dict[(str, Dict[(str, Any)])] = {}
    # Per-shortcut-name maximum model input length (in tokens), if any.
    max_model_input_sizes: Dict[(str, Optional[int])] = {}
    model_input_names: List[str] = ['token_type_ids', 'attention_mask']
    padding_side: str = 'right'
    # Set by fast tokenizers to their matching slow implementation class.
    slow_tokenizer_class = None
def __init__(self, **kwargs):
    """Store construction kwargs (for save_pretrained round-tripping) and pop
    the base-level configuration options before delegating the remaining
    kwargs (special tokens etc.) to SpecialTokensMixin."""
    self.init_inputs = ()
    # Deep copy so later mutation of caller kwargs cannot corrupt the record.
    self.init_kwargs = copy.deepcopy(kwargs)
    self.name_or_path = kwargs.pop('name_or_path', '')
    # 'max_len' is the legacy spelling of 'model_max_length'.
    model_max_length = kwargs.pop('model_max_length', kwargs.pop('max_len', None))
    self.model_max_length = (model_max_length if (model_max_length is not None) else VERY_LARGE_INTEGER)
    self.padding_side = kwargs.pop('padding_side', self.padding_side)
    assert (self.padding_side in ['right', 'left']), f"Padding side should be selected between 'right' and 'left', current value: {self.padding_side}"
    self.model_input_names = kwargs.pop('model_input_names', self.model_input_names)
    # Tracks one-shot deprecation warnings so each fires at most once.
    self.deprecation_warnings = {}
    super().__init__(**kwargs)
# NOTE(review): the accessors below look like @property getters and
# .setter methods whose decorators were stripped by extraction (e.g.
# "_len_single_sentence.setter" is presumably "@max_len_single_sentence.setter")
# -- confirm against the original source.
def max_len(self) -> int:
    """Deprecated alias for model_max_length."""
    warnings.warn('The `max_len` attribute has been deprecated and will be removed in a future version, use `model_max_length` instead.', FutureWarning)
    return self.model_max_length
def max_len_single_sentence(self) -> int:
    # Usable length for a single sentence once special tokens are added.
    return (self.model_max_length - self.num_special_tokens_to_add(pair=False))
def max_len_sentences_pair(self) -> int:
    # Usable length for a sentence pair once special tokens are added.
    return (self.model_max_length - self.num_special_tokens_to_add(pair=True))
_len_single_sentence.setter
def max_len_single_sentence(self, value) -> int:
    # Setting this is deprecated: silently accept only the value it would
    # compute anyway; anything else is an error.
    if ((value == (self.model_max_length - self.num_special_tokens_to_add(pair=False))) and self.verbose):
        if (not self.deprecation_warnings.get('max_len_single_sentence', False)):
            logger.warning("Setting 'max_len_single_sentence' is now deprecated. This value is automatically set up.")
        self.deprecation_warnings['max_len_single_sentence'] = True
    else:
        raise ValueError("Setting 'max_len_single_sentence' is now deprecated. This value is automatically set up.")
_len_sentences_pair.setter
def max_len_sentences_pair(self, value) -> int:
    # Same deprecation contract as max_len_single_sentence, for pairs.
    if ((value == (self.model_max_length - self.num_special_tokens_to_add(pair=True))) and self.verbose):
        if (not self.deprecation_warnings.get('max_len_sentences_pair', False)):
            logger.warning("Setting 'max_len_sentences_pair' is now deprecated. This value is automatically set up.")
        self.deprecation_warnings['max_len_sentences_pair'] = True
    else:
        raise ValueError("Setting 'max_len_sentences_pair' is now deprecated. This value is automatically set up.")
def __repr__(self) -> str:
    # Summarizes class kind, origin, vocab size and padding configuration.
    return f"{('PreTrainedTokenizerFast' if self.is_fast else 'PreTrainedTokenizer')}(name_or_path='{self.name_or_path}', vocab_size={self.vocab_size}, model_max_len={self.model_max_length}, is_fast={self.is_fast}, padding_side='{self.padding_side}', special_tokens={self.special_tokens_map_extended})"
# NOTE(review): presumably decorated with @classmethod in the original source.
def from_pretrained(cls, pretrained_model_name_or_path, *init_inputs, **kwargs):
    """Resolve every tokenizer file (vocab, added tokens, config, ...) for
    `pretrained_model_name_or_path` -- a shortcut name, a local directory or
    single file, or a URL -- then delegate instantiation to _from_pretrained."""
    cache_dir = kwargs.pop('cache_dir', None)
    force_download = kwargs.pop('force_download', False)
    resume_download = kwargs.pop('resume_download', False)
    proxies = kwargs.pop('proxies', None)
    local_files_only = kwargs.pop('local_files_only', False)
    # Shortcut names known to this tokenizer class.
    s3_models = list(cls.max_model_input_sizes.keys())
    vocab_files = {}
    init_configuration = {}
    if (pretrained_model_name_or_path in s3_models):
        # Known shortcut: use the hard-coded remote file map and init config.
        for (file_id, map_list) in cls.pretrained_vocab_files_map.items():
            vocab_files[file_id] = map_list[pretrained_model_name_or_path]
        if (cls.pretrained_init_configuration and (pretrained_model_name_or_path in cls.pretrained_init_configuration)):
            init_configuration = cls.pretrained_init_configuration[pretrained_model_name_or_path].copy()
    else:
        logger.info("Model name '{}' not found in model shortcut name list ({}). Assuming '{}' is a path, a model identifier, or url to a directory containing tokenizer files.".format(pretrained_model_name_or_path, ', '.join(s3_models), pretrained_model_name_or_path))
        if (os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path)):
            # Single file/URL is only legal for one-vocab-file tokenizers.
            if (len(cls.vocab_files_names) > 1):
                raise ValueError('Calling {}.from_pretrained() with the path to a single file or url is not supported.Use a model identifier or the path to a directory instead.'.format(cls.__name__))
            logger.warning('Calling {}.from_pretrained() with the path to a single file or url is deprecated'.format(cls.__name__))
            file_id = list(cls.vocab_files_names.keys())[0]
            vocab_files[file_id] = pretrained_model_name_or_path
        else:
            # Directory or hub model id: look up every known tokenizer file.
            additional_files_names = {'added_tokens_file': ADDED_TOKENS_FILE, 'special_tokens_map_file': SPECIAL_TOKENS_MAP_FILE, 'tokenizer_config_file': TOKENIZER_CONFIG_FILE, 'tokenizer_file': FULL_TOKENIZER_FILE}
            for (file_id, file_name) in {**cls.vocab_files_names, **additional_files_names}.items():
                if os.path.isdir(pretrained_model_name_or_path):
                    full_file_name = os.path.join(pretrained_model_name_or_path, file_name)
                    if (not os.path.exists(full_file_name)):
                        logger.info("Didn't find file {}. We won't load it.".format(full_file_name))
                        full_file_name = None
                else:
                    full_file_name = hf_bucket_url(pretrained_model_name_or_path, filename=file_name, use_cdn=False, mirror=None)
                vocab_files[file_id] = full_file_name
    try:
        # Download (or locate in cache) each file; None entries pass through.
        resolved_vocab_files = {}
        for (file_id, file_path) in vocab_files.items():
            if (file_path is None):
                resolved_vocab_files[file_id] = None
            else:
                resolved_vocab_files[file_id] = cached_path(file_path, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only)
    except EnvironmentError:
        if (pretrained_model_name_or_path in s3_models):
            msg = "Couldn't reach server at '{}' to download vocabulary files."
        else:
            msg = "Model name '{}' was not found in tokenizers model name list ({}). We assumed '{}' was a path or url to a directory containing vocabulary files named {}, but couldn't find such vocabulary files at this path or url.".format(pretrained_model_name_or_path, ', '.join(s3_models), pretrained_model_name_or_path, list(cls.vocab_files_names.values()))
        raise EnvironmentError(msg)
    if all(((full_file_name is None) for full_file_name in resolved_vocab_files.values())):
        raise EnvironmentError("Model name '{}' was not found in tokenizers model name list ({}). We assumed '{}' was a path, a model identifier, or url to a directory containing vocabulary files named {} but couldn't find such vocabulary files at this path or url.".format(pretrained_model_name_or_path, ', '.join(s3_models), pretrained_model_name_or_path, list(cls.vocab_files_names.values())))
    for (file_id, file_path) in vocab_files.items():
        if (file_path == resolved_vocab_files[file_id]):
            logger.info('loading file {}'.format(file_path))
        else:
            logger.info('loading file {} from cache at {}'.format(file_path, resolved_vocab_files[file_id]))
    return cls._from_pretrained(resolved_vocab_files, pretrained_model_name_or_path, init_configuration, *init_inputs, **kwargs)
# NOTE(review): presumably decorated with @classmethod in the original source.
def _from_pretrained(cls, resolved_vocab_files, pretrained_model_name_or_path, init_configuration, *init_inputs, **kwargs):
    """Instantiate the tokenizer from already-resolved local file paths and
    re-apply its saved configuration, special tokens and added tokens."""
    # Fast tokenizer without a serialized tokenizer file: first build the
    # matching slow tokenizer so it can be converted.
    if ((('tokenizer_file' not in resolved_vocab_files) or (resolved_vocab_files['tokenizer_file'] is None)) and (cls.slow_tokenizer_class is not None)):
        slow_tokenizer = cls.slow_tokenizer_class._from_pretrained(copy.deepcopy(resolved_vocab_files), pretrained_model_name_or_path, copy.deepcopy(init_configuration), *init_inputs, **copy.deepcopy(kwargs))
    else:
        slow_tokenizer = None
    tokenizer_config_file = resolved_vocab_files.pop('tokenizer_config_file', None)
    if (tokenizer_config_file is not None):
        with open(tokenizer_config_file, encoding='utf-8') as tokenizer_config_handle:
            init_kwargs = json.load(tokenizer_config_handle)
        saved_init_inputs = init_kwargs.pop('init_inputs', ())
        # Saved positional args apply only when the caller passed none.
        if (not init_inputs):
            init_inputs = saved_init_inputs
    else:
        init_kwargs = init_configuration
    # Caller kwargs take precedence over the saved configuration.
    init_kwargs.update(kwargs)
    def convert_added_tokens(obj: Union[(AddedToken, Any)]):
        # Recursively revive serialized AddedToken dicts ({'__type': 'AddedToken', ...}).
        if (isinstance(obj, dict) and ('__type' in obj) and (obj['__type'] == 'AddedToken')):
            obj.pop('__type')
            return AddedToken(**obj)
        elif isinstance(obj, (list, tuple)):
            return list((convert_added_tokens(o) for o in obj))
        elif isinstance(obj, dict):
            return {k: convert_added_tokens(v) for (k, v) in obj.items()}
        return obj
    init_kwargs = convert_added_tokens(init_kwargs)
    # Clamp model_max_length to the known limit for this shortcut, if any.
    if (pretrained_model_name_or_path in cls.max_model_input_sizes):
        model_max_length = cls.max_model_input_sizes[pretrained_model_name_or_path]
        if ((model_max_length is not None) and isinstance(model_max_length, (int, float))):
            init_kwargs['model_max_length'] = min(init_kwargs.get('model_max_length', int(1e+30)), model_max_length)
    added_tokens_file = resolved_vocab_files.pop('added_tokens_file', None)
    # Remaining resolved files become init kwargs unless already set.
    for (args_name, file_path) in resolved_vocab_files.items():
        if (args_name not in init_kwargs):
            init_kwargs[args_name] = file_path
    if (slow_tokenizer is not None):
        init_kwargs['__slow_tokenizer'] = slow_tokenizer
    init_kwargs['name_or_path'] = pretrained_model_name_or_path
    try:
        tokenizer = cls(*init_inputs, **init_kwargs)
    except OSError:
        raise OSError('Unable to load vocabulary from file. Please check that the provided vocabulary is accessible and not corrupted.')
    special_tokens_map_file = resolved_vocab_files.pop('special_tokens_map_file', None)
    if (special_tokens_map_file is not None):
        with open(special_tokens_map_file, encoding='utf-8') as special_tokens_map_handle:
            special_tokens_map = json.load(special_tokens_map_handle)
        for (key, value) in special_tokens_map.items():
            if isinstance(value, dict):
                value = AddedToken(**value)
            elif isinstance(value, list):
                value = [(AddedToken(**token) if isinstance(token, dict) else token) for token in value]
            setattr(tokenizer, key, value)
    special_tokens = tokenizer.all_special_tokens
    if (added_tokens_file is not None):
        with open(added_tokens_file, encoding='utf-8') as added_tokens_handle:
            added_tok_encoder = json.load(added_tokens_handle)
        # Tokens must be re-added in index order to keep ids consecutive.
        added_tok_encoder_sorted = list(sorted(added_tok_encoder.items(), key=(lambda x: x[1])))
        for (token, index) in added_tok_encoder_sorted:
            assert (index == len(tokenizer)), f"Non-consecutive added token '{token}' found. Should have index {len(tokenizer)} but has index {index} in saved vocabulary."
            tokenizer.add_tokens(token, special_tokens=bool((token in special_tokens)))
    # Ensure all special tokens are present in the vocabulary.
    added_tokens = tokenizer.sanitize_special_tokens()
    if added_tokens:
        logger.warning('Special tokens have been added in the vocabulary, make sure the associated word embedding are fine-tuned or trained.')
    return tokenizer
def save_pretrained(self, save_directory: str, legacy_format: bool=True, filename_prefix: Optional[str]=None) -> Tuple[str]:
    """Write the tokenizer config and special-tokens map into
    ``save_directory``, then hand off vocabulary/added-token saving to
    _save_pretrained.  Returns the tuple of files written."""
    if os.path.isfile(save_directory):
        logger.error('Provided path ({}) should be a directory, not a file'.format(save_directory))
        return
    os.makedirs(save_directory, exist_ok=True)
    special_tokens_map_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + SPECIAL_TOKENS_MAP_FILE))
    tokenizer_config_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + TOKENIZER_CONFIG_FILE))
    tokenizer_config = copy.deepcopy(self.init_kwargs)
    if (len(self.init_inputs) > 0):
        tokenizer_config['init_inputs'] = copy.deepcopy(self.init_inputs)
    # Vocabulary file paths are re-derived on load; don't serialize them.
    for file_id in self.vocab_files_names.keys():
        tokenizer_config.pop(file_id, None)
    def convert_added_tokens(obj: Union[(AddedToken, Any)], add_type_field=True):
        # Recursively serialize AddedToken objects into JSON-compatible
        # dicts, optionally tagged with '__type' for later revival.
        if isinstance(obj, AddedToken):
            out = obj.__getstate__()
            if add_type_field:
                out['__type'] = 'AddedToken'
            return out
        elif isinstance(obj, (list, tuple)):
            return list((convert_added_tokens(o, add_type_field=add_type_field) for o in obj))
        elif isinstance(obj, dict):
            return {k: convert_added_tokens(v, add_type_field=add_type_field) for (k, v) in obj.items()}
        return obj
    tokenizer_config = convert_added_tokens(tokenizer_config, add_type_field=True)
    with open(tokenizer_config_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(tokenizer_config, ensure_ascii=False))
    write_dict = convert_added_tokens(self.special_tokens_map_extended, add_type_field=False)
    with open(special_tokens_map_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(write_dict, ensure_ascii=False))
    file_names = (tokenizer_config_file, special_tokens_map_file)
    return self._save_pretrained(save_directory=save_directory, file_names=file_names, legacy_format=legacy_format, filename_prefix=filename_prefix)
def _save_pretrained(self, save_directory: str, file_names: Tuple[str], legacy_format: bool=True, filename_prefix: Optional[str]=None) -> Tuple[str]:
    """Write added_tokens.json plus the class-specific vocabulary files.
    The base class supports only the legacy one-file-per-artifact format."""
    if (not legacy_format):
        raise ValueError('Only fast tokenizers (instances of PretrainedTokenizerFast) can be saved in non legacy format.')
    added_tokens_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + ADDED_TOKENS_FILE))
    added_vocab = self.get_added_vocab()
    # Only written when there actually are added tokens.
    if added_vocab:
        with open(added_tokens_file, 'w', encoding='utf-8') as f:
            out_str = json.dumps(added_vocab, ensure_ascii=False)
            f.write(out_str)
    vocab_files = self.save_vocabulary(save_directory, filename_prefix=filename_prefix)
    return ((file_names + vocab_files) + (added_tokens_file,))
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
    """Save only the vocabulary files; implemented by concrete subclasses."""
    raise NotImplementedError
# NOTE(review): the call below looks like a stripped decorator (presumably
# @add_end_docstrings(...)) -- confirm against the original source.
_end_docstrings(ENCODE_KWARGS_DOCSTRING, '\n **kwargs: Passed along to the `.tokenize()` method.\n ', '\n Returns:\n :obj:`List[int]`, :obj:`torch.Tensor`, :obj:`tf.Tensor` or :obj:`np.ndarray`:\n The tokenized ids of the text.\n ')
def encode(self, text: Union[(TextInput, PreTokenizedInput, EncodedInput)], text_pair: Optional[Union[(TextInput, PreTokenizedInput, EncodedInput)]]=None, add_special_tokens: bool=True, padding: Union[(bool, str, PaddingStrategy)]=False, truncation: Union[(bool, str, TruncationStrategy)]=False, max_length: Optional[int]=None, stride: int=0, return_tensors: Optional[Union[(str, TensorType)]]=None, **kwargs) -> List[int]:
    """Tokenize and convert to ids: shorthand for encode_plus(...)['input_ids']."""
    encoded_inputs = self.encode_plus(text, text_pair=text_pair, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, return_tensors=return_tensors, **kwargs)
    return encoded_inputs['input_ids']
def num_special_tokens_to_add(self, pair: bool=False) -> int:
    """Number of special tokens added around a single sequence (or a pair);
    implemented by concrete subclasses."""
    raise NotImplementedError
def _get_padding_truncation_strategies(self, padding=False, truncation=False, max_length=None, pad_to_multiple_of=None, verbose=True, **kwargs):
    """Normalize user-facing padding/truncation arguments (including the
    deprecated `truncation_strategy` and `pad_to_max_length` kwargs) into
    (PaddingStrategy, TruncationStrategy, max_length, remaining kwargs)."""
    old_truncation_strategy = kwargs.pop('truncation_strategy', 'do_not_truncate')
    old_pad_to_max_length = kwargs.pop('pad_to_max_length', False)
    # max_length given without explicit truncation: warn once and default
    # to 'longest_first' truncation for backward compatibility.
    if ((max_length is not None) and (padding is False) and (truncation is False)):
        if verbose:
            if (not self.deprecation_warnings.get('Truncation-not-explicitely-activated', False)):
                logger.warning("Truncation was not explicitely activated but `max_length` is provided a specific value, please use `truncation=True` to explicitely truncate examples to max length. Defaulting to 'longest_first' truncation strategy. If you encode pairs of sequences (GLUE-style) with the tokenizer you can select this strategy more precisely by providing a specific strategy to `truncation`.")
            self.deprecation_warnings['Truncation-not-explicitely-activated'] = True
        truncation = 'longest_first'
    # Legacy pad_to_max_length flag -> PaddingStrategy.
    if ((padding is False) and old_pad_to_max_length):
        if verbose:
            warnings.warn("The `pad_to_max_length` argument is deprecated and will be removed in a future version, use `padding=True` or `padding='longest'` to pad to the longest sequence in the batch, or use `padding='max_length'` to pad to a max length. In this case, you can give a specific length with `max_length` (e.g. `max_length=45`) or leave max_length to None to pad to the maximal input size of the model (e.g. 512 for Bert).", FutureWarning)
        if (max_length is None):
            padding_strategy = PaddingStrategy.LONGEST
        else:
            padding_strategy = PaddingStrategy.MAX_LENGTH
    elif (padding is not False):
        if (padding is True):
            padding_strategy = PaddingStrategy.LONGEST
        elif (not isinstance(padding, PaddingStrategy)):
            padding_strategy = PaddingStrategy(padding)
        elif isinstance(padding, PaddingStrategy):
            padding_strategy = padding
    else:
        padding_strategy = PaddingStrategy.DO_NOT_PAD
    # Legacy truncation_strategy string -> TruncationStrategy.
    if ((truncation is False) and (old_truncation_strategy != 'do_not_truncate')):
        if verbose:
            warnings.warn("The `truncation_strategy` argument is deprecated and will be removed in a future version, use `truncation=True` to truncate examples to a max length. You can give a specific length with `max_length` (e.g. `max_length=45`) or leave max_length to None to truncate to the maximal input size of the model (e.g. 512 for Bert). If you have pairs of inputs, you can give a specific truncation strategy selected among `truncation='only_first'` (will only truncate the first sentence in the pairs) `truncation='only_second'` (will only truncate the second sentence in the pairs) or `truncation='longest_first'` (will iteratively remove tokens from the longest sentence in the pairs).", FutureWarning)
        truncation_strategy = TruncationStrategy(old_truncation_strategy)
    elif (truncation is not False):
        if (truncation is True):
            truncation_strategy = TruncationStrategy.LONGEST_FIRST
        elif (not isinstance(truncation, TruncationStrategy)):
            truncation_strategy = TruncationStrategy(truncation)
        elif isinstance(truncation, TruncationStrategy):
            truncation_strategy = truncation
    else:
        truncation_strategy = TruncationStrategy.DO_NOT_TRUNCATE
    # No explicit max_length: fall back to the model's max length, or
    # disable padding/truncation when the model has no defined maximum.
    if (max_length is None):
        if (padding_strategy == PaddingStrategy.MAX_LENGTH):
            if (self.model_max_length > LARGE_INTEGER):
                if verbose:
                    if (not self.deprecation_warnings.get('Asking-to-pad-to-max_length', False)):
                        logger.warning('Asking to pad to max_length but no maximum length is provided and the model has no predefined maximum length. Default to no padding.')
                    self.deprecation_warnings['Asking-to-pad-to-max_length'] = True
                padding_strategy = PaddingStrategy.DO_NOT_PAD
            else:
                max_length = self.model_max_length
        if (truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE):
            if (self.model_max_length > LARGE_INTEGER):
                if verbose:
                    if (not self.deprecation_warnings.get('Asking-to-truncate-to-max_length', False)):
                        logger.warning('Asking to truncate to max_length but no maximum length is provided and the model has no predefined maximum length. Default to no truncation.')
                    self.deprecation_warnings['Asking-to-truncate-to-max_length'] = True
                truncation_strategy = TruncationStrategy.DO_NOT_TRUNCATE
            else:
                max_length = self.model_max_length
    # Padding requires a usable pad token.
    if ((padding_strategy != PaddingStrategy.DO_NOT_PAD) and ((not self.pad_token) or (self.pad_token_id < 0))):
        raise ValueError("Asking to pad but the tokenizer does not have a padding token. Please select a token to use as `pad_token` `(tokenizer.pad_token = tokenizer.eos_token e.g.)` or add a new pad token via `tokenizer.add_special_tokens({'pad_token': '[PAD]'})`.")
    # Truncation + padding to a multiple must be mutually consistent.
    if ((truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE) and (padding_strategy != PaddingStrategy.DO_NOT_PAD) and (pad_to_multiple_of is not None) and (max_length is not None) and ((max_length % pad_to_multiple_of) != 0)):
        raise ValueError(f'Truncation and padding are both activated but truncation length ({max_length}) is not a multiple of pad_to_multiple_of ({pad_to_multiple_of}).')
    return (padding_strategy, truncation_strategy, max_length, kwargs)
_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def __call__(self, text: Union[(TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput])], text_pair: Optional[Union[(TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput])]]=None, add_special_tokens: bool=True, padding: Union[(bool, str, PaddingStrategy)]=False, truncation: Union[(bool, str, TruncationStrategy)]=False, max_length: Optional[int]=None, stride: int=0, is_split_into_words: bool=False, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[(str, TensorType)]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
assert (isinstance(text, str) or (isinstance(text, (list, tuple)) and ((len(text) == 0) or (isinstance(text[0], str) or (isinstance(text[0], (list, tuple)) and ((len(text[0]) == 0) or isinstance(text[0][0], str))))))), 'text input must of type `str` (single example), `List[str]` (batch or single pretokenized example) or `List[List[str]]` (batch of pretokenized examples).'
assert ((text_pair is None) or isinstance(text_pair, str) or (isinstance(text_pair, (list, tuple)) and ((len(text_pair) == 0) or (isinstance(text_pair[0], str) or (isinstance(text_pair[0], (list, tuple)) and ((len(text_pair[0]) == 0) or isinstance(text_pair[0][0], str))))))), 'text_pair input must of type `str` (single example), `List[str]` (batch or single pretokenized example) or `List[List[str]]` (batch of pretokenized examples).'
is_batched = bool((((not is_split_into_words) and isinstance(text, (list, tuple))) or (is_split_into_words and isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple)))))
if is_batched:
batch_text_or_text_pairs = (list(zip(text, text_pair)) if (text_pair is not None) else text)
return self.batch_encode_plus(batch_text_or_text_pairs=batch_text_or_text_pairs, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, is_split_into_words=is_split_into_words, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
else:
return self.encode_plus(text=text, text_pair=text_pair, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, is_split_into_words=is_split_into_words, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def encode_plus(self, text: Union[(TextInput, PreTokenizedInput, EncodedInput)], text_pair: Optional[Union[(TextInput, PreTokenizedInput, EncodedInput)]]=None, add_special_tokens: bool=True, padding: Union[(bool, str, PaddingStrategy)]=False, truncation: Union[(bool, str, TruncationStrategy)]=False, max_length: Optional[int]=None, stride: int=0, is_split_into_words: bool=False, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[(str, TensorType)]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
(padding_strategy, truncation_strategy, max_length, kwargs) = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs)
return self._encode_plus(text=text, text_pair=text_pair, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, is_split_into_words=is_split_into_words, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
def _encode_plus(self, text: Union[(TextInput, PreTokenizedInput, EncodedInput)], text_pair: Optional[Union[(TextInput, PreTokenizedInput, EncodedInput)]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, is_split_into_words: bool=False, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[(str, TensorType)]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
    """Subclass hook implementing `encode_plus` once the padding/truncation
    strategies have been resolved. Abstract in this base class."""
    raise NotImplementedError
# NOTE(review): like the line above `encode_plus`, this looks like a flattened
# `@add_end_docstrings(...)` decorator with no effect as written — confirm.
_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def batch_encode_plus(self, batch_text_or_text_pairs: Union[(List[TextInput], List[TextInputPair], List[PreTokenizedInput], List[PreTokenizedInputPair], List[EncodedInput], List[EncodedInputPair])], add_special_tokens: bool=True, padding: Union[(bool, str, PaddingStrategy)]=False, truncation: Union[(bool, str, TruncationStrategy)]=False, max_length: Optional[int]=None, stride: int=0, is_split_into_words: bool=False, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[(str, TensorType)]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
    """Tokenize and prepare for the model a batch of sequences or sequence pairs.

    Mirrors `encode_plus`: resolves padding/truncation flags into strategy
    enums, then delegates to the subclass hook `_batch_encode_plus`.
    """
    (padding_strategy, truncation_strategy, max_length, kwargs) = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs)
    return self._batch_encode_plus(batch_text_or_text_pairs=batch_text_or_text_pairs, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, is_split_into_words=is_split_into_words, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
def _batch_encode_plus(self, batch_text_or_text_pairs: Union[(List[TextInput], List[TextInputPair], List[PreTokenizedInput], List[PreTokenizedInputPair], List[EncodedInput], List[EncodedInputPair])], add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, is_split_into_words: bool=False, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[(str, TensorType)]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
    """Subclass hook implementing `batch_encode_plus` once strategies are
    resolved. Abstract in this base class."""
    raise NotImplementedError
def pad(self, encoded_inputs: Union[(BatchEncoding, List[BatchEncoding], Dict[(str, EncodedInput)], Dict[(str, List[EncodedInput])], List[Dict[(str, EncodedInput)]])], padding: Union[(bool, str, PaddingStrategy)]=True, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, return_attention_mask: Optional[bool]=None, return_tensors: Optional[Union[(str, TensorType)]]=None, verbose: bool=True) -> BatchEncoding:
    """Pad a single encoding or a batch of encodings to a uniform length.

    Accepts one encoding (dict/BatchEncoding of features) or a batch (a list
    of encodings, or a dict of feature lists). Tensor/array values are first
    converted back to plain python lists, each example is padded via `_pad`,
    and the result is re-wrapped as a BatchEncoding, optionally converted to
    the framework named by `return_tensors`.

    Args:
        encoded_inputs: output of an encode method, in any accepted shape;
            must contain 'input_ids'.
        padding: bool/str/PaddingStrategy selecting the padding behavior.
        max_length: target length when padding to a fixed maximum.
        pad_to_multiple_of: if set, the padded length is rounded up to a
            multiple of this value (handled inside `_pad`).
        return_attention_mask: whether to (re)build 'attention_mask'.
        return_tensors: 'tf'/'pt'/'np' for the returned batch; inferred from
            the input tensor type when left as None.
        verbose: forwarded to the strategy-resolution helper.

    Returns:
        BatchEncoding with every feature padded consistently.
    """
    # A list of per-example encodings is transposed into one dict of lists so
    # the batch can be processed feature by feature.
    if (isinstance(encoded_inputs, (list, tuple)) and isinstance(encoded_inputs[0], (dict, BatchEncoding))):
        encoded_inputs = {key: [example[key] for example in encoded_inputs] for key in encoded_inputs[0].keys()}
    assert ('input_ids' in encoded_inputs), 'You should supply an encoding or a list of encodings to this method. An encoding is the output of one the encoding methods of the tokenizer, i.e. __call__/encode_plus/batch_encode_plus. '
    # Empty input: nothing to pad; optionally attach an empty attention mask.
    if (not encoded_inputs['input_ids']):
        if return_attention_mask:
            encoded_inputs['attention_mask'] = []
        return encoded_inputs
    # Peek at the first element (descending one level for batched input) to
    # detect which framework, if any, produced the values.
    first_element = encoded_inputs['input_ids'][0]
    if (isinstance(first_element, (list, tuple)) and first_element):
        first_element = first_element[0]
    if (not isinstance(first_element, int)):
        # Infer return_tensors from the input tensor type when not given.
        if (is_tf_available() and isinstance(first_element, tf.Tensor)):
            return_tensors = ('tf' if (return_tensors is None) else return_tensors)
        elif (is_torch_available() and isinstance(first_element, torch.Tensor)):
            return_tensors = ('pt' if (return_tensors is None) else return_tensors)
        elif isinstance(first_element, np.ndarray):
            return_tensors = ('np' if (return_tensors is None) else return_tensors)
        else:
            raise ValueError(f'type of {first_element} unknown: {type(first_element)}. Should be one of a python, numpy, pytorch or tensorflow object.')
    def to_py_obj(obj):
        # Recursively convert tensors/arrays (and nested sequences of them)
        # into plain python lists so padding can operate uniformly.
        if isinstance(obj, (list, tuple)):
            return [to_py_obj(o) for o in obj]
        elif (is_tf_available() and isinstance(obj, tf.Tensor)):
            return obj.numpy().tolist()
        elif (is_torch_available() and isinstance(obj, torch.Tensor)):
            return obj.cpu().tolist()
        elif isinstance(obj, np.ndarray):
            return obj.tolist()
        else:
            return obj
    for (key, value) in encoded_inputs.items():
        encoded_inputs[key] = to_py_obj(value)
    # Only the padding-related outputs of the helper are needed here.
    (padding_strategy, _, max_length, _) = self._get_padding_truncation_strategies(padding=padding, max_length=max_length, verbose=verbose)
    # Single (non-batched) example: pad it directly and return.
    if (encoded_inputs['input_ids'] and (not isinstance(encoded_inputs['input_ids'][0], (list, tuple)))):
        encoded_inputs = self._pad(encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask)
        return BatchEncoding(encoded_inputs, tensor_type=return_tensors)
    batch_size = len(encoded_inputs['input_ids'])
    assert all(((len(v) == batch_size) for v in encoded_inputs.values())), 'Some items in the output dictionary have a different batch size than others.'
    # LONGEST is resolved to a concrete MAX_LENGTH equal to the longest
    # sequence in the batch, so every example is padded to the same length.
    if (padding_strategy == PaddingStrategy.LONGEST):
        max_length = max((len(inputs) for inputs in encoded_inputs['input_ids']))
        padding_strategy = PaddingStrategy.MAX_LENGTH
    # Pad example by example, then re-assemble feature lists.
    batch_outputs = {}
    for i in range(batch_size):
        inputs = dict(((k, v[i]) for (k, v) in encoded_inputs.items()))
        outputs = self._pad(inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask)
        for (key, value) in outputs.items():
            if (key not in batch_outputs):
                batch_outputs[key] = []
            batch_outputs[key].append(value)
    return BatchEncoding(batch_outputs, tensor_type=return_tensors)
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
    """Build segment ids for a sequence or sequence pair: 0 for every token of
    the first sequence, 1 for every token of the optional second one.

    This base implementation accounts for no special tokens.
    """
    segment_ids = [0] * len(token_ids_0)
    if token_ids_1 is not None:
        segment_ids.extend([1] * len(token_ids_1))
    return segment_ids
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
    """Concatenate one or two id sequences into a single model input.

    This base implementation adds no special tokens; a lone sequence is
    returned unchanged (same object), a pair is concatenated into a new list.
    """
    if token_ids_1 is None:
        return token_ids_0
    return [*token_ids_0, *token_ids_1]
# NOTE(review): again looks like a flattened `@add_end_docstrings(...)`
# decorator with no effect as written — confirm against the original source.
_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def prepare_for_model(self, ids: List[int], pair_ids: Optional[List[int]]=None, add_special_tokens: bool=True, padding: Union[(bool, str, PaddingStrategy)]=False, truncation: Union[(bool, str, TruncationStrategy)]=False, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[(str, TensorType)]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, prepend_batch_axis: bool=False, **kwargs) -> BatchEncoding:
    """Prepare already-tokenized id sequence(s) for the model.

    Pipeline: resolve padding/truncation strategies, truncate to `max_length`
    (collecting overflow), add special tokens and token-type ids, build the
    requested masks, pad, and wrap everything in a BatchEncoding.

    Args:
        ids: token ids of the first sequence.
        pair_ids: token ids of the optional second sequence.
        (remaining arguments mirror `encode_plus`.)

    Returns:
        BatchEncoding with 'input_ids' and any requested extra features.

    Raises:
        ValueError: if `return_token_type_ids` is requested while
            `add_special_tokens` is disabled.
    """
    # Deprecated alias: `return_lengths` is honored but warned about.
    if ('return_lengths' in kwargs):
        if verbose:
            warnings.warn('The PreTrainedTokenizerBase.prepare_for_model `return_lengths` parameter is deprecated. Please use `return_length` instead.', FutureWarning)
        return_length = kwargs['return_lengths']
    (padding_strategy, truncation_strategy, max_length, kwargs) = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs)
    pair = bool((pair_ids is not None))
    len_ids = len(ids)
    len_pair_ids = (len(pair_ids) if pair else 0)
    if ((return_token_type_ids is not None) and (not add_special_tokens)):
        raise ValueError('Asking to return token_type_ids while setting add_special_tokens to False results in an undefined behavior. Please set add_special_tokens to True or set return_token_type_ids to None.')
    # Defaults for the optional outputs come from the model's declared inputs.
    if (return_token_type_ids is None):
        return_token_type_ids = ('token_type_ids' in self.model_input_names)
    if (return_attention_mask is None):
        return_attention_mask = ('attention_mask' in self.model_input_names)
    encoded_inputs = {}
    # Total length includes the special tokens that would be added, so
    # truncation leaves room for them.
    total_len = ((len_ids + len_pair_ids) + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0))
    overflowing_tokens = []
    if ((truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE) and max_length and (total_len > max_length)):
        (ids, pair_ids, overflowing_tokens) = self.truncate_sequences(ids, pair_ids=pair_ids, num_tokens_to_remove=(total_len - max_length), truncation_strategy=truncation_strategy, stride=stride)
    if return_overflowing_tokens:
        encoded_inputs['overflowing_tokens'] = overflowing_tokens
        encoded_inputs['num_truncated_tokens'] = (total_len - max_length)
    # Build the final sequence and segment ids, with or without special tokens.
    if add_special_tokens:
        sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
        token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
    else:
        sequence = ((ids + pair_ids) if pair else ids)
        token_type_ids = (([0] * len(ids)) + (([0] * len(pair_ids)) if pair else []))
    encoded_inputs['input_ids'] = sequence
    if return_token_type_ids:
        encoded_inputs['token_type_ids'] = token_type_ids
    if return_special_tokens_mask:
        if add_special_tokens:
            encoded_inputs['special_tokens_mask'] = self.get_special_tokens_mask(ids, pair_ids)
        else:
            encoded_inputs['special_tokens_mask'] = ([0] * len(sequence))
    # Warn (once) about sequences longer than the model maximum when the
    # caller did not constrain the length explicitly.
    if ((max_length is None) and (len(encoded_inputs['input_ids']) > self.model_max_length) and verbose):
        if (not self.deprecation_warnings.get('sequence-length-is-longer-than-the-specified-maximum', False)):
            logger.warning('Token indices sequence length is longer than the specified maximum sequence length for this model ({} > {}). Running this sequence through the model will result in indexing errors'.format(len(encoded_inputs['input_ids']), self.model_max_length))
        self.deprecation_warnings['sequence-length-is-longer-than-the-specified-maximum'] = True
    # Padding (and attention-mask creation) is delegated to `pad`.
    if ((padding_strategy != PaddingStrategy.DO_NOT_PAD) or return_attention_mask):
        encoded_inputs = self.pad(encoded_inputs, max_length=max_length, padding=padding_strategy.value, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask)
    if return_length:
        encoded_inputs['length'] = len(encoded_inputs['input_ids'])
    batch_outputs = BatchEncoding(encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis)
    return batch_outputs
def truncate_sequences(self, ids: List[int], pair_ids: Optional[List[int]]=None, num_tokens_to_remove: int=0, truncation_strategy: Union[(str, TruncationStrategy)]='longest_first', stride: int=0) -> Tuple[(List[int], List[int], List[int])]:
    """Truncate sequence(s) by removing `num_tokens_to_remove` tokens.

    Args:
        ids: first sequence of token ids.
        pair_ids: optional second sequence.
        num_tokens_to_remove: how many tokens to drop in total.
        truncation_strategy: 'longest_first' removes one token at a time from
            whichever sequence is currently longer; 'only_first'/'only_second'
            remove all tokens from a single sequence.
        stride: extra tokens kept at the head of the overflow window so
            overflowing chunks overlap with the kept text.

    Returns:
        (truncated_ids, truncated_pair_ids, overflowing_tokens).
    """
    if (num_tokens_to_remove <= 0):
        return (ids, pair_ids, [])
    if (not isinstance(truncation_strategy, TruncationStrategy)):
        truncation_strategy = TruncationStrategy(truncation_strategy)
    overflowing_tokens = []
    if (truncation_strategy == TruncationStrategy.LONGEST_FIRST):
        # Remove one token per step from the longer sequence. The stride
        # window is only applied on the first removal; subsequent removals
        # contribute a single token each.
        # NOTE(review): overflow collected here can interleave tokens from
        # both sequences when lengths alternate.
        for _ in range(num_tokens_to_remove):
            if ((pair_ids is None) or (len(ids) > len(pair_ids))):
                if (not overflowing_tokens):
                    window_len = min(len(ids), (stride + 1))
                else:
                    window_len = 1
                overflowing_tokens.extend(ids[(- window_len):])
                ids = ids[:(- 1)]
            else:
                if (not overflowing_tokens):
                    window_len = min(len(pair_ids), (stride + 1))
                else:
                    window_len = 1
                overflowing_tokens.extend(pair_ids[(- window_len):])
                pair_ids = pair_ids[:(- 1)]
    elif (truncation_strategy == TruncationStrategy.ONLY_FIRST):
        # Drop the tail of the first sequence only; the overflow window keeps
        # `stride` extra tokens for overlap.
        if (len(ids) > num_tokens_to_remove):
            window_len = min(len(ids), (stride + num_tokens_to_remove))
            overflowing_tokens = ids[(- window_len):]
            ids = ids[:(- num_tokens_to_remove)]
        else:
            logger.error(f"We need to remove {num_tokens_to_remove} to truncate the inputbut the first sequence has a length {len(ids)}. Please select another truncation strategy than {truncation_strategy}, for instance 'longest_first' or 'only_second'.")
    elif ((truncation_strategy == TruncationStrategy.ONLY_SECOND) and (pair_ids is not None)):
        # Same as ONLY_FIRST but applied to the second sequence.
        if (len(pair_ids) > num_tokens_to_remove):
            window_len = min(len(pair_ids), (stride + num_tokens_to_remove))
            overflowing_tokens = pair_ids[(- window_len):]
            pair_ids = pair_ids[:(- num_tokens_to_remove)]
        else:
            logger.error(f"We need to remove {num_tokens_to_remove} to truncate the inputbut the second sequence has a length {len(pair_ids)}. Please select another truncation strategy than {truncation_strategy}, for instance 'longest_first' or 'only_first'.")
    return (ids, pair_ids, overflowing_tokens)
def _pad(self, encoded_inputs: Union[(Dict[(str, EncodedInput)], BatchEncoding)], max_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, return_attention_mask: Optional[bool]=None) -> dict:
    """Pad a single example's features in place to `max_length`.

    Pads 'input_ids' (and, when present, 'token_type_ids' and
    'special_tokens_mask') on the side given by `self.padding_side`, and
    builds 'attention_mask' (1 for real tokens, 0 for padding) when requested.

    Args:
        encoded_inputs: dict/BatchEncoding for ONE example.
        max_length: target length; for LONGEST it is the current length.
        padding_strategy: DO_NOT_PAD / LONGEST / MAX_LENGTH.
        pad_to_multiple_of: round the target length up to this multiple.
        return_attention_mask: defaults to `self.model_input_names`.

    Returns:
        dict: the (mutated) encoded inputs.

    Raises:
        ValueError: if `self.padding_side` is neither 'right' nor 'left'.
    """
    if (return_attention_mask is None):
        return_attention_mask = ('attention_mask' in self.model_input_names)
    # LONGEST for a single example means "no padding": target = current length.
    if (padding_strategy == PaddingStrategy.LONGEST):
        max_length = len(encoded_inputs['input_ids'])
    # Round the target length up to the requested multiple.
    if ((max_length is not None) and (pad_to_multiple_of is not None) and ((max_length % pad_to_multiple_of) != 0)):
        max_length = (((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of)
    needs_to_be_padded = ((padding_strategy != PaddingStrategy.DO_NOT_PAD) and (len(encoded_inputs['input_ids']) != max_length))
    if needs_to_be_padded:
        difference = (max_length - len(encoded_inputs['input_ids']))
        if (self.padding_side == 'right'):
            # Append pad values after the real tokens.
            if return_attention_mask:
                encoded_inputs['attention_mask'] = (([1] * len(encoded_inputs['input_ids'])) + ([0] * difference))
            if ('token_type_ids' in encoded_inputs):
                encoded_inputs['token_type_ids'] = (encoded_inputs['token_type_ids'] + ([self.pad_token_type_id] * difference))
            if ('special_tokens_mask' in encoded_inputs):
                encoded_inputs['special_tokens_mask'] = (encoded_inputs['special_tokens_mask'] + ([1] * difference))
            encoded_inputs['input_ids'] = (encoded_inputs['input_ids'] + ([self.pad_token_id] * difference))
        elif (self.padding_side == 'left'):
            # Prepend pad values before the real tokens.
            if return_attention_mask:
                encoded_inputs['attention_mask'] = (([0] * difference) + ([1] * len(encoded_inputs['input_ids'])))
            if ('token_type_ids' in encoded_inputs):
                encoded_inputs['token_type_ids'] = (([self.pad_token_type_id] * difference) + encoded_inputs['token_type_ids'])
            if ('special_tokens_mask' in encoded_inputs):
                encoded_inputs['special_tokens_mask'] = (([1] * difference) + encoded_inputs['special_tokens_mask'])
            encoded_inputs['input_ids'] = (([self.pad_token_id] * difference) + encoded_inputs['input_ids'])
        else:
            raise ValueError(('Invalid padding strategy:' + str(self.padding_side)))
    elif return_attention_mask:
        # No padding needed: the mask is all ones.
        encoded_inputs['attention_mask'] = ([1] * len(encoded_inputs['input_ids']))
    return encoded_inputs
def batch_decode(self, sequences: List[List[int]], skip_special_tokens: bool=False, clean_up_tokenization_spaces: bool=True) -> List[str]:
    """Decode a batch of token-id sequences by calling `decode` on each one,
    forwarding the flags unchanged."""
    decoded = []
    for sequence in sequences:
        decoded.append(self.decode(sequence, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces))
    return decoded
def decode(self, token_ids: List[int], skip_special_tokens: bool=False, clean_up_tokenization_spaces: bool=True) -> str:
    """Convert a sequence of token ids into a string.

    Abstract hook: concrete tokenizers must override this; the base class
    only defines the interface.
    """
    raise NotImplementedError
def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
    """Return a mask over `token_ids_0`: 1 where the token id is one of
    `self.all_special_ids`, 0 otherwise.

    Only supports `already_has_special_tokens=True` with a single sequence;
    anything else trips the assertion below.
    """
    assert (already_has_special_tokens and (token_ids_1 is None)), 'You cannot use ``already_has_special_tokens=False`` with this tokenizer. Please use a slow (full python) tokenizer to activate this argument.Or set `return_special_token_mask=True` when calling the encoding method to get the special tokens mask in any tokenizer. '
    special_ids = self.all_special_ids
    return [(1 if token_id in special_ids else 0) for token_id in token_ids_0]
def clean_up_tokenization(out_string: str) -> str:
    """Remove the spaces tokenization inserted before punctuation and inside
    English contractions (e.g. " ." -> ".", " n't" -> "n't")."""
    # (needle, replacement) pairs, applied left to right in the same order as
    # the original chained .replace() calls.
    replacements = (
        (' .', '.'),
        (' ?', '?'),
        (' !', '!'),
        (' ,', ','),
        (" ' ", "'"),
        (" n't", "n't"),
        (" 'm", "'m"),
        (" 's", "'s"),
        (" 've", "'ve"),
        (" 're", "'re"),
    )
    for needle, replacement in replacements:
        out_string = out_string.replace(needle, replacement)
    return out_string
def train(args, model, device, train_loader, optimizer, epoch):
    """Run one training epoch of negative-log-likelihood training.

    Args:
        args: namespace-like object with a `log_interval` attribute
            controlling how often progress is printed.
        model: torch module whose output feeds `F.nll_loss` (i.e. it is
            expected to emit log-probabilities).
        device: device the batches are moved to.
        train_loader: iterable of (data, target) batches; must expose a
            `.dataset` attribute for the progress message.
        optimizer: torch optimizer stepping `model`'s parameters.
        epoch: epoch number, used only in the progress message.
    """
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100.0 * batch_idx / len(train_loader), loss.item()))
def create_ngram_index(light_scenarios: List[LightScenario], n_values: List[int], tokenizer: LightTokenizer, stats_key_counts: Dict[(DataOverlapStatsKey, int)]) -> NgramIndex:
    """Build an n-gram -> entry-key index over the given scenarios.

    For every scenario and every n in `n_values`, tokenizes each instance's
    input and references and records, per n-gram, the set of
    `EntryDataOverlapKey`s (tagged PART_INPUT or PART_REF) in which it occurs.
    Also records the instance count per stats key in `stats_key_counts`
    (mutated in place).

    Args:
        light_scenarios: scenarios whose instances are indexed.
        n_values: n-gram sizes to index.
        tokenizer: tokenizer producing the token streams fed to `ngrams`.
        stats_key_counts: out-parameter mapping each stats key to the number
            of instances in its scenario.

    Returns:
        NgramIndex: {n: {ngram: set of EntryDataOverlapKey}}.
    """
    ngram_index: NgramIndex = {n: {} for n in n_values}
    for scenario in light_scenarios:
        hlog(f'Building ngram indexes for {scenario.scenario_key}')
        for n in n_values:
            stats_key = DataOverlapStatsKey(light_scenario_key=scenario.scenario_key, overlap_protocol_spec=OverlapProtocolSpec(n=n))
            stats_key_counts[stats_key] = len(scenario.instances)
            for instance in scenario.instances:
                # Every instance must carry a non-empty id; it links overlap
                # hits back to the instance. (Renamed from `id` to avoid
                # shadowing the builtin.)
                instance_id = instance.id
                assert instance_id
                input_tokens = tokenizer.tokenize(instance.input)
                for input_ngram in ngrams(input_tokens, n):
                    ngram_index[n].setdefault(input_ngram, set()).add(EntryDataOverlapKey(stats_key=stats_key, instance_id=instance_id, part=PART_INPUT))
                for reference in instance.references:
                    reference_tokens = tokenizer.tokenize(reference)
                    for reference_ngram in ngrams(reference_tokens, n):
                        ngram_index[n].setdefault(reference_ngram, set()).add(EntryDataOverlapKey(stats_key=stats_key, instance_id=instance_id, part=PART_REF))
    return ngram_index
def flat_transform_bmes_label(start_labels, end_labels, span_labels, ner_cate, threshold=0.5):
    """Convert start/end position indicators into a flat BMES tag sequence.

    Each start position is paired with the nearest end position at or after
    it; equal positions yield a single-token 'S-' tag, otherwise the interior
    is filled with 'M-' tags between the 'B-' and 'E-' tags.

    Args:
        start_labels: per-token indicators (non-zero marks an entity start).
        end_labels: per-token indicators (non-zero marks an entity end).
        span_labels: unused; kept for interface compatibility.
        ner_cate: entity category suffixed onto each tag, e.g. 'B-PER'.
        threshold: unused; kept for interface compatibility.

    Returns:
        list[str]: one tag per token ('O', 'B-x', 'M-x', 'E-x' or 'S-x').
    """
    bmes_labels = len(start_labels) * ['O']
    start_positions = [idx for (idx, flag) in enumerate(start_labels) if flag != 0]
    end_positions = [idx for (idx, flag) in enumerate(end_labels) if flag != 0]
    for start_item in start_positions:
        bmes_labels[start_item] = 'B-{}'.format(ner_cate)
    for end_item in end_positions:
        bmes_labels[end_item] = 'E-{}'.format(ner_cate)
    # Pair each start with the closest end that is not before it; starts with
    # no matching end keep their lone 'B-' tag.
    for tmp_start in start_positions:
        candidates = [pos for pos in end_positions if pos >= tmp_start]
        if len(candidates) == 0:
            continue
        tmp_end = min(candidates)
        if tmp_start != tmp_end:
            for i in range(tmp_start + 1, tmp_end):
                bmes_labels[i] = 'M-{}'.format(ner_cate)
        else:
            # Start and end coincide: single-token entity.
            bmes_labels[tmp_end] = 'S-{}'.format(ner_cate)
    return bmes_labels
def gausspulse(t, fc=1000, bw=0.5, bwr=(- 6), tpr=(- 60), retquad=False, retenv=False):
    """Return a Gaussian modulated sinusoid evaluated at `t`.

    Args:
        t: time value(s), or the string 'cutoff' to get the cutoff time at
            which the envelope drops to `tpr` dB.
        fc: center frequency (must be >= 0).
        bw: fractional bandwidth in the frequency domain (must be > 0).
        bwr: reference level (dB, < 0) at which `bw` is measured.
        tpr: reference level (dB, < 0) used only for the 'cutoff' query.
        retquad: also return the quadrature (imaginary) component.
        retenv: also return the Gaussian envelope.

    Returns:
        yI, optionally followed by yQ and/or yenv depending on the flags,
        or the scalar cutoff time when t == 'cutoff'.
    """
    # Validate pulse parameters up front.
    if fc < 0:
        raise ValueError(('Center frequency (fc=%.2f) must be >=0.' % fc))
    if bw <= 0:
        raise ValueError(('Fractional bandwidth (bw=%.2f) must be > 0.' % bw))
    if bwr >= 0:
        raise ValueError(('Reference level for bandwidth (bwr=%.2f) must be < 0 dB' % bwr))
    # Convert the dB reference to a linear ratio and derive the Gaussian
    # envelope coefficient `a` from the requested fractional bandwidth.
    ref = pow(10.0, bwr / 20.0)
    a = -(pi * fc * bw) ** 2 / (4.0 * log(ref))
    # Special query: the time at which the envelope falls below `tpr` dB.
    if isinstance(t, str):
        if t != 'cutoff':
            raise ValueError("If `t` is a string, it must be 'cutoff'")
        if tpr >= 0:
            raise ValueError('Reference level for time cutoff must be < 0 dB')
        tref = pow(10.0, tpr / 20.0)
        return sqrt(-log(tref) / a)
    envelope = exp(-a * t * t)
    phase = 2 * pi * fc * t
    in_phase = envelope * cos(phase)
    if not retquad and not retenv:
        return in_phase
    quadrature = envelope * sin(phase)
    if retenv and not retquad:
        return (in_phase, envelope)
    if retquad and not retenv:
        return (in_phase, quadrature)
    return (in_phase, quadrature, envelope)
class KNN(Function):
    """autograd Function wrapping a CUDA k-nearest-neighbour kernel.

    Returns, for each query point in `center_xyz`, the indices of its `k`
    nearest points in `xyz`, computed by `ext_module.knn_forward`.
    The op is not differentiable (indices only).
    """
    # NOTE(review): `forward` is not decorated with @staticmethod, unlike the
    # usual torch.autograd.Function convention — confirm against the original.
    def forward(ctx, k: int, xyz: torch.Tensor, center_xyz: torch.Tensor=None, transposed: bool=False) -> torch.Tensor:
        """Compute kNN indices.

        Args:
            k: number of neighbours; must be in (0, 100).
            xyz: source point cloud; (B, N, 3) layout expected when
                `transposed` is False — presumably (B, 3, N) when True, since
                it is transposed with dims (2, 1) below. TODO confirm.
            center_xyz: query points; defaults to `xyz` itself.
            transposed: whether inputs are channel-first and need transposing.

        Returns:
            torch.Tensor: int index tensor, transposed to (B, k, npoint)
            before being returned.
        """
        # `&` on two bools works like `and` here; both bounds must hold.
        assert ((k > 0) & (k < 100)), 'k should be in range(0, 100)'
        if (center_xyz is None):
            center_xyz = xyz
        if transposed:
            xyz = xyz.transpose(2, 1).contiguous()
            center_xyz = center_xyz.transpose(2, 1).contiguous()
        # The extension kernel requires contiguous memory.
        assert xyz.is_contiguous()
        assert center_xyz.is_contiguous()
        center_xyz_device = center_xyz.get_device()
        assert (center_xyz_device == xyz.get_device()), 'center_xyz and xyz should be put on the same device'
        # Make sure the kernel launches on the tensors' device.
        if (torch.cuda.current_device() != center_xyz_device):
            torch.cuda.set_device(center_xyz_device)
        (B, npoint, _) = center_xyz.shape
        N = xyz.shape[1]
        # Output buffers filled by the CUDA kernel: neighbour indices and
        # their squared(?) distances — distance semantics not visible here.
        idx = center_xyz.new_zeros((B, npoint, k)).int()
        dist2 = center_xyz.new_zeros((B, npoint, k)).float()
        ext_module.knn_forward(xyz, center_xyz, idx, dist2, b=B, n=N, m=npoint, nsample=k)
        idx = idx.transpose(2, 1).contiguous()
        # parrots does not support mark_non_differentiable.
        if (torch.__version__ != 'parrots'):
            ctx.mark_non_differentiable(idx)
        return idx
    def backward(ctx, a=None):
        """No gradients flow through index outputs."""
        return (None, None, None)
class Trainer(object):
    """TF1-style training driver: runs an optimizer over batches from a
    batcher, invoking hook callbacks per iteration and per epoch, with
    optional early stopping on an accuracy threshold.

    Uses deprecated TF1 APIs (`tf.Session`, `tf.initialize_all_variables`,
    `tf.all_variables`).
    """
    def __init__(self, optimizer, max_epochs, hooks):
        # Loss tensor is bound lazily when __call__ receives it.
        self.loss = None
        self.optimizer = optimizer
        self.max_epochs = max_epochs
        self.hooks = hooks
    def __call__(self, batcher, placeholders, loss, acc_thresh, pretrain, embedd, sep=False, model=None, session=None):
        """Train until max_epochs or until an accuracy hook crosses acc_thresh.

        Args:
            batcher: iterable of per-batch value tuples, aligned with
                `placeholders` by position.
            placeholders: TF placeholders fed each batch.
            loss: loss tensor to minimize.
            acc_thresh: early-stop threshold checked against
                AccuracyHookIgnoreNeutral hooks at epoch end.
            pretrain: 'pre' or 'pre_cont' enables pretrained-embedding
                assignment into the first graph variable(s).
            embedd: pretrained embedding matrix assigned when pretraining.
            sep: when True, assigns `embedd` into the first TWO variables.
            model: forwarded to hooks.
            session: existing tf.Session; one is created (and later closed)
                when None.

        Returns:
            The epoch at which early stopping fired, or max_epochs - 1.
        """
        self.loss = loss
        minimization_op = self.optimizer.minimize(loss)
        close_session_after_training = False
        if (session is None):
            session = tf.Session()
            close_session_after_training = True
        init = tf.initialize_all_variables()
        # Pretrained embeddings are assigned into the first variable(s) of the
        # graph; relies on tf.all_variables() ordering.
        if (((pretrain == 'pre') or (pretrain == 'pre_cont')) and (sep == False)):
            vars = tf.all_variables()
            emb_var = vars[0]
            session.run(emb_var.assign(embedd))
        elif (((pretrain == 'pre') or (pretrain == 'pre_cont')) and (sep == True)):
            vars = tf.all_variables()
            emb_var = vars[0]
            emb_var2 = vars[1]
            session.run(emb_var.assign(embedd))
            session.run(emb_var2.assign(embedd))
        # NOTE(review): `init` runs AFTER the embedding assignment above;
        # initialize_all_variables may overwrite the just-assigned embeddings
        # — confirm the intended ordering.
        session.run(init)
        epoch = 1
        # NOTE(review): `while epoch < max_epochs` runs max_epochs - 1 epochs,
        # matching the `max_epochs - 1` return value below.
        while (epoch < self.max_epochs):
            iteration = 1
            for values in batcher:
                # NOTE(review): incremented before use, so hooks see iteration
                # numbers starting at 2 — confirm intended.
                iteration += 1
                feed_dict = {}
                for i in range(0, len(placeholders)):
                    feed_dict[placeholders[i]] = values[i]
                (_, current_loss) = session.run([minimization_op, loss], feed_dict=feed_dict)
                # Loss is summed over its components before reporting.
                current_loss = sum(current_loss)
                for hook in self.hooks:
                    hook(session, epoch, iteration, model, current_loss)
            # End-of-epoch hooks (iteration 0); accuracy hooks may early-stop.
            for hook in self.hooks:
                if isinstance(hook, AccuracyHookIgnoreNeutral):
                    acc = hook(session, epoch, 0, model, 0)
                    if (acc > acc_thresh):
                        print('Accuracy threshold reached! Stopping training.')
                        if close_session_after_training:
                            session.close()
                        return epoch
                else:
                    hook(session, epoch, 0, model, 0)
            epoch += 1
        if close_session_after_training:
            session.close()
        return (self.max_epochs - 1)
def test_UnionArray_RecordArray_NumpyArray():
    """Round-trip a UnionArray8_64 form (records of int64 / float64) through
    awkward's `from_dict` / `to_dict` and check the normalized output.
    """
    # Legacy-style form JSON: concrete class name 'UnionArray8_64', record
    # contents keyed directly under 'contents'.
    v1 = json.loads('{"class":"UnionArray8_64","tags":"i8","index":"i64","contents":[{"class":"RecordArray","contents":{"nest":{"class":"NumpyArray","inner_shape":[],"itemsize":8,"format":"l","primitive":"int64","parameters":{},"form_key":null}},"parameters":{},"form_key":null},{"class":"RecordArray","contents":{"nest":{"class":"NumpyArray","inner_shape":[],"itemsize":8,"format":"d","primitive":"float64","parameters":{},"form_key":null}},"parameters":{},"form_key":null}],"parameters":{},"form_key":null}')
    # Parse into a Form object and dump back to the modern dict layout.
    v2 = ak.forms.from_dict(v1).to_dict()
    # The round trip generalizes the class name to 'UnionArray', splits record
    # fields into 'fields' + 'contents' lists, and drops itemsize/format.
    assert (v2 == {'class': 'UnionArray', 'tags': 'i8', 'index': 'i64', 'contents': [{'class': 'RecordArray', 'fields': ['nest'], 'contents': [{'class': 'NumpyArray', 'primitive': 'int64', 'inner_shape': [], 'parameters': {}, 'form_key': None}], 'parameters': {}, 'form_key': None}, {'class': 'RecordArray', 'fields': ['nest'], 'contents': [{'class': 'NumpyArray', 'primitive': 'float64', 'inner_shape': [], 'parameters': {}, 'form_key': None}], 'parameters': {}, 'form_key': None}], 'parameters': {}, 'form_key': None})
def reduce_process(output_queue, output):
    """Drain (ordinal, text) pairs from `output_queue` and write the texts to
    `output` in ordinal order, buffering out-of-order arrivals.

    A falsy item (e.g. None) on the queue terminates the loop. Progress is
    logged every `period` articles with the extraction rate over the interval.
    """
    interval_start = default_timer()
    period = 100000
    ordering_buffer = {}
    next_ordinal = 0
    while True:
        # Flush every buffered item that is now next in line.
        while next_ordinal in ordering_buffer:
            output.write(ordering_buffer.pop(next_ordinal))
            next_ordinal += 1
            if next_ordinal % period == 0:
                interval_rate = period / (default_timer() - interval_start)
                logging.info('Extracted %d articles (%.1f art/s)', next_ordinal, interval_rate)
                interval_start = default_timer()
        # Block for the next pair; a falsy sentinel ends the reducer.
        pair = output_queue.get()
        if not pair:
            break
        ordinal, text = pair
        ordering_buffer[ordinal] = text
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.