code stringlengths 101 5.91M |
|---|
def test_dialect_perturbation():
data_augmenter = DataAugmenter(perturbations=[DialectPerturbation(prob=1.0, source_class='SAE', target_class='AAVE')])
instance: Instance = Instance(id='id0', input=Input(text='I will remember this day to be the best day of my life.'), references=[Reference(Output(text='Is this ... |
def _mobilenet_v2(net, depth_multiplier, output_stride, reuse=None, scope=None, final_endpoint=None):
with tf.variable_scope(scope, 'MobilenetV2', [net], reuse=reuse) as scope:
return mobilenet_v2.mobilenet_base(net, conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=depth_multiplier, min_depth=(8 if (depth_mu... |
def QDM_21_6_1_1_5():
M = [[None, None, None, None, None], [0, 0, 0, 0, 0], [1, 6, 7, 8, 14], [3, 11, 20, 18, 10], [6, 10, 14, 1, 5], [4, 19, 5, 12, 2]]
from sage.rings.finite_rings.integer_mod_ring import IntegerModRing as AdditiveCyclic
G = AdditiveCyclic(21)
Mb = [[0, 0, 0, 0, 0, 0]]
for R in zip... |
class classify_model(nn.Module):
def __init__(self, size_question, path_init):
super(classify_model, self).__init__()
self.w_emb = WordEmbedding(size_question, 300, 0.0, False)
self.w_emb.init_embedding(path_init)
self.q_emb = QuestionEmbedding(300, 1024, 1, False, 0.0, 'GRU')
... |
def read_s3_yaml(bucket, name):
    """Fetch object ``name`` from S3 ``bucket`` and parse it as YAML.

    Relies on module-level ``access_key``/``secret_key`` credentials.
    Returns the deserialized YAML content.
    """
    client = boto3.client(
        service_name='s3',
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key,
    )
    body = client.get_object(Bucket=bucket, Key=name)['Body']
    return yaml.safe_load(body)
class BatchedInput(
        collections.namedtuple(
            'BatchedInput',
            ['initializer', 'source', 'target_input', 'target_output',
             'source_sequence_length', 'target_sequence_length'])):
    """Immutable record bundling a dataset initializer with batched tensors.

    Subclassing the namedtuple keeps plain tuple behavior while giving the
    type a stable, importable name.
    """
def register_Ns3GrantManagementSubheader_methods(root_module, cls):
cls.add_constructor([param('ns3::GrantManagementSubheader const &', 'arg0')])
cls.add_constructor([])
cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
cls.add_method('GetInstanceTypeI... |
def validate_no_iban(df: Union[(str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame)], column: str='') -> Union[(bool, pd.Series, pd.DataFrame)]:
if isinstance(df, (pd.Series, dd.Series)):
return df.apply(iban.is_valid)
elif isinstance(df, (pd.DataFrame, dd.DataFrame)):
if (column != ''):
... |
def sample(args, data, target):
examples = []
for i in range(args.samples):
while True:
example = data[random.randint(0, (len(data) - 1))]
std = target.step([example])[(- 1)]
if (std['embedding_output'][0].shape[0] > args.max_verify_length):
continue
... |
class SimpleModelNoEMA(nn.Module):
    """Model holding two ``SimpleModule`` children.

    The name suggests this is the no-EMA counterpart of an EMA-tracked
    variant defined elsewhere (not visible here).
    """

    def __init__(self) -> None:
        super().__init__()
        # Two independent sub-modules registered on construction.
        self.module_a = SimpleModule()
        self.module_b = SimpleModule()
def get_alpaca_farm_model_names():
    """List alpaca-farm model names from the ``tatsu-lab`` HF organization.

    Each hub model id has the ``tatsu-lab/alpaca-farm-`` prefix and any
    ``-wdiff`` suffix stripped before being returned.
    """
    hub = HfApi()
    found = hub.list_models(author='tatsu-lab', search='alpaca-farm')
    return [
        m.modelId.replace('tatsu-lab/alpaca-farm-', '').replace('-wdiff', '')
        for m in found
    ]
def Genus(A, factored_determinant=None):
if (factored_determinant is None):
D = A.determinant()
D = (2 * D)
D = D.factor()
else:
D = (factored_determinant * 2)
sig_pair = signature_pair_of_matrix(A)
local_symbols = []
for f in D:
p = f[0]
val = f[1]
... |
def test_get_static_parameters_from_properties(operation_with_property_examples):
example = examples.get_static_parameters_from_properties(operation_with_property_examples)
assert ('query' in example)
assert ('param1' in example['query'])
assert ('param2' in example['query'])
assert (example['query'... |
def join_signs(*fsws: str, spacing: int=0):
signs = [fsw_to_sign(fsw) for fsw in fsws]
new_sign: Sign = {'box': {'symbol': 'M', 'position': (500, 500)}, 'symbols': []}
accumulative_offset = 0
for sign in signs:
sign_min_y = min(all_ys(sign))
sign_offset_y = ((accumulative_offset + spacin... |
class Profiler():
def __init__(self, name=None, parent=None, device=None):
self.device = device
self.name = name
self.parent = parent
self.start_time = 0
self.end_time = 0
self.total = 0
self.measurements = {}
def start(self):
self.start = time.per... |
class BaseDocumentState():
def __init__(self, key):
self.doc_key = key
self.sentence_end = []
self.token_end = []
self.tokens = []
self.subtokens = []
self.info = []
self.segments = []
self.subtoken_map = []
self.orig_subtoken_map = []
... |
def create_splits_scenes(verbose: bool=False) -> Dict[(str, List[str])]:
all_scenes = ((train + val) + test)
assert ((len(all_scenes) == 1000) and (len(set(all_scenes)) == 1000)), 'Error: Splits incomplete!'
scene_splits = {'train': train, 'val': val, 'test': test, 'mini_train': mini_train, 'mini_val': mini... |
class _MatrixEntryIterator(object):
def __init__(self, rows, cols, rowMajor):
self.rows = rows
self.cols = cols
self.currentRow = 0
self.currentCol = 0
self.rowMajor = rowMajor
def __iter__(self):
return self
def next(self):
return self.__next__()
... |
def add_subcommand_completions(ctx, incomplete, completions_out):
if isinstance(ctx.command, MultiCommand):
completions_out.extend([(c.name, c.get_short_help_str()) for c in get_visible_commands_starting_with(ctx, incomplete)])
while (ctx.parent is not None):
ctx = ctx.parent
if (isinsta... |
def validate_is_kennitala(df: Union[(str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame)], column: str='') -> Union[(bool, pd.Series, pd.DataFrame)]:
if isinstance(df, (pd.Series, dd.Series)):
return df.apply(kennitala.is_valid)
elif isinstance(df, (pd.DataFrame, dd.DataFrame)):
if (column !=... |
def fetchPanelResolution():
    """Return the ``resolutionScale`` attribute of the first qlPattern node's shape.

    Assumes at least one ``*qlPattern*`` node exists in the scene; raises
    IndexError otherwise (same as the original behavior).
    """
    panels = cmds.ls('*qlPattern*')
    panel_shape = cmds.listRelatives(panels[0], shapes=True, path=True)[0]
    return cmds.getAttr(panel_shape + '.resolutionScale')
_utils.test()
def test_func_template2():
a = ti.field(dtype=ti.f32)
b = ti.field(dtype=ti.f32)
ti.root.dense(ti.ij, 16).place(a, b)
def sample(x: ti.template(), I):
return x[I]
def fill():
for I in ti.grouped(a):
a[I] = 1.0
def aTob():
for I in ti.grouped(b):
... |
def collect_configurations():
    """Build every (config, network, fourier) experiment combination.

    Returns a list of ``(config_payload, network_payload, fourier_payload,
    filename)`` tuples derived from the module-level ``configX`` /
    ``networkX`` / ``fourierX`` grids.
    """
    return [
        (cfg[1], net[1:], four[1],
         'fourier-world-%s-%s-%s' % (cfg[0], net[0], four[0]))
        for (cfg, net, four) in itertools.product(configX, networkX, fourierX)
    ]
def therefore(text: Optional[str]):
    """Report whether ``text`` (stripped) matches ``_PAT_THEREFORE``.

    ``None`` input is treated as a non-match.
    """
    if text is None:
        return False
    return _PAT_THEREFORE.match(text.strip()) is not None
def date_time_precision(dt, precision):
result = ''
if ((precision == 'Year') or (precision == 'year')):
result += str(dt.year)
elif ((precision == 'Month') or (precision == 'month')):
result += (str(dt.year) + str(dt.month))
elif ((precision == 'Day') or (precision == 'day')):
r... |
def divergence(vf: ti.template(), divf: ti.template()):
    # Central-difference divergence of the 2D vector field `vf`, written into
    # the scalar field `divf`.
    # NOTE(review): neighbors at i±1 / j±1 are read without a bounds guard, so
    # boundary cells presumably rely on Taichi's out-of-range handling or on
    # callers only using interior values — confirm.
    for (i, j) in vf:
        # 0.5 * (d(vx)/dx + d(vy)/dy), assuming unit grid spacing.
        divf[(i, j)] = (0.5 * (((vf[((i + 1), j)][0] - vf[((i - 1), j)][0]) + vf[(i, (j + 1))][1]) - vf[(i, (j - 1))][1]))
class Block():
def __init__(self, var_name, size, start_index=0, reverse=False):
indices = range(start_index, (start_index + size))
if reverse:
indices = reversed(indices)
self.names = [(((var_name + '(') + str(i)) + ')') for i in indices]
self.var_name = var_name
... |
class Repository():
    """Plain value object describing a source-code repository."""

    def __init__(self, vcstype: Optional[str], url: Optional[str]):
        # Repository location and its version-control system identifier;
        # either may be None.
        self.url = url
        self.vcstype = vcstype
def _gather_quantiles_by_indices(y: torch.Tensor, indices: torch.Tensor) -> torch.Tensor:
if (y.dim() == 3):
return y.transpose(0, 1)[(torch.arange(y.shape[1]), indices)]
elif (y.dim() == 4):
transposed_y = y.transpose(0, 1).transpose(1, 2)
flat_y = transposed_y.reshape((- 1), y.shape[0]... |
class Example(nn.Module):
def __init__(self):
self.cb = ConvBn(2)
self.cb2 = ConvBn(2)
self.shared1 = Shared(self.cb2)
self.shared2 = Shared(self.cb2)
def call(self, x):
h = self.cb(x)
h = self.cb2(h)
h = self.shared1(h)
h = self.shared2(h)
... |
class BaseNode():
def __init__(self, name: str, framework_attr: Dict[(str, Any)], input_shape: Tuple[Any], output_shape: Tuple[Any], weights: Dict[(str, np.ndarray)], layer_class: type, reuse: bool=False, reuse_group: str=None, quantization_attr: Dict[(str, Any)]=None, has_activation: bool=True, is_custom: bool=Fal... |
def data_preprocessing(params: Params) -> (str, str, int, int):
output_dir = os.path.join(params.output_model_dir, CONST.PREPROCESSING_FOLDER)
if (not os.path.exists(output_dir)):
os.makedirs(output_dir)
else:
.format(output_dir)
csv_train_output = os.path.join(output_dir, 'updated_train... |
def is_strict_pos_int(arg):
    """argparse ``type=`` helper: parse ``arg`` as a strictly positive int.

    Raises ``argparse.ArgumentTypeError`` when the parsed value is <= 0.
    A non-numeric string still raises ValueError from ``int()``, which
    argparse also reports as an invalid argument.
    """
    value = int(arg)
    if value > 0:
        return value
    raise argparse.ArgumentTypeError('must be strictly positive')
def extract_all_files(completed_urls, extract_folder, get_extract_name=get_extract_name, completed_extraction={}, debug=False):
extracted_folders = OrderedDict()
for (url, downloaded_file) in set(completed_urls.items()):
if (downloaded_file in completed_extraction):
print(f'{downloaded_file}... |
class AttentionModule(nn.Module):
    """Intersect an incoming attention map with freshly computed node attention.

    Delegates to ``AttendNodeModule`` for the new attention and to
    ``AndModule`` to combine it with the input map.
    """

    def __init__(self, **kwargs):
        super().__init__()
        self.attendNode = AttendNodeModule()
        self.attnAnd = AndModule()

    def forward(self, attn, feat, query):
        fresh = self.attendNode(feat, query)
        combined = self.attnAnd(attn, fresh)
        return combined
def cli_main():
    """Entry point: parse eval-LM CLI arguments and launch distributed ``main``."""
    arg_parser = options.get_eval_lm_parser()
    parsed = options.parse_args_and_arch(arg_parser)
    cfg = convert_namespace_to_omegaconf(parsed)
    distributed_utils.call_main(cfg, main)
def register_Ns3SpectrumInterference_methods(root_module, cls):
cls.add_constructor([])
cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
cls.add_method('SetErrorModel', 'void', [param('ns3::Ptr< ns3::SpectrumErrorModel >', 'e')])
cls.add_method('StartRx', 'void', [param('ns3::Ptr< ns3::Pac... |
class TrainerCriterion():
    """Mixin that instantiates a ``torch.nn`` loss from a config object."""

    # Configuration; expected to expose ``criterion.name`` and ``criterion.args``.
    cfg: T.DictConfig

    def init_criterion(self) -> T.Loss:
        """Look up ``cfg.criterion.name`` on ``torch.nn`` and construct it."""
        loss_cls = getattr(torch.nn, self.cfg.criterion.name)
        kwargs = self.cfg.criterion.args
        # A falsy args block means "use the loss class's defaults".
        return loss_cls(**kwargs) if kwargs else loss_cls()
def resnet152(pretrained=False, **kwargs):
    """Build a ResNet-152 (Bottleneck blocks [3, 8, 36, 3]).

    When ``pretrained`` is True, loads weights from ``model_zoo`` with the
    final fully-connected layer stripped via ``remove_fc``.
    """
    net = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['resnet152'])
        net.load_state_dict(remove_fc(state))
    return net
def normalize(im_batch, _range=None, _domain=None):
if (len(im_batch.shape) == 2):
axis = (0, 1)
elif (len(im_batch.shape) == 3):
axis = (0, 1, 2)
elif (len(im_batch.shape) == 4):
axis = (1, 2, 3)
else:
raise ValueError('im_batch must be of rank 2, 3 or 4')
if (_domai... |
class MagmasAndAdditiveMagmas(Category_singleton):
class SubcategoryMethods():
_method
def Distributive(self):
return self._with_axiom('Distributive')
def super_categories(self):
return [Magmas(), AdditiveMagmas()]
def additional_structure(self):
return None
D... |
class Sine(nn.Module):
    """Sinusoidal activation: ``sin(w0 * x)``."""

    def __init__(self, w0=1.0):
        super().__init__()
        # Frequency scaling factor applied before the sine.
        self.w0 = w0

    def forward(self, x):
        scaled = self.w0 * x
        return torch.sin(scaled)
def create_args(args=None):
    """Populate ``args`` with the fixed dataset-split job configuration.

    Args:
        args: optional pre-existing namespace to fill in; a fresh
            ``argparse.Namespace`` is created when omitted.  (The original
            used a mutable default argument, which shared one namespace
            object across all calls.)

    Returns:
        The populated namespace.
    """
    if args is None:
        args = argparse.Namespace()
    args.seed = 42
    args.data_bucket_path = '/tmp/dataset_v1/0_raw/train.txt'
    args.out_bucket_path = '/tmp/dataset_v1/1_split_raw/{:012d}.txt'
    args.out_splits = 1024
    # FIXME: the original assignment was truncated ("args.assert_samples_num =")
    # and is syntactically invalid; the expected sample count must be restored
    # by the author.  None is a placeholder so the module at least imports.
    args.assert_samples_num = None
    return args
class InvalidDataError(Exception):
    """Raised when provided data fails metadata validation.

    Carries the individual validation errors and renders them all in the
    exception message, separated by blank lines.
    """

    def __init__(self, errors):
        # Sequence of per-field validation errors (any str()-able objects).
        self.errors = errors

    def __str__(self):
        details = '\n\n'.join(str(err) for err in self.errors)
        return 'The provided data does not match the metadata:\n' + details
def train_one_epoch(train_loader, model, device, criterion, optimizer, epoch, writer, cfg, update_train_step):
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
for (i, (input, target)) in enumerate(train_loader):
update_train_step += 1
target = target.to(device)
... |
class ASR_Brain(sb.Brain):
def compute_forward(self, batch, stage):
batch = batch.to(self.device)
(wavs, wav_lens) = batch.sig
(phns, phn_lens) = batch.phn_encoded
if (stage == sb.Stage.TRAIN):
if hasattr(self.hparams, 'augmentation'):
wavs = self.hparams.... |
def write_metadata_from_sxs(out_filename, resolution, metadata, catalog, catalog_resolutions, start_time, peak_time, l_max, log=print):
log('Writing metadata')
names = metadata['alternative_names']
if (not isinstance(names, (list, tuple))):
names = [names]
sxs_id = sxs_id_from_alt_names(names)
... |
class GabidulinCode(AbstractLinearRankMetricCode):
_registered_encoders = {}
_registered_decoders = {}
def __init__(self, base_field, length, dimension, sub_field=None, twisting_homomorphism=None, evaluation_points=None):
twist_fix_field = None
have_twist = (twisting_homomorphism is not None... |
def audio_resample():
audio_path = '../dataset/PEmoDataset/audios/seg'
save_path = './dataset/resample22050'
for fn in tqdm(total):
pt_path = Path(save_path, (fn + '.pt'))
resample = torch_sox_effect_load(Path(audio_path, (fn + '.mp3')), 22050).mean(0, True)
if (not os.path.exists(os... |
def discard_faces(cones):
    """Filter ``cones`` down to those that are not faces of another cone.

    Cones are scanned in order of decreasing dimension so any cone that is
    a face of an already-kept (higher-dimensional) cone is dropped.
    """
    by_dim_desc = sorted(cones, key=lambda c: c.dim(), reverse=True)
    kept = []
    for candidate in by_dim_desc:
        if not any(candidate.is_face_of(g) for g in kept):
            kept.append(candidate)
    return kept
def get_enum(reduction):
if (reduction == 'none'):
ret = 0
elif (reduction == 'mean'):
ret = 1
elif (reduction == 'elementwise_mean'):
warnings.warn("reduction='elementwise_mean' is deprecated, please use reduction='mean' instead.")
ret = 1
elif (reduction == 'sum'):
... |
(spacepy.lib.have_libspacepy, 'No C backend')
class AssocTestsPython(AssocTests):
    """Variant of AssocTests that forces the pure-Python code path."""
    def setUp(self):
        # Disable the libspacepy C extension *before* the base setUp runs,
        # so fixtures are built against the Python implementation.
        spacepy.lib.have_libspacepy = False
        super(AssocTestsPython, self).setUp()
    def tearDown(self):
        super(AssocTestsPython, self).tearDown()
        # Restore the flag after base teardown for subsequent test classes.
        spacepy.lib.have_libspacepy = True
def build_AE_config(args: argparse.Namespace):
drop_rates = ((0.0, 0.05, args.drop_rate) if args.use_locked_drop else (args.drop_rate, 0.0, 0.0))
decoder_config = SpanAttrClassificationDecoderConfig(agg_mode=args.agg_mode, neg_sampling_rate=args.neg_sampling_rate, max_size_id=args.max_size_id, size_emb_dim=args... |
_torch
class FunnelModelTest(ModelTesterMixin, unittest.TestCase):
test_head_masking = False
test_pruning = False
all_model_classes = ((FunnelModel, FunnelForMaskedLM, FunnelForPreTraining, FunnelForQuestionAnswering, FunnelForTokenClassification) if is_torch_available() else ())
def _prepare_for_class(... |
def find_cuda_device_arch():
if (sys.platform == 'win32'):
return None
cuda_home = find_cuda()
if (cuda_home is None):
return None
cuda_home = Path(cuda_home)
try:
device_query_path = (cuda_home / 'extras/demo_suite/deviceQuery')
if (not device_query_path.exists()):
... |
class RoughScorer(torch.nn.Module):
def __init__(self, features: int, config: Config):
super().__init__()
self.dropout = torch.nn.Dropout(config.dropout_rate)
self.bilinear = torch.nn.Linear(features, features)
self.k = config.rough_k
def forward(self, mentions: torch.Tensor) -> ... |
def tokenize(sentence, grams):
    """Split ``sentence`` on whitespace and emit n-gram tokens.

    For each gram size in ``grams``, consecutive word windows are joined
    with the literal separator ``'_*_'``; results are concatenated in the
    order the gram sizes are given.
    """
    words = sentence.split()
    return [
        '_*_'.join(words[start:start + gram])
        for gram in grams
        for start in range(len(words) - gram + 1)
    ]
()
class TD3Config(LearnableConfig):
actor_learning_rate: float = 0.0003
critic_learning_rate: float = 0.0003
actor_optim_factory: OptimizerFactory = make_optimizer_field()
critic_optim_factory: OptimizerFactory = make_optimizer_field()
actor_encoder_factory: EncoderFactory = make_encoder_field()
... |
class Issue216CurrentInvocation(ReBenchTestCase):
def setUp(self):
super(Issue216CurrentInvocation, self).setUp()
self._set_path(__file__)
def _records_data_points(self, exp_name, num_data_points):
cnf = Configurator(load_config((self._path + '/issue_216.conf')), DataStore(self.ui), self... |
def es_rule_conditionB2(memory_info: 'MemoryInfo', manager: 'MemoryManager', args: Arguments) -> List['MemoryInfo']:
memory_indices = args['memory_indices']
left = args['left']
right = args['right']
fidelity = args['fidelity']
if ((memory_info.state == 'ENTANGLED') and (memory_info.index in memory_i... |
def _default_dtype_mapping(dtype):
if (dtype in [np.int32, np.int64, int]):
return torch.int32
elif (dtype in [float, np.float32, np.float16]):
return torch.float32
elif (dtype == np.float64):
return torch.float64
elif (dtype in bool):
return torch.float32
else:
... |
_spec_function('quac')
def get_quac_spec() -> RunSpec:
scenario_spec = ScenarioSpec(class_name='helm.benchmark.scenarios.quac_scenario.QuACScenario', args={})
adapter_spec = get_generation_adapter_spec(input_noun=None, output_noun='Answer', max_tokens=100)
return RunSpec(name='quac', scenario_spec=scenario_... |
def _label2rgb_overlay(label, image=None, colors=None, alpha=0.3, bg_label=(- 1), bg_color=None, image_alpha=1, saturation=0):
if (not (0 <= saturation <= 1)):
warn(f'saturation must be in range [0, 1], got {saturation}')
if (colors is None):
colors = DEFAULT_COLORS
colors = [_rgb_vector(c) ... |
def drop_private_keys(full_dict: Dict[(str, Any)]) -> Dict[(str, Any)]:
    """Return a copy of ``full_dict`` without underscore-prefixed keys.

    Fix: the original indexed ``key[0]`` and therefore raised IndexError on
    an empty-string key; ``startswith`` treats '' as public and keeps it.
    """
    return {key: value for key, value in full_dict.items()
            if not key.startswith('_')}
def submit_job(params, use_gpu=False):
if use_gpu:
cmd_line = 'bsub -W 2:00 -n 1 -R "rusage[mem=1000,ngpus_excl_p=1]"'
else:
cmd_line = 'bsub -W 2:00 -n 1 -R "rusage[mem=1000]"'
job_name = '_'.join(map((lambda x: str(x)), params.values()))
cmd_line += (' -J %s -o %s.txt' % (job_name, job... |
def main(opts):
random.seed(opts.seed)
(utt2spk, ihm2sdms) = parse_list(opts.train_scp, opts)
(utt2spk_test, ihm2sdms_test) = parse_list(opts.test_scp, opts)
assert ((utt2spk is not None) and (ihm2sdms is not None)), 'Looks like parsing of {} did not suceed'.format(opts.train_scp)
assert ((utt2spk_t... |
def feed_dictionary(model, batch, rho, gamma, dropout=1, learning_rate=None):
feed_dict = {model.dropout: dropout, model.learning_rate: learning_rate, model.rho: rho, model.gamma: gamma, model.batch_len: batch['len'], model.batch_size: batch['size'], model.enc_inputs: batch['enc_inputs'], model.dec_inputs: batch['d... |
def get_audio_length(filename):
    """Return the duration of ``filename`` as reported by ffmpeg.

    Fix: dropped the unused ``ext`` local — the original computed the file
    extension via ``os.path.splitext`` but never consulted it before
    delegating to ``ffmpeg_get_audio_length``.
    """
    return ffmpeg_get_audio_length(filename)
class Utils(object):
def rand_cmap(nlabels, type='bright', first_color_black=False, last_color_black=False, verbose=False):
if (type not in ('bright', 'soft')):
print('Please choose "bright" or "soft" for type')
return
if verbose:
print(('Number of labels: ' + str... |
class AttributeFilter(Filter):
def __init__(self, attr: str, value: Any, op: Callable):
self.attr = attr
self.value = value
self.op = op
def __eq__(self, other: Any) -> bool:
if (not isinstance(other, AttributeFilter)):
return False
return ((self.attr == other... |
def _gen_random_datatime_series(size: int, start: str='1/1/2018', end: str='1/1/2019', random_state: Union[(int, np.random.RandomState)]=0) -> pd.Series:
    """Sample ``size`` dates uniformly (with replacement) from [start, end].

    Note: "datatime" in the name is a typo for "datetime", kept for
    interface compatibility.
    """
    rng = _resolve_random_state(random_state)
    candidate_dates = pd.date_range(start, end)
    picks = rng.choice(candidate_dates, size=size)
    return pd.Series(picks)
def calc_wer_stats(hyp_str, ref_str):
    """Compute word-error-rate statistics between a hypothesis and a reference."""
    transformer = WERTransformer(hyp_str, ref_str, verbose=0)
    return transformer.stats()
class TestFitPipeline():
def setup_class(cls):
cls.data = pd.DataFrame({'timestamp': list(range(100)), 'value': ([1] * 100)})
('orion.core.Orion.DEFAULT_PIPELINE', new='dummy')
def test_fit_pipeline_default(self):
orion = functional.fit_pipeline(self.data)
assert isinstance(orion, Or... |
def is_root(html):
    """Return True when ``html`` contains no ``'omid='`` marker.

    Input that cannot be searched (raising TypeError, e.g. None) also
    counts as a root page.
    """
    try:
        return 'omid=' not in html
    except TypeError:
        return True
def LF_negex_definite_negation_right(c):
possible_terms = [t['term'].split() for t in negex.dictionary['definite'] if (t['direction'] == 'backward')]
longest = len(max(possible_terms, key=len))
right_window_length = (longest + 2)
v = negex.is_negated(c, 'definite', 'right', right_window_length)
retu... |
def test_python_max2():
    # NOTE(review): `python_max2` is indexed with [0] below, which suggests it
    # was decorated (e.g. with @dace.program) in the original source and
    # returns an array-like; the decorator appears lost in extraction — confirm.
    def python_max2(a: dace.int64, b: dace.int64):
        return max(a, b)
    # Fuzz the two-argument max against Python's builtin on small random ints.
    for _ in range(100):
        a = random.randint((- 10), 10)
        b = random.randint((- 10), 10)
        assert (python_max2(a, b)[0] == max(a, b))
class SizeCorrectionParams(pymia_fltr.FilterParams):
    """Filter parameters holding a target image shape for size correction."""

    def __init__(self, reference_shape: tuple) -> None:
        # Shape the filter should correct images to, plus its dimensionality.
        self.reference_shape = reference_shape
        self.dims = len(reference_shape)
def is_submodule_of_fake_quant(name, module, named_modules):
    """Check whether ``name``'s parent module is an activation post-process.

    ``module`` is accepted for signature compatibility but not inspected.
    """
    parent_name, _ = _parent_name(name)
    parent = named_modules[parent_name]
    return is_activation_post_process(parent)
def register_Ns3DsrDsrLinkStab_methods(root_module, cls):
cls.add_constructor([param('ns3::dsr::DsrLinkStab const &', 'arg0')])
cls.add_constructor([param('ns3::Time', 'linkStab', default_value='ns3::Simulator::Now()')])
cls.add_method('GetLinkStability', 'ns3::Time', [], is_const=True)
cls.add_method('... |
class CloudpickleWrapper(object):
    """Wrap an object so it can be shipped across processes via cloudpickle.

    Serialization uses cloudpickle (handles lambdas/closures); the payload
    it produces is ordinary pickle data, so plain pickle restores it.
    """

    def __init__(self, x):
        self.x = x

    def __getstate__(self):
        import cloudpickle
        return cloudpickle.dumps(self.x)

    def __setstate__(self, state):
        import pickle
        self.x = pickle.loads(state)
def advanced_replace(subgraph: StateSubgraphView, s: str, s_: str) -> None:
subgraph.replace(s, s_)
for node in subgraph.nodes():
if isinstance(node, nodes.MapEntry):
params = [(s_ if (p == s) else p) for p in node.map.params]
node.map.params = params
elif isinstance(node... |
def preprocess(p):
    """Normalize a label string.

    PTB-escapes parentheses and colon, replaces spaces with underscores,
    and truncates at the first ``'#'``.
    """
    replacements = ((' ', '_'), ('(', '-LRB-'), (')', '-RRB-'), (':', '-COLON-'))
    for old, new in replacements:
        p = p.replace(old, new)
    return p.partition('#')[0]
def test_process_routing_invalid_method():
    """``process_routing`` must reject unknown method names with a TypeError."""
    with pytest.raises(TypeError, match='Can only route and process input'):
        # Fix: dropped the no-op ``**{}`` argument expansion from the call.
        process_routing(ConsumingClassifier(), 'invalid_method')
class MyDatasetFolder(DatasetFolder):
def __init__(self, root, loader, extensions, transform=None, target_transform=None):
(classes, class_to_idx) = find_classes(root)
samples = my_make_dataset(root, class_to_idx, extensions)
if (len(samples) == 0):
raise RuntimeError(((('Found 0... |
def parse_bench_ops_sec(values_dict, fn):
start = re.compile('(read(seq|reverse)|readrandom(writerandom)?|mixgraph)\\s*:.* (\\d+) ops/sec;\\s+([0-9\\.]+) MB/s')
rwrandomstart = re.compile('readrandomwriterandom\\s*:.* (\\d+) ops/sec;')
total_occ_dict = {}
with open(fn) as f:
data = None
... |
def get_logger():
    """Configure root logging (INFO, timestamped format) and return a logger.

    Note: ``logging.basicConfig`` only takes effect the first time any
    handler is installed; subsequent calls are no-ops.
    """
    fmt = '[%(asctime)s] [%(levelname)s]: %(message)s'
    logging.basicConfig(format=fmt, level=logging.INFO)
    return logging.getLogger(__name__)
def get_emb_sz(to, sz_dict=None):
    """Return embedding sizes for every categorical column of ``to``.

    Delegates each column name in ``to.cat_names`` to ``_one_emb_sz`` with
    the optional size overrides in ``sz_dict``.
    """
    return [_one_emb_sz(to.classes, col, sz_dict) for col in to.cat_names]
class disable_logging(object):
def __enter__(self, *args, **kwargs):
logging.disable(logging.CRITICAL)
return self
def __exit__(self, *args, **kwargs):
logging.disable(logging.NOTSET)
def __call__(self, func):
def decorator(*args, **kwargs):
with self:
... |
def delete_atom():
choices = ['[*:1]~[D1:2]>>[*:1]', '[*:1]~[D2:2]~[*:3]>>[*:1]-[*:3]', '[*:1]~[D3:2](~[*;!H0:3])~[*:4]>>[*:1]-[*:3]-[*:4]', '[*:1]~[D4:2](~[*;!H0:3])(~[*;!H0:4])~[*:5]>>[*:1]-[*:3]-[*:4]-[*:5]', '[*:1]~[D4:2](~[*;!H0;!H1:3])(~[*:4])~[*:5]>>[*:1]-[*:3](-[*:4])-[*:5]']
p = [0.25, 0.25, 0.25, 0.18... |
def get_groups(task, reachable_action_params=None):
    """Find invariants for ``task`` and keep the useful mutex groups.

    Both phases are wrapped in named timers for profiling output.
    """
    with timers.timing('Finding invariants', block=True):
        found = sorted(find_invariants(task, reachable_action_params))
    with timers.timing('Checking invariant weight'):
        groups = list(useful_groups(found, task.init))
    return groups
def get_detector(net, prefix, epoch, data_shape, mean_pixels, ctx, num_class, num_tpls, num_inprots, nms_thresh=0.5, force_nms=True, nms_topk=400):
if (net is not None):
net = get_symbol(net, data_shape, num_classes=num_class, num_tpls=num_tpls, num_inprots=num_inprots, nms_thresh=nms_thresh, force_nms=forc... |
class InfiniteSampler(torch.utils.data.Sampler):
def __init__(self, data_source: torch.utils.data.Dataset, replacement=True, seed=None):
super().__init__(data_source)
self.data_source = data_source
self.replacement = replacement
self.seed = utils.seed.get_randstate(seed)
def __it... |
def sweep():
wandb.init()
hyp_dict = vars(wandb.config).get('_items')
opt = parse_opt(known=True)
opt.batch_size = hyp_dict.get('batch_size')
opt.save_dir = str(increment_path((Path(opt.project) / opt.name), exist_ok=(opt.exist_ok or opt.evolve)))
opt.epochs = hyp_dict.get('epochs')
opt.nosa... |
def tracefunc(frame, event, arg):
    """``sys.settrace``-style hook: print event, file, and line, keep tracing.

    Returning itself keeps the same hook installed for nested scopes.
    """
    location = frame.f_code.co_filename
    print('%s, %s: %d' % (event, location, frame.f_lineno))
    return tracefunc
class TestTimeline(unittest.TestCase):
def test_from_file(self):
self.maxDiff = None
dates_to_summaries = {datetime.datetime.strptime('2010-09-19', '%Y-%m-%d').date(): ["The ruptured well is finally sealed and `` effectively dead '' , says the top US federal official overseeing the disaster , Coast ... |
def find_span_with_gt(context, offsets, ground_truth):
best_f1 = 0.0
best_span = ((len(offsets) - 1), (len(offsets) - 1))
gt = normalize_answer(ground_truth).split()
ls = [i for i in range(len(offsets)) if (context[offsets[i][0]:offsets[i][1]].lower() in gt)]
for i in range(len(ls)):
for j i... |
class CommunicationHandlerBase(abc.ABC):
def __init__(self):
pass
def init_buffers_ctx(self, buffers_ctx):
pass
def init_buffers(self):
pass
def send_activations(self, x, batch_index):
pass
def send_gradients(self, x, batch_index):
pass
def recv_activation... |
class PixelFFN(nn.Module):
def __init__(self, dim: int):
super().__init__()
self.dim = dim
self.conv = CAResBlock(dim, dim)
def forward(self, pixel: torch.Tensor, pixel_flat: torch.Tensor) -> torch.Tensor:
(bs, num_objects, _, h, w) = pixel.shape
pixel_flat = pixel_flat.v... |
def _create_variable(v, name, shape, rng):
class Variable():
pass
parameter = (v.type == 'Parameter')
variable_instance = None
if parameter:
if (v.initializer.type == 'Normal'):
initializer = NormalInitializer(v.initializer.multiplier, rng=rng)
elif ((v.initializer.ty... |
def q_int(n, q=None):
    """Quantum integer [n]_q = q^(n-1) + q^(n-3) + ... + q^(1-n).

    When ``q`` is omitted, works in the Laurent polynomial ring ZZ[q, q^-1];
    otherwise uses the parent ring of the supplied ``q``.
    """
    if q is None:
        ring = LaurentPolynomialRing(ZZ, 'q')
        q = ring.gen()
    else:
        ring = q.parent()
    if n == 0:
        return ring.zero()
    # Sum of q^(n - 2i - 1) for i = 0 .. n-1.
    return ring.sum((q ** ((n - (2 * i)) - 1)) for i in range(n))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.