code stringlengths 101 5.91M |
|---|
_module()
class RDNQE(BaseNet):
def __init__(self, rescale, io_channels, mid_channels=64, num_blocks=8, num_layers=8, channel_growth=64):
super().__init__()
self.rescale = rescale
self.mid_channels = mid_channels
self.channel_growth = channel_growth
self.num_blocks = num_bloc... |
class JointPositionActionMode(ActionMode):
def __init__(self):
super(JointPositionActionMode, self).__init__(JointPosition(False), GripperJointPosition(True))
def action(self, scene: Scene, action: np.ndarray):
arm_act_size = np.prod(self.arm_action_mode.action_shape(scene))
arm_action =... |
class ThrombocytopeniaLabValueLabeler(InpatientLabValueLabeler):
original_omop_concept_codes = ['LOINC/LP393218-5', 'LOINC/LG32892-8', 'LOINC/777-3']
def value_to_label(self, raw_value: str, unit: Optional[str]) -> str:
if (raw_value.lower() in ['normal', 'adequate']):
return 'normal'
... |
def clean_fi_veronumero(df: Union[(pd.DataFrame, dd.DataFrame)], column: str, output_format: str='standard', inplace: bool=False, errors: str='coerce', progress: bool=True) -> pd.DataFrame:
if (output_format not in {'compact', 'standard'}):
raise ValueError(f'output_format {output_format} is invalid. It nee... |
class Grouping(Repository):
def __init__(self, base_dir='.', log_level=Log.info):
self.ServerId = ''
self.is_parallel = False
self.numWorkers = 1
self.numPlanWorkers = 1
self.set_base_dir(base_dir)
self.LogLevel = log_level
def __set_serverId(self, serverId):
... |
class MultiHeadedAttention(nn.Module):
def __init__(self, head_count, model_dim, dropout=0.1, max_relative_positions=0):
assert ((model_dim % head_count) == 0)
self.dim_per_head = (model_dim // head_count)
self.model_dim = model_dim
super(MultiHeadedAttention, self).__init__()
... |
def adjust_learning_rate(warm_up_schedule, optimizer, epoch):
if (epoch < len(warm_up_schedule)):
lr = warm_up_schedule[epoch]
for param_group in optimizer.param_groups:
param_group['lr'] = lr
elif (epoch in range(10, 20)):
lr = (warm_up_schedule[(- 1)] * (0.25 ** int((float(... |
class IntelEM64TCCompiler(UnixCCompiler):
compiler_type = 'intelem'
cc_exe = 'icc -m64'
cc_args = '-fPIC'
def __init__(self, verbose=0, dry_run=0, force=0):
UnixCCompiler.__init__(self, verbose, dry_run, force)
v = self.get_version()
mpopt = ('openmp' if (v and (v < '15')) else '... |
def _sanitize_by_index(indices: Set[int], subset: subsets.Subset) -> subsets.Range:
    """Keep only the dimensions of *subset* whose position is in *indices*.

    Rebuilds a subset of the same concrete type from the surviving entries,
    preserving their original order.
    """
    surviving = [dim for position, dim in enumerate(subset) if position in indices]
    return type(subset)(surviving)
def test_set_speak_mode(config: Config):
    """set_speak_mode(True) must be reflected by the speak_mode property."""
    # Remember the current value so the shared config fixture is restored.
    original_mode = config.speak_mode
    config.set_speak_mode(True)
    assert config.speak_mode == True
    config.set_speak_mode(original_mode)
class EventHandle(ctypes.Structure):
    """Opaque 64-byte IPC handle blob (presumably a GPU/driver IPC event
    handle mirrored from a C struct — confirm against the native side)."""
    # Size of the opaque buffer in bytes; must match the C-side constant.
    IPC_HANDLE_SIZE = 64
    _fields_ = [('reserved', (ctypes.c_char * IPC_HANDLE_SIZE))]
def _gen_efficientnet_condconv(variant, channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=1, pretrained=False, **kwargs):
arch_def = [['ds_r1_k3_s1_e1_c16_se0.25'], ['ir_r2_k3_s2_e6_c24_se0.25'], ['ir_r2_k5_s2_e6_c40_se0.25'], ['ir_r3_k3_s2_e6_c80_se0.25'], ['ir_r3_k5_s1_e6_c112_se0.25_cc4'], ['ir_r4... |
def roc_stories(data_dir, comet=False, kg_type='atomic', use_filter=True):
print('starting to process data')
(train_stories, train_infs, train_mems, train_ids) = _roc_stories(data_dir, 'train', comet, kg_type, use_filter=use_filter)
print('done with train')
(val_stories, val_infs, val_mems, val_ids) = _... |
def tensor_normalization(x, axes, beta=None, gamma=None, eps=1e-05, output_stat=False):
    """Normalize *x* over *axes* using statistics computed from *x* itself.

    Thin wrapper over ``batch_normalization`` with no running mean/variance
    (both passed as None, ``decay_rate=0.0``) and ``batch_stat=True`` — i.e. a
    pure per-call normalization with optional affine terms *beta*/*gamma*.
    When *output_stat* is True the computed statistics are presumably returned
    as well — confirm against ``batch_normalization``.
    """
    return batch_normalization(x, beta, gamma, None, None, axes=axes, decay_rate=0.0, eps=eps, batch_stat=True, output_stat=output_stat)
class COIN_Task_CLS(Dataset):
def __init__(self, args, logger, split='train'):
self.args = args
self.logger = logger
with open(args.coin_annoataion_json, 'r') as f:
self.coin_json = json.load(f)
self.sample_video_paths = []
self.cls_sid2iid = defaultdict()
... |
def build_dataloader(dataset, samples_per_gpu, workers_per_gpu, num_gpus=1, dist=True, shuffle=True, seed=None, drop_last=False, pin_memory=True, persistent_workers=True, **kwargs):
(rank, world_size) = get_dist_info()
if (dist and (not isinstance(dataset, IterableDataset))):
sampler = DistributedSample... |
class System3_Generator(system2.System2_Generator):
    """System-3 generator that reuses System2_Generator behavior unchanged.

    Exists (presumably) so callers can select system 3 by class name; only
    the constructor signature is pinned here.
    """
    def __init__(self, system_id, apply_disc=True):
        # apply_disc is forwarded verbatim; all logic lives in the base class.
        super().__init__(system_id, apply_disc=apply_disc)
def main():
parser = argparse.ArgumentParser(description='preprocess_wiki_update_gen.py')
parser.add_argument('--data_dir', required=True, help='Path to the raw file from preprocess.py')
parser.add_argument('--out_dir', required=True, help='Path to save file from preprocess.py')
opt = parser.parse_args(... |
def es_req_func(protocols: List['EntanglementProtocol'], args: Arguments) -> 'EntanglementSwappingB':
    """Select the EntanglementSwappingB protocol bound to the requested memory.

    Scans *protocols* for an EntanglementSwappingB whose memory name equals
    ``args['target_memo']``; returns None when no protocol matches (falls
    through, as in the original).
    """
    wanted_memo = args['target_memo']
    matches = (p for p in protocols
               if isinstance(p, EntanglementSwappingB) and p.memory.name == wanted_memo)
    return next(matches, None)
class RagRetriever():
def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None, init_retrieval=True):
self._init_retrieval = init_retrieval
requires_datasets(self)
requires_faiss(self)
super().__init__()
self.index = (index or self._build_index(c... |
def numBridgeheadsAndSpiro(mol, ri=None):
    """Return ``(n_bridgehead, n_spiro)`` atom counts for *mol* via RDKit.

    NOTE(review): *ri* (presumably a RingInfo) is accepted for API
    compatibility but never used — both counts come straight from
    ``rdMolDescriptors``.
    """
    nSpiro = rdMolDescriptors.CalcNumSpiroAtoms(mol)
    nBridgehead = rdMolDescriptors.CalcNumBridgeheadAtoms(mol)
    return (nBridgehead, nSpiro)
def get_last_daily_ci_runs(token):
    """Return the id of the first *completed* daily-CI workflow run.

    Runs are scanned in the order yielded by ``get_daily_ci_runs`` (newest
    first, presumably); returns None when no run has completed.
    """
    completed_ids = (run['id']
                     for run in get_daily_ci_runs(token)
                     if run['status'] == 'completed')
    return next(completed_ids, None)
class StreamingMean(object):
def __init__(self):
self.value = None
self.count = 0.0
def add(self, value):
if (not self.count):
self.value = value
else:
self.value *= (self.count / (self.count + 1))
self.value += (value / (self.count + 1))
... |
class AttentionLayer(nn.Module):
def __init__(self, query_hidden_size, ctx_hidden_size):
super(AttentionLayer, self).__init__()
self.query_hidden_size = query_hidden_size
self.ctx_hidden_size = ctx_hidden_size
self.attention_func = BilinearAttention2DLayer(query_hidden_size, ctx_hidd... |
def _check_polynomial_P2(cubic, variables):
if (variables is None):
variables = cubic.variables()
if (len(variables) == 3):
(x, y, z) = variables
_check_homogeneity(cubic, [x, y, z], (1, 1, 1), 3)
elif (len(variables) == 2):
(x, y) = variables
z = None
else:
... |
class SetupCheckTaskConfiguration(TaskConfiguration):
    """Task configuration for the 'check setup' mode; it schedules no tasks."""

    @staticmethod
    def mode() -> str:
        """Return the name of this configuration's mode.

        Fix: the original defined ``mode()`` without ``self``, so calling it
        on an instance raised TypeError.  ``@staticmethod`` keeps class-level
        calls working and makes instance-level calls valid too.
        """
        return 'check setup'

    def tasks(self, config) -> List:
        """Return the task list for this mode — intentionally empty."""
        return []
def main(args):
    """Build the dialogue-schema dataset and eagerly load all of its data.

    *args* must provide ``data_dir``, ``candgen_dir`` and ``mode``
    (presumably an argparse namespace — confirm against the caller).
    """
    schema_dataset = DialogueSchemaDataset(args.data_dir, args.candgen_dir, mode=args.mode)
    schema_dataset.load_all_data()
def transform_mask(in_dict, transform_list, cfg):
    """Apply each named transform in *transform_list* to *in_dict* in place.

    Each entry of *transform_list* is the name of a callable taking
    ``(in_dict, cfg)``; calls are made in order and their return values are
    discarded, so transforms must mutate *in_dict*.  The mutated *in_dict*
    is returned.
    """
    for t in transform_list:
        # SECURITY NOTE(review): eval on a format-built string executes
        # arbitrary code if transform_list comes from untrusted config; a
        # name->callable registry (or getattr on a module) would be safer.
        eval('{}(in_dict, cfg)'.format(t))
    return in_dict
class StepScheduler(BaseLearningRateScheduler):
def __init__(self, init_lr, gamma, iter_steps):
self.init_lr = init_lr
self.gamma = gamma
self.iter_steps = iter_steps
def get_learning_rate(self, iter):
lr = self.init_lr
for iter_step in self.iter_steps:
if (it... |
.parametrize('evaluation_policy_pscore, evaluation_policy_pscore_item_position, evaluation_policy_pscore_cascade, evaluation_policy_action_dist, q_hat, description', valid_input_of_create_estimator_inputs)
def test_meta_create_estimator_inputs_using_valid_input_data(evaluation_policy_pscore, evaluation_policy_pscore_it... |
def get_weibo_list(html):
if (not html):
return list()
soup = BeautifulSoup(html, 'lxml')
feed_list = soup.find_all(attrs={'action-type': 'feed_list_item'})
weibo_datas = []
for data in feed_list:
r = get_weibo_info_detail(data, html)
if (r is not None):
wb_data =... |
class IPERMotionImitationEvaluator(MotionImitationEvaluator):
def __init__(self, data_dir, dataset='iPER'):
super().__init__(dataset=dataset, data_dir=data_dir)
def run_inference(self, model, src_infos, ref_infos):
assert hasattr(model, 'imitate'), '{} must implement imitate(src_infos, ref_infos... |
_model
def mobilenetv2_140(pretrained=False, **kwargs):
    """MobileNet-V2 with a 1.4 channel (width) multiplier."""
    return _gen_mobilenet_v2('mobilenetv2_140', 1.4, pretrained=pretrained, **kwargs)
def adjust_label(base_class_list, novel_class_list, label, chosen_novel_class, base_label=(- 1), other_novels_label=255):
assert ((base_label in [(- 1), 0, 255]) and (other_novels_label in [(- 1), 0, 255]))
new_label = np.zeros_like(label)
for lab in base_class_list:
indexes = np.where((label == lab... |
def create_tensor2node(graph: common.Graph, node: common.BaseNode, fw_info: common.FrameworkInfo):
current_tensor = graph.get_out_stats_collector(node)
is_list_nostat_collectors = (isinstance(current_tensor, list) and (len([sc for sc in current_tensor if (not isinstance(sc, common.NoStatsCollector))]) == 0))
... |
def ResNet101(input_tensor=None):
img_input = input_tensor
if (K.image_data_format() == 'channels_last'):
bn_axis = 3
else:
bn_axis = 1
x = ZeroPadding2D((3, 3))(img_input)
x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=False)(x)
x = BN(axis=bn_axis, name='bn_conv1... |
def doublemnist(folder, shots, ways, shuffle=True, test_shots=None, seed=None, **kwargs):
    """Build a few-shot episode loader for the DoubleMNIST dataset.

    Thin convenience wrapper over ``helper_with_default`` with no
    dataset-specific defaults; *shots* and *ways* shape the episodes, and
    extra **kwargs are forwarded unchanged.
    """
    return helper_with_default(DoubleMNIST, folder, shots, ways, shuffle=shuffle, test_shots=test_shots, seed=seed, defaults={}, **kwargs)
_iterator
class _RangeWrapper(object):
def __init__(self, iterable, start_byte=0, byte_range=None):
self.iterable = iter(iterable)
self.byte_range = byte_range
self.start_byte = start_byte
self.end_byte = None
if (byte_range is not None):
self.end_byte = (self.sta... |
def test_only_any_generator_3(module_test_cluster):
generator = MagicMock(GenericMethod)
generator.generated_type.return_value = ANY
module_test_cluster.add_generator(generator)
generator2 = MagicMock(GenericMethod)
generator2.generated_type.return_value = module_test_cluster.type_system.convert_typ... |
class PolynomialRing_cdvf(PolynomialRing_cdvr, PolynomialRing_field):
def __init__(self, base_ring, name=None, sparse=False, implementation=None, element_class=None, category=None):
if (element_class is None):
if sparse:
from sage.rings.polynomial.polynomial_element_generic impor... |
()
class TupleObservationScaler(ObservationScaler):
observation_scalers: Sequence[ObservationScaler] = observation_scaler_list_field()
def fit_with_transition_picker(self, episodes: Sequence[EpisodeBase], transition_picker: TransitionPickerProtocol) -> None:
episode = episodes[0]
for i in range(... |
def evaluate_dialog(args):
scorer = DialogScorer(align=args.align)
scores = []
for (fact, dialog_history, hypo) in zip(open(args.fact).readlines(), open(args.dialog_history).readlines(), open(args.hypo).readlines()):
(fact, dialog_history, hypo) = (fact.strip(), dialog_history.strip(), hypo.strip())... |
def register_functions(root_module):
module = root_module
module.add_function('CRC32Calculate', 'uint32_t', [param('uint8_t const *', 'data'), param('int', 'length')])
module.add_function('MakeAddressChecker', 'ns3::Ptr< ns3::AttributeChecker const >', [])
module.add_function('MakeDataRateChecker', 'ns3... |
def find_dir_path(wandb_name):
dir_path = os.path.join(os.getcwd(), 'wandb')
runs = []
for (path, subdirs, files) in os.walk(dir_path):
for dir_ in subdirs:
if os.path.isfile(join(dir_path, dir_, 'files', 'run_name.txt')):
with open(join(dir_path, dir_, 'files', 'run_name... |
class SawyerHandlePressSideEnvV2(SawyerXYZEnv):
def __init__(self):
hand_low = ((- 0.5), 0.4, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = ((- 0.35), 0.65, (- 0.001))
obj_high = ((- 0.25), 0.75, (+ 0.001))
super().__init__(self.model_name, hand_low=hand_low, hand_high=hand_high)... |
class _ClickHandler(logging.Handler):
_use_stderr = True
def emit(self, record):
try:
msg = self.format(record)
click.echo(msg, err=self._use_stderr)
except Exception:
self.handleError(record) |
class DecoderBlock(nn.Module):
def __init__(self, n_heads, n_dims, total_exercise, total_cat, seq_len):
super(DecoderBlock, self).__init__()
self.seq_len = seq_len
self.exercise_embed = nn.Embedding(total_exercise, n_dims)
self.category_embed = nn.Embedding(total_cat, n_dims)
... |
def test_should_save_rgb_image(output_dir):
    """save_rgb must create exactly one file in the target directory."""
    image = np.full((28, 28, 3), 1, dtype='uint8')
    save_rgb(image, output_dir, 'rgb.png')
    saved_files = list(Path(output_dir).iterdir())
    assert len(saved_files) == 1
def generate_minibatches(dataParser, train=True):
while True:
if train:
batch_ids = np.random.choice(dataParser.training_ids, dataParser.batch_size_train)
else:
batch_ids = np.random.choice(dataParser.validation_ids, (dataParser.batch_size_train * 2))
(ims, ems, _) = ... |
def iter_bliss(filename):
corpus_file = open(filename, 'rb')
if filename.endswith('.gz'):
corpus_file = gzip.GzipFile(fileobj=corpus_file)
context = iter(ElementTree.iterparse(corpus_file, events=('start', 'end')))
(_, root) = next(context)
name_tree = [root.attrib['name']]
elem_tree = [... |
def main(argv):
if (len(argv) > 1):
raise app.UsageError('Too many command-line arguments.')
scorer = rouge_scorer.RougeScorer(FLAGS.rouge_types, use_stemmer=FLAGS.use_stemmer, split_summaries=FLAGS.split_summaries)
aggregator = (scoring.BootstrapAggregator() if FLAGS.aggregate else None)
io.com... |
_utils.test(arch=[ti.cuda, ti.vulkan, ti.opengl, ti.metal])
def test_frexp():
def get_frac(x: ti.f32) -> ti.f32:
(a, b) = ti.frexp(x)
return a
assert test_utils.allclose(get_frac(1.4), 0.7)
def get_exp(x: ti.f32) -> ti.i32:
(a, b) = ti.frexp(x)
return b
assert (get_exp(1.... |
def greedy_fuse(graph_or_subgraph: GraphViewType, validate_all: bool, device: dace.dtypes.DeviceType=dace.dtypes.DeviceType.CPU, recursive: bool=True, stencil: bool=False, stencil_tile=None, permutations_only: bool=True, expand_reductions: bool=False) -> None:
debugprint = config.Config.get_bool('debugprint')
i... |
def test_field_source(variable_reference_mock, field_mock):
    """A FieldReference must expose the variable it was built from as .source."""
    ref = vr.FieldReference(variable_reference_mock, field_mock)
    assert (ref.source == variable_reference_mock)
(cmp=False)
class Node(object):
    """attrs-declared node, presumably in a call-batching graph — confirm
    against the module that builds these."""
    # Positional and keyword arguments carried by this node.
    args = attr.ib()
    kwargs = attr.ib()
    # Key used to group nodes into the same batch; semantics set by caller.
    batch_key = attr.ib()
    # Distance from the root; roots start at 0.
    depth = attr.ib(default=0)
    # Child nodes; attr.Factory gives each instance its own fresh list.
    outgoing = attr.ib(default=attr.Factory(list))
    # Number of edges pointing at this node (incremented externally, presumably).
    num_incoming = attr.ib(default=0)
class Mean(nn.Module):
def __init__(self, out_dim):
super(Mean, self).__init__()
def forward(self, feature, att_mask):
agg_vec_list = []
for i in range(len(feature)):
length = (torch.nonzero((att_mask[i] < 0), as_tuple=False)[0][0] + 1)
agg_vec = torch.mean(featur... |
def make_spec() -> None:
spec = envpool.make_spec('Pong-v5', num_envs=4)
print(spec)
gym_obs_space = spec.observation_space
gym_act_space = spec.action_space
dm_obs_spec = spec.observation_spec()
dm_act_spec = spec.action_spec()
np.testing.assert_allclose(gym_obs_space.high, 255)
assert ... |
class GZIPTransformer(Transformer):
def __init__(self):
self.lossy = False
def forward(self, data, **kwargs):
bytes_ = data.astype(np.float32).tobytes()
compressed_bytes = gz.compress(bytes_)
metadata = {}
return (compressed_bytes, metadata)
def backward(self, data, m... |
def _validate_and_format_custom_entity(entity, utterance_entities, language, builtin_entity_parser):
validate_type(entity, dict, object_label='entity')
if (MATCHING_STRICTNESS not in entity):
strictness = entity.get('parser_threshold', 1.0)
entity[MATCHING_STRICTNESS] = strictness
mandatory_... |
def on_close(page, sockets):
    """Report that *page* closed and which sockets remain open (stdout only)."""
    closing_report = (page, 'closed')
    print(*closing_report)
    print('Still have sockets open to', sockets)
def get_styled_schema(df: pd.DataFrame) -> Any:
styled_df = df.style.set_table_styles([{'selector': 'th', 'props': [('background', 'white'), ('font-weight', 'bold'), ('text-align', 'right'), ('font-family', 'arial'), ('font-size', '13')]}, {'selector': 'td', 'props': [('font-family', 'arial')]}, {'selector': 'tr:nt... |
()
class VectorEncoderFactory(EncoderFactory):
hidden_units: List[int] = field(default_factory=(lambda : [256, 256]))
activation: str = 'relu'
use_batch_norm: bool = False
dropout_rate: Optional[float] = None
exclude_last_activation: bool = False
def create(self, observation_shape: Shape) -> Vec... |
_arg_scope
def mobilenet(input_tensor, num_classes=1001, depth_multiplier=1.0, scope='MobilenetV2', conv_defs=None, finegrain_classification_mode=False, min_depth=None, divisible_by=None, **kwargs):
if (conv_defs is None):
conv_defs = V2_DEF
if ('multiplier' in kwargs):
raise ValueError('mobilen... |
.parametrize('ctx, func_name', ctxs)
.parametrize('seed', [313])
def test_floor_forward_backward(seed, ctx, func_name):
from nbla_test_utils import cap_ignore_region, function_tester
rng = np.random.RandomState(seed)
inputs = [(rng.randn(2, 3, 4).astype(np.float32) * 2)]
function_tester(rng, F.floor, re... |
class NaiveCrystal(UniqueRepresentation, Parent):
def __init__(self):
Parent.__init__(self, category=ClassicalCrystals())
self.n = 2
self._cartan_type = CartanType(['A', 2])
self.G = DiGraph(5)
self.G.add_edges([[0, 1, 1], [1, 2, 1], [2, 3, 1], [3, 5, 1], [0, 4, 2], [4, 5, 2]... |
class FairseqOptimizer(object):
def __init__(self, args):
super().__init__()
self.args = args
def add_args(parser):
pass
def optimizer(self):
if (not hasattr(self, '_optimizer')):
raise NotImplementedError
if (not isinstance(self._optimizer, torch.optim.Op... |
def _knowledge_graph_load(dataset, nodes, rels, train, test, valid):
(g, train_df, test_df, valid_df) = dataset.load()
assert (g.number_of_nodes() == nodes)
assert (g.number_of_edges() == ((train + test) + valid))
assert (len({et for (_, _, et) in g.edges(include_edge_type=True)}) == rels)
assert (l... |
def p_optional_ellipsis(s):
    """Consume an optional ellipsis at the scanner's current position.

    Returns 1 when the current symbol is '.' and an ellipsis was consumed
    via ``expect_ellipsis``; returns 0 otherwise.
    """
    if s.sy != '.':
        return 0
    expect_ellipsis(s)
    return 1
('fastcoref', assigns=['doc._.resolved_text', 'doc._.coref_clusters'], default_config={'model_architecture': 'FCoref', 'model_path': 'biu-nlp/f-coref', 'device': None, 'max_tokens_in_batch': 10000, 'enable_progress_bar': True})
class FastCorefResolver():
'a class that implements the logic from\n
def __init_... |
def _replace_html_entities(text, keep=(), remove_illegal=True, encoding='utf-8'):
def _convert_entity(match):
entity_body = match.group(3)
if match.group(1):
try:
if match.group(2):
number = int(entity_body, 16)
else:
... |
class LogBERTPredict():
def __init__(self, config: LogBERTConfig):
self.config = config
self.model_dirpath = os.path.join(self.config.output_dir, self.config.model_name)
self.model = None
self.tokenizer = get_tokenizer(self.config.tokenizer_dirpath)
self.special_tokens = get_... |
((not have_sympy), 'SymPy not installed')
def test_log():
x = Symbol('x')
x1 = sympy.Symbol('x')
assert (log(x) == log(x1))
assert (log(x)._sympy_() == sympy.log(x1))
assert (sympify(sympy.log(x1)) == log(x))
y = Symbol('y')
y1 = sympy.Symbol('y')
assert (log(x, y) == log(x, y1))
ass... |
def adaptivity(landscape: flexs.Landscape, make_explorer: Callable[([int, int, int], flexs.Explorer)], num_rounds: List[int]=[1, 10, 100], total_ground_truth_measurements: int=1000, total_model_queries: int=10000):
results = []
for rounds in num_rounds:
print(f'Evaluating for num_rounds: {rounds}')
... |
def handle_line(line, options=None, finder=None, session=None):
    """Dispatch one parsed requirements-file line.

    Requirement lines are parsed and returned; option lines are applied for
    their side effects (on *finder*/*options*/*session*) and yield None.
    """
    if not line.is_requirement:
        handle_option_line(line.opts, line.filename, line.lineno, finder, options, session)
        return None
    return handle_requirement_line(line, options)
class GaussianDiffusion(nn.Module):
def __init__(self, denoise_fn, *, image_size, channels=3, timesteps=1000, loss_type='l1', objective='pred_noise', beta_schedule='cosine'):
super().__init__()
self.channels = channels
self.image_size = image_size
self.denoise_fn = denoise_fn
... |
def create_feature_columns() -> Tuple[(List[Any], List[Any])]:
(wide_part_feature_columns, deep_part_feature_columns) = ([], [])
videoplayseconds = fc.numeric_column('videoplayseconds', default_value=0.0)
u_read_comment_7d_sum = fc.numeric_column('u_read_comment_7d_sum', default_value=0.0)
u_like_7d_sum... |
class LogVandMultiplySymmetric(torch.autograd.Function):
def forward(ctx, v, x, L):
(batch, N) = v.shape
supported_N_values = [(1 << log_n) for log_n in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]
if (not (N in supported_N_values)):
raise NotImplementedError(f'Only support N values in {supp... |
class SetDataManager(DataManager):
def __init__(self, n_way, n_support, n_query, n_eposide=100):
super(SetDataManager, self).__init__()
self.n_way = n_way
self.batch_size = (n_support + n_query)
self.n_eposide = n_eposide
def get_data_loader(self, root='./filelists/tabula_muris',... |
def cleanup_temporary_files(args):
    """Best-effort removal of the SAS file, plan file, and numbered plan files.

    Numbered variants ('<plan>.1', '<plan>.2', ...) are removed until the
    first one that cannot be removed, mirroring consecutively indexed output.
    """
    _try_remove(args.sas_file)
    _try_remove(args.plan_file)
    index = 1
    while _try_remove('%s.%s' % (args.plan_file, index)):
        index += 1
def to_eularian_angles(q):
z = q.z_val
y = q.y_val
x = q.x_val
w = q.w_val
ysqr = (y * y)
t0 = ((+ 2.0) * ((w * x) + (y * z)))
t1 = ((+ 1.0) - (2.0 * ((x * x) + ysqr)))
roll = math.atan2(t0, t1)
t2 = ((+ 2.0) * ((w * y) - (z * x)))
if (t2 > 1.0):
t2 = 1
if (t2 < (- 1.... |
class MultimodalDataset(Dataset):
def __init__(self, data, mode, batch_size, vocabs, topology, bucket_by, max_len=None, bucket_order=None, **kwargs):
self.datasets = {}
self.mode = mode
self.vocabs = vocabs
self.batch_size = batch_size
self.topology = topology
self.bu... |
class RequestException(IOError):
def __init__(self, *args, **kwargs):
response = kwargs.pop('response', None)
self.response = response
self.request = kwargs.pop('request', None)
if ((response is not None) and (not self.request) and hasattr(response, 'request')):
self.requ... |
def parse_audio(audio_path: str, del_silence: bool=False, audio_extension: str='pcm') -> Tensor:
signal = load_audio(audio_path, del_silence, extension=audio_extension)
feature = torchaudio.compliance.kaldi.fbank(waveform=Tensor(signal).unsqueeze(0), num_mel_bins=80, frame_length=20, frame_shift=10, window_type... |
def assert_line_armijo(x, p, s, f, **kw):
    """Check the Armijo condition for step *s* along direction *p* from *x*.

    Wraps ``assert_armijo`` with the 1-D restriction phi(a) = f(x + a*p);
    extra keyword arguments are forwarded unchanged.
    """
    assert_armijo(s, phi=(lambda sp: f((x + (p * sp)))), **kw)
def zeroshot_classifier(class_names: List[str], device: torch.device, verbose: bool=False):
templates = ['itap of the {}.', 'a bad photo of the {}', 'a origami {}.', 'a photo of the large {}.', 'a {} in a video game.', 'art of the {}.', 'a photo of the small {}.']
(model, _) = clip.load('ViT-B/32')
model = ... |
def build_lf_matrix() -> None:
logging.info('Getting Spark context')
sc = SparkContext()
sc.addPyFile('snorkel-package.zip')
rdd = sc.parallelize(DATA)
logging.info('Applying LFs')
lf_applier = SparkLFApplier([f, g])
L = lf_applier.apply(rdd)
np.testing.assert_equal(L.toarray(), L_EXPECT... |
def smart_decorator(f, create_decorator):
if isinstance(f, types.FunctionType):
return wraps(f)(create_decorator(f, True))
elif isinstance(f, (classtype, type, types.BuiltinFunctionType)):
return wraps(f)(create_decorator(f, False))
elif isinstance(f, types.MethodType):
return wraps(... |
.torch
def test_save_and_load(small_dataset: Dataset, only_item_id_schema: TensorSchema):
tokenizer = SequenceTokenizer(only_item_id_schema).fit(small_dataset)
before_save = tokenizer.transform(small_dataset)
tokenizer.save('sequence_tokenizer.pth')
del tokenizer
tokenizer = SequenceTokenizer.load('... |
def mpi_fork(n, bind_to_core=False):
if (n <= 1):
return
if (os.getenv('IN_MPI') is None):
env = os.environ.copy()
env.update(MKL_NUM_THREADS='1', OMP_NUM_THREADS='1', IN_MPI='1')
args = ['mpirun', '-np', str(n)]
if bind_to_core:
args += ['-bind-to', 'core']
... |
class XLMForSequenceClassification():
    """Placeholder stub used when PyTorch is not installed.

    Every entry point just calls ``requires_pytorch``, which presumably
    raises an informative ImportError — confirm against its definition.
    NOTE(review): ``from_pretrained`` is an instance method here, unlike the
    usual classmethod on real model classes — likely intentional for a stub.
    """
    def __init__(self, *args, **kwargs):
        requires_pytorch(self)
    def from_pretrained(self, *args, **kwargs):
        requires_pytorch(self)
def dataio_prep(hparams, train_step=None):
if (not train_step):
train_step = hparams
(train_data, valid_data, test_data) = load_datasets(hparams, train_step)
if (hparams['sorting'] == 'ascending'):
hparams['dataloader_opts']['shuffle'] = False
elif (hparams['sorting'] == 'descending'):
... |
class WeightedMean(NN):
def __call__(self, vocab, output_size, moving_params=None):
inputs = tf.placeholder(tf.int32, shape=(None, None), name=('inputs-%s' % self.name))
self.tokens_to_keep = tf.to_float(tf.greater(inputs, vocab.PAD))
self.sequence_lengths = tf.reduce_sum(self.tokens_to_keep... |
def aggregate_metrics(results: List[dict]) -> dict:
ret = {}
for (key, val) in results[0].items():
if isinstance(val, dict):
ret[key] = aggregate_metrics([result[key] for result in results])
else:
ret[key] = {'average': np.average([result[key] for result in results]), 'st... |
def emit(initializer_parameter_map):
print('// {} from {}'.format('generated', __file__))
print(HEADER)
for (initializer_name, weights) in initializer_parameter_map.items():
print(PARAMETERS.format(initializer_name))
print(' return {')
for sample in weights:
print(' {... |
class TestConfigs(unittest.TestCase):
def test_configs_load(self):
cfg_root_path = utils.get_config_root_path()
files = glob.glob(os.path.join(cfg_root_path, './**/*.yaml'), recursive=True)
self.assertGreater(len(files), 0)
for fn in files:
print('Loading {}...'.format(fn... |
def gen_curriculum_combined(gen_digits):
return ([[((1 / (2 * ((i + 1) ** 2))) if (((j // gen_digits) < i) and ((j % gen_digits) < i)) else (((1 / (2 * ((i + 1) ** 2))) + (1 / (2 * ((2 * i) + 1)))) if ((((j // gen_digits) <= i) and ((j % gen_digits) == i)) or (((j // gen_digits) == i) and ((j % gen_digits) <= i))) ... |
def launch_search(exp_config: Union[(List[str], str)], name: str, workers: int, gpus_per_worker: float, cpus_per_worker: float, eval_only: bool, samples: int, seed: int) -> None:
if (len(path.split(exp_config)[0]) > 0):
CFG_PATH = exp_config
else:
CFG_PATH = path.join(DEFAULT_CONFIG_DIR, exp_con... |
def get_full_code(url_params='', data=None):
url = f'
if data:
data = json.dumps(data)
data = f", data=b'{data}', headers={{'Content-Type': 'application/json'}}"
return f"requests.get('{url}'{data})" |
class RAdam(Optimizer):
def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0):
if (not (0.0 <= lr)):
raise ValueError('Invalid learning rate: {}'.format(lr))
if (not (0.0 <= eps)):
raise ValueError('Invalid epsilon value: {}'.format(eps))
... |
def load_dataset(dataset_dir: str) -> List[WikipediaPretrainingDataset]:
dataset_dirs = glob.glob(dataset_dir)
assert (len(dataset_dirs) > 0), f'No matching directories with {dataset_dir}'
datasets = [WikipediaPretrainingDataset(d) for d in dataset_dirs]
assert (len(frozenset([len(d.tokenizer) for d in ... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.