code stringlengths 101 5.91M |
|---|
def _get_allgather_out_list(all_gather_in_list, world_size):
out_list = [torch.zeros_like(all_gather_in_list, device=all_gather_in_list.device, dtype=all_gather_in_list.dtype) for _ in range(world_size)]
return out_list |
def EgawaGraph(p, s):
from sage.graphs.generators.basic import CompleteGraph
from itertools import product, chain, repeat
g = Graph(name=((('Egawa Graph with parameters ' + str(p)) + ',') + str(s)), multiedges=False)
X = CompleteGraph(4)
Y = Graph('O?_LUebWkbT_')
g.add_vertices(product(*chain(re... |
def vgg11_bn(pretrained=False, progress=True, **kwargs):
    """Build a VGG-11 model with batch normalization.

    ``pretrained``, ``progress`` and any extra keyword arguments are
    forwarded verbatim to the ``_vgg`` factory.
    """
    arch, cfg, use_batch_norm = 'vgg11_bn', 'A', True
    return _vgg(arch, cfg, use_batch_norm, pretrained, progress, **kwargs)
def genSplitViewImages(image_dir):
imgF_dir = os.path.join(image_dir, 'imgF')
imgT_dir = os.path.join(image_dir, 'imgT')
output_dir = os.path.join(image_dir, 'img1')
if (not os.path.exists(output_dir)):
os.makedirs(output_dir)
for frame in os.listdir(imgF_dir):
if frame.endswith('.jp... |
def load_model(model, pretrained_dict, key):
    """Load the sub-module weights stored under prefix ``key`` into ``model``.

    ``pretrained_dict`` holds parameters whose names start with ``key``
    followed by a dot (e.g. ``'encoder.layer1.weight'`` for key
    ``'encoder'``).  The prefix is stripped and the matching entries are
    merged into the model's current state dict, so parameters absent from
    the checkpoint keep their existing values.

    Fixes vs. the original:
    - match on ``key + '.'`` so a key like ``'enc'`` no longer captures
      ``'encoder.*'`` entries;
    - ``model_dict`` was computed but never used — merge into it before
      loading, so a partial checkpoint does not fail the strict
      ``load_state_dict`` check.
    """
    prefix = key + '.'
    model_dict = model.state_dict()
    new_dict = {k[len(prefix):]: v
                for k, v in pretrained_dict.items()
                if k.startswith(prefix)}
    model_dict.update(new_dict)
    model.load_state_dict(model_dict)
def _vector_str(self, indent, summarize, formatter1, formatter2=None):
element_length = (formatter1.width() + 2)
if (formatter2 is not None):
element_length += (formatter2.width() + 1)
elements_per_line = max(1, int(math.floor(((PRINT_OPTS.linewidth - indent) / element_length))))
char_per_line =... |
class BigBird():
def __init__(self, config):
self.batch_size = config['batch_size']
self.tokenizer = BigBirdTokenizerFast.from_pretrained(config['model_weights'])
self.model = BigBirdForQuestionAnswering.from_pretrained(config['model_weights'])
self.page_retrieval = (config['page_ret... |
class Downsampling(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, pre_norm=None, post_norm=None, pre_permute=False):
super().__init__()
self.pre_norm = (pre_norm(in_channels) if pre_norm else nn.Identity())
self.pre_permute = pre_permute
s... |
def CheckProgram(program, data_id, num_demo, demo, demo_len, dsl, karel_world):
(exe, s_exe) = parse(program)
if (not s_exe):
syntax = False
demo_correctness = np.array(([False] * num_demo))
num_correct = 0
else:
syntax = True
demo_correctness = np.array(([False] * nu... |
class ListView(Sequence):
def __init__(self, origin, start, stop=None, length=None, preserveLength=False):
if ((stop is None) and (length is None)):
raise ValueError('At least one of stop or length has to be provided')
self._origin = origin
self._offset = start
self._leng... |
()
('--seed', default=1)
('--n_epochs', default=600)
('--batch_size_per_task', default=1024)
_experiment
def te_ppo_ml1_push(ctxt, seed, n_epochs, batch_size_per_task):
set_seed(seed)
envs = [GarageEnv(normalize(ML1.get_train_tasks('push-v1')))]
env = MultiEnvWrapper(envs, mode='del-onehot')
latent_leng... |
class LoadRigidAsAnimation(bpy.types.Operator):
bl_idname = 'load.rigid_as_anim'
bl_label = 'Import Json as Aniamtion'
bl_options = {'REGISTER', 'UNDO'}
bl_description = 'Import Rigids for each frame of animation'
filepath = StringProperty(name='File path', description='Filepath of Json', maxlen=409... |
def dummy_inverse_laplace(*args):
    """Fallback hook: rebuild the two symbolic variables from their repr and
    delegate the inverse Laplace transform to ``_inverse_laplace``."""
    expr, t_sym, s_sym = args[0], args[1], args[2]
    return _inverse_laplace(expr, var(repr(t_sym)), var(repr(s_sym)))
def config_gnd(dataset, dir_main):
dataset = dataset.lower()
if (dataset not in DATASETS):
raise ValueError('Unknown dataset: {}!'.format(dataset))
if ((dataset == 'roxford5k') or (dataset == 'rparis6k')):
gnd_fname = os.path.join(dir_main, dataset, 'gnd_{}.pkl'.format(dataset))
with... |
def remove_Dcfg(minions_cfg):
    """Strip discriminator-related entries from each minion config, in place.

    Removes the ``'DNet_cfg'`` and ``'Dopt_cfg'`` keys from every dict in
    ``minions_cfg`` when present, printing a line for each removal.
    """
    # fix: the original used enumerate() but never read the index
    for mcfg in minions_cfg:
        if 'DNet_cfg' in mcfg:
            print('Removing DNet_cfg')
            del mcfg['DNet_cfg']
        if 'Dopt_cfg' in mcfg:
            print('Removing Dopt_cfg')
            del mcfg['Dopt_cfg']
def get_prior_grad_BO(prior, mx_hat, tx0_hat):
def A_func(mx_hat):
ax = (mx_hat + tx0_hat)
return prior.compute_potential_BO(ax=ax, tx0_hat=tx0_hat)
grad_mx_hat_A = numerical_1st_derivative(mx_hat, A_func, EPSILON)
ax = (mx_hat + tx0_hat)
vx = prior.compute_forward_v_BO(ax=ax, tx0_hat=tx... |
class OldPower(problem.OptimizationFunction):
def __init__(self, obj, power):
self.power = power
self.obj = obj
def calculate_objective_function(self, param):
return (self.obj.calculate_objective_function(param) ** self.power)
def calculate_gradient(self, param):
obj_value = ... |
def print_best_to_file(outfile, metric, samples, metric_name, name1, scores1, name2=None, scores2=None, lower_better=True, n=100):
original_stdout = sys.stdout
with open(outfile, 'a') as f:
sys.stdout = f
print('Metric Name:', metric_name)
if lower_better:
idxs = np.argsort(m... |
def bottleneck_block(cnn, depth, depth_bottleneck, stride, pre_activation):
    """Append a bottleneck residual block to ``cnn``.

    Dispatches to the v2 (pre-activation) builder when ``pre_activation``
    is truthy, otherwise to the v1 builder.
    """
    builder = bottleneck_block_v2 if pre_activation else bottleneck_block_v1
    builder(cnn, depth, depth_bottleneck, stride)
('log_reg_intent_classifier')
class LogRegIntentClassifier(IntentClassifier):
config_type = LogRegIntentClassifierConfig
def __init__(self, config=None, **shared):
super(LogRegIntentClassifier, self).__init__(config, **shared)
self.classifier = None
self.intent_list = None
self.f... |
class BaseMinifiedModeModuleClass(BaseModuleClass):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._minified_data_type = None
def minified_data_type(self) -> (MinifiedDataType | None):
return self._minified_data_type
_data_type.setter
def minified_dat... |
# NOTE(review): the bare expression below looks like a stripped decorator —
# presumably `@arg_at(0, is_int_const)` (first argument must be an integer
# constant); confirm against the original source.
(arg_at(0, is_int_const))
def diag(dim: template(), val: template()):
    """Build a ``dim`` x ``dim`` matrix with ``val`` on the diagonal and 0
    elsewhere; both loops are unrolled at compile time via ``static``."""
    return Matrix([[(val if (i == j) else 0) for j in static(range(dim))] for i in static(range(dim))])
_quantizer(quantization_target=QuantizationTarget.Weights, quantization_method=[QuantizationMethod.POWER_OF_TWO, QuantizationMethod.SYMMETRIC], identifier=RoundingType.STE)
class STEWeightGPTQQuantizer(BaseKerasGPTQTrainableQuantizer):
def __init__(self, quantization_config: TrainableQuantizerWeightsConfig, max_lsb... |
_params({'y_true': ['array-like'], 'y_pred': ['array-like'], 'labels': ['array-like', None], 'pos_label': [str, numbers.Integral, None], 'average': [None, StrOptions({'binary', 'micro', 'macro', 'weighted', 'samples'})], 'sample_weight': ['array-like', None]}, prefer_skip_nested_validation=True)
def sensitivity_score(y... |
def get_norm_layer(norm_type='instance', affine=True, track_running_stats=True):
if (norm_type == 'batch'):
norm_layer = functools.partial(nn.BatchNorm2d, affine=affine, track_running_stats=track_running_stats)
elif (norm_type == 'instance'):
norm_layer = functools.partial(nn.InstanceNorm2d, aff... |
def _seg_54():
return [(66752, 'M', u''), (66753, 'M', u''), (66754, 'M', u''), (66755, 'M', u''), (66756, 'M', u''), (66757, 'M', u''), (66758, 'M', u''), (66759, 'M', u''), (66760, 'M', u''), (66761, 'M', u''), (66762, 'M', u''), (66763, 'M', u''), (66764, 'M', u''), (66765, 'M', u''), (66766, 'M', u''), (66767, ... |
# NOTE(review): the line below looks like a stripped decorator — presumably
# `@test_utils.test(debug=True)`; confirm against the original source.
_utils.test(debug=True)
def test_ternary_op_scalarize():
    """ti.select(cond, x, y) must pick elementwise from x where cond is
    nonzero and from y where it is zero."""
    def test():
        cond = ti.Vector([1, 0, 1])
        x = ti.Vector([3, 3, 3])
        y = ti.Vector([5, 5, 5])
        z = ti.select(cond, x, y)
        assert (z[0] == 3)
        assert (z[1] == 5)
        assert (z[2] == 3)
    test()
def get_layer_id_for_clip(name, num_layers):
if (name in ['cls_token', 'pos_embed', 'class_embedding']):
return 0
elif name.startswith('patch_embed'):
return 0
elif name.startswith('conv1'):
return 0
elif name.startswith('ln_pre'):
return 0
elif name.startswith('posit... |
def regularize_laplace():
reg = np.ones(6890)
v_ids = get_bodypart_vertex_ids()
reg[v_ids['face']] = 8.0
reg[v_ids['hand_l']] = 5.0
reg[v_ids['hand_r']] = 5.0
reg[v_ids['fingers_l']] = 8.0
reg[v_ids['fingers_r']] = 8.0
reg[v_ids['foot_l']] = 5.0
reg[v_ids['foot_r']] = 5.0
reg[v_i... |
class TimeStepBatch(collections.namedtuple('TimeStepBatch', ['env_spec', 'observations', 'actions', 'rewards', 'next_observations', 'terminals', 'env_infos', 'agent_infos'])):
__slots__ = ()
def __new__(cls, env_spec, observations, actions, rewards, next_observations, terminals, env_infos, agent_infos):
... |
def safe_join(directory, *pathnames):
parts = [directory]
for filename in pathnames:
if (filename != ''):
filename = posixpath.normpath(filename)
if (any(((sep in filename) for sep in _os_alt_seps)) or os.path.isabs(filename) or (filename == '..') or filename.startswith('../')):
... |
(scope='module')
def expected_hxy(test_data_xy):
    """Reference value 10*tanh(10*tanh(x + y)) for the pair in
    ``test_data_xy`` (indices 0 and 1)."""
    x, y = test_data_xy[0], test_data_xy[1]
    return 10 * np.tanh(10 * np.tanh(x + y))
def conv1d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1):
stride = _single(stride)
padding = _single(padding)
dilation = _single(dilation)
in_channels = input.shape[1]
out_channels = grad_output.shape[1]
min_batch = input.shape[0]
grad_output = grad_outpu... |
class NotCnxp(UnaryCnxp):
    """Unary bitwise-complement expression node, rendered as ``~``."""

    code = '~'

    def type_constraints(self, tcs):
        """Constrain the result to an integer type equal to the operand's."""
        tcs.integer(self)
        tcs.eq_types(self, self.x)
class InvalidSymbolicApiError(Exception):
    """Raised when an unsupported symbolic backend name is requested."""

    def __init__(self, api: str):
        message = f'Symbolic API is "{api}", must be one of ("sympy", "symengine")'
        super().__init__(message)
def save_loss(loss_dict, model_dir, name):
    """Write ``loss_dict`` to ``<model_dir>/loss/<name>.csv``.

    The ``loss`` subdirectory is created if missing; an existing CSV with
    the same name is overwritten.
    """
    save_dir = os.path.join(model_dir, 'loss')
    os.makedirs(save_dir, exist_ok=True)
    csv_path = os.path.join(save_dir, '{}.csv'.format(name))
    frame = pd.DataFrame(loss_dict)
    frame.to_csv(csv_path)
class PDETerm(nn.Module):
def __init__(self):
super().__init__()
self.lin1 = nn.Linear(1, 30)
self.lin2 = nn.Linear(30, 1)
for param in self.parameters():
param.data.uniform_()
def forward(self, x):
x = self.lin1(x)
x = torch.tanh(x)
x = self.l... |
def load_graph_from_args(pipeline_name: str, framework: str, model: str, tokenizer: Optional[str]=None) -> Pipeline:
if (tokenizer is None):
tokenizer = model
if ((framework == 'pt') and (not is_torch_available())):
raise Exception('Cannot convert because PyTorch is not installed. Please install... |
def get_rules(s1, s2, f1, f2):
phrase_alignments = [(p, s1.phrases[p].align_idx) for p in s1.phrases.keys()]
phrase_pairs = combinations(phrase_alignments, 2)
for ((p11_idx, p21_idx), (p12_idx, p22_idx)) in phrase_pairs:
p11 = s1.phrases[p11_idx]
p21 = s2.phrases[p21_idx]
p12 = s1.ph... |
class DDIMSampler(object):
def __init__(self, model, schedule='linear', **kwargs):
super().__init__()
self.model = model
self.ddpm_num_timesteps = model.num_timesteps
self.schedule = schedule
def register_buffer(self, name, attr):
if (type(attr) == torch.Tensor):
... |
def sl2003_summary(kind, filename):
    """Load the SL2003 summary file ``<SL2003_DIR>/<kind>/<filename>`` and
    parse it into a ``Doc``.

    Raises whatever ``open`` raises if the file does not exist.
    """
    summary_path = os.path.join(SL2003_DIR, kind, filename)
    # fix: the original leaked the file handle (open(...).read() with no close)
    with open(summary_path) as summary_file:
        summary_contents = summary_file.read()
    return Doc.from_see(summary_contents)
def get_RHT_data(xyt_filename='filename.fits'):
hdu_list = fits.open(xyt_filename, mode='readonly', memmap=True, save_backup=False, checksum=True)
print('loading data from ', xyt_filename)
header = hdu_list[0].header
data = hdu_list[1].data
ipoints = data['hi']
jpoints = data['hj']
hthets = ... |
def scatter_kwargs_imbalance(inputs, kwargs, target_gpus, dim=0):
inputs = (scatter_imbalance(inputs, target_gpus, dim) if inputs else [])
kwargs = (scatter_imbalance(kwargs, target_gpus, dim) if kwargs else [])
if (len(inputs) < len(kwargs)):
inputs.extend([() for _ in range((len(kwargs) - len(inpu... |
class ConvBlock(nn.Module):
def __init__(self, in_channels: int, out_channels: int, kernel_size: int, stride: int) -> None:
super().__init__()
self.layers = nn.Sequential(Conv1dSamePadding(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride), nn.BatchNorm1d(num... |
def test_sample_bootstrap_bandit_feedback():
with pytest.raises(ValueError):
dataset = OpenBanditDataset(behavior_policy='random', campaign='all')
dataset.sample_bootstrap_bandit_feedback(is_timeseries_split=True, test_size=1.3)
with pytest.raises(ValueError):
dataset = OpenBanditDataset... |
class TaskTree():
def __init__(self, task_config: TaskConfig):
self._tasks_config = task_config
self.root = OrNode('root')
self.tree_paths = {}
self.entity_paths = {}
self.tasks = {}
self.tree_json = None
self.task_set = set()
self.visualization_paths ... |
def main():
parser = ArgumentParser(description='Also see `pre-commit-hook.py` which lints all files staged in git.')
parser.add_argument('--fix', action='store_true', help='Attempt to fix linting violations')
parser.add_argument('--diff-against', dest='branch', type=str, default=None, help='Diff against th... |
class GraphPaths_all(Parent, GraphPaths_common):
    """Parent structure for the set of all paths in a graph ``g``,
    registered in the category of finite enumerated sets."""
    def __init__(self, g):
        self.graph = g
        Parent.__init__(self, category=FiniteEnumeratedSets())
    def __repr__(self):
        return ('Paths in %s' % repr(self.graph))
    def list(self):
        # Enumeration is delegated to the shared mixin's paths()
        # (presumably defined on GraphPaths_common — confirm).
        return self.paths()
def figure3():
n_subjects = 16
net = xfr.models.lightcnn.LightCNN_29Layers_v2(num_classes=80013)
statedict = xfr.models.lightcnn.Load_Checkpoint('../models/LightCNN_29Layers_V2_checkpoint.pth.tar')
net.load_state_dict(statedict)
wb = xfr.models.whitebox.Whitebox(xfr.models.whitebox.WhiteboxLightCNN(... |
def mobilenet_v1_base(final_endpoint='Conv2d_13_pointwise', min_depth=8, depth_multiplier=1.0, conv_defs=None, output_stride=None):
depth = (lambda d: max(int((d * depth_multiplier)), min_depth))
end_points = OrderedDict()
if (depth_multiplier <= 0):
raise ValueError('depth_multiplier is not greater... |
def get_multiple_choice_adapter_spec(method: str, instructions: str, input_noun: Optional[str], output_noun: str, max_train_instances: int=5, num_outputs: int=5, max_tokens: int=1, empty_input: bool=False, sample_train: bool=True, **kwargs):
if (method == ADAPT_MULTIPLE_CHOICE_JOINT):
return get_multiple_ch... |
class PersLandscapeExact(PersLandscape):
def __init__(self, dgms: list=[], hom_deg: int=0, critical_pairs: list=[], compute: bool=True) -> None:
super().__init__(dgms=dgms, hom_deg=hom_deg)
self.critical_pairs = critical_pairs
if dgms:
self.dgms = dgms[self.hom_deg]
else:... |
def test_assign_pointer():
    """A tasklet that reads a whole array (``A[:]``) through a pointer
    connector and assigns it to a scalar output must be rejected with
    NotSupportedError during code generation."""
    # NOTE(review): the bare tuple below looks like a stripped decorator or
    # annotation — presumably `@dace.program` argument types; confirm
    # against the original source.
    (dace.float64[N], dace.float64[N])
    def program(A, B):
        for i in dace.map[0:N]:
            with dace.tasklet:
                (a << A[:])
                (b >> B[i])
                b = a
    with pytest.raises(NotSupportedError):
        get_code(program)
def get_mp_activation_pytorch_tpc_dict(tpc_model, test_name, tpc_name):
op_sets_to_layer_add = {'Input': [DummyPlaceHolder]}
return {test_name: generate_test_tpc(name=tpc_name, tp_model=tpc_model, base_tpc=generate_pytorch_tpc(name=f'base_{tpc_name}', tp_model=tpc_model), op_sets_to_layer_add=op_sets_to_layer_a... |
def fit_score_model(name, model_kwargs, train_data, test_data, continuous_columns, sample_rows, store_samples):
for (index, kwargs) in enumerate(model_kwargs):
logger.info('Training TGAN Model %d/%d', (index + 1), len(model_kwargs))
tf.reset_default_graph()
base_dir = os.path.join('experimen... |
class Branch(nn.Module):
def __init__(self):
super(Branch, self).__init__()
self.conv1 = nn.Conv2d(128, 128, 3, 1)
self.conv2 = nn.Conv2d(128, 256, 3, 1)
self.conv3 = nn.Conv2d(256, 256, 3, 1)
self.conv4 = nn.Conv2d(256, 512, 3, 1, 1)
self.bn1 = nn.BatchNorm2d(128)
... |
def onnx2tensorrt(onnx_file, trt_file, input_config, verify=False, show=False, workspace_size=1, verbose=False):
import tensorrt as trt
onnx_model = onnx.load(onnx_file)
max_shape = input_config['max_shape']
min_shape = input_config['min_shape']
opt_shape = input_config['opt_shape']
fp16_mode = ... |
def usage(progname):
    """Print the usage line for ``progname`` to stderr and exit with
    status 1 (never returns)."""
    message = 'Usage: ' + progname + ' < bipartitematrix\n'
    sys.stderr.write(message)
    sys.exit(1)
def _main(config, config_idx, train):
base_filename = ((config.name + '_cfg') + str(config_idx))
logger = set_up_logger((('logs/' + base_filename) + '.log'))
title = '{}: {} ({}) config index {}'.format(__file__, config.name, config.desc, config_idx)
logger.info((('START ' + title) + '\n\n{}\n'.format(c... |
class TFConvBertModel(metaclass=DummyObject):
    """Placeholder stand-in for the real TFConvBertModel; instantiating it
    checks for the TensorFlow backend (presumably raising an informative
    error when 'tf' is unavailable — confirm requires_backends)."""
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
class SerializedTestCase(hu.HypothesisTestCase):
should_serialize = False
def get_output_dir(self):
output_dir_arg = getattr(_output_context, 'output_dir', DATA_DIR)
output_dir = os.path.join(output_dir_arg, operator_test_type)
if os.path.exists(output_dir):
return output_dir... |
def test_forward_constituency_composition(pretrain_file):
model = build_model(pretrain_file, '--constituency_composition', 'bilstm')
run_forward_checks(model, num_states=2)
model = build_model(pretrain_file, '--constituency_composition', 'max')
run_forward_checks(model, num_states=2)
model = build_m... |
_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
def get_pretrained_model_and_inputs(self):
model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained('hf-internal-testing/tiny-random-clip', 'hf-internal-testing/tiny-bert', vision_from_pt=True, text_from_pt=Tr... |
class TestYaLMTokenizer():
def setup_method(self, method):
cache_file = tempfile.NamedTemporaryFile(delete=False)
self.cache_path: str = cache_file.name
self.tokenizer = YaLMTokenizer(SqliteCacheConfig(self.cache_path))
self.test_prompt: str = 'The model leverages 100 billion paramet... |
def train(opt):
model = CycleGANModel(opt)
model.train_forward()
dataset = CDFdata.get_loader(opt)
(img_logs, weight_logs) = init_logs(opt)
for epoch_id in range(opt.epoch_size):
for (batch_id, data) in enumerate(dataset):
model.set_input(data)
model.optimize_paramete... |
_inherit(core.Dataset)
class Dataset(core.Dataset):
def __init__(self, data_home=None):
super().__init__(data_home, name='urbansed', clip_class=Clip, bibtex=BIBTEX, remotes=REMOTES, license_info=LICENSE_INFO)
_docs(load_audio)
def load_audio(self, *args, **kwargs):
return load_audio(*args, *... |
class GroupAlgebra_class(CombinatorialFreeModule):
def _coerce_map_from_(self, S):
G = self.basis().keys()
K = self.base_ring()
G_coercion = G.coerce_map_from(S)
if (G_coercion is not None):
from sage.categories.groups import Groups
if (not self.category().is_... |
def train_batch(args, model, batch, options, clusterings):
    """Run one training step: move ``batch`` to the configured device,
    compute features with ``model`` and score them via ``_train_batch``.

    ``options`` is accepted for interface compatibility but not used here.
    """
    device_batch = to_device(batch, args.computation.device)
    features = model(device_batch['data'])
    return _train_batch(args, features, clusterings)
class Scale():
def __init__(self):
parser = self.get_parser()
self.options = parser.parse_args()
def get_parser(self):
parser = argparse.ArgumentParser(description='Scale a set of meshes stored as OFF files.')
parser.add_argument('--in_dir', type=str, help='Path to input director... |
def pwdist_exact(X1, Y1, X2=None, Y2=None, symmetric=False, loss='sinkhorn', cost_function='euclidean', p=2, debias=True, entreg=0.1, device='cpu'):
device = process_device_arg(device)
if (X2 is None):
symmetric = True
(X2, Y2) = (X1, Y1)
c1 = torch.unique(Y1)
c2 = torch.unique(Y2)
(... |
(frozen=True)
class Token():
text: str
logprob: float
top_logprobs: Dict[(str, float)]
def render_lines(self) -> List[str]:
top_logprobs_entries = sorted(self.top_logprobs.items(), key=(lambda entry: (- entry[1])))
top_logprobs_str = (('{' + ', '.join((f'{format_text(text)}: {logprob}' f... |
def foo(a, b, c=None, d=None):
    """Call wait_one() three times, each followed by four wait_two() calls.

    The parameters are accepted but never referenced by this body.
    """
    for _outer in range(3):
        wait_one()
        for _inner in range(4):
            wait_two()
def so3_rft(x, b, grid):
    """SO(3) Fourier transform of a real signal sampled on ``grid``.

    x: real tensor whose last axis enumerates the grid sample points.
    b: bandwidth forwarded to the kernel setup.
    grid: sampling grid forwarded to the kernel setup.
    Returns a tensor of shape (n_spectral, *x.shape[:-1], 2); the trailing
    2 is presumably the real/imaginary pair — confirm with _setup_so3_ft.
    """
    # Precomputed transform kernel on x's device; shape (n_points, n_spectral, 2)
    # as implied by the size check and the einsum subscripts below.
    F = _setup_so3_ft(b, grid, device_type=x.device.type, device_index=x.device.index)
    assert (x.size((- 1)) == F.size(0))
    sz = x.size()
    # Flatten leading dims to one batch axis and contract the sample axis.
    x = torch.einsum('ia,afc->fic', (x.view((- 1), x.size((- 1))), F.clone()))
    # Restore the leading batch dims, keeping the trailing pair dimension.
    x = x.view((- 1), *sz[:(- 1)], 2)
    return x
class FactorModel(Model):
def __init__(self, factor_dag):
if (not isinstance(factor_dag, FactorDAG)):
raise TypeError(f'factor_dag {factor_dag} is not a FactorDAG')
for node in factor_dag._roots_ph:
raise ValueError(f'root node {node} not a prior')
self.factor_dag = f... |
def get_250k_val_set(input_transform):
    """Build the Pittsburgh-250k validation dataset from its .mat struct,
    applying ``input_transform`` to each image."""
    mat_path = join(struct_dir, 'pitts250k_val.mat')
    return WholeDatasetFromStruct(mat_path, input_transform=input_transform)
class FlaxDistilBertForMaskedLM(metaclass=DummyObject):
    """Placeholder stand-in for the real FlaxDistilBertForMaskedLM;
    instantiating it checks for the Flax backend (presumably raising an
    informative error when 'flax' is unavailable — confirm
    requires_backends)."""
    _backends = ['flax']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
def test_ListOffsetArray_NumpyArray():
v2a = ak.contents.listoffsetarray.ListOffsetArray(ak.index.Index(np.array([1, 4, 4, 6, 7], np.int64)), ak.contents.numpyarray.NumpyArray(np.array([6.6, 1.1, 2.2, 3.3, 4.4, 5.5, 7.7])))
resultv2 = v2a[np.array([1, 2], np.int64)]
assert (to_list(resultv2) == [[], [4.4, 5... |
class IdempotentIdPreprocessor(preprocessors.Preprocessor):
    """Preprocessor that rewrites every cell id to its positional index,
    so repeated conversions of the same notebook yield identical output."""

    def preprocess_cell(self, cell, resources, cell_index):
        """Return a deep copy of ``cell`` whose id is the stringified index,
        leaving the input cell untouched."""
        new_cell = copy.deepcopy(cell)
        new_cell.id = str(cell_index)
        return (new_cell, resources)
class TemplateConstraint():
    """Value object describing a table constraint: the owning table, the
    constraint's name, and its SQL definition text."""

    def __init__(self, table_name: str, name: str, definition: str) -> None:
        self.table_name, self.name, self.definition = table_name, name, definition
class Tableau_class(Tableau):
    """Legacy alias kept so old pickles that reference ``Tableau_class``
    unpickle into the modern ``Tableau`` class."""
    def __setstate__(self, state):
        # Swap in the current class, then rebuild from the pickled list.
        self.__class__ = Tableau
        self.__init__(Tableaux(), state['_list'])
class Partition6(nn.Module):
LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[decoder]/T5Block[1]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[2]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[3]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[4]', 'T5ForConditionalGeneration/T5Stack[dec... |
class RealToBinary(Model):
def __init__(self, *, input_shape=None, frame_modulation_size=1, depth_modulation_size=1, value_generator=None, framewise=False, input_range_lo=0.0, input_range_hi=1.0, name=None, bin_dtype=bb.DType.FP32, real_dtype=bb.DType.FP32, core_model=None):
if (core_model is None):
... |
class WarmupCosineSchedule(LambdaLR):
def __init__(self, optimizer, warmup_steps, t_total, cycles=0.5, last_epoch=(- 1)):
self.warmup_steps = warmup_steps
self.t_total = t_total
self.cycles = cycles
super(WarmupCosineSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last... |
def load_detail(file):
data = {}
with open(file) as f:
for (i, row_text) in enumerate(f):
row = row_text.replace('\r', '').replace('\n', '').split(',')
if (i == 0):
keys = row[1:]
continue
current_values = row[1:]
seq = row[... |
class DebertaV2PreTrainedModel(metaclass=DummyObject):
    """Placeholder stand-in for the real DebertaV2PreTrainedModel;
    instantiating it checks for the PyTorch backend (presumably raising an
    informative error when 'torch' is unavailable — confirm
    requires_backends)."""
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def mergeable(one: Content, two: Content, mergebool: bool=True) -> bool:
    """Return True when ``one`` can be merged with ``two``.

    Thin wrapper delegating to ``one._mergeable_next``; ``mergebool``
    presumably controls whether boolean content may merge with numeric
    content — confirm against Content._mergeable_next.
    """
    return one._mergeable_next(two, mergebool=mergebool)
class AnalysisPipelineConfig(PipelineConfig):
def __init__(self, d, layers, tensors):
super().__init__(d)
self.stage_to_model = {stage_id: self.realize_stage(layers, tensors, stage_id, device='cpu') for stage_id in range(self.n_stages)}
try_jit = False
if try_jit:
for (i,... |
class Disjunction(JunctorCondition):
def _simplified(self, parts):
result_parts = []
for part in parts:
if isinstance(part, Disjunction):
result_parts += part.parts
elif isinstance(part, Truth):
return Truth()
elif (not isinstance(p... |
def test_snippet():
    """Run bb.snippet for motif 'AAANU' into outdir, then call comp() on
    every resulting 1S72 PDB file found there."""
    bb.snippet(fname, 'AAANU', outdir=('%s' % outdir))
    pdb_pattern = ('%s/1S72*.pdb' % outdir)
    for pdb_file in glob.glob(pdb_pattern):
        comp(pdb_file)
def norm_attention(result_file, attention_dir):
makedir(attention_dir)
slide_list = []
with open(result_file, 'r') as f:
reader = csv.reader(f)
for row in reader:
if (len(row) == 5):
slideID = row[0].split('_')[0]
summary_file = f'{attention_dir}/{... |
def _no_schema(source, line_delimited, nan_string, posinf_string, neginf_string, complex_record_fields, buffersize, initial, resize, highlevel, behavior, attrs):
ctx = HighLevelContext(behavior=behavior, attrs=attrs).finalize()
builder = _ext.ArrayBuilder(initial=initial, resize=resize)
read_one = (not line... |
def complete_dims(s, dims):
    """Expand ``s`` to a per-dimension value tuple of length ``dims``.

    A scalar (non-iterable) value is replicated ``dims`` times; an
    iterable is returned unchanged when its length already matches.

    Raises:
        ValueError: if ``s`` is iterable but its length differs from
            ``dims``.
    """
    if not hasattr(s, '__iter__'):
        return (s,) * dims
    if len(s) == dims:
        return s
    # fix: the original raised ValueError('') with no message — give the
    # caller something actionable.
    raise ValueError(f'expected {dims} dimension value(s), got {len(s)}: {s!r}')
def ultimate_release():
    """Open the virtual joystick, push a freshly generated (presumably
    neutral) position, pause briefly so the driver registers it, then
    close the device."""
    vj.open()
    position = vj.generateJoystickPosition()
    vj.update(position)
    time.sleep(0.001)
    vj.close()
def summary_stats(df):
tests = df[(df.scalar == 'double')].test.unique()
best_cpu = {}
worst_cpu = {}
shapes = []
for matrix in matrices:
cs = []
for test in tests:
try:
cpu = df.cpu[(((df.scalar == 'double') & (df.mat == matrix)) & (df.test == test))].val... |
class NER():
def __init__(self, service_channel: str):
print('Initializing NER ... ', end='', flush=True)
channel = grpc.insecure_channel(service_channel)
self.stub = ner_pb2_grpc.NERPredictorServiceStub(channel)
print('Done')
def __call__(self, context, **kwargs):
reques... |
def parse_global_args(parser):
parser.add_argument('--gpu', type=str, default='0', help='Set CUDA_VISIBLE_DEVICES')
parser.add_argument('--verbose', type=int, default=logging.INFO, help='Logging Level, 0, 10, ..., 50')
parser.add_argument('--log_file', type=str, default=os.path.join(LOG_DIR, 'log.txt'), hel... |
def test_ResourceManager2():
from sequence.kernel.process import Process
from sequence.kernel.event import Event
from sequence.components.optical_channel import ClassicalChannel, QuantumChannel
from sequence.topology.node import BSMNode
from sequence.entanglement_management.generation import Entangl... |
def ground_truth(x):
    """Numerically stable softmax over the last axis of ``x``.

    The max is subtracted before exponentiating so large inputs do not
    overflow; the result sums to 1 along the last axis.
    """
    shifted = x - np.max(x, axis=-1, keepdims=True)
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=-1, keepdims=True)
def main():
parser = argparse.ArgumentParser(description='Argument Parser')
parser.add_argument('--suncg_dataset', type=str, default='../../../suncg_data')
parser.add_argument('--dest_dir', type=str, default='../../../nav_data')
args = parser.parse_args()
os.makedirs(args.dest_dir, exist_ok=True)
... |
class PropagatePositions():
def __init__(self, node_builder, node_filter=None):
self.node_builder = node_builder
self.node_filter = node_filter
def __call__(self, children):
res = self.node_builder(children)
if isinstance(res, Tree):
res_meta = res.meta
fi... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.