code stringlengths 101 5.91M |
|---|
class TestEstimatorModels(TestCase):
def test_estimator(self):
try:
ret = subprocess.run([sys.executable, 'python/runtime/tensorflow/estimator_example.py'], env=os.environ.copy(), check=True)
self.assertEqual(ret.returncode, 0)
except Exception as e:
self.fail(('%... |
class GRUEncoder(chainer.Chain):
def __init__(self, n_layers, n_vocab, n_genre, pretrained_w2v, is_update_w2v, dropout, genre_units=5):
super(GRUEncoder, self).__init__()
with self.init_scope():
self.base_embedding_layer = BaseEmbeddingLayer(n_vocab=n_vocab, n_genre=n_genre, genre_units=... |
def GenerateSM75_TensorOp_1688(manifest, args):
if (not CudaToolkitVersionSatisfies(args.cuda_version, 10, 2)):
return
layouts = [(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), (LayoutType.RowMajor, LayoutT... |
def bare_matrixelement(sweep: 'ParameterSweep', paramindex_tuple: Tuple[(int, ...)], paramvals_tuple: Tuple[(float, ...)], operator_name: str, subsystem: QubitBaseClass) -> np.ndarray:
subsys_index = sweep.get_subsys_index(subsystem)
bare_evecs = sweep['bare_evecs'][subsys_index][paramindex_tuple]
return su... |
def sample_logistic(shape, uniform):
    """Draw samples from a standard logistic distribution.

    Applies the inverse-CDF (logit) transform to draws from the given
    uniform sampler, i.e. log(u) - log(1 - u) for u ~ Uniform(0, 1).
    """
    draws = uniform.sample(shape)
    return torch.log(draws) - torch.log(1 - draws)
def spin_polynomial_square(part, weight, length):
R = ZZ['t']
if (part in _Partitions):
part = SkewPartition([part, _Partitions([])])
elif (part in SkewPartitions()):
part = SkewPartition(part)
if ((part == [[], []]) and (not weight)):
return R.one()
t = R.gen()
return R(... |
('/_leave_chat/', methods=['GET'])
def leave_chat():
    """Notify the backend that the current user is leaving their chat.

    NOTE(review): the decorator line above appears truncated -- presumably
    `@app.route('/_leave_chat/', methods=['GET'])`; confirm against the
    original file.
    """
    backend = get_backend()
    uid = userid()
    # Look up the chat so the LeaveEvent carries the correct agent index.
    chat_info = backend.get_chat_info(uid)
    backend.send(uid, Event.LeaveEvent(chat_info.agent_index, uid, str(time.time())))
    return jsonify(success=True)
def print_hparams(params, sort=True, print_std=True):
kv_list = [(k, v) for (k, v) in params.values().items()]
if sort:
kv_list = list(sorted(kv_list, key=(lambda elem: elem[0])))
str_re = ''
for (k, v) in kv_list:
str_re += ('%s: %s%s' % (k, v, os.linesep))
if print_std:
log... |
def determine_target(test, touched_files, options):
test = parse_test_module(test)
if (test not in SLOW_TESTS):
if options.verbose:
print_to_stderr(f'Running {test} without determination')
return True
if test.endswith('_no_ninja'):
test = test[:((- 1) * len('_no_ninja'))]... |
class Dataset(object):
def __init__(self, config):
self.config = config
self.train_iterator = None
self.test_iterator = None
self.val_iterator = None
self.vocab = []
self.vocab1 = []
self.vocab2 = []
self.word_embeddings = {}
self.weights = []
... |
class ResNetV2(nn.Module):
def __init__(self, block, layers, num_classes=256, zero_init_residual=False, agg_mode='ap', fmap_out_size=3, use_cbam=False):
super(ResNetV2, self).__init__()
self.inplanes = 64
self.agg_mode = agg_mode
self.layer1 = self._make_layer(block, 64, layers[0], u... |
def convert_example_to_features_init(tokenizer_for_convert: PreTrainedTokenizerBase):
    """Stash the tokenizer in a module-level global.

    Presumably used as a multiprocessing pool initializer so each worker
    process gets its own `tokenizer` -- confirm against the pool setup.
    """
    global tokenizer
    tokenizer = tokenizer_for_convert
def draw_scores_by_task(scores_by_task, filename, methods, replay=True, baseline=True):
legends = list(methods)
if baseline:
labels = ['WA-MDF', 'WA-ADB', 'BiC', 'LUCIR', 'iCaRL', 'ILOS', 'GEM', 'R-EWC', 'MAS', 'LwF']
colors = ['firebrick', 'green', 'deepskyblue', 'steelblue', 'chocolate', 'gold... |
def tokenize_single_comma(val):
m = r_comattrval.match(val)
if m:
try:
name = m.group(1).strip()
type = m.group(2).strip()
except IndexError as e:
raise ValueError('Error while tokenizing attribute') from e
else:
raise ValueError(('Error while toke... |
class ContainerAdaptersManager():
    """Registry of container adapters, keyed by their `container_lib` name."""

    def __init__(self):
        # Maps container_lib name -> adapter object.
        self.adapters = {}

    def supported_outputs(self):
        """Return the set of supported output names ('default' plus all registered adapters)."""
        return {'default'}.union(self.adapters)

    def register(self, adapter):
        """Register *adapter* under its `container_lib` attribute."""
        self.adapters[adapter.container_lib] = adapter
def timeval(string):
if (string.endswith('am') or (string.endswith('pm') and string[:(- 2)].isdigit())):
numval = int(string[:(- 2)])
if ((len(string) == 3) or (len(string) == 4)):
numval *= 100
if string.endswith('pm'):
numval += 1200
return str(numval)
r... |
def homogeneity(labels1, labels2):
num_missed = 0.0
for label in set(labels1):
matches = labels2[(labels1 == label)]
try:
(match_mode, mode_count) = mode(matches, keepdims=True)
except:
(match_mode, mode_count) = mode(matches)
num_missed += np.sum((matches... |
class LinearAttention(nn.Module):
def __init__(self, dim):
super(LinearAttention, self).__init__()
self.linear = nn.Linear((dim * 3), 1, bias=False)
self.linear_out = nn.Linear((dim * 2), dim, bias=False)
self.sm = nn.Softmax(dim=1)
self.tanh = nn.Tanh()
self.mask = N... |
def ratecv(cp, size, nchannels, inrate, outrate, state, weightA=1, weightB=0):
_check_params(len(cp), size)
if (nchannels < 1):
raise error('# of channels should be >= 1')
bytes_per_frame = (size * nchannels)
frame_count = (len(cp) / bytes_per_frame)
if ((bytes_per_frame / nchannels) != size... |
def allocate_device():
    """Return the next free device id from the shared queue.

    The module-level ``free_devices`` queue is guarded by
    ``free_devices_lock``; the lock is held only for the duration of the
    ``get`` call.  Using ``with`` instead of a manual acquire/finally
    release is the idiomatic (and equivalent) form.
    """
    with free_devices_lock:
        return free_devices.get()
def dict_all_to_device(tensor_dict, device):
    """Move every torch.Tensor value of *tensor_dict* to *device*, in place.

    Non-tensor values are left untouched.
    """
    for key, value in tensor_dict.items():
        if isinstance(value, torch.Tensor):
            tensor_dict[key] = value.to(device)
class MaskLoss(nn.Module):
def __init__(self, reduction):
super(MaskLoss, self).__init__()
self.loss = None
self.reduction = reduction
def forward(self, x, y, mask):
if (self.loss == None):
raise ValueError('loss.py: MaskLoss.loss has not been implemented')
co... |
def mp_validate(model: nn.Module, dl: DataLoader, loss_func: OptLossFunc=None, cb_handler: Optional[CallbackHandler]=None, pbar: Optional[PBar]=None, average=True, n_batch: Optional[int]=None) -> Iterator[Tuple[(Union[(Tensor, int)], ...)]]:
model.eval()
with torch.no_grad():
(val_losses, nums) = ([], [... |
class TFLogitsProcessor(metaclass=DummyObject):
    """Placeholder class used when TensorFlow is not installed.

    Instantiation calls `requires_backends`, which (per the library's
    dummy-object convention, presumably) raises an informative error
    telling the user to install the 'tf' backend.
    """
    _backends = ['tf']  # backend(s) this symbol requires
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def global_example_pool(x, batch, size=None):
    """Sum-pool rows of *x* per example id in *batch* (segment sum).

    When *size* is not given, it is inferred as max(batch) + 1.
    """
    if size is None:
        size = batch.max().item() + 1
    return scatter(x, batch, dim=0, dim_size=size, reduce='add')
def get_entities(database, test_id):
config = dict(database.get_one_bot_test_instance(test_id))
entity_path = 'data/bots/{}/{}/goals_dir/entities.json'.format(config['type'], test_id)
entities = None
if (('STORAGE' in os.environ) and (os.environ['STORAGE'] == 'S3')):
if file_exists(S3_BUCKET_NAM... |
def print_changed_only_false():
    """Context helper: temporarily set print_changed_only=False.

    The reset now runs in a ``finally`` block so the configuration is
    restored even when the body of the ``with`` raises; the original
    skipped the reset on exception.

    NOTE(review): restores the hard-coded value True (matching the
    original) rather than the previous setting -- confirm that True is
    indeed the ambient default here.
    """
    set_config(print_changed_only=False)
    try:
        yield
    finally:
        set_config(print_changed_only=True)
class Weierstrass(Benchmark):
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip(([(- 0.5)] * self.N), ([0.5] * self.N)))
self.global_optimum = [[0.0 for _ in range(self.N)]]
self.fglob = 0
self.change_dimensionality = True
def ... |
class VCTKFeaturesDataset(Dataset):
def __init__(self, vctk_path, subdirectory, normalizer=None, features_path='features'):
self._vctk_path = vctk_path
self._subdirectory = subdirectory
features_path = ((self._vctk_path + os.sep) + features_path)
self._sub_features_path = ((features_... |
class GPTNeoXJapaneseConfig(PretrainedConfig):
model_type = 'gpt_neox_japanese'
def __init__(self, vocab_size=32000, hidden_size=2560, num_hidden_layers=32, num_attention_heads=32, intermediate_multiple_size=4, hidden_act='gelu', rotary_pct=1.0, rotary_emb_base=10000, max_position_embeddings=2048, initializer_r... |
def is_relevant(line):
    """Return True unless *line* starts with known analyzer boilerplate.

    Uses the tuple form of ``str.startswith`` instead of a chain of
    ``or``-ed calls -- one C-level test, same semantics.
    """
    boilerplate_prefixes = (
        'Analyzing contract at',
        'Starting symbolic execution step...',
        'Symbolic execution finished with coverage',
        'Outcomes: ',
    )
    return not line.startswith(boilerplate_prefixes)
def test_ratio_order(example_diversity_ones_zeros):
    """ratio_errors must be symmetric in the order of its two predictors.

    Uses the `example_diversity_ones_zeros` fixture (defined elsewhere)
    providing labels plus an all-ones and an all-zeros prediction.
    """
    (y, y_pred_ones, y_pred_zeros) = example_diversity_ones_zeros
    ratio1 = ratio_errors(y, y_pred_ones, y_pred_zeros)
    ratio2 = ratio_errors(y, y_pred_zeros, y_pred_ones)
    assert (ratio1 == ratio2)
def checkNull(dummy_dictionary):
    """Return True if any value in *dummy_dictionary* is None, else False.

    Simplified: membership testing works directly on the values view (no
    intermediate list), and ``in`` already yields a bool, so the
    if/else around it was redundant.
    """
    return None in dummy_dictionary.values()
class _TANGRAM_REGISTRY_KEYS_NT(NamedTuple):
    """Registry key names used by the Tangram module.

    NOTE(review): SC/SP presumably stand for single-cell / spatial data
    keys -- confirm against where this registry is consumed.
    """
    SC_KEY: str = 'X'
    SP_KEY: str = 'Y'
    DENSITY_KEY: str = 'DENSITY'
def get_vertices(component_root):
vertices = []
def recurse_component(node, vertices):
if (node.node_type == NodeType.NORMAL):
vertices.append(node.children[0])
return
for child in node.children:
recurse_component(child, vertices)
recurse_component(compone... |
def find_best_checkpoint(*dirs):
best_checkpoint_path = None
best_epoch = (- 1)
best_val_loss = .0
for dir in dirs:
checkpoint_paths = glob('{}/{}*'.format(dir, FLAGS.checkpoint_prefix))
for checkpoint_path in checkpoint_paths:
epoch = int(re.findall('e\\d+', checkpoint_path)... |
def read_squad_examples(logger, args, input_file, debug):
def _process_sent(sent):
if (type(sent) != str):
return [_process_sent(s) for s in sent]
return sent.replace('', '-').replace('&', 'and').replace('&', 'and')
input_data = []
for _input_file in input_file.split(','):
... |
def _py2expr(a, ctx=None):
if isinstance(a, bool):
return BoolVal(a, ctx)
if _is_int(a):
return IntVal(a, ctx)
if isinstance(a, float):
return RealVal(a, ctx)
if isinstance(a, str):
return StringVal(a, ctx)
if is_expr(a):
return a
if z3_debug():
_z... |
def check_openmp_support():
if ('PYODIDE_PACKAGE_ABI' in os.environ):
return False
code = textwrap.dedent(' #include <omp.h>\n #include <stdio.h>\n int main(void) {\n #pragma omp parallel\n printf("nthreads=%d\\n", omp_get_num_threads());\n return 0;\n }\... |
def valid_noise(string_value):
    """Parse a noise argument.

    The literal string 'inf' is passed through unchanged; any other
    value is converted to float (raising ValueError if not numeric).
    """
    return string_value if string_value == 'inf' else float(string_value)
class GradientAnisotropicDiffusion(pymia_fltr.Filter):
def __init__(self, time_step: float=0.125, conductance: int=3, conductance_scaling_update_interval: int=1, no_iterations: int=5):
super().__init__()
self.time_step = time_step
self.conductance = conductance
self.conductance_scali... |
def url_to_filename(url: str, etag: str=None) -> str:
    """Derive a filesystem-safe name from *url* (plus optional *etag*).

    The URL is base64-encoded; when an etag is supplied, it is appended
    after a dot with any double quotes stripped.
    """
    encoded = base64.b64encode(url.encode('utf-8')).decode('utf-8')
    if not etag:
        return encoded
    clean_etag = etag.replace('"', '')
    return f'{encoded}.{clean_etag}'
def load_all_models(args, train_samples):
student_in_context_samples = random.sample(train_samples, args.ic_num)
print('Loading student model!!!')
tokenizer = AutoTokenizer.from_pretrained(args.student_model_path, cache_dir=args.cache_dir, use_fast=False)
smodel = AutoModelForCausalLM.from_pretrained(ar... |
def test_entry_exit_node_without_nodes(graph):
    """An empty `graph` fixture exposes no entry node and an empty exit set."""
    assert (graph.entry_node is None)
    assert (graph.exit_nodes == set())
class RewriteName(ast.NodeTransformer):
def __init__(self, class_name):
self.class_name = class_name
def visit_Call(self, node):
if isinstance(node.func, ast.Name):
return ast.Call(func=ast.Attribute(value=ast.Name(id=self.class_name, ctx=ast.Load()), attr=node.func.id, ctx=ast.Load(... |
def get_xpos_factory(shorthand, fn):
logger.info('Resolving vocab option for {}...'.format(shorthand))
train_file = os.path.join(DATA_DIR, '{}.train.in.conllu'.format(shorthand))
if (not os.path.exists(train_file)):
raise UserWarning('Training data for {} not found in the data directory, falling bac... |
def test_average_combiner(create_pool_classifiers):
    """Averaging the pool's predictions on [[1, -1]] should yield class 0.

    NOTE(review): the expected value depends on the
    `create_pool_classifiers` fixture defined elsewhere -- confirm there.
    """
    query = np.array([[1, (- 1)]])
    ensemble_classifiers = create_pool_classifiers
    expected = 0
    result = average_combiner(ensemble_classifiers, query)
    assert (result == expected)
class DynamicConstantProvider(DelegatingConstantProvider):
def __init__(self, pool: ConstantPool, delegate: ConstantProvider, probability: float, max_constant_length: int):
super().__init__(pool, delegate, probability)
assert (max_constant_length > 0), 'Length limit for constant pool elements must b... |
class FusedLeakyReLU(nn.Module):
def __init__(self, channel, bias=True, negative_slope=0.2, scale=(2 ** 0.5)):
super().__init__()
if bias:
self.bias = nn.Parameter(torch.zeros(channel))
else:
self.bias = None
self.negative_slope = negative_slope
self.s... |
def add_distant_neighbors(data, hops):
assert (hops > 1)
(edge_index, _) = remove_self_loops(data.edge_index)
(edge_index, _) = add_self_loops(edge_index, num_nodes=data.x.size(0))
one_hop_set = set([tuple(x) for x in edge_index.transpose(0, 1).tolist()])
(row, col) = edge_index
adj = SparseTens... |
def add_node(G, center_feature, location_list, index):
    """Insert one node per row of *center_feature* into graph *G*.

    Each node i carries its feature row and the coordinates resolved via
    get_location(index[i], location_list).  Returns G for chaining.
    """
    for node_id in range(center_feature.shape[0]):
        coords = get_location(index[node_id], location_list)
        G.add_node(node_id, feature=center_feature[node_id], coordinate=coords)
    return G
def get_model(cond_input_op):
    """Build the MNIST model graph (TF1 graph mode).

    Returns the model output `y` together with the boolean `train_pl`
    placeholder (default False) that toggles train-time behaviour.
    """
    train_pl = tf.placeholder_with_default(False, shape=(), name='train_pl')
    y = mnist_model.model(cond_input_op, train_pl)
    return (y, train_pl)
.parametrize('ctx, func_name', ctxs)
.parametrize('seed', [313])
.parametrize('x_shape , batch_axis, channel_axis', [((2, 4, 3, 3), 0, 1), ((4, 32, 8, 8), (- 4), (- 3)), ((2, 3, 3, 4), 0, 3), ((16, 4), 0, 1), ((5, 2, 6), [0, 1], 2), ((5, 2, 6), [(- 3), (- 2)], (- 1))])
.parametrize('eps', [1e-05])
.parametrize('output_... |
def visualfrontend_checker():
device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
model = VisualFrontend().to(device)
model.to(device)
(T, N, C, H, W) = (10, args['BATCH_SIZE'], 1, args['ROI_SIZE'], args['ROI_SIZE'])
inputBatch = torch.rand(T, N, C, H, W).to(device)
model.eva... |
.script
def rref_script_annotation(rref_var: RRef[Tensor]) -> Tensor:
return rref_python_annotation(rref_var).to_here() |
def fictest(A: dace.int32[4]):
    """DaCe program with a data-dependent loop bound (min of A[0], A[1])."""
    for a in range(min(A[0], A[1])):
        # Tasklet reads A[2] and overwrites A[3] with A[2] + a each
        # iteration, so A[3] ends at A[2] + (trip count - 1) if the loop runs.
        with dace.tasklet:
            (inp << A[2])
            (out >> A[3])
            out = (inp + a)
class ScaledSetASpaceInvadersWorld(RandomScaledSpaceInvadersWorld):
    """Space Invaders world variant with scales restricted to [0.95, 1.0]."""
    scale_range_start = 0.95  # lower bound of the random scale range
    scale_range_end = 1.0     # upper bound of the random scale range
class TFCLIPTextModel(metaclass=DummyObject):
    """Placeholder class used when TensorFlow is not installed.

    Instantiation calls `requires_backends`, which (per the library's
    dummy-object convention, presumably) raises an informative error
    telling the user to install the 'tf' backend.
    """
    _backends = ['tf']  # backend(s) this symbol requires
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def kdl_tree_from_urdf_model(urdf):
kdl = PyKDL
root = urdf.get_root()
tree = kdl.Tree(root)
def add_children_to_tree(parent):
if (parent in urdf.child_map):
for (joint, child_name) in urdf.child_map[parent]:
child = urdf.link_map[child_name]
if (child... |
class DConv_BN(chainer.Chain):
def __init__(self, nb_in, nb_out, ksize=3, dilate=1, no_bn=False):
super(DConv_BN, self).__init__()
self.no_bn = no_bn
with self.init_scope():
self.conv = L.DilatedConvolution2D(nb_in, nb_out, ksize=(ksize, 1), pad=(dilate, 0), dilate=(dilate, 1))
... |
.parametrize('hidden_size,sparse_feature_num', [((2,), 2), ((), 2)])
def test_DeepFEFMEstimator(hidden_size, sparse_feature_num):
import tensorflow as tf
if ((not TEST_Estimator) or (tf.__version__ == '1.4.0')):
return
from deepctr.estimator import DeepFEFMEstimator
sample_size = SAMPLE_SIZE
... |
def get_spotify_ids(json_path):
with open(json_path) as f_json:
json_data = json.load(f_json)
json_data = json_data['response']['songs']
if (len(json_data) == 0):
spotify_ids = []
else:
json_data = json_data[0]
spotify_ids = []
for trac... |
def perms_canonical_labels(p, e=None):
if (not (len(p) > 1)):
raise ValueError('input must have length >= 2')
n = len(p[0])
c_win = None
m_win = list(range(n))
x = p[0]
y = p[1:]
if (e is None):
e = list(range(n))
while e:
i = e.pop()
m_test = perms_canoni... |
class FeatureDataset(IterableDataset):
def __init__(self, args, shards_path, all_shards_path, node_selection=identity, shard_shuffle=identity, is_train=True):
self.shards_path = shards_path
self.all_shards_path = all_shards_path
self.is_train = is_train
verbose = (args.verbose and du... |
class BasePolicyReinforce(BasePolicy):
def __init__(self, policy_config):
super(BasePolicyReinforce, self).__init__(policy_config)
self.logits = []
self.returns = []
def forward(self, data):
shared_features = F.relu(self.shared_features(data))
if (self.is_self_play and se... |
def test_broadcasting(backend):
tb = pyhf.tensorlib
assert (list(map(tb.tolist, tb.simple_broadcast(tb.astensor([1, 1, 1]), tb.astensor([2]), tb.astensor([3, 3, 3])))) == [[1, 1, 1], [2, 2, 2], [3, 3, 3]])
assert (list(map(tb.tolist, tb.simple_broadcast(tb.astensor(1), tb.astensor([2, 3, 4]), tb.astensor([5... |
_task('gigaword', dataclass=GigawordConfig)
class GigawordTask(OFATask):
def __init__(self, cfg: GigawordConfig, src_dict, tgt_dict):
super().__init__(cfg, src_dict, tgt_dict)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
paths = self.cfg.data.split(',')
assert (len(pa... |
def delete_yaml_config(config_filename):
    """Delete *config_filename*; silently no-op when it does not exist.

    EAFP: attempt the removal and catch FileNotFoundError instead of an
    exists()-then-remove pair, which is racy (the file could disappear
    between the check and the call).  Other OS errors (e.g. the path is
    a directory, permission denied) still propagate, as before.
    """
    try:
        os.remove(config_filename)
    except FileNotFoundError:
        pass
class iData(object):
    """Base holder for per-dataset transform/ordering configuration.

    Subclasses presumably override these class attributes per dataset --
    confirm against the concrete iData implementations.
    """
    train_trsf = []     # transforms applied to training samples
    test_trsf = []      # transforms applied to test samples
    common_trsf = []    # transforms shared by both splits
    class_order = None  # optional explicit ordering of class labels
def load_mnist(root, training):
if training:
data = 'train-images-idx3-ubyte'
label = 'train-labels-idx1-ubyte'
N = 60000
else:
data = 't10k-images-idx3-ubyte'
label = 't10k-labels-idx1-ubyte'
N = 10000
with open(osp.join(root, data), 'rb') as fin:
fin... |
def get_month_bins(dates):
    """Return month-boundary bin edges as seconds since the epoch.

    Edges run from `month_duration - 1` months before the current month
    up to one month after it (the i == -1 term), all expressed in the
    timezone of dates[0].

    NOTE(review): relies on a module-level `month_duration` and assumes
    `dates` is non-empty -- confirm callers guarantee both.
    """
    now = datetime.now(tz=dates[0].tzinfo)
    # First day of the current month, in the same timezone as the input.
    this_month = datetime(year=now.year, month=now.month, day=1, tzinfo=dates[0].tzinfo)
    bins = [(this_month - relativedelta(months=i)) for i in reversed(range((- 1), month_duration))]
    return seconds_from_epoch(bins)
def resolve_classpath(classpath=None):
if ((classpath == '$CLASSPATH') or ((classpath is None) and (os.getenv('CORENLP_HOME', None) == '$CLASSPATH'))):
classpath = os.getenv('CLASSPATH')
elif (classpath is None):
classpath = os.getenv('CORENLP_HOME', os.path.join(str(Path.home()), 'stanza_corenl... |
def register_Ns3UintegerValue_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('uint64_t const &', 'value')])
cls.add_constructor([param('ns3::UintegerValue const &', 'arg0')])
cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
... |
.parametrize('seed', [313])
.parametrize('axes', [[0, 1], [1, 2], [0, 2], [(- 2), (- 1)]])
.parametrize('decay_rate', [0.9])
.parametrize('eps', [1e-05])
.parametrize('output_stat', [True, False])
.parametrize('ctx, func_name', ctxs)
def test_batch_normalization_for_multiple_axes_forward_backward(seed, axes, decay_rate... |
def mapk(actual, predicted, k=20):
    """Mean average precision at k over paired actual/predicted item lists."""
    per_query_scores = [apk(a, p, k) for a, p in zip(actual, predicted)]
    return np.mean(per_query_scores)
def _unbatch_encoding(enc: BatchEncoding):
    """Split a batched BatchEncoding into a list of single-example encodings.

    Each output encoding keeps every field of *enc* but holds only the
    i-th element, wrapped in a length-1 list (i.e. a batch of one).
    """
    batch_size = len(enc['input_ids'])
    return [
        BatchEncoding(data={key: [values[i]] for key, values in enc.items()})
        for i in range(batch_size)
    ]
def post_register_types(root_module):
    """Drop the SqliteDataOutput binding when the feature is disabled.

    Reads the NS3_ENABLED_FEATURES environment variable (comma-separated);
    if 'SqliteDataOutput' is absent, the corresponding class is removed
    from root_module.  A missing class is silently ignored.
    """
    features = os.environ['NS3_ENABLED_FEATURES'].split(',')
    if 'SqliteDataOutput' in features:
        return
    try:
        root_module.classes.remove(root_module['ns3::SqliteDataOutput'])
    except KeyError:
        pass
_connect.numpy.implements('amin')
def _nep_18_impl_amin(a, axis=None, out=UNSUPPORTED, keepdims=False, initial=None, where=UNSUPPORTED):
    # NEP-18 __array_function__ override for np.amin.  `out` and `where`
    # are declared only to match numpy's signature and are not forwarded.
    # NOTE(review): `min` here presumably resolves to this module's own
    # reduction helper, not the builtin (builtin min has no axis/keepdims)
    # -- confirm.  The decorator line above also looks truncated
    # (missing '@').
    return min(a, axis=axis, keepdims=keepdims, initial=initial)
def time_str(s):
(days, remainder) = divmod(s, ((60 * 60) * 24))
(hours, remainder) = divmod(remainder, (60 * 60))
(minutes, seconds) = divmod(remainder, 60)
string = ''
if (days > 0):
string += '{:d} days, '.format(int(days))
if (hours > 0):
string += '{:d} hours, '.format(int(h... |
def dataset_from_h5pyfile(hfile: os.PathLike) -> xr.Dataset:
    """Load every top-level dataset of an HDF5 file into an xarray Dataset.

    Bug fix: the original closed the file *before* constructing the
    Dataset, leaving the dict full of handles into a closed h5py file,
    which fails on access.  Values are now materialized in memory
    (``f[key][()]`` reads the full dataset) while the file is still open,
    and the file handle is managed by a ``with`` block.
    """
    with h5py.File(hfile, 'r') as f:
        data = {key: f[key][()] for key in f.keys()}
    return xr.Dataset(data)
class Partition6(nn.Module):
LAYER_SCOPES = ['BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfAttention[self]/Linea... |
def occupy_gpu(gpus=None):
    """Touch the requested GPU(s) by allocating a tiny tensor on each.

    With gpus=None the default CUDA device is used; an int selects a
    single device, any other iterable selects several.
    """
    if gpus is None:
        torch.zeros(1).cuda()
        return
    gpu_ids = [gpus] if isinstance(gpus, int) else list(gpus)
    for gpu_id in gpu_ids:
        torch.zeros(1).cuda(gpu_id)
def S2():
    """Benchmark symbolic expansion of (x**sin x + y**cos y - z**(x+y))**100.

    Returns the elapsed time in seconds as the difference of two
    `clock()` readings taken around the `expand` call.  `var`, `clock`,
    `expand`, `sin` and `cos` come from the surrounding module's imports
    (presumably a sympy-style CAS benchmark harness -- confirm).
    """
    var('x,y,z')
    t1 = clock()
    a = expand(((((x ** sin(x)) + (y ** cos(y))) - (z ** (x + y))) ** 100))
    t2 = clock()
    return (t2 - t1)
def test_Workshop():
topo = L3EthStarAttack()
net = Mininet(topo=topo, link=TCLink, listenPort=OF_MISC['switch_debug_port'])
net.start()
(plc1, attacker, hmi) = net.get('plc1', 'attacker', 'hmi')
(plc2, plc3, plc4) = net.get('plc2', 'plc3', 'plc4')
CLI(net)
target_ip1 = plc1.IP()
target_... |
class GenericTemplate(object):
def __init__(self):
self.document = Document('../assets/word-base/dissertate.docx')
def fill(self):
print('')
def save(self):
self.document.save('dissertation.docx')
def clear_paragraph(self, paragraph):
p_element = paragraph._p
p_ch... |
class RGCNConv(torch.nn.Module):
def __init__(self, in_channels, out_channels):
super(RGCNConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.rel_lins = ModuleDict({f'{key[0]}_{key[1]}_{key[2]}': Linear(in_channels, out_channels, bias=False) fo... |
class Partition7(nn.Module):
LAYER_SCOPES = ['VisionTransformer/ModuleList[blocks]/Block[20]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[20]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[20]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/Mod... |
def derive_feature_columns(targets, fc_map, fd_map, selected_field_names, label_name):
for target in targets:
if (target not in fc_map):
fc_map[target] = {}
fc_target_map = fc_map[target]
new_fc_target_map = {}
for field_name in fc_target_map:
if (field_name i... |
class Trainer(object):
def __init__(self, args, model: Union[(RelationScorer, RelationEmbedder)], loss, train_dataset: EntityRelationDatasetBase, validation_dataset: EntityRelationDatasetBase, train_loader, save_path='.', checkpoint_filename='checkpoint%s.pth.tar', keep_checkpoints=5):
super(Trainer, self).... |
def add_dataset_args(parser: ArgumentParser) -> None:
parser.add_argument('--input_data_dir', type=str, default='data/TrainDatasets', help='Directory of the input data')
parser.add_argument('--dataset', type=str, default='Set5', help='Test dataset')
parser.add_argument('--sample_num', type=int, default=(- 1... |
class Decay(object):
    """Abstract schedule for a weight decaying from init_val to end_val.

    This base class only fixes the constructor signature; concrete
    subclasses must implement __call__, get_current_weight and __repr__.
    """
    def __init__(self, init_val, end_val, max_epochs, sigma):
        # The base class stores nothing; subclasses keep whichever of
        # these parameters they need.
        pass
    def __call__(self):
        # Advance/evaluate the schedule -- subclass responsibility.
        raise NotImplementedError
    def get_current_weight(self):
        # Return the current decayed weight -- subclass responsibility.
        raise NotImplementedError
    def __repr__(self):
        raise NotImplementedError
def simCopyPasteObjects(objectHandles, options):
    """FFI binding for the CoppeliaSim simCopyPasteObjects call.

    Copies and pastes the scene objects given by *objectHandles*.  The C
    call appears to write the handles of the pasted copies back into the
    input array (hence returning list(handles)) -- confirm against the
    CoppeliaSim API documentation.  Raises via _check_return on a
    non-success return code.
    """
    handles = ffi.new('int[]', objectHandles)
    ret = lib.simCopyPasteObjects(handles, len(objectHandles), options)
    _check_return(ret)
    return list(handles)
def queryResult(domain, turn):
sql_query = 'select * from {}'.format(domain)
flag = True
for (key, val) in turn['metadata'][domain]['semi'].items():
if ((val == '') or (val == 'dont care') or (val == 'not mentioned') or (val == "don't care") or (val == 'dontcare') or (val == "do n't care")):
... |
def resnet110(**kwargs):
    """Build a 110-layer CIFAR ResNet: three stages of 18 BasicBlocks each."""
    return ResNet_Cifar(BasicBlock, [18, 18, 18], **kwargs)
(scope='session')
def example_csvy_file_dir():
    # Session-scoped fixture returning the csvy test-data directory next
    # to this file.  NOTE(review): the decorator line above appears
    # truncated -- presumably `@pytest.fixture(scope='session')`; confirm.
    return (Path(__file__).resolve().parent / 'tests/data')
def initialize(N, datatype=np.float64):
alpha = datatype(1.5)
beta = datatype(1.2)
A = np.fromfunction((lambda i, j: ((((i * j) + 1) % N) / N)), (N, N), dtype=datatype)
B = np.fromfunction((lambda i, j: ((((i * j) + 2) % N) / N)), (N, N), dtype=datatype)
x = np.fromfunction((lambda i: ((i % N) / N))... |
def task2read_data_func(task):
    """Resolve the data-reading function for a task identifier.

    Raises ValueError for tasks with no associated reader.
    """
    if task == SENTIMENT:
        return read_processed
    if task in (POS, POS_BILSTM):
        return read_tagging_data
    if task == PARSING:
        return read_parsing_data
    raise ValueError(('No data reading function available for task %s.' % task))
class _LRScheduler(object):
def __init__(self, optimizer, warmup_epochs, epochs):
if (not isinstance(optimizer, Optimizer)):
raise TypeError('{:} is not an Optimizer'.format(type(optimizer).__name__))
self.optimizer = optimizer
for group in optimizer.param_groups:
gro... |
def main():
print(__doc__)
with mpmath.workdps(50):
(p, q) = lambertw_pade()
(p, q) = (p[::(- 1)], q[::(- 1)])
print('p = {}'.format(p))
print('q = {}'.format(q))
(x, y) = (np.linspace((- 1.5), 1.5, 75), np.linspace((- 1.5), 1.5, 75))
(x, y) = np.meshgrid(x, y)
z = (x... |
class NEMCell(RNNCell):
def __init__(self, cell, input_shape, distribution, pred_init):
self.cell = cell
if (not isinstance(input_shape, tf.TensorShape)):
input_shape = tf.TensorShape(input_shape)
self.input_size = input_shape
self.gamma_shape = tf.TensorShape((input_shap... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.