code stringlengths 101 5.91M |
|---|
def test_three():
    """Line-delimited JSON input yields one record per line."""
    parsed = ak.operations.from_json('["one", "two"] \n ["three"]', line_delimited=True)
    expected = [['one', 'two'], ['three']]
    assert parsed.to_list() == expected
def revert_nans_columns(table_data, nan_column_name):
combinations = table_data[nan_column_name].unique()
for combination in combinations:
if (combination != 'None'):
column_names = [column_name.strip() for column_name in combination.split(',')]
table_data.loc[((table_data[nan_co... |
('/getFunctionParams', methods=['GET', 'POST'])
def getFunctionParams() -> Any:
global index_df
index_df = index_df.astype(str)
col_names = index_df.columns.values.tolist()
table_columns = []
for col_name in col_names:
temp_dic = {}
temp_dic['value'] = col_name
temp_dic['labe... |
class SawyerDrawerOpenEnvV2(SawyerXYZEnv):
def __init__(self):
hand_low = ((- 0.5), 0.4, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = ((- 0.1), 0.9, 0.0)
obj_high = (0.1, 0.9, 0.0)
super().__init__(self.model_name, hand_low=hand_low, hand_high=hand_high)
self.init_config... |
def _is_divider(line: str) -> bool:
line = line.strip()
return ((not line) or (line == '-DOCSTART- -X- -X- O')) |
def torch_unsqueeze(input, dim):
    """Shape-only unsqueeze: return an uninitialized meta tensor whose shape
    is `input`'s shape with a size-1 axis inserted at position `dim`.

    Negative `dim` counts from the end of the *new* shape, matching
    torch.unsqueeze semantics. No data is copied (device='meta').
    """
    if dim < 0:
        dim += input.dim() + 1
    new_shape = list(input.shape)
    new_shape.insert(dim, 1)
    return torch.empty(new_shape, device='meta')
def write_jsonl_data(jsonl_data: List[Dict], jsonl_path: str, force=False):
    """Write records to a JSON-Lines file, one JSON object per line.

    Args:
        jsonl_data: records to serialize.
        jsonl_path: destination file path.
        force: overwrite an existing file when True.

    Raises:
        FileExistsError: if the file exists and force is False.
    """
    if os.path.exists(jsonl_path) and not force:
        # Include the offending path in the error instead of a bare exception.
        raise FileExistsError(jsonl_path)
    # ensure_ascii=False emits raw non-ASCII characters, so pin the file
    # encoding to UTF-8 instead of relying on the platform default (which
    # can crash on e.g. Windows cp1252).
    with open(jsonl_path, 'w', encoding='utf-8') as file:
        for record in jsonl_data:
            file.write(json.dumps(record, ensure_ascii=False) + '\n')
_converter_regitstry('sVC')
def sVC_converter(context: 'SG2260Context', reg: sVC_reg):
n = (((reg.opd0_c - 1) * reg.opd0_w) + reg.opd1_w)
opd0 = dict(address=reg.opd0_addr, dtype=(reg.opt_opd0_prec, reg.opt_opd0_sign), shape=(1, reg.opd0_c, 1, reg.opd0_w), layout=Layout.alignEU)
res0 = dict(address=reg.res0... |
class SimpleTokenizer(object):
ALPHA_NUM = '[\\p{L}\\p{N}\\p{M}]+'
NON_WS = '[^\\p{Z}\\p{C}]'
def __init__(self):
self._regexp = regex.compile(('(%s)|(%s)' % (self.ALPHA_NUM, self.NON_WS)), flags=((regex.IGNORECASE + regex.UNICODE) + regex.MULTILINE))
def tokenize(self, text, uncased=False):
... |
class BaseProgressBar(object):
def __init__(self, iterable, epoch=None, prefix=None):
self.iterable = iterable
self.n = getattr(iterable, 'n', 0)
self.epoch = epoch
self.prefix = ''
if (epoch is not None):
self.prefix += 'epoch {:03d}'.format(epoch)
if (pr... |
class PermutationGroupMorphism_from_gap(PermutationGroupMorphism):
def __init__(self, G, H, gap_hom):
if (not all((isinstance(X, PermutationGroup_generic) for X in [G, H]))):
raise TypeError('the groups must be permutation groups')
PermutationGroupMorphism.__init__(self, G, H)
se... |
def tf_mobilenetv3_large_075(pretrained=False, **kwargs):
    """MobileNet-V3 Large 0.75 in its TF-port flavor (TF BN eps, 'same' pad)."""
    kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
    kwargs['pad_type'] = 'same'
    return _gen_mobilenet_v3(
        'tf_mobilenetv3_large_075', 0.75, pretrained=pretrained, **kwargs
    )
def BicomplexSpectralSequence(l):
    """Spectral sequence of a bicomplex given as a list of morphisms."""
    morphisms = s2k_listofmorphisms(l)
    kenzo_obj = __bicomplex_spectral_sequence__(morphisms)
    return KenzoSpectralSequence(kenzo_obj)
class MNIST_L2(nn.Module):
def __init__(self, dropout=0.0):
super(MNIST_L2, self).__init__()
self.dropout = dropout
self.conv1 = nn.Conv2d(1, 32, kernel_size=5)
self.conv2 = nn.Conv2d(32, 64, kernel_size=5)
self.relu = nn.ReLU(True)
self.pool = nn.MaxPool2d(kernel_siz... |
def _sort_readers(events: EventCollection, reader_funcs: Sequence[Callable[([], ContextManager[Iterable[Tuple[(int, RawEvent)]]])]]) -> None:
items: List[Tuple[(int, RawEvent)]] = []
for reader_func in reader_funcs:
with reader_func() as reader:
items.extend(reader)
with contextlib.closi... |
def use_cython_array_utility_code(env):
    """Mark the view-scope 'array_cwrapper' utility as used for this env."""
    scope = env.global_scope().context.cython_scope
    scope.load_cythonscope()
    entry = scope.viewscope.lookup('array_cwrapper')
    entry.used = True
def method_def(name, declarations, is_python_method, module):
pycname = get_pycname(name)
if is_noarg_binding(declarations):
pycfunc_voidcast = ''
flags = ('METH_NOARGS' if is_python_method else 'METH_VARARGS | METH_KEYWORDS')
else:
pycfunc_voidcast = '(void(*)(void))'
flags ... |
_function()
def lf_confirm_keyword(x):
matches = ['are you', 'do you', 'did you', 'can you', 'could you', 'could u', 'have you', 'will you', 'did anyone', 'can we', 'can I', 'is she', 'is he', 'has she', 'has he', 'has anyone']
not_matches = ['where ', 'who ', 'when ', 'why ', 'what ', 'how ']
return (CONFI... |
def move_weights(our, oai, dst2src=False):
transformer_model = oai.transformer
load_weights(transformer_model.ln_f, our.transformer.final_layernorm, dst2src)
load_weights(transformer_model.wte, our.word_embeddings, dst2src)
load_weights(transformer_model.wpe, our.position_embeddings, dst2src)
for (o... |
def _is_valid_jobid(jobid: str) -> bool:
    """A valid job id is one integer, or two integers joined by '_'."""
    parts = jobid.split('_')
    if len(parts) not in (1, 2):
        return False
    return all(represents_int(part) for part in parts)
def split_acquisition_function(fn: AcquisitionFunction, split_size: int) -> AcquisitionFunction:
if (split_size <= 0):
raise ValueError(f'split_size must be positive, got {split_size}')
(fn)
def wrapper(x: TensorType) -> TensorType:
x = tf.convert_to_tensor(x)
length = x.shape[0]
... |
_model
def legacy_seresnet50(pretrained=False, **kwargs):
    """Legacy SE-ResNet-50: 3-4-6-3 SE bottleneck blocks, reduction 16."""
    build_args = dict(
        block=SEResNetBottleneck,
        layers=[3, 4, 6, 3],
        groups=1,
        reduction=16,
        **kwargs,
    )
    return _create_senet('legacy_seresnet50', pretrained, **build_args)
_scorer('bleu', dataclass=BleuConfig)
class Scorer(object):
def __init__(self, cfg):
self.stat = BleuStat()
self.pad = cfg.pad
self.eos = cfg.eos
self.unk = cfg.unk
try:
from fairseq import libbleu
except ImportError as e:
sys.stderr.write('ERR... |
class DownloadCommand(BaseTransformersCLICommand):
def register_subcommand(parser: ArgumentParser):
download_parser = parser.add_parser('download')
download_parser.add_argument('--cache-dir', type=str, default=None, help='Path to location to store the models')
download_parser.add_argument('-... |
def main(args):
training_data_path = args.training_path
output_path = args.model_name
model_arch = args.model_arch
batch_size = args.batch_size
nepoch = args.epoch_count
initial_lr = args.learning_rate
output_channels = 2
print((' ---model will be written at %s --- ' % output_path))
... |
_start_docstrings('The bare CamemBERT Model transformer outputting raw hidden-states without any specific head on top.', CAMEMBERT_START_DOCSTRING, CAMEMBERT_INPUTS_DOCSTRING)
class CamembertModel(RobertaModel):
    """CamemBERT base model.

    Subclasses RobertaModel without overriding any methods here; only the
    configuration class and the pretrained-weight archive map differ.
    """

    config_class = CamembertConfig
    pretrained_model_archive_map = CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP
class LocalScheme(object):
def is_defined(self, local_name, current_depth):
raise NotImplementedError('Abstract class')
def define(self, local_name, lineno, depth):
raise NotImplementedError('Abstract class')
def clear_scope(self, from_indentation):
raise NotImplementedError('Abstrac... |
def register_Ns3CsmaHelper_methods(root_module, cls):
cls.add_constructor([param('ns3::CsmaHelper const &', 'arg0')])
cls.add_constructor([])
cls.add_method('AssignStreams', 'int64_t', [param('ns3::NetDeviceContainer', 'c'), param('int64_t', 'stream')])
cls.add_method('Install', 'ns3::NetDeviceContainer... |
class PartialILIDS(BaseImageDataset):
dataset_dir = 'partial_ilids'
def __init__(self, root='./toDataset', verbose=True, **kwargs):
super(PartialILIDS, self).__init__()
self.dataset_dir = osp.join(root, self.dataset_dir)
self.query_dir = osp.join(self.dataset_dir, 'query')
self.g... |
class TwoHeadMlp(PyTorchModule):
def __init__(self, hidden_sizes, first_head_size, second_head_size, input_size, init_w=0.003, hidden_activation=F.relu, output_activation=identity, hidden_init=ptu.fanin_init, b_init_value=0.0, layer_norm=False, layer_norm_kwargs=None):
super().__init__()
if (layer_n... |
class WordDropoutMC(DropoutMC):
def forward(self, x):
if self.training:
self.activate = True
if ((not self.activate) or (not self.p)):
return x
m = x.data.new(x.size(0), x.size(1), 1).bernoulli_((1 - self.p))
mask = torch.autograd.Variable(m, requires_grad=Fal... |
class Quad():
def __init__(self, context: moderngl.Context):
self.prog = context.program(vertex_shader='\n #version 330\n in vec2 in_position;\n in vec2 in_uv;\n out vec2 uv;\n\n void main() {\n gl_Position = vec4(... |
class CocoCaptions(data.Dataset):
def __init__(self, root, annFile, transform=None, target_transform=None):
from pycocotools.coco import COCO
self.root = os.path.expanduser(root)
self.coco = COCO(annFile)
self.ids = list(self.coco.imgs.keys())
self.transform = transform
... |
def _register():
if hasattr(ak.numba, 'ArrayViewType'):
return
import numba
import awkward._connect.numba.arrayview
import awkward._connect.numba.arrayview_cuda
import awkward._connect.numba.builder
import awkward._connect.numba.growablebuffer
import awkward._connect.numba.layout
... |
def resnet_v2_200(inputs, num_classes=None, is_training=True, multi_grid=[1, 2, 4], global_pool=True, output_stride=None, spatial_squeeze=True, reuse=None, scope='resnet_v2_200'):
blocks = [resnet_v2_block('block1', base_depth=64, num_units=3, stride=2), resnet_v2_block('block2', base_depth=128, num_units=24, strid... |
def test_bagging_with_pipeline():
    """EasyEnsembleClassifier should accept a Pipeline as its base estimator."""
    X, y = make_imbalance(
        iris.data,
        iris.target,
        sampling_strategy={0: 20, 1: 25, 2: 50},
        random_state=0,
    )
    base = make_pipeline(SelectKBest(k=1), AdaBoostClassifier())
    ensemble = EasyEnsembleClassifier(n_estimators=2, estimator=base)
    ensemble.fit(X, y).predict(X)
class Vggish(nn.Module):
    """VGGish model wrapper (tagged as trained on YouTube-8M)."""

    args = {'postprocess': False}  # default construction arguments
    output_dims = 128  # reported output dimensionality
    model_tag = {'name': 'VGGish', 'dataset': 'YouTube-8M'}
def build_transforms(cfg, is_train=True):
normalize_transform = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
if is_train:
transform = Random_Transforms((cfg.INPUT.SIZE_TRAIN[1], cfg.INPUT.SIZE_TRAIN[0]), cfg.DATASETS.SHIFT, cfg.DATASETS.MAXRATION, cfg.DATASETS.ROTATION)
else:
... |
class ChannelTransformer_share(nn.Module):
def __init__(self, config, vis, img_size, channel_num=[64, 128, 256, 512, 1024], patchSize=[32, 16, 8, 4]):
super().__init__()
self.patchSize = patchSize[3]
self.embeddings = Channel_Embeddings(config, self.patchSize, img_size=img_size, in_channels=... |
class FSMTConfig(PretrainedConfig):
model_type = 'fsmt'
attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__(self, langs=['en', 'de'], src_vocab_size=42024, tgt_vocab_size=42024, activation_function='relu', d_model=1024, max_length=200, max_position_embed... |
class Acceleration():
SIZE = 12
def from_reader(reader: _ResponseReader):
assert (reader.remaining() >= Acceleration.SIZE)
rv = Acceleration()
rv.x = reader.read_f32()
rv.y = reader.read_f32()
rv.z = reader.read_f32()
return rv
def __repr__(self):
retu... |
class EGT_PCQM4M(EGT_MOL):
    """EGT molecular model specialized for a single scalar output target."""

    def __init__(self, **kwargs):
        # Single-value regression: force output_dim to 1.
        super().__init__(output_dim=1, **kwargs)

    def output_block(self, g):
        out = super().output_block(g)
        # Drop the trailing singleton dimension so predictions are scalars.
        return out.squeeze(-1)
def collate_remove_none(batch):
    """Collate a batch with any None entries dropped first."""
    kept = [sample for sample in batch if sample is not None]
    return data.dataloader.default_collate(kept)
class Uniform(Distribution):
arg_constraints = {'low': constraints.dependent, 'high': constraints.dependent}
has_rsample = True
def mean(self):
return ((self.high + self.low) / 2)
def stddev(self):
return ((self.high - self.low) / (12 ** 0.5))
def variance(self):
return ((sel... |
def build_data(args):
src = []
tgt = []
print('preprocessing data...')
with open(args.datafile, 'r') as fin:
current_src = []
n_prev = args.n_prev_sent
for line in tqdm(fin.readlines()):
line = line.lstrip('-')
line = line.strip()
if (len(curre... |
class Conv127(BasicConv):
    """BasicConv with a fixed kernel-size schedule of [7, 5, 5, 5, 5]."""

    def __init__(self, units, **kwargs):
        # Only the kernel sizes are fixed; everything else passes through.
        super(Conv127, self).__init__(units, [7, 5, 5, 5, 5], **kwargs)
class PingProtocol(Protocol):
def __init__(self, own: Node, name: str, other_name: str, other_node: str):
super().__init__(own, name)
own.protocols.append(self)
self.other_name = other_name
self.other_node = other_node
def init(self):
pass
def start(self):
new... |
class Layer():
def __init__(self, args, controller, length, dim, bounds=None):
self.args = args
self.controller = controller
self.length = length
self.dim = dim
self.use_forward = (args.method == 'baf')
self.parents = []
self.l = self.u = None
self.lw ... |
class PDF(Protocol):
    """Structural interface for probability-distribution-like objects."""

    def sample(self, sample_shape: Shape) -> Any:
        """Draw a sample with the given shape."""
        ...

    def log_prob(self, value: Any) -> Any:
        """Return the log-probability (density) of `value`."""
        ...
def mockingjay_logMelBase_T_AdamW_b32_1m_960hr(refresh=False, *args, **kwargs):
kwargs['ckpt'] = '
return mockingjay_url(*args, refresh=refresh, **kwargs) |
def simulate(initial_state, config, intervention=None, seed=None):
t_eval = np.arange(config.start_time, (config.end_time + config.delta_t), config.delta_t)
solution = odeint(dynamics, y0=dataclasses.astuple(initial_state), t=t_eval, args=(config, intervention), rtol=0.0001, atol=0.0001)
states = ([initial_... |
.experimental
.parametrize('gt_users, result', [(False, pd.DataFrame({'count': [2, 3], 'value': [1.0, 0.5]})), (True, pd.DataFrame({'count': [0, 2, 3], 'value': [0.0, 1.0, 0.0]}))])
def test_user_dist(log, recs, true, true_users, gt_users, result):
users = (true_users if gt_users else None)
vals = HitRate().use... |
def tversky(y_true, y_pred):
y_true_pos = K.flatten(y_true)
y_pred_pos = K.flatten(y_pred)
true_pos = K.sum((y_true_pos * y_pred_pos))
false_neg = K.sum((y_true_pos * (1 - y_pred_pos)))
false_pos = K.sum(((1 - y_true_pos) * y_pred_pos))
alpha = 0.7
return ((true_pos + smooth) / (((true_pos +... |
def _get_3DCE_head(is_train, ft_map, rois, num_classes):
num_rfcn_chn = 10
S = 7
num_hidden = 2048
cfg1 = (config.TRAIN if is_train else config.TEST)
conv_new_1 = mx.sym.Convolution(data=ft_map, kernel=(1, 1), num_filter=((S * S) * num_rfcn_chn), name='conv_new_1', lr_mult=3.0)
conv_new_cat = mx... |
def all_reduce_metrics(output_data_batch, datasets, name='depth'):
if isinstance(output_data_batch[0], dict):
output_data_batch = [output_data_batch]
names = [key for key in list(output_data_batch[0][0].keys()) if key.startswith(name)]
dims = [output_data_batch[0][0][name].shape[0] for name in names... |
def item_features(spark):
    """Fixture DataFrame: three items with one numeric and one class feature."""
    rows = [
        ('i1', 4.0, 'cat'),
        ('i2', 10.0, 'dog'),
        ('i4', 0.0, 'cat'),
    ]
    frame = spark.createDataFrame(rows)
    return frame.toDF('item_idx', 'item_feature_1', 'class')
class GoogleMapGetLocationDetails(VirtualFunctionTool):
name = 'GoogleMapGetLocationDetails'
summary = 'Get details of a location.'
parameters: List[ArgParameter] = [{'name': 'location_address', 'type': 'string', 'description': "The address of the location, in the format of 'street address, city, zip code'.... |
def test_pipeline_param_error():
    """Pipeline.fit must reject a direct sample_weight keyword."""
    pipeline = make_pipeline(LogisticRegression())
    expected = 'Pipeline.fit does not accept the sample_weight parameter'
    with pytest.raises(ValueError, match=expected):
        pipeline.fit([[0], [0]], [0, 1], sample_weight=[1, 1])
def add_fpn_retinanet_losses(model):
loss_gradients = {}
(gradients, losses) = ([], [])
k_max = cfg.FPN.RPN_MAX_LEVEL
k_min = cfg.FPN.RPN_MIN_LEVEL
model.AddMetrics(['retnet_fg_num', 'retnet_bg_num'])
for lvl in range(k_min, (k_max + 1)):
suffix = 'fpn{}'.format(lvl)
bbox_loss = ... |
class ReaderWithLimitBase(Reader):
def __init__(self, reader):
Reader.__init__(self, schema=reader._schema)
self.reader = reader
self.net = core.Net('reader_with_limit')
self._data_finished = self.net.AddExternalInput(self.net.NextName('data_finished'))
self.should_stop = Non... |
def main():
parser = argparse.ArgumentParser(description='Echo app')
parser.add_argument('input', type=str)
parser.add_argument('output', type=str)
args = parser.parse_args()
with open(args.input, 'r') as input_file:
content = input_file.read()
print('===== echo app =====')
print('In... |
class GradientPTQConfig():
def __init__(self, n_iter: int, optimizer: Any, optimizer_rest: Any=None, loss: Callable=None, log_function: Callable=None, train_bias: bool=True, rounding_type: RoundingType=RoundingType.SoftQuantizer, use_hessian_based_weights: bool=True, optimizer_quantization_parameter: Any=None, opti... |
class MeanAggregator(Layer):
def __init__(self, input_dim, output_dim, neigh_input_dim=None, dropout=0, bias=True, act=tf.nn.relu, name=None, concat=False, mode='train', if_use_high_way=False, **kwargs):
super(MeanAggregator, self).__init__(**kwargs)
self.dropout = dropout
self.bias = bias
... |
def replace_key(key):
if (key.endswith('.model.1.bias') and (len(key.split('.')) > 10)):
key = key.replace('.model.1.bias', '.conv1d_1.bias')
elif (key.endswith('.model.1.weight') and (len(key.split('.')) > 10)):
key = key.replace('.model.1.weight', '.conv1d_1.weight')
elif (key.endswith('.m... |
class UDec(nn.Module):
def __init__(self, squeeze, ch_mul=64, in_chans=3):
super(UDec, self).__init__()
self.enc1 = Block(squeeze, ch_mul, seperable=False)
self.enc2 = Block(ch_mul, (2 * ch_mul))
self.enc3 = Block((2 * ch_mul), (4 * ch_mul))
self.enc4 = Block((4 * ch_mul), (8... |
def create_d_vae(weight_path, d_vae_type, image_size, device):
    """Instantiate a discrete VAE of the requested flavor.

    Supported `d_vae_type` values are 'dall-e' and 'customized'; any other
    value raises NotImplementedError.
    """
    if d_vae_type == 'dall-e':
        return get_dalle_vae(weight_path, image_size, device)
    if d_vae_type == 'customized':
        return get_d_vae(weight_path, image_size, device)
    raise NotImplementedError()
def load_entity_data(file=None):
    """Load a file containing one integer entity id per line.

    Args:
        file: path to the id file, or None.

    Returns:
        A 1-D numpy integer array of the ids, or None when no file is given.

    Raises:
        ValueError: if a line is not a valid integer (including blank lines),
            matching the original behavior.
    """
    if file is None:
        return None
    with open(file, 'r') as f:
        # Iterate the file directly instead of a manual readline() loop;
        # also avoids shadowing the builtin `id`.
        entity = [int(line) for line in f]
    return np.asarray(entity)
def r_stmt_stmt(tn, t):
(stmt1, stmt2) = (t[0], t[1])
def fn(world, n):
if (n > MAX_FUNC_CALL):
return ([], n, False)
(hit_s1, n, s) = stmt1(world, (n + 1))
if (not s):
return (hit_s1, n, s)
if (n > MAX_FUNC_CALL):
return (hit_s1, n, False)
... |
class TestSingularValueDecomposition(unittest.TestCase):
def test_adjust_singular_vectors(self):
power_signals_d = np.array([[0., 0.0, 0.0, 2.], [0., 0.0, 0.0, 2.], [0.8125, 0.0, 0.0, 2.], [0., 0.0, 0.0, 2.]])
left_singular_vectors_u = np.array([[0., (- 0.), 0., 0.1584339], [0., (- 0.), (- 0.6766346... |
def process_punctuation(inText):
outText = inText
for p in PUNCT:
if ((((p + ' ') in inText) or ((' ' + p) in inText)) or (re.search(COMMA_STRIP, inText) != None)):
outText = outText.replace(p, '')
else:
outText = outText.replace(p, ' ')
outText = PERIOD_STRIP.sub('',... |
def main() -> int:
options = parse_args()
misc.options = options
def action_notimpl():
raise RuntimeError(f'Unknown action: {options.action}')
dispatch = {'wheel': action_wheel, 'android': action_android, 'ios': action_ios, 'cache': action_open_cache_dir}
dispatch.get(options.action, action_... |
def register_Ns3RngStream_methods(root_module, cls):
    """Register ns3::RngStream constructors and methods on `cls`."""
    # Seeded constructor.
    cls.add_constructor([
        param('uint32_t', 'seed'),
        param('uint64_t', 'stream'),
        param('uint64_t', 'substream'),
    ])
    # Copy constructor.
    cls.add_constructor([param('ns3::RngStream const &', 'r')])
    cls.add_method('RandU01', 'double', [])
def convert_to_tflayer_args(args_names, name_mapping):
def decorator(func):
(func)
def decorated_func(inputs, *args, **kwargs):
kwargs = map_common_tfargs(kwargs)
posarg_dic = {}
assert (len(args) <= len(args_names)), 'Please use kwargs instead of positional args ... |
def main():
    """Start the Qt event loop for the Cityscapes label tool."""
    application = QtGui.QApplication(sys.argv)
    # Bound to a local so the tool object stays alive while the loop runs.
    label_tool = CityscapesLabelTool()
    sys.exit(application.exec_())
def write_data_npz(path, kind, data, info, meta, protocol):
kwds = {'kind': kind, 'info': info, 'meta': meta, 'protocol': protocol}
for (name, array) in data.items():
kwds[f'data/{name}'] = array
if (protocol == 1):
np.savez(normalize_extension(path, '.npz'), **kwds)
elif (protocol == 2)... |
class MobileNetV2PreTrainedModel(PreTrainedModel):
config_class = MobileNetV2Config
load_tf_weights = load_tf_weights_in_mobilenet_v2
base_model_prefix = 'mobilenet_v2'
main_input_name = 'pixel_values'
supports_gradient_checkpointing = False
def _init_weights(self, module: Union[(nn.Linear, nn.C... |
def generate_drln(dims, reduce_dim, libname, reps=1):
if os.path.exists(libname):
return
size = reduce((lambda x, y: (x * y)), dims.values())
reduce_size = dims[reduce_dim]
dims_declaration = '\n'.join([('struct %s { enum { value = %d }; };' % (d, dims[d])) for d in dims])
temp_source = ('\n... |
def _get_origin_activation_node(n: BaseNode) -> BaseNode:
    """Unwrap virtual wrapper nodes to the underlying activation node;
    plain nodes are returned unchanged."""
    unwrap_order = (
        (VirtualActivationWeightsNode, 'original_activation_node'),
        (VirtualSplitActivationNode, 'origin_node'),
    )
    for wrapper_type, attr in unwrap_order:
        if isinstance(n, wrapper_type):
            return getattr(n, attr)
    return n
class OpenAIGPTModelTest(CommonTestCases.CommonModelTester):
all_model_classes = ((OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel) if is_torch_available() else ())
class OpenAIGPTModelTester(object):
def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_token_typ... |
def plot_final_bbox(config, roi_numbers, data, sequence, targetPath='final_results'):
for i in range(1, len(roi_numbers[0])):
img_path = os.path.join(config.img_dir, sequence, ('%05d.jpg' % i))
img = cv2.imread(img_path)
data_t = data[i]['rois']
for obj_id in range(len(roi_numbers)):... |
def init_array(A, B, alpha):
    """Fill alpha, the unit-diagonal triangular matrix A, and the dense
    matrix B with graded reference values."""
    num_cols = N.get()
    num_rows = M.get()
    alpha[0] = datatype(1.5)
    for i in range(num_rows):
        # Strict lower triangle of A gets graded values; diagonal is 1.
        for j in range(i):
            A[i, j] = datatype((i + j) % num_rows) / num_rows
        A[i, i] = 1.0
        for j in range(num_cols):
            B[i, j] = datatype((num_cols + (i - j)) % num_cols) / num_cols
def register_Ns3DsdvPacketQueue_methods(root_module, cls):
cls.add_constructor([param('ns3::dsdv::PacketQueue const &', 'arg0')])
cls.add_constructor([])
cls.add_method('Dequeue', 'bool', [param('ns3::Ipv4Address', 'dst'), param('ns3::dsdv::QueueEntry &', 'entry')])
cls.add_method('DropPacketWithDst', '... |
def parse_kbp37_or_relx_file(path: str):
with open(path, 'r') as f:
for instance in f.read().strip().split('\n\n'):
(input_line, label) = instance.strip().split('\n')
(example_id, sentence) = input_line.split('\t')
sentence = sentence.strip('"').strip().replace(' .', '.')... |
class TransformerEncoderLayer(Module):
__constants__ = ['batch_first', 'norm_first']
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation=F.relu, layer_norm_eps=1e-05, batch_first=False, norm_first=False, device=None, dtype=None) -> None:
factory_kwargs = {'device': device, '... |
_utils.test()
def test_parallel_assignment():
mat = ti.field(ti.i32, shape=(3, 4))
def func():
c = 0
for i in ti.static(range(4)):
(mat[(0, c)], mat[(1, c)], mat[(2, c)]) = (1, 2, 3)
c += 1
func()
for i in range(3):
for j in range(4):
assert (m... |
def register_Ns3UanTransducer_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::UanTransducer const &', 'arg0')])
cls.add_method('AddPhy', 'void', [param('ns3::Ptr< ns3::UanPhy >', 'phy')], is_pure_virtual=True, is_virtual=True)
cls.add_method('ApplyRxGainDb', 'double',... |
def ignore_not_mentioned(pred_belief, target_belief):
clean_target_belief = []
clean_pred_belief = []
for bs in target_belief:
if ('not mentioned' in bs):
continue
clean_target_belief.append(bs)
for bs in pred_belief:
if ('not mentioned' in bs):
continue
... |
def _transform_timezone(result_str: str, timezone_token: str, timezone: str, utc_add: str, utc_offset_hours: int, utc_offset_minutes: int) -> str:
result = deepcopy(result_str)
if (timezone_token != ''):
if (timezone_token == 'z'):
result = result.replace(timezone_token, timezone)
el... |
class BaseURL(_URLTuple):
__slots__ = ()
def replace(self, **kwargs):
return self._replace(**kwargs)
def host(self):
return self._split_host()[0]
def ascii_host(self):
rv = self.host
if ((rv is not None) and isinstance(rv, text_type)):
try:
rv ... |
def load_var_train_data(output_path: str):
return (load_data_tensors_TW(join(output_path, 'vectors', 'train', 'identifiers_var_train_datapoints_x.npy')), load_data_tensors_TW(join(output_path, 'vectors', 'train', 'tokens_var_train_datapoints_x.npy')), load_data_tensors_TW(join(output_path, 'vectors', 'train', 'var_... |
def choices(tree):
n = len(leaves(tree))
addr = (np.nan * np.ones((n, (n - 1))))
def _addresses(node, index, choices):
if np.isscalar(node):
for (i, choice) in choices:
addr[(node, i)] = choice
return index
elif (isinstance(node, tuple) and (len(node) ... |
def get_policy_network(archi, kwargs, env, policy_type):
action_dim = env.action_space.low.size
obs_dim = env.observation_space.spaces['observation'].low.size
goal_dim = env.observation_space.spaces['representation_goal'].low.size
if (policy_type == 'tanhgaussian'):
kwargs['obs_dim'] = (obs_dim ... |
def load_cadata():
    """Load the 'cadata' dataset from the data home as dense arrays."""
    path = os.path.join(get_data_home(), 'cadata')
    return _todense(_load(path, None, 'cadata'))
def simple_c_type(signed, longness, name):
    """Look up the C type for (signed, longness, name); None when unknown."""
    key = (signed, longness, name)
    return modifiers_and_name_to_type.get(key)
def get_cleva_toxicity_metric_specs() -> List[MetricSpec]:
    """Metric specs for CLEVA toxicity evaluation."""
    toxicity_metric = MetricSpec(
        class_name='helm.benchmark.metrics.cleva_harms_metrics.CLEVAToxicityMetric',
        args={},
    )
    return [toxicity_metric]
def check_clang_apply_replacements_binary(args):
try:
subprocess.check_call([args.clang_apply_replacements_binary, '--version'])
except:
print('Unable to run clang-apply-replacements. Is clang-apply-replacements binary correctly specified?', file=sys.stderr)
traceback.print_exc()
... |
def read_init():
with open(os.path.join(PATH_TO_TRANSFORMERS, '__init__.py'), 'r', encoding='utf-8', newline='\n') as f:
lines = f.readlines()
line_index = 0
while (not lines[line_index].startswith('if TYPE_CHECKING')):
line_index += 1
backend_specific_objects = {}
while (line_index ... |
def __getattr__(name):
    """Module-level deprecation shim: route attribute access through the
    shared sub-module deprecation helper."""
    return _sub_module_deprecation(
        sub_package='sparse',
        module='spfuncs',
        private_modules=['_spfuncs'],
        all=__all__,
        attribute=name,
    )
def load_mnist(n_samples=None, class_0='0', class_1='8'):
mnist = fetch_openml('mnist_784', version=1, as_frame=False)
mask = np.logical_or((mnist.target == class_0), (mnist.target == class_1))
(X, y) = shuffle(mnist.data[mask], mnist.target[mask], random_state=42)
if (n_samples is not None):
(X... |
class WeatherSpec(DomainSpec):
name = 'weather'
greet = 'Weather bot is here.'
nlg_spec = {'loc': {'inform': ['I am at %s.', '%s.', 'Weather at %s.', 'At %s.', 'In %s.'], 'request': ['Which city are you interested in?', 'Which place?']}, 'datetime': {'inform': ['Weather %s', '%s.', 'I am interested in %s.']... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.