code stringlengths 101 5.91M |
|---|
def test_fit_digraph_joblib_multiprocessing(digraph_logistic_regression):
classifiers = {'b': {'classifier': LogisticRegression()}, 'c': {'classifier': LogisticRegression()}}
digraph_logistic_regression.n_jobs = 2
nx.set_node_attributes(digraph_logistic_regression.hierarchy_, classifiers)
digraph_logist... |
class ViTMAEForPreTraining(metaclass=DummyObject):
    # Auto-generated placeholder used when the torch backend is not installed.
    # Any attempt to use it goes through requires_backends, which presumably
    # raises an informative import error — confirm against requires_backends.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def CloseExpression(clean_lines, linenum, pos):
line = clean_lines.elided[linenum]
startchar = line[pos]
if (startchar not in '({[<'):
return (line, clean_lines.NumLines(), (- 1))
if (startchar == '('):
endchar = ')'
if (startchar == '['):
endchar = ']'
if (startchar == '... |
class Sphere(Shape):
node_type = 'goos.shape.sphere'
def __init__(self, pos: goos.Function, radius: goos.Function, rot: goos.Function=None, material: goos.material.Material=None) -> None:
if (rot is None):
rot = goos.Constant([0, 0, 0])
super().__init__([pos, radius, rot])
se... |
def register_Ns3UanPhyCalcSinr_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::UanPhyCalcSinr const &', 'arg0')])
cls.add_method('CalcSinrDb', 'double', [param('ns3::Ptr< ns3::Packet >', 'pkt'), param('ns3::Time', 'arrTime'), param('double', 'rxPowerDb'), param('double', ... |
def read_files(filenames, conversion, log):
trees = []
for filename in filenames:
with open(filename, encoding='utf-8') as fin:
text = fin.read()
try:
tree = conversion(text)
if (tree is not None):
trees.append(tree)
except ValueError a... |
class TextQuestionAnsweringTool(PipelineTool):
default_checkpoint = 'google/flan-t5-base'
description = 'This is a tool that answers questions related to a text. It takes two arguments named `text`, which is the text where to find the answer, and `question`, which is the question, and returns the answer to the ... |
def get_top_videos(vid_fp, out_fp, most_recent_num=30, num_keep=10):
chan_vid_l = collections.defaultdict(list)
bad_vid_c = 0
for line in open(vid_fp):
try:
(chan_id, vid_id, views, date_raw, date_posted) = line.strip('\n').split('\t')[0:5]
except:
bad_vid_c += 1
... |
class ProtocolClient(Protocol):
_req_sent = None
def __init__(self, request_queue, response_queue):
self.request_queue = request_queue
self.response_queue = response_queue
self._req_sent = None
def can_take_request(self):
return (self._req_sent is None)
def waiting_for_re... |
def convert_emotion_to_tokens(emotion_list, emotion_type='Emotion', SELECTED_EMOTION_TO_TOKENS={'Emotion': CPED_EMOTION_TO_TOKENS, 'Sentiment': CPED_SENTIMENT_TO_TOKENS}):
emotion_tokens_list = []
for emo in emotion_list:
if (emo not in SELECTED_EMOTION_TO_TOKENS[emotion_type]):
emotion_toke... |
class SummedProbabilities(ObservableBase):
def __init__(self, num_qubits: int, one_state=False, full_sum: bool=True, include_identity: bool=True) -> None:
super().__init__(num_qubits)
self.one_state = one_state
self.full_sum = full_sum
self.include_identity = include_identity
def... |
class SyncBatchNorm2d(Module):
def __init__(self, num_features, eps=1e-05, momentum=0.9, last_gamma=False):
super(SyncBatchNorm2d, self).__init__()
self.num_features = num_features
self.eps = eps
self.momentum = momentum
self.last_gamma = last_gamma
self.weight = Para... |
def git_version(cwd):
def _minimal_ext_cmd(cmd):
env = {}
for k in ['SYSTEMROOT', 'PATH']:
v = os.environ.get(k)
if (v is not None):
env[k] = v
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(c... |
class PyPIRPCLocator(Locator):
def __init__(self, url, **kwargs):
super(PyPIRPCLocator, self).__init__(**kwargs)
self.base_url = url
self.client = ServerProxy(url, timeout=3.0)
def get_distribution_names(self):
return set(self.client.list_packages())
def _get_project(self, na... |
def create_instances_csv(num_props: int=15, path: str='mnistfc_instances.csv'):
nets = ['mnist-net_256x2.onnx', 'mnist-net_256x4.onnx', 'mnist-net_256x6.onnx']
props = [f'prop_{i}_0.03.vnnlib' for i in range(num_props)]
props += [f'prop_{i}_0.05.vnnlib' for i in range(num_props)]
with open(path, 'w') as... |
class IndicatorColumn(FeatureColumn):
def __init__(self, category_column=None, name=''):
if (category_column is not None):
assert isinstance(category_column, CategoryColumn)
self.category_column = category_column
self.name = name
def get_field_desc(self):
if (self.cat... |
.parametrize('world_size', [1, 2, 3, 4, 5, 6, 7, 8])
def test_distributed_sampler(world_size):
sampler = [[1, 2, 3], [4, 5, 6, 7], [8], [9, 10]]
ddp_indices = []
for rank in range(world_size):
ddp_sampler = DistributedBatchSamplerWrapper(sampler, world_size, rank)
ddp_indices += _merge_batch... |
class TFT5Model(metaclass=DummyObject):
    # Auto-generated placeholder for when the TensorFlow backend is missing.
    # Construction defers to requires_backends, which presumably raises an
    # informative error about the missing 'tf' dependency — confirm.
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
class CenterlossFunc(Function):
def forward(ctx, feature, label, centers, batch_size):
ctx.save_for_backward(feature, label, centers, batch_size)
centers_batch = centers.index_select(0, label.long())
return (((feature - centers_batch).pow(2).sum() / 2.0) / batch_size)
def backward(ctx, g... |
_module()
class ToTensor():
    """Pipeline transform that converts selected entries of a results dict
    to tensors via ``to_tensor``."""

    def __init__(self, keys):
        # Names of the entries in the results dict to convert.
        self.keys = keys

    def __call__(self, results):
        """Convert ``results[key]`` for every configured key; returns the
        same dict, mutated in place."""
        for k in self.keys:
            results[k] = to_tensor(results[k])
        return results

    def __repr__(self):
        return '{}(keys={})'.format(type(self).__name__, self.keys)
def register_Ns3LteDataRadioBearerInfo_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::LteDataRadioBearerInfo const &', 'arg0')])
cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
cls.add_instance_attribute('m_drbIdentity', 'uint8_t', is_const=False)
... |
def main():
parser = argparse.ArgumentParser(prog='genienlp')
subparsers = parser.add_subparsers(dest='subcommand')
for subcommand in subcommands:
(helpstr, get_parser, command_fn) = subcommands[subcommand]
get_parser(subparsers.add_parser(subcommand, help=helpstr))
argv = parser.parse_a... |
def register_coco_panoptic_annos_sem_seg(name, metadata, image_root, panoptic_root, panoptic_json, sem_seg_root, instances_json):
panoptic_name = name
delattr(MetadataCatalog.get(panoptic_name), 'thing_classes')
delattr(MetadataCatalog.get(panoptic_name), 'thing_colors')
MetadataCatalog.get(panoptic_nam... |
def parseArgs():
args = sys.argv
linum = int(args[1])
argstring = ''
configname = 'tfconfig'
with open(configname, 'r') as rf:
for (i, line) in enumerate(rf):
argstring = line
if ((i + 1) == linum):
print(line)
break
argparser = arg... |
def load_image(path_to_image):
    """Load an image from disk and return it as an RGB float64 numpy array.

    Args:
        path_to_image: filesystem path to the image file.

    Returns:
        numpy.ndarray of dtype float64 with channels in RGB order.

    Raises:
        FileNotFoundError: if the file is missing or cannot be decoded.
    """
    image = cv2.imread(path_to_image)
    if image is None:
        # cv2.imread signals failure by returning None instead of raising,
        # which would otherwise surface as an opaque cvtColor error.
        raise FileNotFoundError(f'Could not read image: {path_to_image}')
    # OpenCV decodes to BGR; convert to the RGB order callers expect.
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # astype already returns a new ndarray; the original np.array() wrapper
    # made a redundant extra copy.
    return image.astype('float')
def img_callback(msg):
    """ROS image callback: run the lane-detection pipeline on the incoming
    frame and publish the annotated result.

    Side effects: updates the module-level ``img_processed`` and
    ``lane_mid_error``, and publishes on ``img_processed_pub``.
    """
    global img_processed, lane_mid_error
    # Decode the ROS image message into an RGB OpenCV array.
    frame = bridge.imgmsg_to_cv2(msg, 'rgb8')
    img_processed, lane_mid_error = pipeline(frame)
    # Restamp with the current time before republishing.
    img_processed.header.stamp = rospy.Time.now()
    img_processed_pub.publish(img_processed)
def log_variant(log_file, variant_data):
    """Write *variant_data* to *log_file* as pretty-printed, key-sorted JSON,
    creating the parent directory if needed."""
    # Ensure the destination directory exists before opening the file.
    mkdir_p(os.path.dirname(log_file))
    serialized = json.dumps(variant_data, indent=2, sort_keys=True, cls=MyEncoder)
    with open(log_file, 'w') as out:
        out.write(serialized)
class ConditionalConvTemporalGraphical(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, t_kernel_size=1, t_stride=1, t_padding=0, t_dilation=1, num_node=17, bias=True):
super().__init__()
self.E = torch.nn.Parameter(torch.FloatTensor(1, num_node, num_node), requires_grad=True)
... |
def open_read_sentences(filename, zip_filename):
    """Read sentences from *filename* stored inside the zip archive
    *zip_filename*.

    Args:
        filename: member name inside the archive.
        zip_filename: path to the zip archive.

    Returns:
        Whatever ``read_sentences`` produces for the member's file object.
    """
    # Flattened the nested with-blocks; zin is bound before zin.open runs.
    with zipfile.ZipFile(zip_filename) as zin, zin.open(filename) as fin:
        test_sentences = read_sentences(fin)
    # f-string replaces the py2to3 artifact print((... % ...)).
    print(f'Read {len(test_sentences)} texts from {zip_filename} - {filename}')
    return test_sentences
def fbank(*args, **kwargs):
    """Run the local baseline with the bundled fbank feature configuration.

    Forces ``model_config`` to the fbank.yaml shipped next to this module,
    then delegates everything else to ``baseline_local``.
    """
    config_path = os.path.join(os.path.dirname(__file__), 'fbank.yaml')
    kwargs['model_config'] = config_path
    return baseline_local(*args, **kwargs)
class GCPServer(Server):
def __init__(self, region_tag: str, instance_name: str, key_root: PathLike=(key_root / 'gcp'), log_dir=None, ssh_private_key=None):
super().__init__(region_tag, log_dir=log_dir)
assert (self.region_tag.split(':')[0] == 'gcp'), f"Region name doesn't match pattern gcp:<region>... |
class DayOfYear(TimeFeature):
    """Time feature encoding the zero-based day of the year."""

    def __call__(self, idx: pd.DatetimeIndex) -> np.ndarray:
        # dayofyear is 1-based; shift to 0-based before normalization.
        zero_based = idx.dayofyear - 1
        return self.process(zero_based)

    def _max_val(self):
        # Largest zero-based day-of-year (leap years reach day 366 -> 365).
        return 365.0
class EMAWarmup():
def __init__(self, inv_gamma=1.0, power=1.0, min_value=0.0, max_value=1.0, start_at=0, last_epoch=0):
self.inv_gamma = inv_gamma
self.power = power
self.min_value = min_value
self.max_value = max_value
self.start_at = start_at
self.last_epoch = last... |
class MPNetTokenizerFast():
    """Placeholder used when the `tokenizers` backend is unavailable.

    Both construction and ``from_pretrained`` defer to
    ``requires_tokenizers``, which presumably raises an informative
    import error — confirm against that helper.
    """

    def __init__(self, *args, **kwargs):
        requires_tokenizers(self)

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        # classmethod so the canonical call MPNetTokenizerFast.from_pretrained(path)
        # works as intended; as a plain method the path argument was bound as
        # `self`. Instance calls remain valid, so this is backward compatible.
        requires_tokenizers(cls)
def test_prefit_split_same_results() -> None:
est = RandomForestClassifier(random_state=random_state).fit(X_train, y_train)
mapie_cal_prefit = MapieCalibrator(estimator=est, cv='prefit')
mapie_cal_prefit.fit(X_calib, y_calib)
mapie_cal_split = MapieCalibrator(estimator=RandomForestClassifier(random_stat... |
def get_activation(name):
    """Look up a torch.nn activation class by short name.

    Args:
        name: one of 'silu', 'relu', 'leaky_relu'.

    Returns:
        The corresponding ``torch.nn`` activation class (not an instance).

    Raises:
        NotImplementedError: for any unrecognized name.
    """
    # Dispatch table replaces the if/elif chain; behavior is identical.
    activations = {
        'silu': nn.SiLU,
        'relu': nn.ReLU,
        'leaky_relu': nn.LeakyReLU,
    }
    activation_cls = activations.get(name)
    if activation_cls is None:
        raise NotImplementedError(f'Unknown activation: {name}')
    return activation_cls
class GaussianEmitter(EmitterBase):
def __init__(self, archive, x0, sigma0, bounds=None, batch_size=64, seed=None):
self._rng = np.random.default_rng(seed)
self._batch_size = batch_size
self._x0 = np.array(x0, dtype=archive.dtype)
self._sigma0 = (archive.dtype(sigma0) if isinstance(s... |
def exact_one_model(f):
    """Return True iff *f* has exactly one model when asking for up to two.

    ``get_models(f, k=2)`` presumably returns a list of models or a
    non-list failure value — confirm against get_models.
    """
    models = get_models(f, k=2)
    # Non-list results (e.g. unknown/unsat markers) count as "not one model".
    return isinstance(models, list) and len(models) == 1
def NonlinearRHS(self, U, U_hat, dU, **params):
global TV, curl_hat, curl_, P_hat, W
curl_hat = project(curl(U_hat), TV, output_array=curl_hat)
curl_ = TV.backward(curl_hat, curl_)
U = U_hat.backward(U)
W[:] = np.cross(U, curl_, axis=0)
dU = project(W, TV, output_array=dU)
P_hat = A.solve(in... |
def fuse_modules(model, modules_to_fuse, inplace=False, fuser_func=fuse_known_modules):
if (not inplace):
model = copy.deepcopy(model)
if all((isinstance(module_element, str) for module_element in modules_to_fuse)):
_fuse_modules(model, modules_to_fuse, fuser_func)
else:
for module_l... |
def all_features():
return [sagemath_doc_html(), sage__combinat(), sage__geometry__polyhedron(), sage__graphs(), sage__groups(), sage__libs__braiding(), sage__libs__ecl(), sage__libs__flint(), sage__libs__gap(), sage__libs__linbox(), sage__libs__m4ri(), sage__libs__ntl(), sage__libs__pari(), sage__libs__singular(),... |
def check_model_doc(overwrite=False):
with open(PATH_TO_TOC, encoding='utf-8') as f:
content = yaml.safe_load(f.read())
api_idx = 0
while (content[api_idx]['title'] != 'API'):
api_idx += 1
api_doc = content[api_idx]['sections']
model_idx = 0
while (api_doc[model_idx]['title'] != ... |
class TestDAVIS(torch.utils.data.Dataset):
def __init__(self, root, year, split):
self.root = root
self.year = year
self.split = split
self.init_data()
def read_img(self, path):
pic = Image.open(path).convert('RGB')
transform = tv.transforms.ToTensor()
ret... |
def build_inputs(subproblems: T.Iterable[SubProblem], shared_inputs: T.Optional[T.Element]=None) -> Values:
inputs = Values()
if (shared_inputs is not None):
inputs['shared_inputs'] = shared_inputs
for subproblem in subproblems:
if subproblem.inputs:
inputs[subproblem.name] = sub... |
def parse_args():
parser = argparse.ArgumentParser(description='Convert PASCAL VOC annotations to mmsegmentation format')
parser.add_argument('devkit_path', help='pascal voc devkit path')
parser.add_argument('aug_path', help='pascal voc aug path')
parser.add_argument('-o', '--out_dir', help='output path... |
def generate_sql_str(sql_label, cols):
sel_col = sql_label['sel']
sel_agg = sql_label['agg']
conds = sql_label['conds']
if (AGG_MAP[sel_agg] != ''):
select_str = ((('select ' + AGG_MAP[sel_agg]) + ' ') + cols[sel_col])
else:
select_str = ('select ' + cols[sel_col])
cond_strs = []... |
def str2bool(v):
    """Parse a truthy/falsy string into a bool (argparse ``type=`` helper).

    Args:
        v: string such as 'yes'/'no', 'true'/'false', 't'/'f', 'y'/'n', '1'/'0'
           (case-insensitive).

    Returns:
        True or False.

    Raises:
        ArgumentTypeError: if the string is not a recognized boolean spelling.
    """
    # Lower-case once instead of twice as in the original.
    val = v.lower()
    if val in ('yes', 'true', 't', 'y', '1'):
        return True
    if val in ('no', 'false', 'f', 'n', '0'):
        return False
    raise ArgumentTypeError('Boolean value expected.')
def getTensorRef(tensor: np.ndarray, problem_size: cutlass.gemm.GemmCoord, operand: str, layout: cutlass.layout, batch_offset: int=0):
ptr = tensor.__array_interface__['data'][0]
if (operand == 'a'):
tensor_coord = problem_size.mk()
batch_stride = (problem_size.m() * problem_size.k())
elif (... |
def ToGraph_PUNGraph(Table, SrcCol, DstCol, AggrPolicy):
    """Build an undirected SNAP graph (PUNGraph) from an edge table.

    Thin wrapper delegating directly to the SWIG binding
    ``_snap.ToGraph_PUNGraph``. SrcCol/DstCol presumably name the source and
    destination columns of *Table*, and AggrPolicy controls attribute
    aggregation — confirm against the SNAP documentation.
    """
    return _snap.ToGraph_PUNGraph(Table, SrcCol, DstCol, AggrPolicy)
class _RFCN(nn.Module):
def __init__(self, classes, class_agnostic):
super(_RFCN, self).__init__()
self.classes = classes
self.n_classes = len(classes)
self.class_agnostic = class_agnostic
self.RCNN_loss_cls = 0
self.RCNN_loss_bbox = 0
self.box_num_classes = (... |
def eval_policy(policy, env_name, seed, mean, std, seed_offset=100, eval_episodes=10):
eval_env = gym.make(env_name)
eval_env.seed((seed + seed_offset))
avg_reward = 0.0
for _ in range(eval_episodes):
(state, done) = (eval_env.reset(), False)
while (not done):
state = ((np.ar... |
def cg(opfunc, x, config, state=None):
if ((config is None) and (state is None)):
raise ValueError('cg requires a dictionary to retain state between iterations')
state = (state if (state is not None) else config)
rho = config.get('rho', 0.01)
sig = config.get('sig', 0.5)
_int = config.get('i... |
class ResnetBlockFC(nn.Module):
def __init__(self, size_in, size_out=None, size_h=None):
super().__init__()
if (size_out is None):
size_out = size_in
if (size_h is None):
size_h = min(size_in, size_out)
self.size_in = size_in
self.size_h = size_h
... |
def plot_sensitivity(ax, alg, alphas, sp, best_performance, stderr, exp_attrs, second_time=False):
alpha = 1.0
if PLOT_RERUN_AND_ORIG:
alpha = (1.0 if second_time else 0.5)
lbl = f'{alg}'
ax.set_xscale('log', basex=2)
color = ('blue' if sp else 'red')
if (sp not in [0.0, 1.0]):
a... |
class DonutProcessorTest(unittest.TestCase):
def setUp(self):
self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)
def test_token2json(self):
expected_json = {'name': 'John Doe', 'age': '99', 'city': 'Atlanta', 'state': 'GA', 'zip': '30301', 'phone': '123-4567', 'nickname... |
def test_Rule_is_valid():
class FakeRuleManager():
def __init__(self):
pass
def get_memory_manager(self):
return 0.5
def fake_condition(val1, val2, args):
assert args['exist']
return (val1 < 0.5)
condition_args = {'exist': True}
rule = Rule(1, None... |
class InfinityCrystalOfRiggedConfigurations(UniqueRepresentation, Parent):
def __classcall_private__(cls, cartan_type):
from sage.combinat.root_system.type_folded import CartanTypeFolded
if isinstance(cartan_type, CartanTypeFolded):
return InfinityCrystalOfNonSimplyLacedRC(cartan_type)
... |
class TestAppendList(list):
    """List subclass whose mutation via append/extend is disabled for tests.

    The original overrides declared no value parameter, so a normal call
    like ``lst.append(x)`` raised TypeError (wrong arity) instead of the
    intended NotImplementedError. Accept arbitrary arguments so the
    intended exception is always the one raised.
    """

    def append(self, *args, **kwargs):
        raise NotImplementedError

    def extend(self, *args, **kwargs):
        raise NotImplementedError
def build_bag_multi(slideID, class_label, inst_num, max_bag):
patch_dir_x10 = f'../Lymphoma/patches/x10'
patch_dir_x20 = f'../Lymphoma/patches/x20'
patch_i_list = patch_map[slideID]
patch_num = len(patch_i_list)
bag_num = int((patch_num / inst_num))
if (bag_num < max_bag):
max_bag = bag_... |
class TransformerWithToken(nn.Module):
def __init__(self, d_model=512, nhead=8, num_encoder_layers=6, num_decoder_layers=6, dim_feedforward=2048, dropout=0.1, activation='relu', normalize_before=False, return_intermediate_dec=False):
super().__init__()
self.token = nn.Parameter(torch.randn(1, 1, d_m... |
def test_reset(stopping_condition, individual_1):
    """reset() should zero the stopping condition's accumulated value."""
    # One search iteration drives the tracked value to 40 — the exact value
    # presumably comes from the individual_1 fixture; confirm there.
    stopping_condition.after_search_iteration(individual_1)
    assert (stopping_condition.current_value() == 40)
    # After reset the counter must be back at its initial value.
    stopping_condition.reset()
    assert (stopping_condition.current_value() == 0)
class Visualizer():
def __init__(self, opt):
self.opt = opt
self.use_html = (opt.isTrain and (not opt.no_html))
self.writer = SummaryWriter(os.path.join(opt.checkpoints_dir, 'logs', opt.name))
self.win_size = opt.display_winsize
self.name = opt.name
self.saved = False... |
class PreprocessLoader(data.Dataset):
def __init__(self, scene_name='BasementSittingBooth', with_bps=False):
self.scene_name = scene_name
print('[INFO] {} scene selected'.format(self.scene_name))
self.with_bps = with_bps
if self.with_bps:
self.scene_bps = []
s... |
def dim_size_access(dim):
    """Return the code expression used to read *dim*'s size.

    Dynamic dimensions are read off the instance ('this.<size>'); static
    ones use the size string directly.
    """
    if not dim.dynamic:
        return dim.size_str
    return f'this.{dim.size_str}'
def stage_users(args, min_unix_time, max_unix_time):
codes_by_unix_time = {}
logging.info('Processing user file from {}...'.format(args.user_stats_path))
with open(args.user_stats_path, 'r') as infile:
for line in infile:
if (line[0:2] != '20'):
continue
parts... |
def test_should_raise_error_if_grad_cam_layer_cannot_be_found():
    """Target-layer inference should raise on a model with no suitable layer."""
    # A dense-only model presumably lacks the layer type Grad-CAM targets —
    # confirm against GradCAM.infer_grad_cam_target_layer.
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(10, input_shape=(10,), name='dense_1'),
        tf.keras.layers.Dense(1, name='dense_2'),
    ])
    with pytest.raises(ValueError):
        # Return value intentionally discarded (the original bound it to an
        # unused local); only the raised ValueError matters here.
        GradCAM.infer_grad_cam_target_layer(model)
def register_Ns3PacketTagIterator_methods(root_module, cls):
    # Generated PyBindGen registration for ns3::PacketTagIterator:
    # copy constructor plus the HasNext/Next iteration API.
    cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')])
    cls.add_method('HasNext', 'bool', [], is_const=True)
    cls.add_method('Next', 'ns3::PacketTagIterator::Item', [])
    return
class UniDAEvaluator(DatasetEvaluator):
def __init__(self, n_source_classes, norm=True) -> None:
self.ground_truths = []
self.predicts = []
self.predicts_without_ood = []
self.features = []
self.iid_scores = []
self.n_source_classes = n_source_classes
self.nor... |
def get_param_space(trial):
trial.suggest_float('learning_rate', 0.0001, 0.001, log=True)
trial.suggest_float('lr_decay_rate', 0.7, 1.0, log=True)
trial.suggest_categorical('batch_size', [16, 32, 64, 128])
trial.suggest_categorical('weight_decay', [1e-06, 1e-07, 0])
trial.suggest_categorical('use_re... |
def test_dependency_parse(doc_pb):
sentence = doc_pb.sentence[0]
assert (sentence.basicDependencies.ByteSize() > 0)
assert (sentence.enhancedDependencies.ByteSize() > 0)
assert (sentence.enhancedPlusPlusDependencies.ByteSize() > 0)
tree = sentence.enhancedPlusPlusDependencies
isinstance(tree, De... |
def add_generic_rpn_outputs(model, blob_in, dim_in, spatial_scale_in):
loss_gradients = None
if cfg.FPN.FPN_ON:
FPN.add_fpn_rpn_outputs(model, blob_in, dim_in, spatial_scale_in)
if cfg.MODEL.FASTER_RCNN:
model.CollectAndDistributeFpnRpnProposals()
if model.train:
... |
class Test_starTemperature(TestCase):
def test_works_sun(self):
answer = (5800 * aq.K)
result = eq.estimateStellarTemperature((1 * aq.M_s))
self.assertAlmostEqual(answer, result, 0)
def test_works_hd189(self):
answer = (4939 * aq.K)
result = eq.estimateStellarTemperature(... |
def test_legal_action():
key = jax.random.PRNGKey(0)
state = init(key=key)
assert (state.legal_action_mask == jnp.bool_([1, 1, 0])).all()
state = step(state, CALL)
assert (state.legal_action_mask == jnp.bool_([1, 1, 0])).all()
state = step(state, RAISE)
assert (state.legal_action_mask == jnp... |
def edgeIntersection(baseX, baseY, dX, dY, n1X, n1Y, n2X, n2Y):
t = ((((dX * n1Y) + (dY * n2X)) - (dX * n2Y)) - (dY * n1X))
c = ((((n2X * n1Y) - (n1X * n2Y)) + (baseX * (n2Y - n1Y))) + (baseY * (n1X - n2X)))
if (t == 0):
return (0, 0, 0, 0)
alpha = (c / t)
if (alpha < 0):
return (0, ... |
def format_5core(in_json, out_csv, label01=True):
records = []
for line in open(in_json, 'r'):
record = json.loads(line)
records.append(record)
out_df = pd.DataFrame()
out_df[UID] = [r['reviewerID'] for r in records]
out_df[IID] = [r['asin'] for r in records]
out_df[LABEL] = [r['... |
def extract_features(extractor: Callable, dataloader: DataLoader, device: str) -> List[np.ndarray]:
all_feats = []
for imgs in tqdm(dataloader, desc='Extracting features'):
imgs = torch.stack(imgs).to(device)
feats = extractor(imgs).cpu().numpy()
all_feats.extend(feats)
return np.sta... |
.parametrize('block,batch_norm_fix', [(BasicBlock, True), (Bottleneck, False), (RevBasicBlock, False), (RevBottleneck, True)])
def test_resnet(block, batch_norm_fix):
model = ResNet(block, [2, 2, 2, 2], num_classes=2, channels_per_layer=None, init_max_pool=True, batch_norm_fix=batch_norm_fix, strides=None)
mode... |
def constrained_birkhoff_von_neumann_iterator(H, X):
(G, p) = H.pop(0)
eligible_edges = [(from_node, to_node, edge_attributes) for (from_node, to_node, edge_attributes) in G.edges(data=True) if all((((i < edge_attributes['weight']) or (edge_attributes['weight'] < i)) for i in range(0, int((math.floor(sum(sum(X)... |
class OAConvolve(Benchmark):
param_names = ['mode', 'size']
params = [['full', 'valid', 'same'], [(a, b) for (a, b) in product((40, 200, 3000), repeat=2) if (b < a)]]
def setup(self, mode, size):
rng = np.random.default_rng(1234)
self.a = rng.standard_normal(size[0])
self.b = rng.sta... |
class ResNet_C5_Head(res.ResNet):
def __init__(self, dim_in, spatial_scale, norm='bn'):
super().__init__()
self.dim_in = dim_in[(- 1)]
if cfg.BACKBONE.RESNET.USE_ALIGN:
block = res.AlignedBottleneck
elif cfg.BACKBONE.RESNET.BOTTLENECK:
block = res.Bottleneck
... |
_operation
def abs(a: torch.Tensor):
    """Magnitude of a complex tensor stored as trailing real/imag pairs.

    Raises:
        ValueError: if ``a`` is real (its last dimension is not length 2).
    """
    # is_real(a) presumably checks that the trailing complex dimension is
    # absent — confirm against the is_real helper.
    if is_real(a):
        raise ValueError('Last dimension must have length 2.')
    return torch.sqrt(abs_sqr(a))
def get_dataset(dataset_name):
keys = dict()
keys['dataset'] = [dataset_name]
announce_msg('Generate configs for {} dataset'.format(keys['dataset']))
assert (len(keys['dataset']) == 1), 'We work with only one dataset.....[NOT OK]'
if (keys['dataset'][0] == constants.GLAS):
t = 67.0
k... |
def add_arguments(parser):
parser.register('type', 'bool', (lambda v: (v.lower() == 'true')))
parser.add_argument('--data_dir', type=str, default='data/', help='Data directory')
parser.add_argument('--model_dir', type=str, default='inference_ckpt_example/', help='Model directory')
parser.add_argument('-... |
def load_session(pathSession):
pathEmo = (pathSession + '/dialog/EmoEvaluation/')
pathWavFolder = (pathSession + '/sentences/wav/')
improvisedUtteranceList = []
for emoFile in [f for f in os.listdir(pathEmo) if os.path.isfile(os.path.join(pathEmo, f))]:
for utterance in load_utterInfo((pathEmo +... |
def register_Ns3WimaxPhy_methods(root_module, cls):
cls.add_constructor([param('ns3::WimaxPhy const &', 'arg0')])
cls.add_constructor([])
cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_pure_virtual=True, is_virtual=True)
cls.add_method('Attach', 'void', [param('ns3::Ptr< ns3... |
class TestConfigurable(TestCase):
    """Round-trip tests for MyClass config (de)serialization."""

    def test_from_config(self):
        # Building from a config dict should restore the attribute.
        instance = MyClass.from_config({'a': 1})
        self.assertEqual(instance.a, 1)

    def test_to_config(self):
        # Serializing should nest the attributes under the class key.
        instance = MyClass(a=1)
        self.assertEqual(instance.get_config(), {'hello': {'a': 1}})
class Task():
MAX_LENGHT_PER_BATCH = None
valid_loaders: framework.data_structures.DotDict
model_interface: ModelInterface
batch_dim: int
TRAIN_NUM_WORKERS = 1
VALID_NUM_WORKERS = 1
train_set: torch.utils.data.Dataset
train_loader: torch.utils.data.DataLoader
model: torch.nn.Module
... |
class TestTfIdf():
def setup(self):
pass
def test_fit_predict(self, logrecord_body):
params = TfIdfParams()
model = TfIdf(params)
assert isinstance(model, TfIdf), 'not a TFIDF model'
loglines = logrecord_body['logline']
model.fit(loglines)
res = model.tran... |
def test_generic_function_raised_exceptions():
    """raised_exceptions should echo the set passed at construction."""
    declared = {'FooError'}
    func = GenericFunction(MagicMock(), MagicMock(), declared)
    assert func.raised_exceptions == declared
def create_extension(template, kwds):
from Cython.Build.Dependencies import default_create_extension
from sage.env import sage_include_directories
include_dirs = (kwds.get('include_dirs', []) + sage_include_directories(use_sources=True))
kwds['include_dirs'] = include_dirs
return default_create_exte... |
def get_hms(seconds):
    """Split a duration in seconds into an (hours, minutes, seconds) tuple."""
    hours = seconds // 3600
    remainder = seconds % 3600
    return (hours, remainder // 60, remainder % 60)
def get_model_bidirectional_conditioning(batch_size, max_seq_length, input_size, hidden_size, target_size, vocab_size, pretrain, tanhOrSoftmax, dropout):
inputs = tf.placeholder(tf.int32, [batch_size, max_seq_length])
inputs_cond = tf.placeholder(tf.int32, [batch_size, max_seq_length])
cont_train = True
... |
def main():
parser = argparse.ArgumentParser(description='OGBN-Arxiv (GNN)')
parser.add_argument('--device', type=int, default=0)
parser.add_argument('--log_steps', type=int, default=1)
parser.add_argument('--use_sage', action='store_true')
parser.add_argument('--num_layers', type=int, default=3)
... |
class ImageProperties():
def __init__(self, image: sitk.Image):
self.size = image.GetSize()
self.origin = image.GetOrigin()
self.spacing = image.GetSpacing()
self.direction = image.GetDirection()
self.dimensions = image.GetDimension()
self.number_of_components_per_pix... |
def test_label_encoder_integration_list_classifiers():
rng = np.random.RandomState(123456)
(X_dsel, X_test, X_train, y_dsel, y_test, y_train) = load_dataset(encode_labels=['no', 'yes'], rng=rng)
pool_classifiers = [LogisticRegression(), SVC(probability=True)]
[clf.fit(X_train, y_train) for clf in pool_c... |
def _mean_frequency_by_risk_group(y_true, y_pred, sample_weight=None, n_bins=100):
idx_sort = np.argsort(y_pred)
bin_centers = (np.arange(0, 1, (1 / n_bins)) + (0.5 / n_bins))
y_pred_bin = np.zeros(n_bins)
y_true_bin = np.zeros(n_bins)
for (n, sl) in enumerate(gen_even_slices(len(y_true), n_bins)):
... |
class pix2pix(data.Dataset):
def __init__(self, root, transform=None, loader=default_loader, seed=None):
imgs = make_dataset(root)
if (len(imgs) == 0):
raise RuntimeError(((('Found 0 images in subfolders of: ' + root) + '\nSupported image extensions are: ') + ','.join(IMG_EXTENSIONS)))
... |
class LightningModel(pl.LightningModule):
    """Lightning wrapper adding a 2-label QA output head on top of *model*."""

    def __init__(self, model):
        super().__init__()
        self.model = model
        # Two labels: presumably span start/end logits for QA — confirm.
        self.num_labels = 2
        hidden_size = self.model.config.hidden_size
        self.qa_outputs = torch.nn.Linear(hidden_size, self.num_labels)

    def forward(self):
        # Intentionally a no-op in the original; training/inference is
        # presumably driven elsewhere — confirm with callers.
        pass
class Regex(Token):
compiledREtype = type(re.compile('[A-Z]'))
def __init__(self, pattern, flags=0):
super(Regex, self).__init__()
if isinstance(pattern, basestring):
if (len(pattern) == 0):
warnings.warn('null string passed to Regex; use Empty() instead', SyntaxWarni... |
def calculate_source_fwhm(ekev, theta_fwhm):
    """Estimate the source-size FWHM from photon energy and divergence FWHM.

    Args:
        ekev: photon energy in keV (wavelength taken as 1.2398e-9 / ekev,
              i.e. metres — presumably; confirm with callers).
        theta_fwhm: angular divergence FWHM in radians.

    Returns:
        Source size FWHM (same length unit as the wavelength).
    """
    # Standard Gaussian FWHM <-> sigma conversion factor, 2*sqrt(2*ln 2).
    fwhm_to_sigma = 2 * np.sqrt(2 * np.log(2))
    wavelength = 1.2398e-09 / ekev
    divergence_sigma = theta_fwhm / fwhm_to_sigma
    # Diffraction-limited waist sigma for the given divergence.
    source_sigma = wavelength / (2 * np.pi * divergence_sigma)
    return source_sigma * fwhm_to_sigma
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.