code stringlengths 101 5.91M |
|---|
class GatewayRandomDataGen(GatewayOperator):
def __init__(self, handle: str, region: str, input_queue: GatewayQueue, output_queue: GatewayQueue, error_event, error_queue: Queue, chunk_store: ChunkStore, size_mb: int, n_processes: Optional[int]=1):
super().__init__(handle, region, input_queue, output_queue, ... |
class Function_Order(GinacFunction):
def __init__(self):
GinacFunction.__init__(self, 'Order', conversions=dict(), latex_name='\\mathcal{O}')
def _sympy_(self, arg):
roots = arg.solve(arg.default_variable(), algorithm='sympy', multiplicities=False, explicit_solutions=True)
if (len(roots)... |
def build_lang(pairs, type):
    """Populate a fresh ``Lang`` vocabulary from dialogue pairs.

    Args:
        pairs: iterable of dicts with 'context_arr', 'response',
            'sketch_response' and 'deps_type' keys.
        type: truthy flag (name shadows the builtin but is kept for
            caller compatibility) — when true, the word fields are
            indexed in addition to the dependency types.

    Returns:
        The populated Lang instance.
    """
    vocab = Lang()
    for sample in pairs:
        if type:
            vocab.index_words(sample['context_arr'])
            vocab.index_words(sample['response'], trg=True)
            vocab.index_words(sample['sketch_response'], trg=True)
        vocab.index_type(sample['deps_type'])
    return vocab
class MetricCollection(Metric):
def __init__(self, metrics: List[Metric], **kwargs):
super().__init__(**kwargs)
self._metrics = metrics
def __call__(self, id_to_pred, id_to_labels):
return self._compute_metrics(id_to_pred, id_to_labels)
def _compute_metrics(self, id_to_pred, id_to_la... |
def value_with_optional_details(value, default_details=None):
    """Split a possibly-annotated value into a ``(value, details)`` pair.

    A plain value is paired with *default_details*; a single-entry dict is
    unpacked into its (key, value) pair.  A dict with any other number of
    entries trips the assertion, as in the original contract.
    """
    if not isinstance(value, dict):
        return (value, default_details)
    assert len(value) == 1
    ((inner_value, details),) = value.items()
    return (inner_value, details)
class TableauTuples_size(TableauTuples):
def __init__(self, size):
super().__init__(category=Sets())
self._size = size
def __contains__(self, t):
if isinstance(t, self.element_class):
return (self.size() == t.size())
elif (TableauTuples.__contains__(self, t) or isinst... |
_as_last_axis()
def denoise_nl_means(image, patch_size=7, patch_distance=11, h=0.1, fast_mode=True, sigma=0.0, *, preserve_range=False, channel_axis=None):
if (channel_axis is None):
multichannel = False
image = image[(..., np.newaxis)]
else:
multichannel = True
ndim_no_channel = (im... |
def make_visualizer(cfg, split='test'):
    """Instantiate the task-specific Visualizer.

    Loads ``lib/visualizers/<cfg.task>.py`` by file path and constructs
    its ``Visualizer`` class for the given dataset split.

    The original used ``imp.load_source``, which is deprecated since
    Python 3.4 and removed in 3.12; this is the equivalent importlib
    recipe (including registering the module in sys.modules, which
    imp.load_source also did).
    """
    import importlib.util
    import sys

    module = '.'.join(['lib.visualizers', cfg.task])
    path = os.path.join('lib/visualizers', (cfg.task + '.py'))
    spec = importlib.util.spec_from_file_location(module, path)
    mod = importlib.util.module_from_spec(spec)
    sys.modules[module] = mod  # mirror imp.load_source's registration
    spec.loader.exec_module(mod)
    return mod.Visualizer(split)
class Callables():
def __init__(self):
self._callbacks = []
def callbacks(self):
self._flush()
return self._callbacks
def append(self, callback):
try:
callback_ref = (weakref.ref(callback.__func__), weakref.ref(callback.__self__))
except AttributeError:
... |
@dc.program(auto_optimize=True)
def gesummv_shared(alpha: dc.float64, beta: dc.float64, A: dc.float64[(M, N)], B: dc.float64[(M, N)], x: dc.float64[N], y: dc.float64[M]):
    """GESUMMV kernel: y = alpha*A@x + beta*B@x, written in place into y."""
    # NOTE(review): the decorator name and the '@' matrix-multiply operators
    # were garbled in this copy; reconstructed as a DaCe program — confirm
    # against the upstream benchmark source.
    y[:] = (alpha * A) @ x + (beta * B) @ x
def check_negative_indices(*nodes):
for node in nodes:
if ((node is None) or ((not isinstance(node.constant_result, _py_int_types)) and (not isinstance(node.constant_result, float)))):
continue
if (node.constant_result < 0):
warning(node.pos, "the result of using negative ind... |
class BertCombined(nn.Module):
def __init__(self, num_tokens, num_labels, dropout):
super().__init__()
self.bert_wiki = BertModel.from_pretrained('bert-base-cased')
self.bert_wiki.resize_token_embeddings(num_tokens)
self.bert_pubmed = BertModel.from_pretrained('bert-base-cased')
... |
def download_file(url, DATA_DIR=''):
local_filename = url.split('/')[(- 1)]
local_filename = os.path.join(DATA_DIR, local_filename)
if os.path.exists(local_filename):
print(f'-I- file {local_filename} already exists, skipping download.')
return local_filename
with requests.get(url, strea... |
def register_Ns3MmWaveMacSchedSapUser_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::MmWaveMacSchedSapUser const &', 'arg0')])
cls.add_method('SchedConfigInd', 'void', [param('ns3::MmWaveMacSchedSapUser::SchedConfigIndParameters const &', 'params')], is_pure_virtual=True... |
def predict(args):
device = ('cuda' if torch.cuda.is_available() else 'cpu')
vocab_json = os.path.join(args.input_dir, 'vocab.json')
test_pt = os.path.join(args.input_dir, 'test.pt')
test_loader = DataLoader(vocab_json, test_pt, 128)
vocab = test_loader.vocab
model = GRUClassifier(vocab, args.di... |
def exit_after(s):
def outer(fn):
def inner(*args, **kwargs):
timer = threading.Timer(s, quit_function, args=[fn.__name__])
timer.start()
try:
result = fn(*args, **kwargs)
finally:
timer.cancel()
return result
... |
class BruteForceBLAS(BaseANN):
def __init__(self, metric, precision=numpy.float32):
if (metric not in ('angular', 'euclidean', 'hamming', 'jaccard')):
raise NotImplementedError(("BruteForceBLAS doesn't support metric %s" % metric))
elif ((metric == 'hamming') and (precision != numpy.bool... |
def test_encoder():
img_feat = torch.randn(4, 36, 2048)
seq_size = 20
ques = torch.randperm(seq_size).view(1, seq_size)
ques = ques.unsqueeze(1).repeat(4, 10, 1)
ques_len = torch.LongTensor([6, 5, 4, 3]).unsqueeze(1).repeat(1, 10)
config = {'use_hist': False, 'use_bert': False, 'img_feature_size... |
def register_Ns3WimaxPhy_methods(root_module, cls):
cls.add_constructor([param('ns3::WimaxPhy const &', 'arg0')])
cls.add_constructor([])
cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_pure_virtual=True, is_virtual=True)
cls.add_method('Attach', 'void', [param('ns3::Ptr< ns3... |
class TvltProcessor(ProcessorMixin):
attributes = ['image_processor', 'feature_extractor']
image_processor_class = 'TvltImageProcessor'
feature_extractor_class = 'TvltFeatureExtractor'
def __init__(self, image_processor, feature_extractor):
super().__init__(image_processor=image_processor, featu... |
class TestSinusoidPositionEncodingOp(serial.SerializedTestCase):
(positions_vec=hu.arrays(dims=[MAX_TEST_SEQUENCE_LENGTH], dtype=np.int32, elements=st.integers(1, MAX_TEST_SEQUENCE_LENGTH)), embedding_size=st.integers(1, MAX_TEST_EMBEDDING_SIZE), batch_size=st.integers(1, MAX_TEST_BATCH_SIZE), alpha=st.floats(MIN_T... |
def add_roi_Xconv1fc_gn_head(model, blob_in, dim_in, spatial_scale):
hidden_dim = cfg.FAST_RCNN.CONV_HEAD_DIM
roi_size = cfg.FAST_RCNN.ROI_XFORM_RESOLUTION
roi_feat = model.RoIFeatureTransform(blob_in, 'roi_feat', blob_rois='rois', method=cfg.FAST_RCNN.ROI_XFORM_METHOD, resolution=roi_size, sampling_ratio=c... |
_level_function()
# NOTE(review): the line above looks like a mangled decorator (presumably
# "@...high_level_function()") — confirm against the upstream file.
def run_lengths(array, *, highlevel=True, behavior=None, attrs=None):
    """Delegate computing run lengths of *array* to the private _impl.

    The bare ``yield (array,)`` appears to be part of the library's
    high-level-function dispatch protocol (handing the arguments to the
    decorator before the real implementation runs) — verify against the
    decorator's definition.
    """
    (yield (array,))
    return _impl(array, highlevel, behavior, attrs)
class Partition2(nn.Module):
LAYER_SCOPES = ['BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/Bert... |
def is_deprecated(f):
    """Heuristically probe whether calling *f* raises a DeprecationWarning.

    *f* is invoked with a deliberately invalid keyword argument while all
    warnings are escalated to errors; a DeprecationWarning surfacing as an
    exception means "deprecated".  Any other outcome — including the
    TypeError the bogus kwarg normally produces — counts as not deprecated.
    """
    deprecated = False
    with warnings.catch_warnings(record=True):
        warnings.simplefilter('error')
        try:
            f(**{'not a kwarg': None})
        except DeprecationWarning:
            deprecated = True
        except Exception:
            pass
    return deprecated
def to_numpy(pil_img):
    """Convert a PIL image (or array-like) to a channel-first uint8 array.

    Grayscale (2-D) inputs gain a trailing channel axis first, so the
    result always has the channel dimension leading (C, H, W).
    """
    arr = np.array(pil_img, dtype=np.uint8)
    if arr.ndim < 3:
        arr = arr[..., np.newaxis]
    # moveaxis(2, 0) is the modern spelling of rollaxis(arr, 2): HWC -> CHW.
    return np.moveaxis(arr, 2, 0)
class FairseqCriterion(_Loss):
def __init__(self, args, task):
super().__init__()
self.args = args
self.task = task
self.padding_idx = (task.target_dictionary.pad() if (task.target_dictionary is not None) else (- 100))
def add_args(parser):
pass
def build_criterion(cl... |
class NASNetworkGDAS_FRC(nn.Module):
def __init__(self, C, N, steps, multiplier, stem_multiplier, num_classes, search_space, affine, track_running_stats):
super(NASNetworkGDAS_FRC, self).__init__()
self._C = C
self._layerN = N
self._steps = steps
self._multiplier = multiplier... |
def paragraphize(tree, para_end_sentences):
book = tree.getroot()
body = book.find('.//body')
elems = [x for x in body]
new_body = ET.Element('body')
para_num = 0
start = 0
end = para_end_sentences[0]
for elem in elems:
if (elem.tag == 'header'):
new_body.append(elem)... |
class SimpleCNN(nn.Module):
def __init__(self, weight_path='simple_cnn.weights', eps_cnn=1e-05, momentum_cnn=0.05):
super(SimpleCNN, self).__init__()
weights = self.load_weight(weight_path)
self.conv1 = self.init_conv(1, 64, weights['conv1'], weights['b1'])
self.conv1_bn = nn.BatchNo... |
class Embedder(nn.Module):
    """Thin wrapper around ``nn.Embedding`` that also records ``d_model``."""

    def __init__(self, vocab_size, d_model):
        super().__init__()
        # Keep the model width around for callers that scale by sqrt(d_model).
        self.d_model = d_model
        self.embed = nn.Embedding(vocab_size, d_model)

    def forward(self, x):
        """Look up embeddings for a tensor of token indices."""
        return self.embed(x)
def check_all_models_are_auto_configured():
missing_backends = []
if (not is_torch_available()):
missing_backends.append('PyTorch')
if (not is_tf_available()):
missing_backends.append('TensorFlow')
if (not is_flax_available()):
missing_backends.append('Flax')
if (len(missing_... |
class Evaluator():
def __init__(self, dataset, dirname, _type='Single_Label'):
Model = BC.Model
self.model = Model.init_from_config(dirname)
self.model.dirname = dirname
self.metrics = metrics_type[_type]
self.display_metrics = True
def evaluate(self, test_data, save_resu... |
def test_binary():
    # Little-endian uint32 Awkward array vs big-endian uint32 NumPy array:
    # element-wise equality must hold regardless of byte order.
    ak_array = ak.Array(np.arange(10, dtype='<u4'))
    np_array = np.arange(10, dtype='>u4')
    assert np.array_equal(ak_array, np_array)
def roots_interval(f, x0):
F1 = f.base_ring()
(x, y) = f.parent().gens()
fx = F1[y](f.subs({x: F1(x0)}))
roots = fx.roots(QQbar, multiplicities=False)
result = {}
for (i, r) in enumerate(roots):
prec = 53
IF = ComplexIntervalField(prec)
CF = ComplexField(prec)
div... |
def postprocess(shards):
    """Run ``_postprocess`` over the unsharded result and any shard results.

    Always processes ``shards['unsharded']``; when present, each entry of
    ``shards['sharded']`` is processed too, and ``shards['sharded_ids']``
    is copied through untouched.
    """
    processed = {'unsharded': _postprocess(*shards['unsharded'])}
    if 'sharded' in shards:
        processed['sharded'] = [
            _postprocess(*piece) for piece in shards['sharded']
        ]
    if 'sharded_ids' in shards:
        processed['sharded_ids'] = shards['sharded_ids']
    return processed
(repr=False)
class JSONDecodeErrorContext(FailureContext):
validation_message: str
document: str
position: int
lineno: int
colno: int
message: str
title: str = 'JSON deserialization error'
type: str = 'json_decode'
def unique_by_key(self, check_message: (str | None)) -> tuple[(str, .... |
class DeformRoIPoolingFunction(Function):
def forward(ctx, data, rois, offset, spatial_scale, out_size, out_channels, no_trans, group_size=1, part_size=None, sample_per_part=4, trans_std=0.0):
ctx.spatial_scale = spatial_scale
ctx.out_size = out_size
ctx.out_channels = out_channels
c... |
class BaseRegressor(Algorithm, ABC):
def __init__(self):
self.values_ = None
def fit_predict(self, *args, **kwargs) -> np.ndarray:
self.fit(*args, **kwargs)
return self.values_
def _split_vars(self, shape):
n_row = shape[0]
self.values_row_ = self.values_[:n_row]
... |
def main(argv=sys.argv[1:]):
p = argparse.ArgumentParser()
p.add_argument('cdbg_prefix', help='cdbg prefix')
p.add_argument('catlas_prefix', help='catlas prefix')
p.add_argument('input_node_list_file', help='a cdbg_ids.txt.gz file')
p.add_argument('-o', '--output-node-list-file', required=True)
... |
def run_test():
model = models.__dict__['resnet18'](pretrained=False)
numClass = 1
img_dir = './images'
split_name = 'test'
splits = [split_name]
split_file_suffix = '_list.txt'
split_files = {}
for split in splits:
split_files[split] = os.path.join((split + split_file_suffix))
... |
def compat_cfg(cfg):
    """Return a deep copy of *cfg* upgraded through the legacy-compat shims.

    Applies, in order: imgs_per_gpu, loader-args and runner-args
    compatibility rewrites.  The input config object is left untouched.
    """
    upgraded = copy.deepcopy(cfg)
    for shim in (compat_imgs_per_gpu, compat_loader_args, compat_runner_args):
        upgraded = shim(upgraded)
    return upgraded
def print_config(config: DictConfig, fields: Sequence[str]=('trainer', 'model', 'datamodule', 'callbacks', 'logger', 'seed'), resolve: bool=True) -> None:
style = 'dim'
tree = Tree(f':gear: CONFIG', style=style, guide_style=style)
for field in fields:
branch = tree.add(field, style=style, guide_styl... |
def test_store_model_to_hdf(simulation_verysimple, tmp_path):
simulation_state = simulation_verysimple.simulation_state
fname = (tmp_path / 'simulation_state.h5')
store_simulation_state_to_hdf(simulation_state, fname)
with h5py.File(fname) as f:
assert np.array_equal(f['simulation_state/velocity... |
def _create_dataset(name, uri, cache_dir, variables, shuffle, batch_size, no_image_normalization):
d = nnabla_pb2.Dataset()
d.name = name
d.uri = uri
if (cache_dir is not None):
d.cache_dir = cache_dir
d.shuffle = shuffle
d.batch_size = batch_size
d.variable.extend(variables)
d.n... |
class FunctionDatabase():
def __init__(self, states: List[fenics.Function], adjoints: List[fenics.Function]) -> None:
self.states = states
self.adjoints = adjoints
self.state_spaces = [x.function_space() for x in self.states]
self.adjoint_spaces = [x.function_space() for x in self.ad... |
class FPIdenticalPred(FunPred):
    # Predicate emitting the solver's 'fpIdentical' operator over two
    # floating-point constants.  NOTE(review): presumably bit-level
    # identity (distinguishing NaN payloads and -0.0 from +0.0, unlike
    # ordinary fp equality) — confirm against the solver's definition.
    sig = (Constant, Constant)        # two Constant operands
    code = 'fpIdentical'              # operator name emitted into queries
    type_constraints = _all_floats    # both operands constrained to floats
def do_inference_for_submission(helper: PredictHelper, config: PredictionConfig, dataset_tokens: List[str]) -> List[Prediction]:
path_to_model_weights = ''
cv_heading = load_model(helper, config, path_to_model_weights)
cv_preds = []
for token in dataset_tokens:
cv_preds.append(cv_heading(token))... |
class AsyncLoopContext(LoopContext):
_to_iterator = staticmethod(auto_aiter)
async def length(self):
if (self._length is not None):
return self._length
try:
self._length = len(self._iterable)
except TypeError:
iterable = [x async for x in self._iterato... |
class TemplateEngine():
_statement_re = re.compile('\\{\\% (\\w+)(.*) \\%\\}')
def __init__(self, variables: Mapping[(str, str)]) -> None:
self._variables = variables
self._global_line_counter = 0
def process(self, source: TextIO, sink: TextIO) -> None:
while True:
line =... |
def get_arg():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('method')
parser.add_argument('datatrack')
parser.add_argument('ssl_type')
parser.add_argument('i_cv', type=int)
parser.add_argument('--use_opt', action='store_true', default=False)
return parser.parse_... |
def main():
with open(args.output, 'w', encoding='utf-8') as fw:
with open(args.input, 'r', encoding='utf-8') as f:
input_str = f.read()
input_str = (('<SENTENCES>' + input_str) + '</SENTENCES>')
dom = xml.dom.minidom.parseString(input_str)
example_nodes = dom.documentEle... |
def average_pooling_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes, kernel, stride=None, ignore_border=True, pad=None, channel_last=False, including_pad=True):
dy = grad_inputs[0]
x0_shape = input_shapes[0]
ctx = nn.get_current_context()
df = AveragePoolingDataGrad(ctx, kernel, strid... |
def log(s, elapsed=None):
    """Print *s* between '=' ruler lines, timestamped via ``secondsToStr``.

    Args:
        s: message to display.
        elapsed: optional pre-formatted elapsed-time string; printed on
            its own line when truthy.
    """
    ruler = '=' * 40
    print(ruler)
    print(secondsToStr(), '-', s)
    if elapsed:
        print('Elapsed time:', elapsed)
    print(ruler)
    print()
def replace_abbreviations(text):
    # Rewrite text around the AB_SENIOR and AB_ACRONYM patterns using
    # SEPARATOR — presumably so later sentence splitting doesn't break on
    # abbreviation periods; confirm against replace_with_separator.
    return replace_with_separator(text, SEPARATOR, [AB_SENIOR, AB_ACRONYM])
def hp_params(trial):
    """Extract the hyperparameter dict from a search-backend trial object.

    Supports optuna ``Trial`` objects (via ``.params``) and ray-tune style
    plain dicts; anything else raises RuntimeError.
    """
    if is_optuna_available() and isinstance(trial, optuna.Trial):
        return trial.params
    if is_ray_available() and isinstance(trial, dict):
        return trial
    raise RuntimeError(f'Unknown type for trial {trial.__class__}')
def gram_matrix(x):
    """Batched Gram matrix of feature maps, normalized by ch * h * w.

    Args:
        x: tensor of shape (batch, channels, height, width).

    Returns:
        Tensor of shape (batch, channels, channels).
    """
    batch, channels, height, width = x.size()
    flat = x.view(batch, channels, height * width)
    gram = flat.bmm(flat.transpose(1, 2))
    return gram / (channels * height * width)
def save_params(net, best_map, current_map, epoch, save_interval, prefix):
current_map = float(current_map)
if (current_map > best_map[0]):
best_map[0] = current_map
net.save_parameters('{:s}_best.params'.format(prefix, epoch, current_map))
with open((prefix + '_best_map.log'), 'a') as f... |
def calculate_weight_compression(model):
    """Ratio of float weight bytes to fixed-point weight bytes in *model*.

    Sums sizes over every coefficient Quantization layer; raises
    ZeroDivisionError when the model contains none (fxp total stays 0).
    """
    float_total, fxp_total = 0, 0
    for module in model.modules():
        if isinstance(module, layers.Quantization) and module.is_coefficient():
            float_total += module.get_float_size()
            fxp_total += module.get_fxp_size()
    return float_total / fxp_total
def test_compute_fitness_values_statement_coverage_non_empty_file(subject_properties_mock, executor_mock, trace_mock, plus_test_with_object_assertion):
module_name = 'tests.fixtures.linecoverage.plus'
tracer = ExecutionTracer()
tracer.get_subject_properties().existing_lines = _get_lines_data_for_plus_module... |
class OctConv(nn.Module):
def __init__(self, num_in, num_out, alphax, alphay, ks=3, pd=1, hasbias=True):
super(OctConv, self).__init__()
self.In_H = int((num_in * alphax))
self.In_L = (num_in - self.In_H)
self.Out_H = int((num_out * alphay))
self.Out_L = (num_out - self.Out_H... |
def register_Ns3McpsDataRequestParams_methods(root_module, cls):
cls.add_constructor([param('ns3::McpsDataRequestParams const &', 'arg0')])
cls.add_constructor([])
cls.add_instance_attribute('m_dstAddr', 'ns3::Mac16Address', is_const=False)
cls.add_instance_attribute('m_dstAddrMode', 'ns3::LrWpanAddress... |
def get_embedder(multires, i=0):
if (i == (- 1)):
return (nn.Identity(), 3)
embed_kwargs = {'include_input': False, 'input_dims': 2, 'max_freq_log2': (multires - 1), 'num_freqs': multires, 'log_sampling': True, 'periodic_fns': [torch.sin, torch.cos]}
embedder_obj = Embedder(**embed_kwargs)
embed... |
class Embed(Module):
def __init__(self, n_inputs, n_features, w_init=None, fix_parameters=False):
if (w_init is None):
w_init = UniformInitializer(((- np.sqrt(3.0)), np.sqrt(3)))
w_shape = (n_input, n_features)
w = nn.Variables.from_numpy_array(w_init()).apply(need_grad=(not fix_... |
class CaptionEmbeddingsHdfReader(object):
def __init__(self, qa_emb_file_path: str, in_memory: bool=False):
self.qa_emb_file_path = qa_emb_file_path
self._in_memory = in_memory
if self._in_memory:
with h5py.File(self.qa_emb_file_path, 'r') as qa_embedding_hdf:
sel... |
def slice_function_at_return(function: callable) -> list[UniqueInstruction]:
tracer = ExecutionTracer()
instrumentation = CheckedCoverageInstrumentation(tracer)
instrumentation_transformer = InstrumentationTransformer(tracer, [instrumentation])
function.__code__ = instrumentation_transformer.instrument_... |
def make_index(data_path):
rel_paths = {'development': 'TAU-urban-acoustic-scenes-2019-development', 'evaluation': 'TAU-urban-acoustic-scenes-2019-evaluation', 'leaderboard': 'TAU-urban-acoustic-scenes-2019-leaderboard'}
metadata_rel_path = os.path.join(rel_paths['development'], 'meta.csv')
setup_paths = {}... |
def build_segmentor(cfg, train_cfg=None, test_cfg=None):
    """Build a segmentor from *cfg* via the SEGMENTORS registry.

    train_cfg/test_cfg are forwarded in a dict as the third argument to
    ``build`` — presumably default args merged into the model config;
    confirm against the registry's build() signature.
    """
    return build(cfg, SEGMENTORS, dict(train_cfg=train_cfg, test_cfg=test_cfg))
def normalize_embeddings(emb, types, mean=None):
for t in types.split(','):
if (t == ''):
continue
if (t == 'center'):
if (mean is None):
mean = emb.mean(0, keepdim=True)
emb.sub_(mean.expand_as(emb))
elif (t == 'renorm'):
emb.d... |
def validate_es_referenciacatastral(df: Union[(str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame)], column: str='') -> Union[(bool, pd.Series, pd.DataFrame)]:
if isinstance(df, (pd.Series, dd.Series)):
return df.apply(referenciacatastral.is_valid)
elif isinstance(df, (pd.DataFrame, dd.DataFrame)):
... |
class Conv2dStaticSamePadding(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, bias=True, groups=1, dilation=1, **kwargs):
super().__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, bias=bias, groups=groups)
self.stride = s... |
_utils.test(arch=supported_archs_taichi_ndarray)
def test_ndarray_matrix_numpy_io():
n = 5
m = 2
x = ti.Vector.ndarray(n, ti.i32, (m,))
x_np = (1 + np.arange((n * m)).reshape(m, n).astype(np.int32))
x.from_numpy(x_np)
assert (x_np.flatten() == x.to_numpy().flatten()).all()
k = 2
x = ti.M... |
def setup_args():
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=9, help='seed for reproducibility')
parser.add_argument('--input_data_dir', type=str, default='rule_classifier_data', help='base directory for the data')
parser.add_argument('--data_split', type=str, def... |
def accuracy(output, target, topk=(1,)):
maxk = max(topk)
batch_size = target.size(0)
(_, pred) = output.topk(maxk, 1, True, True)
pred = pred.t().type_as(target)
correct = pred.eq(target.view(1, (- 1)).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view((- 1)).floa... |
def add_node_to_G(G, node):
    # Insert a node dict into graph G keyed by its 'nid', copying the
    # remaining fields over as node attributes (note the renames:
    # node_type -> type, question_node -> question).
    G.add_node(node['nid'], id=node['id'], type=node['node_type'], question=node['question_node'], function=node['function'])
def to_dag(C_in, gene, reduction):
dag = nn.ModuleList()
for edges in gene:
row = nn.ModuleList()
for (op_name, s_idx) in edges:
stride = (2 if (reduction and (s_idx < 2)) else 1)
op = ops.OPS[op_name](C_in, stride, True)
if (not isinstance(op, ops.Identity)):... |
def load_results(model: str):
model_results_path = (model + '_outputs.pkl')
with open(model_results_path, 'rb') as f:
results = pkl.load(f)
sequences = results['primary']
predictions = postprocess(results['prediction'])
true_values = postprocess(results['log_fluorescence'])
num_mutations... |
def load_test_suite(inputs):
import platform
import unittest
from lit.LitTestCase import LitTestCase
litConfig = LitConfig.LitConfig(progname='lit', path=[], quiet=False, useValgrind=False, valgrindLeakCheck=False, valgrindArgs=[], noExecute=False, debug=False, isWindows=(platform.system() == 'Windows')... |
def mk_type_name(type: CairoType, open_namespaces: List[ScopedName]) -> str:
sep_char = 's'
pointer_char = ''
if isinstance(type, TypeTuple):
return mk_tuple_name(type, open_namespaces)
elif isinstance(type, TypeStruct):
return get_name_in_open_scopes(type.scope, open_namespaces).replace... |
def writeFile(filename, points, ANBtype, SNBtype, SNAtype, ODItype, APDItype, FHItype, FMAtype, mwtype):
f = open(filename, 'w')
for point in points:
f.write((str(point) + '\n'))
f.write((ANBtype + '\n'))
f.write((SNBtype + '\n'))
f.write((SNAtype + '\n'))
f.write((ODItype + '\n'))
f... |
class Tagger(Model):
def __init__(self, hparams):
super(Tagger, self).__init__(hparams)
self._comparsion = {Task.conllner: 'max', Task.wikiner: 'max', Task.udpos: 'max'}[self.hparams.task]
self._selection_criterion = {Task.conllner: 'val_f1', Task.wikiner: 'val_f1', Task.udpos: 'val_acc'}[se... |
def check_similar(ref, res):
    """Assert that *res* is numerically close to *ref* (``np.allclose``).

    On failure, the assertion message reports the mean and max absolute
    error between the two arrays.
    """
    abs_err = np.abs(ref - res)
    stats = ('avg abs err = %.10f, max abs err = %.10f'
             % (np.mean(abs_err), np.max(abs_err)))
    assert np.allclose(ref, res), stats
def _notebook_run(path):
notebook_dir = os.path.dirname(path)
test_ipynb = (os.path.split(path)[(- 1)] + '.test.ipynb')
args = ['jupyter', 'nbconvert', '--execute', '--allow-errors', '--ExecutePreprocessor.timeout=-1', '--to', 'notebook', '--output', test_ipynb, path]
subprocess.check_call(args)
arg... |
_cache(maxsize=1000)
def measure_entangled_state_with_cache_density(state: Tuple[Tuple[complex]], state_index: int, num_states: int) -> Tuple[(array, array, float)]:
state = array(state)
projector0 = [1]
projector1 = [1]
for i in range(num_states):
if (i == state_index):
projector0 =... |
def to_mido_meta_track(music: 'Music') -> MidiTrack:
meta_track = MidiTrack()
if (music.metadata.title is not None):
meta_track.append(MetaMessage('track_name', name=music.metadata.title))
for tempo in music.tempos:
meta_track.append(to_mido_tempo(tempo))
for key_signature in music.key_s... |
def run_experiment_lite(stub_method_call=None, batch_tasks=None, exp_prefix='experiment', exp_name=None, log_dir=None, script='scripts/run_experiment_lite.py', python_command='python', mode='local', dry=False, docker_image=None, aws_config=None, env=None, variant=None, use_gpu=False, sync_s3_pkl=False, sync_s3_png=Fals... |
class ParallelTextAndSchemaInputPipeline(ParallelTextInputPipeline):
def default_params():
params = ParallelTextInputPipeline.default_params()
params.update({'schema_loc_files': []})
return params
def _build_schema_lookup_tables(self):
schema_loc_files = self.params['schema_loc_f... |
def combine_stains(stains, conv_matrix):
    """Invert a stain separation back to intensity space.

    Projects the negated stain values through *conv_matrix* in log space,
    exponentiates, then rescales (rgb2 - 2) from the [-1, 1] input range.
    """
    stains_f32 = dtype.img_as_float(stains.astype('float64')).astype('float32')
    flat = np.reshape(stains_f32, (-1, 3))
    log_rgb2 = np.dot(-flat, conv_matrix)
    rgb2 = np.exp(log_rgb2)
    return rescale_intensity(np.reshape(rgb2 - 2, stains_f32.shape),
                             in_range=(-1, 1))
class CarModel(Enum):
    """Closed set of supported car models; each member's value equals its name."""
    FordEscort = 'FordEscort'
    BMW320i = 'BMW320i'
    VWVanagon = 'VWVanagon'
def test_ai_config_file_not_exists(workspace):
config_file = workspace.get_path('ai_settings.yaml')
ai_config = AIConfig.load(str(config_file))
assert (ai_config.ai_name == '')
assert (ai_config.ai_role == '')
assert (ai_config.ai_goals == [])
assert (ai_config.api_budget == 0.0)
assert (ai_... |
def _model_data(model_type, sparse):
emb_dim = 16
sparse_support = (GCN, APPNP, GAT, RGCN)
if (sparse and (model_type not in sparse_support)):
pytest.skip(f"{model_type.__name__} doesn't support/use sparse=True")
if (model_type in (GCN, APPNP, GAT, PPNP)):
G = example_graph_random()
... |
class DiagramAlgebra(CombinatorialFreeModule):
def __init__(self, k, q, base_ring, prefix, diagrams, category=None):
self._prefix = prefix
self._q = base_ring(q)
self._k = k
self._base_diagrams = diagrams
cat = AssociativeAlgebras(base_ring.category()).FiniteDimensional().Wit... |
def download_dds_results(download_dir='dds_results'):
os.makedirs(download_dir, exist_ok=True)
train_fname = os.path.join(download_dir, 'dds_results_2.5M.npy')
if (not os.path.exists(train_fname)):
_download(DDS_RESULTS_TRAIN_URL, train_fname)
with open(train_fname, 'rb') as f:
(... |
def test_none_statement_delta(test_case_mock):
    # delta() on a NoneStatement must be a no-op: the statement's value
    # is still None afterwards.
    statement = stmt.NoneStatement(test_case_mock)
    statement.delta()
    assert (statement.value is None)
def _gen_instance_module(fields):
s = '\nfrom copy import deepcopy\nimport torch\nfrom torch import Tensor\nimport typing\nfrom typing import *\n\nimport detectron2\nfrom detectron2.structures import Boxes, Instances\n\n'
(cls_name, cls_def) = _gen_instance_class(fields)
s += cls_def
return (cls_name, s... |
def model_fn_builder(bert_config, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings):
def model_fn(features, labels, mode, params):
tf.logging.info('*** Features ***')
for name in sorted(features.keys()):
tf.logging.info((' name = %s, sha... |
def mk_lean_core_import_path(file_name: str) -> str:
    """Resolve a Lean import name to its full dotted path.

    Relative names (starting with '.') are returned unchanged; anything
    else is prefixed with the Lean core soundness namespace.

    Uses ``startswith`` instead of ``file_name[0]`` so an empty name no
    longer raises IndexError (it is treated as non-relative).
    """
    if file_name.startswith('.'):
        return file_name
    return 'starkware.cairo.lean.semantics.soundness.' + file_name
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--task_name', default=None, type=str, required=True, help='The name of the task to train.')
parser.add_argument('--cache_dir', default='', type=str, help='Where do you want to store the pre-trained models downloaded from s3')
parser.add... |
def write_rst(kind, name, project, path):
file_name = transfer_filename(name)
with open(os.path.join(path, 'cpp', project, (kind + '.rst')), 'a') as rst:
rst.write((((('\t' + kind) + '/') + file_name) + '.rst\n'))
rst.close()
with open(os.path.join(path, 'cpp', project, kind, (file_name + '.... |
def test_sg_filter_2d():
x = np.array([[1.0, 2.0, 1.0], [2.0, 4.0, 2.0]])
expected = np.array([[1.0, (4.0 / 3), 1.0], [2.0, (8.0 / 3), 2.0]])
y = savgol_filter(x, 3, 1, mode='constant')
assert_allclose(y, expected)
y = savgol_filter(x.T, 3, 1, mode='constant', axis=0)
assert_allclose(y, expected... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.