code stringlengths 101 5.91M |
|---|
def test_pickle_meta_evaluator():
set_seed(100)
tasks = SetTaskSampler((lambda : GarageEnv(PointEnv())))
max_path_length = 200
env = GarageEnv(PointEnv())
n_traj = 3
with tempfile.TemporaryDirectory() as log_dir_name:
runner = LocalRunner(SnapshotConfig(snapshot_dir=log_dir_name, snapsho... |
class TTableIterator(object):
thisown = _swig_property((lambda x: x.this.own()), (lambda x, v: x.this.own(v)), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, PTableV):
_snap.TTableIterator_swiginit(self, _snap.new_TTableIterator(PTableV))
def Next(self):
return _snap... |
def _nucombos(nutot):
nucombos = []
for nu1 in range((nutot + 1)):
for nu2 in range(((nutot + 1) - nu1)):
for nu3 in range((((nutot + 1) - nu1) - nu2)):
nu4 = (((nutot - nu1) - nu2) - nu3)
nucombos.append((nu1, nu2, nu3, nu4))
return nucombos |
class BidirectionalGRU(nn.Module):
def __init__(self, rnn_dim, hidden_size, dropout, batch_first):
super(BidirectionalGRU, self).__init__()
self.BiGRU = nn.GRU(input_size=rnn_dim, hidden_size=hidden_size, num_layers=1, batch_first=batch_first, bidirectional=True)
self.layer_norm = nn.LayerNo... |
class environment():
def __init__(self, number_steps, max_required_step, safe_zone):
self._dynamics = dynamics(number_steps, max_required_step, safe_zone)
def reward(self, phi_idx, position):
return self._dynamics.reward(phi_idx, position)
def state_transition(self, domain, phi_idx, system_r... |
class PCAOptions(BaseOptions):
def initialize(self):
BaseOptions.initialize(self)
self.parser.add_argument('--pca_iterations', type=int, default=250, help='number of iterations to get latent code')
self.parser.add_argument('--fake_img_size', type=int, default=512, help='spatial size for the ... |
class BaseModel(metaclass=ABCMeta):
required_baseconfig = ['learning_rate']
def _model(self, config):
raise NotImplementedError
def _forward(self, inputs, mode, config):
raise NotImplementedError
def _loss(self, outputs, inputs, config):
raise NotImplementedError
def _metrics... |
class ColorizationDataset(BaseDataset):
def modify_commandline_options(parser, is_train):
parser.set_defaults(input_nc=1, output_nc=2, direction='AtoB')
return parser
def __init__(self, opt):
BaseDataset.__init__(self, opt)
self.dir = os.path.join(opt.dataroot, opt.phase)
... |
class KGDatasetFB15k237(KGDataset):
def __init__(self, path, name='FB15k-237'):
self.name = name
url = '
if (not os.path.exists(os.path.join(path, name))):
print('File not found. Downloading from', url)
_download_and_extract(url, path, (name + '.zip'))
self.pa... |
def get_updates_arg_preprocessing(args, kwargs):
if (len(args) > 4):
raise TypeError('`get_update` call received more arguments than expected.')
elif (len(args) == 4):
(opt, params, _, loss) = args
kwargs['loss'] = loss
kwargs['params'] = params
return ([opt], kwargs, [])... |
def get_representative_dataset(data_loader: tf.data.Dataset, n_iters: int, data_loader_key: int=0, preprocess=None):
class RepresentativeDataset():
def __init__(self, in_data_loader):
self.dl = in_data_loader
self.iter = iter(self.dl)
def __call__(self):
for _ in ... |
def _gen_mobilenet_v3_rw(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
arch_def = [['ds_r1_k3_s1_e1_c16_nre_noskip'], ['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'], ['ir_r3_k5_s2_e3_c40_se0.25_nre'], ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], ['ir_r2_k3_s1_e6_c112... |
def load_filenames(opts):
rooms = {'smallroom': [], 'mediumroom': [], 'largeroom': []}
for room in rooms.keys():
rir_list_fn = os.path.join(opts.data_root, room, 'rir_list')
with open(rir_list_fn, 'r') as fn:
for line in fn:
rooms[room].append(line.split(' ')[4].strip... |
_start_docstrings('SegFormer Model transformer with an all-MLP decode head on top e.g. for ADE20k, CityScapes.', SEGFORMER_START_DOCSTRING)
class SegformerForSemanticSegmentation(SegformerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.segformer = SegformerModel(config)
... |
def max_exp_idx(exp_name):
    """Return the highest run index recorded for ``exp_name`` under ``../runs``.

    Log directories are expected to match ``<exp_name>*_<idx>`` or
    ``<exp_name>*_<idx>_log``.  Returns 1 when no matching log exists yet.
    """
    log_dir = os.path.join('../runs', exp_name)
    log_files = glob.glob('{}*'.format(log_dir))
    if len(log_files) == 0:
        return 1
    # BUG FIX: the indices must be compared numerically. The original took
    # max() over the raw regex-group strings, which orders lexicographically
    # ('9' > '10'), returning the wrong maximum once indices reach 10.
    log_ns = [int(re.search(r'_(\d+)(_log)?$', f).group(1)) for f in log_files]
    return max(log_ns)
class MultiprocessingEncoder(object):
def __init__(self, args):
self.args = args
def initializer(self):
global bpe
bpe = get_encoder(self.args.encoder_json, self.args.vocab_bpe)
def encode(self, line):
global bpe
ids = bpe.encode(line)
return list(map(str, ids... |
.parametrize(['energy', 'expected'], [(511.0, 1.), (255.5, 0.), (0.0, 0.0), (.0, .)])
def test_kappa_calculation(energy, expected):
    # Parametrized check that util.kappa_calculation maps an energy value to
    # the expected kappa. NOTE(review): the @pytest.mark.parametrize decorator
    # on the preceding line was mangled in extraction (its final case is
    # syntactically invalid) — restore it before running.
    kappa = util.kappa_calculation(energy)
    npt.assert_almost_equal(kappa, expected)
class BaseGrabFormat(Format):
_pillow_imported = False
_ImageGrab = None
def __init__(self, *args, **kwargs):
super(BaseGrabFormat, self).__init__(*args, **kwargs)
self._lock = threading.RLock()
def _can_write(self, request):
return False
def _init_pillow(self):
with ... |
class TFWhisperForConditionalGeneration(metaclass=DummyObject):
    # Placeholder class emitted when TensorFlow is not installed: any attempt
    # to use it routes through requires_backends, which raises an informative
    # ImportError naming the missing backend.
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def load_dblp_graph_structure_only(cf):
    """Load the DBLP graph (structure only) saved as ``<data_root><data_name>.pt``
    and return the first stored graph."""
    import dgl
    graph_path = f'{cf.data.data_root}{cf.data.data_name}.pt'
    loaded = dgl.load_graphs(graph_path)
    return loaded[0][0]
class TestSpatial(PinocchioTestCase):
def test_skew(self):
v3 = rand(3)
self.assertApprox(v3, unSkew(skew(v3)))
self.assertLess(np.linalg.norm(skew(v3).dot(v3)), 1e-10)
(x, y, z) = tuple(rand(3).tolist())
M = np.array([[0.0, x, y], [(- x), 0.0, z], [(- y), (- z), 0.0]])
... |
def MIND(user_feature_columns, item_feature_columns, k_max=2, p=100, dynamic_k=False, user_dnn_hidden_units=(64, 32), dnn_activation='relu', dnn_use_bn=False, l2_reg_dnn=0, l2_reg_embedding=1e-06, dnn_dropout=0, output_activation='linear', sampler_config=None, seed=1024):
if (len(item_feature_columns) > 1):
... |
def _sa_coefficients_lambda_(K, beta=0):
from sage.rings.laurent_series_ring import LaurentSeriesRing
from sage.rings.power_series_ring import PowerSeriesRing
from sage.rings.rational_field import QQ
V = LaurentSeriesRing(QQ, names='v', default_prec=K)
v = V.gen()
T = PowerSeriesRing(V, names='t... |
def main(task: str, path: str, lang: str, split: str, bitext: str, alignment: str, output_path: str, name: str):
MAPPING: Dict[(str, Type[Dataset])] = {'wikiann': WikiAnnNER, 'ud': ParsingDataset, 'better-abstract': BetterDataset, 'ace': ACEDataset, 'muc': MUCDataset}
assert (task in MAPPING)
CLASS = MAPPIN... |
def main(args):
assert ((args.valid_percent >= 0) and (args.valid_percent <= 1.0))
if (not os.path.exists(args.dest)):
os.makedirs(args.dest)
dir_path = os.path.realpath(args.root)
search_path = os.path.join(dir_path, ('**/*.' + args.ext))
rand = random.Random(args.seed)
valid_f = (open(... |
def require_ray(test_case):
    """Test decorator: run ``test_case`` only when Ray/tune is importable,
    otherwise mark it skipped."""
    if _has_ray:
        return test_case
    return unittest.skip('test requires Ray/tune')(test_case)
class AlignedPairs():
def __init__(self, quant_tokens, x, y, amr, score=0, near=(- float('inf'))):
self.quant_tokens = quant_tokens
self.quant_token_index = x
self.snt_token_index = y
self.amr = amr
self.score = score
self.near = near
def __str__(self):
re... |
def batch(data, batch_size, batch_size_fn=None, repeat=False):
if (batch_size_fn is None):
def batch_size_fn(new, count, sofar):
return count
minibatch = []
size_so_far = 0
for ex in data:
minibatch.append(ex)
size_so_far = batch_size_fn(ex, len(minibatch), size_so_fa... |
(frozen=True)
class TransitionMiniBatch():
observations: Union[(Float32NDArray, Sequence[Float32NDArray])]
actions: Float32NDArray
rewards: Float32NDArray
next_observations: Union[(Float32NDArray, Sequence[Float32NDArray])]
returns_to_go: Float32NDArray
terminals: Float32NDArray
intervals: F... |
def load_hr_map(data_dir):
    """Unpickle and return the hit-ratio map stored as ``ndcg_test.pickle``
    inside ``data_dir``."""
    pickle_path = join(data_dir, 'ndcg_test.pickle')
    with open(pickle_path, 'rb') as handle:
        return pickle.load(handle)
def checkpoint_name(save_dir, save_name, checkpoint_name):
if checkpoint_name:
model_dir = os.path.split(checkpoint_name)[0]
if (model_dir == save_dir):
return checkpoint_name
return os.path.join(save_dir, checkpoint_name)
model_dir = os.path.split(save_name)[0]
if (model... |
def waterfall_memoized():
demand_satisfied = {}
def fn(flow_val, k, commods):
if (k in demand_satisfied):
return demand_satisfied[k]
EPS = 1e-06
demand_remaining = {commod[0]: commod[(- 1)][(- 1)] for commod in commods}
flow_remaining = flow_val
sorted_commods... |
def linearspectrogram(wav):
    """Compute a dB-scaled linear-frequency spectrogram of ``wav``.

    Pre-emphasizes the waveform, takes the STFT magnitude in dB relative to
    ``hp.ref_level_db``, and normalizes the result when
    ``hp.signal_normalization`` is enabled.
    """
    emphasized = preemphasis(wav, hp.preemphasis, hp.preemphasize)
    spectrum_db = _amp_to_db(np.abs(_stft(emphasized))) - hp.ref_level_db
    if hp.signal_normalization:
        return _normalize(spectrum_db)
    return spectrum_db
class TraceHessianCalculatorPytorch(TraceHessianCalculator):
def __init__(self, graph: Graph, input_images: List[torch.Tensor], fw_impl, trace_hessian_request: TraceHessianRequest, num_iterations_for_approximation: int=HESSIAN_NUM_ITERATIONS):
super(TraceHessianCalculatorPytorch, self).__init__(graph=graph,... |
def sanity_check_random_collate():
features = torch.load('tmpfeat.bin')
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
args = SimpleNamespace()
args.lowercase = True
sample_slice = features[:2]
for x in sample_slice:
x.ex.candida... |
def clean_es_cif(df: Union[(pd.DataFrame, dd.DataFrame)], column: str, output_format: str='standard', split: bool=False, inplace: bool=False, errors: str='coerce', progress: bool=True) -> pd.DataFrame:
if (output_format not in {'compact', 'standard'}):
raise ValueError(f'output_format {output_format} is inv... |
class ShapenetCaptionInstructDataset(ShapenetCaptionDataset):
    """Instruct-style variant of ShapenetCaptionDataset: moves the caption to
    ``text_output`` and replaces ``text_input`` with an empty processed prompt."""

    def __getitem__(self, index):
        data = super().__getitem__(index)
        # FIX: compare against None with `is not`, per PEP 8 — `!= None`
        # invokes __eq__, which can misbehave for container/array-like data.
        if data is not None:
            data['text_output'] = data['text_input']
            data['text_input'] = self.text_processor('')
        return data
def gen_task_seq(cl_setting, sample_n, load_pth=None):
if (load_pth is None):
ori_task_seq = ('oarlks' if (cl_setting == 'functional') else 'abcdef')
all_permutations = np.array(list(itertools.permutations(ori_task_seq, len(ori_task_seq))))
setting_indices = (np.arange(len(all_permutations))... |
def _reconstruct_persistent_obj(meta):
meta = dnnlib.EasyDict(meta)
meta.state = dnnlib.EasyDict(meta.state)
for hook in _import_hooks:
meta = hook(meta)
assert (meta is not None)
assert (meta.version == _version)
module = _src_to_module(meta.module_src)
assert (meta.type == 'cla... |
_type_check
def to_numpy(x: Any) -> Union[(Batch, np.ndarray)]:
if isinstance(x, torch.Tensor):
return x.detach().cpu().numpy()
elif isinstance(x, np.ndarray):
return x
elif isinstance(x, (np.number, np.bool_, Number)):
return np.asanyarray(x)
elif (x is None):
return np.... |
class ConcatDataset(Dataset):
def cumsum(sequence):
(r, s) = ([], 0)
for e in sequence:
l = len(e)
r.append((l + s))
s += l
return r
def __init__(self, datasets):
super(ConcatDataset, self).__init__()
assert (len(datasets) > 0), 'datase... |
class Loss():
def _fw_func(target: Any, weights: Any) -> Tuple[(Any, Any)]:
return (target, weights)
def _bw_func(pred: Any) -> Any:
return pred
def fw_func(self):
return self._fw_func
def bw_func(self):
return self._bw_func
def metric_wrapper(self, metric_func: Calla... |
def test_file_operations_log(test_file: TextIOWrapper):
log_file_content = 'File Operation Logger\nwrite: path/to/file1.txt #checksum1\nwrite: path/to/file2.txt #checksum2\nwrite: path/to/file3.txt #checksum3\nappend: path/to/file2.txt #checksum4\ndelete: path/to/file3.txt\n'
test_file.write(log_file_content)
... |
def get_img_batch(files_list, secret_size, batch_size=4, size=(400, 400)):
batch_cover = []
batch_secret = []
for i in range(batch_size):
img_cover_path = random.choice(files_list)
try:
img_cover = Image.open(img_cover_path).convert('RGB')
img_cover = ImageOps.fit(img... |
def is_iterable(obj):
    """Return True if ``obj`` supports iteration, treating TensorFlow tensors
    as non-iterable regardless of their actual protocol."""
    if isinstance(obj, tf_ops.Tensor):
        return False
    try:
        iter(obj)
    except Exception:  # any failure from iter() means "not iterable"
        return False
    else:
        return True
def test_single_sent_scores_dont_depend_on_newline_sep():
pred = ['Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.', 'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .']
tgt = ['Margot Frank, died in 1945, a month ear... |
def get_match(embeddings, labels, ways, shots):
match_embeddings = [paddle.zeros_like(embeddings[0]) for _ in range((ways * shots))]
class_c = paddle.zeros([ways])
for i in range(len(embeddings)):
idx = int(labels.numpy()[i])
match_embeddings[((idx * shots) + class_c[idx])] += embeddings[i]
... |
def test_bad_arg_default(msg):
from pybind11_tests import debug_enabled
with pytest.raises(RuntimeError) as excinfo:
m.bad_arg_def_named()
assert (msg(excinfo.value) == ("arg(): could not convert default argument 'a: UnregisteredType' in function 'should_fail' into a Python object (type not register... |
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
(model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
(model_args, data_args,... |
def kernel2(n: ti.i32) -> ti.i32:
    # Taichi kernel (the @ti.kernel decorator is presumably above this def in
    # the original file — confirm): counts to n by atomically incrementing a
    # local accumulator from the top-level loop.
    x = 0
    for i in range(n):
        # atomic_add because Taichi may parallelize the outermost kernel loop
        ti.atomic_add(x, 1)
    return x
def _double_threshold(x, high_thres, low_thres, n_connect=1, return_arr=True):
assert (x.ndim == 1), 'Input needs to be 1d'
high_locations = np.where((x > high_thres))[0]
locations = (x > low_thres)
encoded_pairs = find_contiguous_regions(locations)
filtered_list = list(filter((lambda pair: ((pair[0... |
.spark
def test_history_based_fp_one_features_df(log_for_feature_gen, user_features):
history_based_fp = HistoryBasedFeaturesProcessor(user_cat_features_list=['gender'])
history_based_fp.fit(log=log_for_feature_gen, user_features=user_features)
assert isinstance(history_based_fp.item_cond_pop_proc, EmptyFea... |
class DoubleCritic(nn.Module):
hidden_dims: Sequence[int]
activations: Callable[([jnp.ndarray], jnp.ndarray)] = nn.relu
layer_norm: bool = False
def __call__(self, observations: jnp.ndarray, actions: jnp.ndarray) -> Tuple[(jnp.ndarray, jnp.ndarray)]:
critic1 = Critic(self.hidden_dims, activation... |
class HorizontalFlip(object):
    """Callable transform that mirrors an image left-to-right.

    Iterating over an instance yields a single fresh HorizontalFlip, so it
    interoperates with transform collections that expect iterables.
    """

    def __call__(self, image):
        # Delegate the actual flip to the functional API.
        return F.hflip(image)

    def __iter__(self):
        yield HorizontalFlip()

    def __repr__(self):
        return 'HorizontalFlip()'
class DMA_tensor_0x000__reg(atomic_reg):
OP_NAME = 'DMA_tensor(0x000)'
_fields_ = [('intr_en', ctypes.c_uint64, 1), ('stride_enable', ctypes.c_uint64, 1), ('nchw_copy', ctypes.c_uint64, 1), ('cmd_short', ctypes.c_uint64, 1), ('decompress_enable', ctypes.c_uint64, 1), ('cmd_id_en', ctypes.c_uint64, 4), ('cmd_id'... |
class Shared(nn.Module):
    """Wrapper module that reuses (shares) an existing submodule.

    FIX: the original skipped ``super().__init__()``; for an ``nn.Module``
    subclass the assignment ``self.module = module`` then raises
    "cannot assign module before Module.__init__() call".
    """

    def __init__(self, module):
        super().__init__()  # must run before registering submodules
        self.module = module

    def call(self, x):
        # NOTE(review): PyTorch convention is `forward`; `call` is kept
        # unchanged to preserve the existing caller-facing interface.
        return self.module(x)
def get_llm_backend(llm_name):
    """Map a model name to its LangChain backend wrapper.

    Chat-capable OpenAI models get the chat wrapper, completion-style OpenAI
    models the plain LLM wrapper, and anything else falls back to FastChat.
    """
    if llm_name in OPENAI_CHAT_MODELS:
        backend = langchain_openai_chatllm
    elif llm_name in OPENAI_LLM_MODELS:
        backend = langchain_openai_llm
    else:
        backend = langchain_fastchat_llm
    return backend(llm_name)
class TestBaseSynthesizer():
def test_set_random_state(self):
instance = BaseSynthesizer()
instance.set_random_state(3)
assert isinstance(instance.random_states, tuple)
assert isinstance(instance.random_states[0], np.random.RandomState)
assert isinstance(instance.random_state... |
def test_decompress_require_lossless_no_compressed_in_tags(tensor_key, named_tensor):
tensor_codec = TensorCodec(NoCompressionPipeline())
(tensor_name, origin, round_number, report, tags) = tensor_key
tensor_key = TensorKey(tensor_name, origin, round_number, report, ('lossy_compressed',))
metadata = [{'... |
def create_loader():
dataset = create_dataset()
if (cfg.dataset.task == 'graph'):
id = dataset.data['train_graph_index']
loaders = [get_loader(dataset[id], cfg.train.sampler, cfg.train.batch_size, shuffle=True)]
delattr(dataset.data, 'train_graph_index')
else:
loaders = [get_... |
def run_target(binary_file: str, test_type: TestType) -> None:
print_log('start run', test_type.value, 'test: ', binary_file)
start_time = time.time()
assert (test_type in {TestType.CPP, TestType.PY})
if (test_type == TestType.CPP):
run_cpp_test(binary_file)
else:
run_oss_python_test... |
def test_override_paramsets_incorrect_num_parameters():
with open('validation/data/2bin_histosys_example2.json', encoding='utf-8') as spec_file:
source = json.load(spec_file)
spec = {'channels': [{'name': 'singlechannel', 'samples': [{'name': 'signal', 'data': source['bindata']['sig'], 'modifiers': [{'n... |
def DM_48_9_1():
from sage.rings.finite_rings.finite_field_constructor import FiniteField
F16 = FiniteField(16, 'x')
F3 = FiniteField(3)
F3F16 = F3.cartesian_product(F16)
w = F16.primitive_element()
assert ((w ** 4) == (w + 1))
A = [[(0, 4), (2, 2), (2, 2), (0, 13), (0, 4), (2, 13), (0, 1), ... |
class Power(Function):
node_type = 'goos.function.power'
def __init__(self, fun: Function, power: float) -> None:
super().__init__(fun)
self._pow = power
def eval(self, inputs: List[flows.NumericFlow], context: goos.EvalContext) -> flows.NumericFlow:
value = copy.deepcopy(inputs[0])
... |
class LabelPowerset(ProblemTransformationBase):
def __init__(self, classifier=None, require_dense=None):
super(LabelPowerset, self).__init__(classifier=classifier, require_dense=require_dense)
self._clean()
def _clean(self):
self.unique_combinations_ = {}
self.reverse_combination... |
class MultidilatedResnetBlock(nn.Module):
def __init__(self, dim, padding_type, conv_layer, norm_layer, activation=nn.ReLU(True), use_dropout=False):
super().__init__()
self.conv_block = self.build_conv_block(dim, padding_type, conv_layer, norm_layer, activation, use_dropout)
def build_conv_bloc... |
class GemmKind(enum.Enum):
    # Enumerates the GEMM kernel flavors the generator can emit; members get
    # their values from enum_auto(), so only the names are meaningful.
    Gemm = enum_auto()
    Sparse = enum_auto()
    Universal = enum_auto()
    PlanarComplex = enum_auto()
    PlanarComplexArray = enum_auto()
    Grouped = enum_auto()
def register_Ns3OnOffApplication_methods(root_module, cls):
cls.add_constructor([param('ns3::OnOffApplication const &', 'arg0')])
cls.add_constructor([])
cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')])
cls.add_method('GetSocket', 'ns3::Ptr< ns3::Socket >', [], is_const=True)
... |
def deduce_input_types(func: T.Callable) -> T.Sequence[T.ElementOrType]:
    """Infer the symbolic input type of every parameter of ``func``.

    The first parameter is flagged so deduce_input_type can give it special
    treatment (e.g. an implicit self-like argument).
    """
    parameters = inspect.signature(func).parameters.values()
    return [
        deduce_input_type(parameter, func, index == 0)
        for index, parameter in enumerate(parameters)
    ]
def _point_partition(expected, observed, start=None, end=None):
expected = set(expected)
observed = set(observed)
edge_start = min(expected.union(observed))
if (start is not None):
edge_start = start
edge_end = max(expected.union(observed))
if (end is not None):
edge_end = end
... |
class BlingFireTokenizer():
    """Thin wrapper around BlingFire's word tokenizer that yields Token objects."""

    def tokenize(self, sentence: str) -> List[Token]:
        # text_to_words returns a single space-separated string of word tokens.
        words = text_to_words(sentence).split()
        return list(map(Token, words))
class CB(nn.Module):
def __init__(self, nIn, nOut, kSize, stride=1, groups=1):
super().__init__()
padding = int(((kSize - 1) / 2))
self.conv = nn.Conv2d(nIn, nOut, kSize, stride=stride, padding=padding, bias=False, groups=groups)
self.bn = nn.BatchNorm2d(nOut)
def forward(self, i... |
def correcteness_set_ret(a, b):
    """Return True iff every index where ``b`` is positive has a nonzero
    entry in ``a``; iteration is driven by the length of ``b``.

    (The misspelled name is kept as-is — callers reference it.)
    """
    return all(b[i] <= 0 or a[i] != 0 for i in range(len(b)))
class BatchMutableMapping(MutableMapping, BatchMapping):
__metaclass__ = ABCMeta
def set_batch(self, key_val_pairs):
pass
def __setitem__(self, key, value):
self.set_batch([(key, value)])
def del_batch(self, keys):
pass
def __delitem__(self, key):
self.del_batch([key]... |
def test_entity_vocab(entity_vocab):
assert (len(entity_vocab) == 103)
assert (len(list(entity_vocab)) == 103)
assert ('United States' in entity_vocab)
assert (entity_vocab['[PAD]'] == 0)
assert (entity_vocab['United States'] == 4)
assert (entity_vocab.get_id('United States') == 4)
assert (e... |
def side_branch(x, factor):
    """Project ``x`` to a single channel, then upsample it ``factor``x with a
    learned transposed convolution (kernel size 2*factor, 'same' padding)."""
    projected = Conv2D(1, (1, 1), activation=None, padding='same')(x)
    up_kernel = (2 * factor, 2 * factor)
    upsampled = Conv2DTranspose(1, up_kernel, strides=factor, padding='same',
                                use_bias=False, activation=None)(projected)
    return upsampled
def add_arguments(parser):
    """Register this tool's CLI options on ``parser`` and return it (fluent)."""
    argument_specs = (
        (('file',), {'help': 'path to input star file'}),
        (('-o', '--output'), {'help': 'output file (default: stdout)'}),
    )
    for names, options in argument_specs:
        parser.add_argument(*names, **options)
    return parser
_utils.test(ti.cpu)
def test_expr_dict_basic():
    # Checks that dict-literal expressions index correctly inside a
    # (presumably @test_utils.test(ti.cpu)-decorated) Taichi function; the
    # decorator on the preceding source line appears truncated in extraction.
    def func(u: int, v: float) -> float:
        x = {'foo': (2 + u), 'bar': (3 + v)}
        # foo = 2+2 = 4, bar = 3+0.1 = 3.1 -> 4*100 + 3.1 = 403.1
        return ((x['foo'] * 100) + x['bar'])
    assert (func(2, 0.1) == test_utils.approx(403.1))
def label_array(item: Generated, name: str, explode: (bool | None)) -> None:
    """Rewrite ``item[name]`` as a label-style ('.'-prefixed) string.

    Values are '.'-joined when exploded and ','-joined otherwise (the leading
    '.' prefix applies in both cases); an empty value collapses to ''.
    Mutates ``item`` in place and returns None.
    """
    separator = '.' if explode else ','
    values = force_iterable(item[name] or ())
    joined = separator.join(str(value) for value in values)
    item[name] = f'.{joined}' if joined else ''
def resnet18_scs(pretrained: bool=False, progress: bool=True, **kwargs: Any) -> ResNet:
    """ResNet-18 ('scs' variant) built via the shared _resnet factory.

    Args:
        pretrained: whether the factory should load pretrained weights.
        progress: whether to show a download progress bar.
        **kwargs: forwarded to _resnet / the ResNet constructor.
    """
    return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, **kwargs)
class LRS2Main(Dataset):
def __init__(self, dataset, datadir, reqInpLen, charToIx, stepSize, videoParams):
super(LRS2Main, self).__init__()
with open((((datadir + '/') + dataset) + '.txt'), 'r') as f:
lines = f.readlines()
self.datalist = [((datadir + '/main/') + line.strip().spl... |
def bn_folding_resblock(x, maps, kernel=(3, 3), pad=(1, 1), stride=(1, 1), channel_last=False, name='convblock', dims=2):
h = x
kernel = ((3,) * dims)
pad = ((1,) * dims)
stride = ((1,) * dims)
with nn.parameter_scope(name):
h = PF.convolution(h, maps, kernel=kernel, pad=pad, stride=stride, ... |
def fit_iaml_ranger(key='iaml_ranger', **kwargs):
tfms = {}
[tfms.update({k: ContTransformerInt}) for k in ['nf']]
[tfms.update({k: ContTransformerRange}) for k in ['mmce', 'f1', 'auc', 'mec']]
[tfms.update({k: ContTransformerLogRange}) for k in ['timetrain', 'timepredict', 'ramtrain', 'rammodel', 'ramp... |
class ROIHeadsTest(unittest.TestCase):
def test_roi_heads(self):
torch.manual_seed(121)
cfg = get_cfg()
cfg.MODEL.ROI_HEADS.NAME = 'StandardROIHeads'
cfg.MODEL.ROI_BOX_HEAD.NAME = 'FastRCNNConvFCHead'
cfg.MODEL.ROI_BOX_HEAD.NUM_FC = 2
cfg.MODEL.ROI_BOX_HEAD.POOLER_TYP... |
def reshape_dependency_tree_new(as_start, as_end, dependencies, multi_hop=False, add_non_connect=False, tokens=None, max_hop=5):
dep_tag = []
dep_idx = []
dep_dir = []
for i in range(as_start, as_end):
for dep in dependencies:
if (i == (dep[1] - 1)):
if ((((dep[2] - 1... |
class fftw_threads_info(fftw_info):
    # numpy.distutils-style system_info entry describing the threaded FFTW
    # libraries: discovery is driven by the 'fftw' config section and the
    # FFTW environment variable inherited from fftw_info.
    section = 'fftw'
    dir_env_var = 'FFTW'
    ver_info = [{'name': 'fftw threads', 'libs': ['rfftw_threads', 'fftw_threads'], 'includes': ['fftw_threads.h', 'rfftw_threads.h'], 'macros': [('SCIPY_FFTW_THREADS_H', None)]}]
_module
class Solarization(object):
'Solarization augmentation in BYOL
def __init__(self, threshold=128):
self.threshold = threshold
def __call__(self, img):
img = np.array(img)
img = np.where((img < self.threshold), img, (255 - img))
return Image.fromarray(img.astype(np.uin... |
def extract_all_comparison_from_node(node: Token) -> List[Comparison]:
    """Recursively collect every Comparison token within ``node``
    (including ``node`` itself), children first, depth-first.
    """
    comparison_list = []
    if hasattr(node, 'tokens'):
        for child in node.tokens:
            comparison_list.extend(extract_all_comparison_from_node(child))
    # FIX: isinstance instead of `type(node) == Comparison` (PEP 8) so that
    # Comparison subclasses are also collected.
    if isinstance(node, Comparison):
        comparison_list.append(node)
    return comparison_list
def graphGroundTruthPreProcess(graph):
for it in range(40):
(cp, adj) = locate_stacking_road(graph)
if (((it % 5) == 0) and (it != 0)):
graph = apply_adjustment_delete_closeby_nodes(graph, adj)
else:
(graph, c) = apply_adjustment(graph, adj)
if (c == 0):
... |
class Conv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, groups=1, bias=False, act=True):
super().__init__()
padding = (kernel_size // 2)
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=groups, ... |
def ResNet50(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, **kwargs):
global backend, layers, models, keras_utils
(backend, layers, models, keras_utils) = get_submodules_from_kwargs(kwargs)
if (not ((weights in {'imagenet', None}) or os.path.exists(we... |
class Discriminator(object):
def __init__(self, x_dim=784):
self.x_dim = x_dim
self.name = 'mnist/dcgan/d_net'
def __call__(self, x, reuse=True):
with tf.variable_scope(self.name) as vs:
if reuse:
vs.reuse_variables()
bs = tf.shape(x)[0]
... |
def shape_inner_product(node):
    # Inner-product (fully connected) layers collapse spatial dimensions:
    # the output keeps the sole parent's batch size, has num_output channels,
    # and 1x1 spatial extent.
    input_shape = node.get_only_parent().output_shape
    return TensorShape(input_shape.batch_size, node.layer.parameters.num_output, 1, 1)
class TestF77ReturnComplex(TestReturnComplex):
code = '\n function t0(value)\n complex value\n complex t0\n t0 = value\n end\n function t8(value)\n complex*8 value\n complex*8 t8\n t8 = value\n end\n function t16(value)\n complex*... |
class TimeoutHooks():
(firstresult=True)
def pytest_timeout_set_timer(item, settings):
(firstresult=True)
def pytest_timeout_cancel_timer(item): |
class TestImageListDataset(unittest.TestCase):
def test_image_list_dataset(self):
(height, width) = (720, 1280)
with temp_image(height, width) as image_fpath:
image_list = [image_fpath]
category_list = [None]
dataset = ImageListDataset(image_list, category_list)
... |
def surrogate_ber_check(loader):
    """Mean negative log of the posterior assigned to each sample's true label.

    Each batch from ``loader`` is ``(_, labels, conf)`` where ``conf`` holds
    per-class confidence vectors and ``labels`` the true class indices.
    """
    posteriors = [
        sample_conf[label].item()
        for _, labels, conf in loader
        for sample_conf, label in zip(conf, labels)
    ]
    return np.mean(-np.log(np.array(posteriors)))
class FPN(nn.Module):
def __init__(self, in_channels, out_channels, num_outs, start_level=0, end_level=(- 1), bn=True):
super(FPN, self).__init__()
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_chann... |
def accel_decel(clip, new_duration=None, abruptness=1.0, soonness=1.0):
    """Time-warp ``clip`` with an accelerate/decelerate profile.

    Args:
        clip: the clip whose timeline is remapped.
        new_duration: target duration; defaults to the clip's own duration.
        abruptness: sharpness of the speed transition.
        soonness: how early in the clip the transition happens.
    """
    if new_duration is None:
        new_duration = clip.duration

    def time_map(t):
        return f_accel_decel(t, clip.duration, new_duration, abruptness, soonness)

    warped = clip.fl_time(time_map)
    return warped.set_duration(new_duration)
def main():
    # CLI entry point: parse command-line arguments and run the directory
    # processor over the input directory with the from_html converter
    # (presumably converting HTML sources — confirm against from_html),
    # writing results into the output directory.
    args = get_args()
    DirectoryProcessor.process(args.input_dir, args.output_dir, from_html)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.