code stringlengths 101 5.91M |
|---|
class SectionFreeModule(FiniteRankFreeModule):
Element = TrivialSection
def __init__(self, vbundle, domain):
from .scalarfield import ScalarField
self._domain = domain
name = 'C^0({};{})'.format(domain._name, vbundle._name)
latex_name = 'C^0({};{})'.format(domain._latex_name, vbu... |
class BitVecSortRef(SortRef):
def size(self):
return int(Z3_get_bv_sort_size(self.ctx_ref(), self.ast))
def subsort(self, other):
return (is_bv_sort(other) and (self.size() < other.size()))
def cast(self, val):
if is_expr(val):
if z3_debug():
_z3_assert((s... |
class AltCLIPTextModel(metaclass=DummyObject):
    """Import-guard stub standing in for AltCLIPTextModel.

    Instantiating it delegates to requires_backends, which presumably raises
    when torch is not installed — confirm against transformers.utils.
    """
    # Backends that must be available before the real class can be used.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        # Fails fast instead of deferring an import error to first use.
        requires_backends(self, ['torch']) |
class Test_test_RansacCircleHelper(unittest.TestCase):
def test_When_get_inliers_and_all_points_on_circumfrence_and_no_exclusion_list(self):
p1 = Point((+ 1), 0)
p2 = Point((+ 0), 1)
p3 = Point((- 1), 0)
list_of_points = list()
list_of_points.append(p1)
list_of_points... |
def CalculateMoranAutoAvFlexibility(ProteinSequence):
    """Moran autocorrelation descriptors of the sequence based on the
    average-flexibility amino-acid property table."""
    return CalculateEachMoranAuto(ProteinSequence, _AvFlexibility, '_AvFlexibility')
def main(output_dir, viz_progress=False):
scenario = 'lift'
if (scenario == 'spin'):
n_frames_per_episode = 198
else:
n_frames_per_episode = 198
total_budget = 400
plan_horizon = 6
n_plan_iterations = 20
frame_skip = 1
action_mode = 'RGB_asym'
sampler = 'uniform'
... |
def register_methods(root_module):
register_Ns3Address_methods(root_module, root_module['ns3::Address'])
register_Ns3ApplicationContainer_methods(root_module, root_module['ns3::ApplicationContainer'])
register_Ns3AsciiTraceHelper_methods(root_module, root_module['ns3::AsciiTraceHelper'])
register_Ns3Asc... |
class TinyImageNetDataset(Dataset):
NUM_IMAGES_PER_CLASS = 500
def __init__(self, data_folder: Path, data_type='train', transform=None):
self.data_type = data_type
self._common_data_folder = data_folder
self._data_folder = os.path.join(data_folder, data_type)
self.labels = {}
... |
def adapt(config):
def get_dataloader(dataset, batch_size, shuffle=False, pin_memory=True, collation=None):
if (collation is None):
collation = CollateFN()
return DataLoader(dataset, batch_size=batch_size, collate_fn=collation, shuffle=shuffle, num_workers=config.pipeline.dataloader.num_... |
def test_make_nn_regression():
(X, y, w) = make_nn_regression(n_samples=10, n_features=50, n_informative=5)
assert (X.shape[0] == 10)
assert (X.shape[1] == 50)
assert (y.shape[0] == 10)
assert (w.shape[0] == 50)
assert (np.sum((X.data != 0)) == (10 * 5))
(X, y, w) = make_nn_regression(n_samp... |
# NOTE(review): decorator '@' markers appear stripped by extraction — this was
# presumably `@test_utils.test(...)` and `func` presumably carried `@ti.kernel`;
# confirm against the original Taichi test file.
_utils.test(require=ti.extension.assertion, debug=True, gdb_trigger=False)
def test_out_of_bound():
    # 2-D field of shape (8, 16); valid indices on axis 1 are 0..15.
    x = ti.field(ti.i32, shape=(8, 16))
    def func():
        # Index 16 is one past the end of axis 1 — an out-of-bound write.
        x[(3, 16)] = 1
    # With assertion support enabled, the out-of-bound access must raise.
    with pytest.raises(RuntimeError):
        func() |
def test_exact_thompson_samples_are_minima() -> None:
search_space = Box([0, 0], [1, 1])
x_range = tf.linspace(0.0, 1.0, 5)
x_range = tf.cast(x_range, dtype=tf.float64)
xs = tf.reshape(tf.stack(tf.meshgrid(x_range, x_range, indexing='ij'), axis=(- 1)), ((- 1), 2))
ys = quadratic(xs)
dataset = Da... |
def rand_saturation(x):
    """Randomly rescale per-sample saturation of a (B, C, H, W) batch.

    Each sample's channels are pulled toward / pushed away from their
    channel mean by a factor drawn uniformly from [0, 2).
    """
    mean = x.mean(dim=1, keepdim=True)
    factor = torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) * 2
    return (x - mean) * factor + mean
class TestImage(unittest.TestCase):
def setUp(self) -> None:
directory = os.path.dirname(os.path.abspath(__file__))
trainset = torchvision.datasets.CIFAR10(root=os.path.join(directory, '../datasets/tmp'), train=True, download=True)
self.cifar = trainset.data
trainset = torchvision.da... |
def bitmask(words, state):
    """Build an integer bitmask with bit i set when words[i] == state.

    Args:
        words: iterable of items; position i maps to bit i.
        state: value compared (==) against each item.

    Returns:
        int with one bit set per matching position.
    """
    # Renamed the loop variable from `type` (which shadowed the builtin) and
    # replaced the manual OR-accumulation with a sum of distinct powers of two.
    return sum(1 << bit for bit, kind in enumerate(words) if kind == state)
def _unsharp_mask_single_channel(image, radius, amount, vrange):
    """Sharpen one channel by adding `amount` times the detail (image - blur).

    When `vrange` is given as (low, high), the result is clipped in place
    to that intensity range before being returned.
    """
    blurred = gaussian(image, sigma=radius, mode='reflect')
    sharpened = image + (image - blurred) * amount
    if vrange is None:
        return sharpened
    return np.clip(sharpened, vrange[0], vrange[1], out=sharpened)
class BaseNet(nn.Module):
def __init__(self):
super().__init__()
self.logger = logging.getLogger(self.__class__.__name__)
self.rep_dim = None
def forward(self, *input):
raise NotImplementedError
def summary(self):
net_parameters = filter((lambda p: p.requires_grad), s... |
def rect1_2_cxy_wh(rect):
    """Convert a 1-indexed [x, y, w, h] rectangle to 0-indexed
    (center_xy, width_height) numpy arrays."""
    x, y, w, h = rect[0], rect[1], rect[2], rect[3]
    center = np.array([x + w / 2 - 1, y + h / 2 - 1])
    size = np.array([w, h])
    return (center, size)
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[(int, int)], stream: bool=False):
acc = b''
(stride_left, stride_right) = stride
if ((stride_left + stride_right) >= chunk_len):
raise ValueError(f'Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chu... |
def create_eval_script():
eval_log_dir = (log_dir / 'evaluation')
os.makedirs(eval_log_dir, exist_ok=True)
eval_sbatch_script = Path('./sbatch_eval.sh').absolute()
eval_file = (args.train_file.parent / 'evaluation/evaluate_policy.py')
dataset_path = next(filter((lambda x: (x.split('=')[0] == 'datamo... |
def remap_subset_by_sameness(x, y, sameness):
    """Keep the vars in `x` that have at least one 'same' counterpart in `y`,
    then remap that filtered subset onto `y` via the sameness relation."""
    subset = [var for var in x
              if any(same_var in y for same_var in sameness[var])]
    subset_x = filter_layouts(x, subset)
    return remap_vars_by_sameness(subset_x, y, sameness)
def test_npartition_type() -> None:
    """Smoke test: clean_phone must handle messy/invalid phone strings.

    Bug fix: the original list literal contained a stray double comma
    (`'555/234/5678', ,`), which is a SyntaxError — an entry was apparently
    lost during extraction, so the empty slot is removed here.
    """
    df = pd.DataFrame({'phone': ['', '(', '', '555/234/5678', '(1) ', '+1 ( x. 1234', ' extension 1234', '2345678', '800-299-JUNK', '1-866-4ZIPCAR', '123 ABC COMPANY', '+66 91 889 8948', 'hello', np.nan, 'NULL']})
    clean_phone(df, 'phone')
class CrystalHomset(Homset):
def __init__(self, X, Y, category=None):
if (category is None):
category = Crystals()
Homset.__init__(self, X, Y, category)
def _repr_(self):
return 'Set of Crystal Morphisms from {} to {}'.format(self.domain(), self.codomain())
def _coerce_im... |
class LayerNorm(nn.Module):
__constants__ = ['normalized_shape', 'weight', 'bias', 'eps', 'elementwise_affine']
def __init__(self, normalized_shape, eps=1e-05, elementwise_affine=True):
super(LayerNorm, self).__init__()
if isinstance(normalized_shape, numbers.Integral):
normalized_sh... |
class RecStepByStepLayer(RecLayer):
layer_class = 'rec_step_by_step'
SubnetworkRecCell = SubnetworkRecCellSingleStep
class ConstructionState():
GetSources = Entity('get_sources')
Init = Entity('init')
InLoop = Entity('in_body')
def prepare_compile(cls, rec_layer_name, net_dict, o... |
def conv_nxn_with_init(in_channels, out_channels, kernel_size, stride, padding, bias):
layer_ = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias)
nn.init.xavier_normal(layer_.weight, gain=1.0)
if bias:
nn.init.constant(l... |
def download_youtube(url, dst_dir, dst_filename=None, keep_video=False):
ydl_opts = {'format': 'mp4', 'restrictfilenames': True, 'keepvideo': keep_video, 'postprocessors': [{'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3', 'preferredquality': '192'}]}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
rt ... |
def test_feature_hasher_strings():
raw_X = [['foo', 'bar', 'baz', 'foo'.encode('ascii')], ['bar'.encode('ascii'), 'baz', 'quux']]
for lg_n_features in (7, 9, 11, 16, 22):
n_features = (2 ** lg_n_features)
it = (x for x in raw_X)
feature_hasher = FeatureHasher(n_features=n_features, input... |
class CartanType(CartanType_standard_finite, CartanType_simple, CartanType_crystallographic):
def __init__(self, n):
assert (n >= 1)
CartanType_standard_finite.__init__(self, 'B', n)
if (n == 1):
self._add_abstract_superclass(CartanType_simply_laced)
def _latex_(self):
... |
.parametrize('value1, value2, expected_lines', [pytest.param(0, 1, OrderedSet([14, 17])), pytest.param(1, 0, OrderedSet([14, 15]))])
def test_tracking_covered_statements_cmp_predicate(simple_module, value1, value2, expected_lines):
tracer = ExecutionTracer()
adapter = LineCoverageInstrumentation(tracer)
tra... |
def evaluate_from_json_simple(json_file, reference_file, top_k=10, score_key='per_word_ll', code_entry='clean_code'):
json_file = (Path(json_file) if (not isinstance(json_file, Path)) else json_file)
with open(json_file, 'r') as f:
all_pred = json.load(f)
all_pred = dedup_results(all_pred)
w... |
def get_playlist_tracks(id_):
    """Return the track ids of the Spotify playlist `id_`.

    Note: only the first page of results returned by `sp.user_playlist`
    is read here.
    """
    items = sp.user_playlist('spotify', id_)['tracks']['items']
    return [item['track']['id'] for item in items]
def eg_rule_action2(memories_info: List['MemoryInfo'], args: Arguments):
mid = args['mid']
path = args['path']
index = args['index']
memories = [info.memory for info in memories_info]
memory = memories[0]
protocol = EntanglementGenerationA(None, ('EGA.' + memory.name), mid, path[(index + 1)], me... |
class FlaxXLMRobertaForCausalLM(metaclass=DummyObject):
    """Import-guard stub standing in for FlaxXLMRobertaForCausalLM.

    Instantiating it delegates to requires_backends, which presumably raises
    when flax is not installed — confirm against transformers.utils.
    """
    # Backends that must be available before the real class can be used.
    _backends = ['flax']
    def __init__(self, *args, **kwargs):
        # Fails fast instead of deferring an import error to first use.
        requires_backends(self, ['flax']) |
def GetDataDir():
    """Return the absolute path of the 'data' directory next to this module.

    Bug fix: the original called realpath('__file__') with the *string*
    literal '__file__', which resolves relative to the current working
    directory instead of this source file's location.
    """
    from os.path import dirname, realpath, join
    dataDir = join(dirname(realpath(__file__)), 'data')
    return dataDir
def quadratic_entropy(example, train_term_dist, word2id, word2vec):
assert (word2vec is not None), 'Error: Word vector representations have to be available for quadratic entropy.'
summed = 0
for word_1 in set(example):
if ((word_1 not in word2id) or (word_1 not in word2vec)):
continue
... |
def remove_spectral_norm(module: T_module, name: str='weight') -> T_module:
for (k, hook) in module._forward_pre_hooks.items():
if (isinstance(hook, SpectralNorm) and (hook.name == name)):
hook.remove(module)
del module._forward_pre_hooks[k]
break
else:
raise ... |
def read_keyframe_helper_data(fpath: str):
video_id_to_keyframes = {}
try:
with PathManager.open(fpath, 'r') as io:
csv_reader = csv.reader(io)
header = next(csv_reader)
video_id_idx = header.index('video_id')
keyframes_idx = header.index('keyframes')
... |
def test_orders_gauss():
arr = np.zeros((1,))
assert_equal(0, sndi.gaussian_filter(arr, 1, order=0))
assert_equal(0, sndi.gaussian_filter(arr, 1, order=3))
assert_raises(ValueError, sndi.gaussian_filter, arr, 1, (- 1))
assert_equal(0, sndi.gaussian_filter1d(arr, 1, axis=(- 1), order=0))
assert_e... |
def restore_func(name):
    """Restore a previously saved 'dual function' into this module's globals.

    Raises ValueError when `name` is not listed in __all__; silently does
    nothing when there is no saved value for `name` in _restore_dict.
    """
    if (name not in __all__):
        raise ValueError('{} not a dual function.'.format(name))
    try:
        val = _restore_dict[name]
    except KeyError:
        # Nothing was saved under this name — best-effort no-op by design.
        return
    else:
        # Write back through the current frame's globals; frame 0 is this
        # function's own frame, so f_globals is this module's namespace.
        sys._getframe(0).f_globals[name] = val |
class MultiVocab(BaseMultiVocab):
def state_dict(self):
state = OrderedDict()
key2class = OrderedDict()
for (k, v) in self._vocabs.items():
state[k] = v.state_dict()
key2class[k] = type(v).__name__
state['_key2class'] = key2class
return state
def l... |
def default_init(scale: float = 1.0):
    """Variance-scaling initializer (fan_avg, uniform) with the given scale.

    The annotation was `Optional[float]` although None is never a valid
    value here; tightened to a plain `float` — callers are unaffected.
    """
    return nn.initializers.variance_scaling(scale, 'fan_avg', 'uniform')
def gs_interpolation_lee_osullivan(points, tau, parameters, wy):
from .utils import _degree_of_vector
(s, l) = (parameters[0], parameters[1])
F = points[0][0].parent()
M = lee_osullivan_module(points, (s, l), wy)
shifts = [(i * wy) for i in range(0, (l + 1))]
Mnew = M.reduced_form(shifts=shifts)... |
def affine_im(face, transpix=[10, 5], angle=10):
im1 = skimage.transform.rotate((255 - binary(face)), angle)
translation_matrix = np.float32([[1, 0, transpix[0]], [0, 1, transpix[1]]])
im1 = warpAffine(im1, translation_matrix, (face.shape[0], face.shape[1]))
im1 = (im1 / np.max(im1))
return ((1 - im... |
def constant_init(module, val, bias=0):
    """Fill module.weight with `val` and, when a bias tensor exists,
    fill module.bias with `bias`."""
    nn.init.constant_(module.weight, val)
    # getattr with a None default covers both "no attribute" and "bias=None".
    if getattr(module, 'bias', None) is not None:
        nn.init.constant_(module.bias, bias)
def segmented_tensor(min_dim=1, max_dim=4, dtype=np.float32, is_sorted=True, elements=None, segment_generator=segment_ids, allow_empty=False, **kwargs):
gen_empty = (st.booleans() if allow_empty else st.just(False))
data_dims_ = st.lists(dims(**kwargs), min_size=min_dim, max_size=max_dim)
data_dims_ = st.tu... |
def export_target(merged_file: str, json_file: str, binary_file: str, shared_library_list: List[str], platform_type: TestPlatform) -> None:
if (binary_file is None):
raise Exception(f"{merged_file} doesn't have corresponding binary!")
print_log('start to export: ', merged_file)
cmd_shared_library = ... |
(scope='module')
def csvy_model_test_abundances(example_csvy_file_dir, atomic_dataset):
csvypath = (example_csvy_file_dir / 'csvy_model_to_test_abundances.yml')
config = Configuration.from_yaml(csvypath)
csvy_model_test_abundances = SimulationState.from_csvy(config, atom_data=atomic_dataset)
return csvy... |
def test_naming_scope_known_indices_not_empty(naming_scope):
    """After one get_name call, the scope maps that object to 'var_0'."""
    obj = 'something'
    naming_scope.get_name(obj)
    assert dict(naming_scope) == {obj: 'var_0'}
def random_indices(y, nclass=10, intraclass=False, device='cuda'):
n = len(y)
if intraclass:
index = torch.arange(n).to(device)
for c in range(nclass):
index_c = index[(y == c)]
if (len(index_c) > 0):
randidx = torch.randperm(len(index_c))
... |
def create_training_data(in_path, out_path):
with open(in_path) as fin:
text_lines = TextLine.from_lines([line for line in fin])
if (len(text_lines) == 0):
raise RuntimeError(f'No text boxes found for document "{in_path}".')
with open(out_path, 'w') as fout:
for line in text_lines:
... |
def read_search_examples(filename):
examples = []
with open(filename, encoding='utf-8') as f:
for (idx, line) in enumerate(f):
line = line.strip()
js = json.loads(line)
if ('idx' not in js):
js['idx'] = idx
if ('function_tokens' in js):
... |
class RobertaOnnxConfig(OnnxConfig):
    """ONNX export configuration for RoBERTa-style models."""
    # NOTE(review): in upstream transformers this method is a @property;
    # the decorator marker appears stripped by extraction — confirm.
    def inputs(self) -> Mapping[(str, Mapping[(int, str)])]:
        # Input spec: batch (axis 0) and sequence (axis 1) are dynamic axes
        # for both input_ids and attention_mask.
        return OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ('attention_mask', {0: 'batch', 1: 'sequence'})]) |
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--fn', default='assets/demo.jpg', type=str)
parser.add_argument('--output', default='output', type=str)
parser.add_argument('--inres', default='512,512', type=str)
(args, _) = parser.parse_known_args()
args.inres = tuple((int(x)... |
class ParameterGroup(enum.Enum):
filtering = ('Testing scope', 'Customize the scope of the API testing.')
validation = ('Response & Schema validation', 'These options specify how API responses and schemas are validated.')
hypothesis = ('Hypothesis engine', 'Configuration of the underlying Hypothesis engine.... |
def stella_model_example_file1():
    """Load the bundled 'mesa.stella.dat' example STELLA model file."""
    model_path = MODEL_DATA_PATH / 'mesa.stella.dat'
    return read_stella_model(model_path)
def rref_forward_chain(dst, world_size, rref, ttl):
if (ttl > 0):
current_dst = worker_name(dst)
next_dst = ((dst + 1) % world_size)
ret_rref = rpc.remote(current_dst, rref_forward_chain, args=(next_dst, world_size, rref, (ttl - 1)))
return [ret_rref]
else:
return rref.to... |
class Augmentor():
def __init__(self, rand_rotate_angle=False, data_aug_flip=False, color_aug=False, gaussian_blur=False, motion_blur=False):
self.rand_rotate_angle = rand_rotate_angle
self.data_aug_flip = data_aug_flip
self.color_aug = color_aug
self.gaussian_blur = gaussian_blur
... |
def symbolic_inputs(func: T.Callable, input_types: T.Sequence[T.ElementOrType]=None) -> Values:
parameters = [p for p in inspect.signature(func).parameters.values() if (p.kind in (inspect.Parameter.POSITIONAL_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD))]
if (input_types is None):
input_types = deduce... |
class BaseLogisticPolicy(BaseContextualPolicy):
alpha_: float = 1.0
lambda_: float = 1.0
def __post_init__(self) -> None:
super().__post_init__()
check_scalar(self.alpha_, 'alpha_', float)
if (self.alpha_ <= 0.0):
raise ValueError(f'`alpha_`= {self.alpha_}, must be > 0.0.... |
class StoppingCondition(ABC):
    """Interface for early-stopping checks on trainer results."""
    # NOTE(review): likely meant to carry @abstractmethod — the decorator may
    # have been stripped by extraction. As written, subclasses inherit a
    # no-op that returns None (falsy, i.e. "do not stop"); confirm upstream.
    def should_stop(self, latest_trainer_result: dict, *args, **kwargs) -> bool:
        """Return True when training should stop given the latest result."""
        pass |
def map_obj(boxes_value: List[List[float]], boxes_seq: List[List[int]]) -> List[List[List[float]]]:
try:
ret = []
for boxes in boxes_seq:
boxes_ret = []
for box_index in boxes:
if isinstance(box_index, (list, tuple)):
boxes_ret.append(boxes... |
class ScalarTensorTest(BasePytorchTest):
    """PyTorch test case that feeds 3x32x32 batches into ScalarTensorNet."""

    def __init__(self, unit_test):
        super().__init__(unit_test)

    def create_inputs_shape(self):
        # A single input of shape (batch, channels=3, height=32, width=32).
        shape = [self.val_batch_size, 3, 32, 32]
        return [shape]

    def create_feature_network(self, input_shape):
        # The network under test; input_shape is not needed to build it.
        return ScalarTensorNet()
class StaticRoutingProtocol(StackProtocol):
def __init__(self, own: 'Node', name: str, forwarding_table: Dict):
super().__init__(own, name)
self.forwarding_table = forwarding_table
def add_forwarding_rule(self, dst: str, next_node: str):
assert (dst not in self.forwarding_table)
... |
class R2L(nn.Module):
def __init__(self, args, input_dim, output_dim):
super(R2L, self).__init__()
self.args = args
self.input_dim = input_dim
(D, W) = (args.netdepth, args.netwidth)
Ws = (([W] * (D - 1)) + [3])
act = get_activation(args.activation_fn)
self.he... |
def test():
    """The typetracer of array + array must report the same layout form
    as the concrete computation."""
    arr = ak.Array([1, [2, 3]])
    concrete = arr + arr
    tracer = ak.Array(arr.layout.to_typetracer(forget_length=True))
    traced = tracer + tracer
    assert traced.layout.form == concrete.layout.form
# NOTE(review): decorator '@' markers appear stripped by extraction — this
# line was presumably a decorator (and a dace program decorator may be
# missing too); confirm against the original DaCe test suite.
_numpy_output()
def test_sum_multiple_axes(A: dace.float64[(10, 5, 3)]):
    # NOTE(review): despite the name saying "sum", this reduces with np.mean
    # over the last and first axes — verify which is intended upstream.
    return np.mean(A, axis=((- 1), 0)) |
def load_mnist8():
    """Load MNIST and binarize the labels: 1 for digit 8, 0 otherwise.

    The label arrays returned by load_mnist are modified in place.
    """
    X_train, y_train, X_test, y_test = load_mnist()
    for labels in (y_train, y_test):
        eights = (labels == 8)
        labels[eights] = 1
        labels[~eights] = 0
    return (X_train, y_train, X_test, y_test)
def add_gaussian_noise_to_pcloud(pcloud, mu=0, sigma=1):
    """Add per-point Gaussian noise to an (N, 3) point cloud, in place.

    One sample is drawn per point and applied identically to all three
    coordinates. The mutated input array is also returned.
    """
    noise = np.random.normal(mu, sigma, pcloud.shape[0])
    # Broadcasting (N, 1) against (N, 3) replicates each sample across the
    # axes, matching the original tile-and-transpose construction.
    pcloud += noise[:, np.newaxis]
    return pcloud
def test_load_audio():
    """Clip '17' must decode to a mono 44.1 kHz ndarray of known length."""
    dataset = fsdnoisy18k.Dataset(TEST_DATA_HOME)
    path = dataset.clip('17').audio_path
    audio, sr = fsdnoisy18k.load_audio(path)
    assert sr == 44100
    assert type(audio) is np.ndarray
    assert audio.ndim == 1
    assert len(audio) == 47786
def main():
parser = argparse.ArgumentParser('Produce optimization output baseline from PyTorch')
parser.add_argument('-i', '--iterations', default=1001, type=int)
parser.add_argument('-s', '--sample-every', default=100, type=int)
options = parser.parse_args()
optimizer_parameter_map = {}
for op... |
def panoptic_to_lidarseg(panoptic_labels: np.ndarray) -> np.ndarray:
    """Strip instance ids from panoptic labels, keeping the semantic class.

    The ids appear to encode `class * 1000 + instance`; integer division by
    1000 recovers the class, which is cast down to uint8.
    """
    semantic = panoptic_labels // 1000
    return semantic.astype(np.uint8)
def replace_value(line, key, new_value):
    """Replace the value token that follows `key` in a metric line.

    The line is first normalized via standardize_metricline; the current
    value is the first whitespace-separated token after the key.
    """
    line = standardize_metricline(line)
    old_value = line.split(key)[1].strip().split()[0]
    return line.replace(f' {key} {old_value} ', f' {key} {new_value} ')
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module, data_loader: Iterable, optimizer: torch.optim.Optimizer, device: torch.device, epoch: int, loss_scaler, max_norm: float=0, model_ema: Optional[ModelEma]=None, mixup_fn: Optional[Mixup]=None, log_writer=None, start_steps=None, lr_schedule_values=Non... |
class GAT(torch.nn.Module):
def __init__(self, num_features, num_classes, dims, drop=0.0):
super(GAT, self).__init__()
heads = 8
self.conv1 = GATConv(num_features, dims, heads=heads, dropout=0.3, concat=False)
self.conv2 = GATConv(dims, num_classes, heads=heads, concat=False, dropout... |
class BERTDataset(Dataset):
def __init__(self, corpus_path, tokenizer, seq_len, encoding='utf-8', corpus_lines=None, on_memory=True, mlm_loss=False):
self.tokenizer = tokenizer
self.seq_len = seq_len
self.on_memory = on_memory
self.corpus_lines = corpus_lines
self.corpus_path... |
class NonPersonalizedRecommender(Recommender, ABC):
can_predict_cold_users = True
can_predict_cold_items = True
item_popularity: SparkDataFrame
add_cold_items: bool
cold_weight: float
sample: bool
fill: float
seed: Optional[int] = None
def __init__(self, add_cold_items: bool, cold_we... |
class BasicBlock(nn.Module):
expansion: int = 1
def __init__(self, in_ch: int, out_ch: int, s: int=1, downsample: Optional[nn.Module]=None) -> None:
super().__init__()
self.conv1 = nn.Conv2d(in_ch, out_ch, 3, s, 1, bias=False)
self.bn1 = nn.BatchNorm2d(out_ch)
self.relu = nn.ReLU... |
class Swinv2Model(metaclass=DummyObject):
    """Import-guard stub standing in for Swinv2Model.

    Instantiating it delegates to requires_backends, which presumably raises
    when torch is not installed — confirm against transformers.utils.
    """
    # Backends that must be available before the real class can be used.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        # Fails fast instead of deferring an import error to first use.
        requires_backends(self, ['torch']) |
class A006882(SloaneSequence):
def __init__(self):
SloaneSequence.__init__(self, offset=0)
self._b = []
self._precompute(2)
def _repr_(self):
return 'Double factorials n!!: a(n)=n*a(n-2).'
def _precompute(self, how_many=10):
try:
f = self._f
except... |
def log(*args):
    """Write the space-joined str() of all arguments to stdout, with a
    trailing newline, flushing immediately."""
    print(' '.join(str(a) for a in args), flush=True)
class TestUtilAssertFloatEqInt(unittest.TestCase):
def test_success(self):
util.assert_float_eq_int(12.0, 12)
util.assert_float_eq_int(12.3, 12)
util.assert_float_eq_int(12.99, 12)
util.assert_float_eq_int(11.01, 12)
util.assert_float_eq_int((- 11.8), (- 12))
util.ass... |
class WordTokenizer(object):
def __init__(self, vocab=None, unk_token='[UNK]', pad_token='[PAD]'):
self.vocab = load_vocab(vocab)
self.inv_vocab = {v: k for (k, v) in self.vocab.items()}
self.unk_token = unk_token
self.pad_token = pad_token
if (not (pad_token in self.vocab)):... |
class DenseNASTrainTrial(PyTorchTrial):
def __init__(self, trial_context: PyTorchTrialContext) -> None:
self.context = trial_context
self.hparams = AttrDict(trial_context.get_hparams())
self.last_epoch = 0
pprint.pformat(config)
cudnn.benchmark = True
cudnn.enabled = ... |
def makeData(srcFile, srcDicts):
(src, tgt) = ([], [])
sizes = []
(count, ignored) = (0, 0)
print(('Processing %s ...' % srcFile))
srcF = codecs.open(srcFile, 'r', 'utf-8')
while True:
sline = srcF.readline()
if (sline == ''):
break
sline = sline.strip()
... |
def evaluate_bivariate(range, npoints):
    """Build an npoints x npoints evaluation grid over [range[0], range[1]]^2.

    Returns (z1, z2, zv): the two meshgrid coordinate matrices plus the
    flattened (npoints**2, 2) array of grid points.
    NOTE(review): the `range` parameter shadows the builtin; name kept to
    preserve the keyword-argument interface for existing callers.
    """
    lo, hi = range[0], range[1]
    side = np.linspace(lo, hi, npoints)
    z1, z2 = np.meshgrid(side, side)
    zv = np.hstack([z1.reshape(-1, 1), z2.reshape(-1, 1)])
    return (z1, z2, zv)
class PQ():
def __init__(self, seq):
from sage.sets.set import Set
self._children = []
for e in seq:
if isinstance(e, list):
e = Set(e)
if (e not in self._children):
self._children.append(e)
def reverse(self):
for i in self.... |
def get_pytorch_test_tpc_dict(tp_model, test_name, ftp_name):
    """Map `test_name` to a freshly generated PyTorch TPC named `ftp_name`."""
    tpc = generate_pytorch_tpc(name=ftp_name, tp_model=tp_model)
    return {test_name: tpc}
def launch_search(exp_config: Union[(List[str], str)], name: str, workers: int, gpus_per_worker: float, cpus_per_worker: float, eval_only: bool, samples: int, seed: int) -> None:
if (len(path.split(exp_config)[0]) > 0):
CFG_PATH = exp_config
else:
CFG_PATH = path.join(DEFAULT_CONFIG_DIR, exp_con... |
def compute_geodesic_distance(scene_id, start_pt, end_pt):
    """Geodesic distance between two points in the given scene.

    Note: a fresh GeodesicDistanceComputer is constructed on every call.
    """
    computer = GeodesicDistanceComputer()
    return computer.compute_distance(scene_id, start_pt, end_pt)
def register_Ns3LteCcmMacSapProvider_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::LteCcmMacSapProvider const &', 'arg0')])
cls.add_method('ReportMacCeToScheduler', 'void', [param('ns3::MacCeListElement_s', 'bsr')], is_pure_virtual=True, is_virtual=True)
cls.add_met... |
def find_filter_conditions(str_const, int_const, str_attr, int_attr, new_int_attr, aggrs, files, necessary_conditions, summarise_conditions):
conditions = []
int_ops = ['==', '>', '<', '>=', '<=']
str_ops = ['==', '!=']
happens_before = []
for sc in (str_const + int_const):
necessary_conditi... |
class IterativeRefinementGenerator(object):
def __init__(self, tgt_dict, eos_penalty=0.0, max_iter=10, max_ratio=2, decoding_format=None, retain_dropout=False, adaptive=True):
self.bos = tgt_dict.bos()
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos()
... |
def compute_flat_grad(output, inputs, filter_input_ids=set(), retain_graph=False, create_graph=False):
if create_graph:
retain_graph = True
inputs = list(inputs)
params = []
for (i, param) in enumerate(inputs):
if (i not in filter_input_ids):
params.append(param)
grads = ... |
class Conv1_1_Branch(nn.Module):
def __init__(self, in_ch, block_ch):
super(Conv1_1_Branch, self).__init__()
self.conv1_1 = nn.Sequential(nn.Conv2d(in_channels=in_ch, out_channels=block_ch, kernel_size=1, stride=1, padding=0, bias=False), nn.BatchNorm2d(block_ch, affine=False, track_running_stats=Tr... |
class TestTreeManager(unittest.TestCase):
def setUp(self):
self.tree_fname = os.path.join(os.path.dirname(__file__), 'test_tree_manager.yaml')
self.tm = TreeManager(task_config=TaskConfig(self.tree_fname))
def mock_leaf_handler(self, asr_out):
if (asr_out == 'yes'):
self.tm.u... |
def test_error_on_no_matches(testdir):
testdir.make_test('\(operation_id=["does-not-exist"])\(max_examples=1)\ndef test_(request, case):\n request.config.HYPOTHESIS_CASES += 1\n')
result = testdir.runpytest('-v')
if IS_PYTEST_ABOVE_54:
key = 'errors'
else:
key = 'error'
result.ass... |
def check_dtrs_protocols(input_name, input_pv):
program_pv = DTRS_PROTOCOL
(ppv_major, ppv_minor) = program_pv.split('.')
(ipv_major, ipv_minor) = input_pv.split('.')
if ((ppv_major != ipv_major) or (int(ppv_minor) < int(ipv_minor))):
msg = ('Incompatible dtrs_protocols: program: %s %s: %s' % (p... |
def __get_mccabe_complexity(tree: (AstroidFunctionDef | None)) -> (int | None):
    """McCabe complexity of an astroid function tree, or None when the tree
    is missing or cannot be converted/analyzed (SyntaxError)."""
    if tree is None:
        return None
    try:
        # Conversion and analysis both stay inside the try, matching the
        # original's error handling exactly.
        converted = astroid_to_ast(tree)
        return mccabe_complexity(converted)
    except SyntaxError:
        return None
def save_solved_result_in_db(solved_result, data_frame, variables, result_value_name, datasource, result_table):
column_names = []
for col in data_frame.columns:
found = False
for var in variables:
if (var.lower() == col.lower()):
found = True
break
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.