code stringlengths 101 5.91M |
|---|
def findFeatures(dom):
ret = {}
for i in findChildren(dom, ['registry', 'feature']):
n = i.getAttribute('name')
e = []
c = []
for j in findChildren(i, ['require', 'enum']):
e.append(j.getAttribute('name'))
for j in findChildren(i, ['require', 'command']):
... |
class DataloaderAffectnet_MultiTask(data.Dataset):
def __init__(self, img_size=128, exp_classes=7, is_transform=False):
self.img_size = img_size
self.is_transform = is_transform
self.transform = initAlignTransfer(self.img_size, crop_size=self.img_size)
self.exp_classes = exp_classes
... |
def build_scope(images, bottleneck_layer_size, shared_modules, scope_name, shared_scope_name, reuse=tf.AUTO_REUSE):
get_scope = (lambda x: (shared_scope_name if (x in shared_modules) else scope_name))
with tf.variable_scope(get_scope('conv1'), reuse=reuse):
print(tf.get_variable_scope().name)
ne... |
def progress_bar(iterable, desc=None, total=None, disable=False):
if disable:
return iterable
if (total is None):
if (not hasattr(iterable, '__len__')):
return iterable
total = len(iterable)
if sys.stderr.isatty():
return tqdm(iterable, desc=desc, total=total)
... |
def test_data_frame_filter():
array_x = ak.Array([{'x': [1.1, 1.2, 1.3]}, {'x': [2.1, 2.2]}, {'x': [3.1]}, {'x': [4.1, 4.2, 4.3, 4.4]}, {'x': [5.1]}])
array_y = ak.Array([1, 2, 3, 4, 5])
array_z = ak.Array([[1.1], [2.1, 2.3, 2.4], [3.1], [4.1, 4.2, 4.3], [5.1]])
df = ak.to_rdataframe({'x': array_x, 'y':... |
class TestEnvSpec():
def test_pickleable(self):
env_spec = EnvSpec(akro.Box((- 1), 1, (1,)), akro.Box((- 2), 2, (2,)))
round_trip = pickle.loads(pickle.dumps(env_spec))
assert round_trip
assert (round_trip.action_space == env_spec.action_space)
assert (round_trip.observation_... |
def _raise_not_supported(name):
raise ValueError('Method ``{}`` not supported for RemoteModule'.format(name)) |
def parse_cremona_label(label, numerical_class_code=False):
m = cremona_label_regex.match(str(label))
if (m is None):
m = old_cremona_label_regex.match(str(label))
if (m is None):
raise ValueError((label + ' is not a valid Cremona label'))
(conductor, iso, num) = m.groups()
i... |
def eval_seq(opt, dataloader, data_type, result_filename, save_dir=None, show_image=True, frame_rate=30, use_cuda=True):
if save_dir:
mkdir_if_missing(save_dir)
tracker = JDETracker(opt, frame_rate=frame_rate)
timer = Timer()
results = []
frame_id = 0
for (i, (path, img, img0)) in enumer... |
class BertForPretraining(BertPretrainedModel):
def __init__(self, bert):
super(BertForPretraining, self).__init__()
self.bert = bert
self.cls = BertPretrainingHeads(self.bert.config['hidden_size'], self.bert.config['vocab_size'], self.bert.config['hidden_act'], embedding_weights=self.bert.em... |
def batch_pix_accuracy(output, target):
(_, predict) = torch.max(output, 1)
predict = (predict.cpu().numpy().astype('int64') + 1)
target = (target.cpu().numpy().astype('int64') + 1)
pixel_labeled = np.sum((target > 0))
pixel_correct = np.sum(((predict == target) * (target > 0)))
assert (pixel_co... |
def register_Ns3Ipv6Address_methods(root_module, cls):
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
cls.add_constructor([])
cls.add_constructor([param('char const *', 'address')])
cl... |
class Inception(nn.Module):
def __init__(self, in_planes, n1x1, n3x3red, n3x3, n5x5red, n5x5, pool_planes):
super(Inception, self).__init__()
self.b1 = nn.Sequential(nn.Conv2d(in_planes, n1x1, kernel_size=1), nn.BatchNorm2d(n1x1), nn.Softplus(True))
self.b2 = nn.Sequential(nn.Conv2d(in_plane... |
_model()  # NOTE(review): decorator prefix looks truncated in this dump — presumably a goos model-registration decorator; confirm against the original file
class DipoleSource(SimSource):
    # Schema model describing a point dipole current source for an EM simulation.
    type = goos.ModelNameType('source.dipole_source')  # model-type discriminator string
    position = goos.Vec3d()  # dipole location in simulation coordinates
    axis = goos.types.IntType()  # polarization axis index (presumably 0/1/2 for x/y/z — TODO confirm)
    phase = goos.types.FloatType(default=0)  # phase offset of the source drive
    power = goos.types.FloatType(default=1)  # emitted power scale factor
class ProgBarCounter():
def __init__(self, total_count):
self.total_count = total_count
self.max_progress = 1000000
self.cur_progress = 0
self.cur_count = 0
if logger.has_output_type(dowel.StdOutput):
self.pbar = pyprind.ProgBar(self.max_progress)
else:
... |
def KD_Loss(old_features, features):
B = features.shape[0]
flat_loss = (F.cosine_embedding_loss(features.view(B, (- 1)), old_features.detach().view(B, (- 1)), torch.ones(features.shape[0]).to(features.device)) * lambda_f_base)
spatial_loss = (pod_spatial_lossv2([old_features], [features]) * lambda_c_base)
... |
class MLP(nn.Module):
def __init__(self, layer_sizes, final_relu=False, normalized_feat=False):
super().__init__()
self.normalized_feat = normalized_feat
layer_list = []
layer_sizes = [int(x) for x in layer_sizes]
num_layers = (len(layer_sizes) - 1)
final_relu_layer =... |
class TestQuantizationAwareTraining(QuantizationTestCase):
def test_manual(self):
for qengine in supported_qengines:
with override_quantized_engine(qengine):
model = ManualLinearQATModel(qengine)
model = prepare_qat(model)
self.checkObservers(model... |
def init_logger(is_main=True, is_distributed=False, filename=None):
if is_distributed:
torch.distributed.barrier()
handlers = [logging.StreamHandler(sys.stdout)]
if (filename is not None):
handlers.append(logging.FileHandler(filename=filename))
logging.basicConfig(datefmt='%m/%d/%Y %H:%M... |
def _get_train_length(task):
if (task == 'sst-2'):
return 67349
elif (task == 'sts-b'):
return 5749 |
def build_model_filename(paths, short_name, command_args, extra_args):
(short_language, dataset) = short_name.split('_', 1)
default_args = build_default_args(paths, short_language, dataset, command_args, extra_args)
train_args = ['--shorthand', short_name, '--mode', 'train']
train_args = (train_args + d... |
def specht_module_rank(D, base_ring=None):
    """Return the rank of the Specht module of diagram ``D`` over ``base_ring``.

    The diagram is normalized via ``_to_diagram``; when no ring is given the
    rationals ``QQ`` are used. The rank is that of the matrix whose rows are
    the vectors of the spanning set.
    """
    diagram = _to_diagram(D)
    spanning = specht_module_spanning_set(diagram)
    ring = QQ if base_ring is None else base_ring
    rows = [vec.to_vector() for vec in spanning]
    return matrix(ring, rows).rank()
class DipoleMoment(Scalar):
def __init__(self, hidden_channels, activation='silu'):
super(DipoleMoment, self).__init__(hidden_channels, activation, allow_prior_model=False)
atomic_mass = torch.from_numpy(ase.data.atomic_masses).float()
self.register_buffer('atomic_mass', atomic_mass)
def... |
class GRSBerlekampWelchDecoder(Decoder):
def __init__(self, code):
if (not isinstance(code, GeneralizedReedSolomonCode)):
raise ValueError('code has to be a generalized Reed-Solomon code')
super().__init__(code, code.ambient_space(), 'EvaluationPolynomial')
def __eq__(self, other):
... |
class up(nn.Module):
def __init__(self, in_ch, bilinear=False):
super(up, self).__init__()
if bilinear:
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
else:
self.up = nn.ConvTranspose2d(in_ch, in_ch, 2, stride=2)
def forward(self, x):
... |
def chunk_text(text, k, use_spacy=True):
if use_spacy:
if (text == ''):
return ['']
chunks = [i.text for i in nlp(text).sents]
res = []
carryover = ''
for i in chunks:
if (len((carryover + i).split()) < k):
carryover = ((carryover + i) ... |
def make_field_desc_map(features):
    """Index field descriptors by name across all feature columns.

    ``features`` maps some key to a list of feature columns; every column's
    ``get_field_desc()`` descriptors are collected into one dict keyed by
    their ``name`` attribute. Later duplicates overwrite earlier ones, as in
    the original loop.
    """
    return {
        fd.name: fd
        for fc_list in features.values()
        for fc in fc_list
        for fd in fc.get_field_desc()
    }
class TestLabelField(AllenNlpTestCase):
def test_as_tensor_returns_integer_tensor(self):
label = LabelField(5, skip_indexing=True)
tensor = label.as_tensor(label.get_padding_lengths()).data.cpu().numpy()
numpy.testing.assert_array_almost_equal(tensor, numpy.array([5]))
def test_label_fie... |
def get_file_size(path, unit=SIZE_UNIT_K):
    """Return the size of the file at ``path`` expressed in ``unit`` bytes.

    ``path`` is first resolved through ``get_absolute_path``; the default
    unit is the module-level kilobyte constant ``SIZE_UNIT_K``.
    """
    absolute = get_absolute_path(path)
    byte_count = os.path.getsize(absolute)
    return (byte_count * 1.0) / unit
def get_electra_train_flops(h_d, l_d, h_g, l_g, batch_size, train_steps, tied_embeddings, e=None, s=512, output_frac=0.15625):
if (e is None):
e = h_d
disc = TransformerHparams(h_d, l_d, s=s, e=e, output_frac=output_frac).get_train_flops(batch_size, train_steps, True)
gen = TransformerHparams(h_g, l... |
class ArmActionMode(object):
def action(self, scene: Scene, action: np.ndarray):
pass
def action_step(self, scene: Scene, action: np.ndarray):
pass
def action_pre_step(self, scene: Scene, action: np.ndarray):
pass
def action_post_step(self, scene: Scene, action: np.ndarray):
... |
('/ngsi-ld/v1/entities/urn:ngsi-ld:Device:water001/attrs/on', methods=['PATCH'])  # NOTE(review): decorator name lost in this dump — presumably @app.route(...); confirm
def upsertNotificationNew():
    """PATCH handler for an NGSI-LD attribute-update notification.

    Reads the JSON body from the (Flask-style) request and returns a plain
    acknowledgement string.
    """
    entities = request.get_json()
    print(dir(request))  # NOTE(review): debug output left in — consider removing
    print(entities)
    return 'Done'
def test_label_combination_hoeffding_tree_nba(test_path):
stream = MultilabelGenerator(n_samples=10000, n_features=15, n_targets=3, n_labels=4, random_state=112)
learner = LabelCombinationHoeffdingTreeClassifier(n_labels=3)
cnt = 0
max_samples = 5000
predictions = []
proba_predictions = []
w... |
def create_files(output_dir, script_dir, basename, launcher_file):
settings_files = ['config-files/skull2.json']
views = [8, 32]
image_losses = [(1, 0, 0, 0), (0, 1, 0, 0)]
prior_losses = [0.0, 0.01, 0.1]
minOpacity = [0.0, 0.1, 0.5]
onlyOpacityUntil = [0, 50, 100]
seeds = [42]
tfmode = ... |
def _coco_eval_to_mask_results(coco_eval):
res = _empty_mask_results()
if (coco_eval is not None):
s = coco_eval.stats
res['mask']['AP'] = s[COCO_AP]
res['mask']['AP50'] = s[COCO_AP50]
res['mask']['AP75'] = s[COCO_AP75]
res['mask']['APs'] = s[COCO_APS]
res['mask']... |
class SpeechFeatures(object):
default_rate = 16000
default_filters_number = 13
default_augmented = True
def mfcc(signal, rate=default_rate, filters_number=default_filters_number, augmented=default_augmented):
mfcc_features = mfcc(signal, rate, numcep=filters_number)
if (not augmented):
... |
class RoCBertForTokenClassification(metaclass=DummyObject):
    """Import-time placeholder for the real RoCBert token-classification model.

    Instantiation only calls ``requires_backends``, which raises a helpful
    error when the 'torch' backend is not installed.
    """
    _backends = ['torch']  # backends the real class depends on
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class Tasks():
    """Task-type constants supported here plus a validity check."""
    IC_MULTILABEL = DatasetTypes.IC_MULTILABEL  # multi-label image classification
    IC_MULTICLASS = DatasetTypes.IC_MULTICLASS  # single-label image classification
    OBJECT_DETECTION = DatasetTypes.OD  # object detection
    # Complete list of accepted task identifiers.
    VALID_TYPES = [IC_MULTILABEL, IC_MULTICLASS, OBJECT_DETECTION]
    def is_valid(task):
        # NOTE(review): no `self` and no @staticmethod — only callable as Tasks.is_valid(task).
        return (task in Tasks.VALID_TYPES)
class UpConvBlock(nn.Module):
def __init__(self, conv_block, in_channels, skip_channels, out_channels, num_convs=2, stride=1, dilation=1, with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU'), upsample_cfg=dict(type='InterpConv'), dcn=None, plugins=None):
super(UpConvBlock, self)... |
class DatasetMapper_detr_instance_exp():
def __init__(self, is_train: bool, *, augmentations: List[Union[(T.Augmentation, T.Transform)]], augmentations_with_crop: List[Union[(T.Augmentation, T.Transform)]], image_format: str, use_instance_mask: bool=False, use_keypoint: bool=False, instance_mask_format: str='polygo... |
.parametrize('device', ['cpu', 'cuda'])
def test_compatibility(device, M=9, alpha=0.1, B=2):
b2mc = diffsptk.MLSADigitalFilterCoefficientsToMelCepstrum(M, alpha)
U.check_compatibility(device, b2mc, [], f'nrand -l {(B * (M + 1))}', f'b2mc -m {M} -a {alpha}', [], dx=(M + 1), dy=(M + 1))
U.check_differentiable... |
def convert_from_milnor_matrix(n, basis, p=2, generic='auto'):
    """Return the change-of-basis matrix from the Milnor basis to ``basis``
    in degree ``n`` at the prime ``p``.

    This is the inverse of ``convert_to_milnor_matrix``. When that matrix is
    empty (a degree with no basis elements) it is returned unchanged.
    """
    mat = convert_to_milnor_matrix(n, basis, p, generic)
    if mat.nrows() != 0:
        # Fix: reuse the matrix already computed above instead of calling
        # convert_to_milnor_matrix a second time just to invert it.
        return mat.inverse()
    return mat
class Alignment():
def __init__(self, node, url, amr, indexes, score):
self.node = node
self.url = url
self.amr = amr
self.aligned_token_indexes = indexes
self.score = score
def __str__(self):
return 'node: {}\nurl: {}\naligned_token_indexes: {}\naligned_tokens: {... |
def keep_largest_connected_components(mask):
num_channel = mask.shape[1]
out_img = np.zeros(mask.shape, dtype=np.uint8)
for struc_id in range(1, (num_channel + 1)):
binary_img = (mask == struc_id)
blobs = measure.label(binary_img, connectivity=1)
props = measure.regionprops(blobs)
... |
class COCOVQAEvalDataset(VQAEvalDataset, __DisplMixin):
def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
self.vis_root = vis_root
self.annotation = json.load(open(ann_paths[0]))
answer_list_path = ann_paths[1]
if os.path.exists(answer_list_path):
se... |
class ConsisMeanAggregator(SageMeanAggregator):
def __init__(self, src_dim, dst_dim, **kwargs):
super().__init__(src_dim, dst_dim, activ=False, **kwargs)
def __call__(self, dstsrc_features, dstsrc2src, dstsrc2dst, dif_mat, relation_vec, attention_vec):
x = super().__call__(dstsrc_features, dstsr... |
def callable_for_fwd_module(module: 'daceml.torch.DaceModule', forward_compiled: CompiledSDFG):
assert forward_compiled._initialized
fwd_arglist = forward_compiled.sdfg.arglist()
(input_names, output_names) = get_arglist(module)
constants = init_remaining_parameters(module, fwd_arglist, input_names, out... |
def test_singling_out_queries_unique():
df = pd.DataFrame({'c1': [1], 'c2': [2]})
queries = UniqueSinglingOutQueries()
(q1, q2) = ('c1 == 1', 'c2 == 2')
queries.check_and_append(q1, df=df)
queries.check_and_append(q1, df=df)
assert (queries.queries == [q1])
queries.check_and_append(q2, df=df... |
def load_sys(paths):
(src, tgt, hypos, log_probs) = ({}, {}, {}, {})
for path in paths:
with open(path) as f:
for line in f:
line = line.rstrip()
if line.startswith(('S-', 'T-', 'D-')):
i = int(line[(line.find('-') + 1):line.find('\t')])
... |
def get_model(model):
    """Unwrap a (Distributed)DataParallel container, returning the inner module.

    Plain modules are returned unchanged.
    """
    parallel_types = (torch.nn.DataParallel, torch.nn.parallel.DistributedDataParallel)
    return model.module if isinstance(model, parallel_types) else model
def torch_distributed_zero_first(*args, **kwargs):
    """Dummy stand-in for the real helper when the 'torch' backend is missing.

    Calling it triggers ``requires_backends``, which raises an informative
    error about the missing torch installation.
    """
    requires_backends(torch_distributed_zero_first, ['torch'])
class SectionHeaderTagger(Tagger):
def __init__(self, header_dict=None, stop_headers=None, max_token_len=6):
self.stop_headers = ({} if (not stop_headers) else stop_headers)
self.header_dict = ({} if (not header_dict) else {'headers': header_dict})
self.max_token_len = max_token_len
... |
def install(subcommand='checkout', branch=None, name=None, prefix=None, channels=('pytorch-nightly',), override_channels=False, logger=None):
global LOGGER
logger = (logger or LOGGER)
(deps, pytorch, platform, existing_env, env_opts) = conda_solve(name=name, prefix=prefix, channels=channels, override_channe... |
def test_integration_post_dominator_tree(conditional_jump_example_bytecode):
control_flow_graph = CFG.from_bytecode(conditional_jump_example_bytecode)
post_dominator_tree = DominatorTree.compute_post_dominator_tree(control_flow_graph)
dot_representation = post_dominator_tree.dot
graph = 'strict digraph ... |
class Speech2Text2Processor(ProcessorMixin):
feature_extractor_class = 'AutoFeatureExtractor'
tokenizer_class = 'Speech2Text2Tokenizer'
def __init__(self, feature_extractor, tokenizer):
super().__init__(feature_extractor, tokenizer)
self.current_processor = self.feature_extractor
sel... |
class Unet_bn(object):
def __init__(self, img_channels=3, truth_channels=3, cost='mean_squared_error', cost_kwargs={}, **kwargs):
tf.reset_default_graph()
self.summaries = kwargs.get('summaries', True)
self.img_channels = img_channels
self.truth_channels = truth_channels
self... |
class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
model_type = 'focalnet'
def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, use_conv_embed=False, hidden_sizes=[192, 384, 768, 768], depths=[2, 2, 6, 2], focal_levels=[2, 2, 2, 2], focal_windows=[3, 3, 3, 3], hidden_act='gel... |
def cauchy(omega, lambd):
    """Evaluate the Cauchy kernel sum_j 1/(omega_i - lambd_j) for each omega_i.

    Vectorized over ``omega`` via ``jax.vmap``; ``lambd`` broadcasts inside
    each per-element reduction.
    """
    def _kernel_at(w):
        return (1.0 / (w - lambd)).sum()
    return jax.vmap(_kernel_at)(omega)
def _transform_hms(result_str: str, hms_token: str, ispm: bool, hms_value: int) -> str:
result = deepcopy(result_str)
if (hms_token != ''):
if (hms_value == (- 1)):
if (len(hms_token) == 2):
result = result.replace(hms_token, '--')
elif (len(hms_token) == 1):
... |
def main():
opts_dict = {'radius': 3, 'stdf': {'in_nc': 1, 'out_nc': 64, 'nf': 32, 'nb': 3, 'base_ks': 3, 'deform_ks': 3}, 'qenet': {'in_nc': 64, 'out_nc': 1, 'nf': 48, 'nb': 8, 'base_ks': 3}}
model = MFVQE(opts_dict=opts_dict)
msg = f'loading model {ckp_path}...'
print(msg)
checkpoint = torch.load(... |
def test_MemoryWithRandomCoherenceTime__schedule_expiration():
NUM_TRIALS = 200
coherence_period_avg = 1
coherence_period_stdev = 0.15
tl = Timeline()
mem = MemoryWithRandomCoherenceTime('mem', tl, fidelity=1, frequency=0, efficiency=1, coherence_time=coherence_period_avg, coherence_time_stdev=coher... |
def _play_with_pyaudio(seg):
import pyaudio
p = pyaudio.PyAudio()
stream = p.open(format=p.get_format_from_width(seg.sample_width), channels=seg.channels, rate=seg.frame_rate, output=True)
try:
for chunk in make_chunks(seg, 500):
stream.write(chunk._data)
finally:
stream.... |
def generate_field_end_methods(byte_array, template):
    """Emit Rust ``pub const <NAME>_END`` lines for every field in ``template``.

    Walks ``template.chunks`` while accumulating a running byte offset; each
    chunk that is a ``Field`` produces a constant holding the offset just
    past that field. ``byte_array`` is accepted for interface compatibility
    but not read here.
    """
    out = StringIO()
    end = 0
    for piece in template.chunks:
        end += len(piece)
        if isinstance(piece, Field):
            out.write((' pub const %s_END : usize = %d;\n' % (piece.name.upper(), end)))
    return out.getvalue()
def test_extract_entities_from_subfolder(dataset):
entities = extract_entities_from_subfolder('sample', dataset)
assert (len(entities) == 1)
assert (len(entities['1-p']) == 1)
assert (len(entities['1-p']['1.39-s']) == 39)
assert (entities['1-p']['1.39-s']['1.1-seg'] == EXPECTED_TOKENS[0])
assert... |
class HfApi():
def __init__(self, endpoint=None):
self.endpoint = (endpoint if (endpoint is not None) else ENDPOINT)
def login(self, username: str, password: str) -> str:
path = '{}/api/login'.format(self.endpoint)
r = requests.post(path, json={'username': username, 'password': password}... |
_task('semisupervised_translation')
class SemisupervisedTranslationTask(MultilingualTranslationTask):
def add_args(parser):
MultilingualTranslationTask.add_args(parser)
parser.add_argument('--lambda-parallel-config', default='1.0', type=str, metavar='CONFIG', help='cross-entropy reconstruction coeff... |
class MLP(nn.Module):
def __init__(self, in_size, mid_size, out_size, dropout_r=0.0, use_relu=True):
super(MLP, self).__init__()
self.fc = FC(in_size, mid_size, dropout_r=dropout_r, use_relu=use_relu)
self.linear = nn.Linear(mid_size, out_size)
def forward(self, x):
return self.l... |
def _impl(array, list_to32, string_to32, bytestring_to32, emptyarray_to, categorical_as_dictionary, extensionarray, count_nulls):
from awkward._connect.pyarrow import direct_Content_subclass, pyarrow
layout = ak.operations.to_layout(array, allow_record=True, primitive_policy='error')
if isinstance(layout, a... |
def boxbar(height, bar, ranges=[0.02, 0.08], threshold=[0.05, 0.06]):
width = 15
box = np.zeros((height, width, 3), np.uint8)
h = level_height(bar, ranges)
(x1, y1) = (0, int(((1 - h) * height)))
(x2, y2) = (int(width), int(height))
cv2.rectangle(box, (x1, y1), (x2, y2), (0, 1, 0), (- 1))
fo... |
.parametrize('type_, result', [(str, 2), (int, 2), (float, 1), (bytes, 2)])  # NOTE(review): decorator prefix lost in this dump — presumably @pytest.mark.parametrize; confirm
def test_collect_constants(type_, result, fixture_dir):
    """Static-constant collection finds the expected count of constants per type."""
    constants = collect_static_constants(fixture_dir)
    assert (len(constants.get_all_constants_for(type_)) == result)
class PingClient(AppConfig):
    """App configuration that pings a server from a node.

    Attributes:
        server_ip: address the ping command targets (defaults to a host-side NAT IP).
    """
    def __init__(self, server_ip: str='192.168.64.1') -> None:
        super().__init__()
        self.server_ip = server_ip

    def run_cmds(self, node: NodeConfig) -> tp.List[str]:
        """Return the shell commands run on ``node``: ten pings to the server."""
        target = self.server_ip
        return [f'ping {target} -c 10']
_torch
class AlbertModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = ((AlbertModel, AlbertForMaskedLM) if is_torch_available() else ())
class AlbertModelTester(object):
def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=Tru... |
class CharacterTable(object):
def __init__(self, chars, maxlen):
self.chars = sorted(set(chars))
self.char_indices = dict(((c, i) for (i, c) in enumerate(self.chars)))
self.indices_char = dict(((i, c) for (i, c) in enumerate(self.chars)))
self.maxlen = maxlen
def encode(self, C, ... |
class ReplayBuffer(object, metaclass=abc.ABCMeta):
def add_sample(self, observation, action, reward, next_observation, terminal, **kwargs):
pass
def terminate_episode(self):
pass
def num_steps_can_sample(self, **kwargs):
pass
def add_path(self, path):
for (i, (obs, action... |
.parametrize('x_star,expected_ids', (([[0.25], [0.1], [0.09], [0.51], [0.05]], [0, 3, 3]), ([[0.25], [0.24], [0.25], [0.01], [0.25]], [0, 1, 2, 4]), ([[0.1], [0.2], [0.3], [0.4], [0.0]], [1, 2, 3, 3])))
.qhsri
def test_pareto_sample_diverse_subset_choose_batch_with_repeats(x_star: list[list[float]], expected_ids: list[... |
def is_prediction_correct(trainer: Trainer, model: torch.nn.Module, inputs: Dict[(str, Union[(torch.Tensor, Any)])]) -> bool:
(preds, label_ids, step_eval_loss) = predict(trainer=trainer, model=model, inputs=inputs)
if (preds.shape[0] != 1):
raise ValueError('This function only works on instances.')
... |
class MinSegmentTree(SegmentTree):
    """Segment tree specialized to range-minimum queries.

    Combines elements with ``min`` and uses +inf as the identity value.
    """
    def __init__(self, capacity: int):
        super(MinSegmentTree, self).__init__(capacity=capacity, operation=min, init_value=float('inf'))
    def min(self, start: int=0, end: int=0) -> float:
        # Delegates to the parent's operate(); end == 0 presumably means
        # "through the last element" — NOTE(review): confirm against SegmentTree.operate.
        return super(MinSegmentTree, self).operate(start, end)
def transform_tweet_nopadding(dictionary, words):
    """Map each token in ``words`` to its vocabulary index.

    Tokens absent from ``dictionary`` map to index 0 (the UNK bucket).
    The original also tallied an unknown-word count in a local that was
    never used or returned; that dead code is removed here.
    """
    return [dictionary.get(word, 0) for word in words]
def delete_non_hyperparameters(cfg: OmegaConf) -> dict:
    """Convert ``cfg`` to a plain container and strip every key listed in the
    module-level ``non_hyperparameters`` collection.

    Keys not present in the container are silently skipped.
    """
    params = OmegaConf.to_container(cfg)
    for key in non_hyperparameters:
        params.pop(key, None)
    return params
class TestThread(object):
def setup(self):
self.seeds = range(4)
def check_function(self, function, sz):
from threading import Thread
out1 = np.empty(((len(self.seeds),) + sz))
out2 = np.empty(((len(self.seeds),) + sz))
t = [Thread(target=function, args=(np.random.RandomS... |
def sample_episode_performance(policy, env: Union[(GCSLToGym, offline_env.OfflineEnv)], env_name: str, max_episode_steps: int, traj_samples: int=2000, kitchen_subtask: str='all') -> np.ndarray:
if (env_name[:7] == 'kitchen'):
if (kitchen_subtask == 'dynamic'):
return sample_cumulative_reward(pol... |
def create_modal(modal_id, header, content, content_id, button_id):
    """Build a Bootstrap modal (initially closed) wrapped in a Div.

    The modal has a title header, a body carrying ``content`` (addressable
    through ``content_id``), and a footer Close button addressable through
    ``button_id``.
    """
    title = dbc.ModalHeader(dbc.ModalTitle(header))
    body = dbc.ModalBody(content, id=content_id)
    close_button = dbc.Button('Close', id=button_id, className='ml-auto', n_clicks=0)
    footer = dbc.ModalFooter(close_button)
    dialog = dbc.Modal([title, body, footer], id=modal_id, is_open=False)
    return html.Div([dialog])
def test_knn_adwin():
stream = ConceptDriftStream(stream=SEAGenerator(random_state=1), drift_stream=SEAGenerator(random_state=2, classification_function=2), random_state=1, position=250, width=10)
learner = KNNADWINClassifier(n_neighbors=8, leaf_size=40, max_window_size=200)
cnt = 0
max_samples = 1000
... |
class ModularAbelianVariety_newform(ModularAbelianVariety_modsym_abstract):
def __init__(self, f, internal_name=False):
if (not isinstance(f, Newform)):
raise TypeError('f must be a newform')
if (f.weight() != 2):
raise TypeError('f must have weight 2')
self.__f = f
... |
def subsample(samples, n=1000):
    """Return up to ``n`` randomly chosen elements of ``samples``, keeping
    their original relative order.

    Shuffles the index range, keeps the first ``n`` indices, then emits the
    corresponding elements in ascending index order. When ``n`` is at least
    ``len(samples)`` the whole list comes back unchanged.
    """
    indexes = list(range(len(samples)))
    random.shuffle(indexes)
    kept = sorted(indexes[:n])
    return [samples[idx] for idx in kept]
def load_bert(config: Config) -> Tuple[(AutoModel, AutoTokenizer)]:
logger.debug(f'Loading {config.bert_model}...')
base_bert_name = config.bert_model.split('/')[(- 1)]
tokenizer_kwargs = config.tokenizer_kwargs.get(base_bert_name, {})
if tokenizer_kwargs:
logger.debug(f'Using tokenizer kwargs: ... |
def generate_adversaries(image_tensor, model, true_class_index):
delta = torch.zeros_like(image_tensor, requires_grad=True)
optimizer = opt = torch.optim.Adam([delta], lr=0.001)
losses = []
for t in range(ITERATIONS):
inp = torch.clamp((image_tensor + delta), (- 1), 1)
(logits, _) = mode... |
def CheckForCopyright(filename, lines, error):
    """Flag a copyright line within the first ten lines of the file.

    ``lines`` carries a sentinel at index 0, so scanning starts at 1 and
    stops before min(len(lines), 11).
    """
    for line in xrange(1, min(len(lines), 11)):
        # Bug fix: the original passed re.I as the second positional argument,
        # but a compiled pattern's search() takes a start *position* there —
        # so re.I (== 2) silently skipped the first two characters instead of
        # enabling case-insensitive matching. Flags belong at compile time.
        if _RE_COPYRIGHT.search(lines[line]):
            error(filename, 0, 'legal/copyright', 5, 'Copyright message found. You should not include a copyright line.')
def t5_3b_tied_lmheads_512_4_8p_bw12_squad1_pipedream():
return dict(model_type='t5_stateless', model_name_or_path='t5-3b', do_lower_case=False, output_past=False, output_attentions=False, output_hidden_states=False, do_resize_token_embedding=True, explicitly_set_dict={'output_only': True, 'output_attentions': Fals... |
def sigmoid_cross_entropy_with_logits_with_log_D_trick(x, z):
    """Log-D-trick variant of sigmoid cross-entropy on logits ``x`` with labels ``z``.

    Computes -(2z - 1) * log(sigmoid(x)); for z in {0, 1} the leading factor
    is +/-1, flipping the sign of the log-discriminator term.
    """
    sign = (2 * z) - 1.0
    return (-sign) * np.log(sigmoid(x))
def shufflenet_v2_x0_5(num_classes, loss='softmax', pretrained=True, **kwargs):
    """Construct a ShuffleNetV2 0.5x model.

    Args:
        num_classes: size of the classification head.
        loss: loss type forwarded to the backbone (default 'softmax').
        pretrained: when True, load weights from ``model_urls['shufflenetv2_x0.5']``.
        **kwargs: extra arguments forwarded to ``ShuffleNetV2``.
    """
    # Stage repeats [4, 8, 4] and channel widths for the 0.5x variant.
    model = ShuffleNetV2(num_classes, loss, [4, 8, 4], [24, 48, 96, 192, 1024], **kwargs)
    if pretrained:
        init_pretrained_weights(model, model_urls['shufflenetv2_x0.5'])
    return model
def get_model_opt(hid_layers, dropout=0.0):
    """Build the attribution-augmented model plus its Adam optimizer.

    The base network is wrapped in ``ExpectedGradientsModel`` on the GPU
    together with the module-level reference set ``refset``; the optimizer's
    learning rate comes from the module-level ``learning_rate``.
    """
    net = get_layers(hid_layers, dropout=dropout)
    model = ExpectedGradientsModel(net.cuda(), refset)
    opt = torch.optim.Adam(model.parameters(), lr=learning_rate)
    return (model, opt)
def calculate_gain(nonlinearity, param=None):
linear_fns = ['linear', 'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d', 'conv_transpose2d', 'conv_transpose3d']
if ((nonlinearity in linear_fns) or (nonlinearity == 'sigmoid')):
return 1
elif (nonlinearity == 'tanh'):
return (5.0 / 3)
elif ... |
def test_da_head():
inputs = [torch.randn(1, 16, 23, 23)]
head = DAHead(in_channels=16, channels=8, num_classes=19, pam_channels=8)
if torch.cuda.is_available():
(head, inputs) = to_cuda(head, inputs)
outputs = head(inputs)
assert (isinstance(outputs, tuple) and (len(outputs) == 3))
for ... |
def moltosvg_interaction_known(mol, atom_list, atom_predictions, molecule_prediction, molecule_experiment, max_atom_pred, min_atom_pred, Number):
note = ((((('(' + str(Number)) + ") y-y' : ") + str(round(molecule_experiment, 2))) + '-') + str(round(molecule_prediction, 2)))
norm = matplotlib.colors.Normalize(vm... |
def ParseArgs():
Args = argparse.ArgumentParser(description='Parser to parse vulnerability result file into JSON')
Args.add_argument('--src', required=True, help='result file absolute path to parse')
Args.add_argument('--dst', required=True, help='output file absolute path to generate JSON file from result'... |
def build_and_train():
affinity = dict(cuda_idx=None, workers_cpus=list(range(15)))
sampler = CpuSampler(EnvCls=_make_env, env_kwargs=dict(rank=0), batch_T=6000, batch_B=20)
algo = SAC(bootstrap_timelimit=False)
agent = SacAgent()
runner = MinibatchRl(algo=algo, agent=agent, sampler=sampler, n_steps... |
class CTCOpsTest(test_util.TestCase):
def verify_cost(self, device_option, is_test, skip_input_lengths=False):
alphabet_size = 5
N = 1
T = 2
inputs = np.asarray([[[0.1, 0.6, 0.1, 0.1, 0.1]], [[0.1, 0.1, 0.6, 0.1, 0.1]]]).reshape(T, N, alphabet_size).astype(np.float32)
labels ... |
class ChamferDistanceL2(torch.nn.Module):
def __init__(self, ignore_zeros=False):
super().__init__()
self.ignore_zeros = ignore_zeros
def forward(self, xyz1, xyz2):
batch_size = xyz1.size(0)
if ((batch_size == 1) and self.ignore_zeros):
non_zeros1 = torch.sum(xyz1, di... |
class DummyTask(LegacyFairseqTask):
def __init__(self, args):
super().__init__(args)
self.dictionary = get_dummy_dictionary()
if getattr(self.args, 'ctc', False):
self.dictionary.add_symbol('<ctc_blank>')
self.tgt_dict = self.dictionary
def target_dictionary(self):
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.