code stringlengths 101 5.91M |
|---|
class ScheduleInitTest(unittest.TestCase):
m = (torch.nn.Linear(50, 50) if is_torch_available() else None)
optimizer = (AdamW(m.parameters(), lr=10.0) if is_torch_available() else None)
num_steps = 10
def assertListAlmostEqual(self, list1, list2, tol):
self.assertEqual(len(list1), len(list2))
... |
_quantizer(quantization_target=QuantizationTarget.Activation, quantization_method=[QuantizationMethod.POWER_OF_TWO, QuantizationMethod.SYMMETRIC], identifier=TrainingMethod.LSQ)
class LSQActivationQATQuantizer(BasePytorchQATTrainableQuantizer):
def __init__(self, quantization_config: TrainableQuantizerActivationCon... |
class IQL(nn.Module):
def __init__(self, qf, vf, policy, max_steps, tau, alpha, value_lr=0.0001, policy_lr=0.0001, discount=0.99, beta=0.005):
super().__init__()
self.qf = qf.to(DEFAULT_DEVICE)
self.q_target = copy.deepcopy(qf).requires_grad_(False).to(DEFAULT_DEVICE)
self.vf = vf.to... |
def remove_attributes(obj, target_attr):
lines = obj.split(os.linesep)
target_idx = None
for (idx, line) in enumerate(lines):
if line.lstrip().startswith(f'{target_attr} = '):
target_idx = idx
break
elif line.lstrip().startswith(f'def {target_attr}('):
tar... |
def _add_object_output(scene):
    """Wire a file-output compositor node to the scene's Render Layers image socket."""
    tree = scene.node_tree
    image_socket = tree.nodes['Render Layers'].outputs['Image']
    file_node = tree.nodes.new('CompositorNodeOutputFile')
    file_node.name = 'Object File Output'
    tree.links.new(image_socket, file_node.inputs['Image'])
(_reducers.Count)
class Count(JAXReducer):
name: Final = 'count'
preferred_dtype: Final = np.float64
needs_position: Final = False
def from_kernel_reducer(cls, reducer: Reducer) -> Self:
assert isinstance(reducer, _reducers.Count)
return cls()
def _return_dtype(cls, given_dtype):
... |
def truncate_class_name(class_name: str) -> str:
string_mapper = {'noise': 'noise', 'human.pedestrian.adult': 'adult', 'human.pedestrian.child': 'child', 'human.pedestrian.wheelchair': 'wheelchair', 'human.pedestrian.stroller': 'stroller', 'human.pedestrian.personal_mobility': 'p.mobility', 'human.pedestrian.police... |
class Voc2007Cfg(VocCfg):
variant: str = '2007'
splits: Dict[(str, dict)] = field(default_factory=(lambda : dict(train=dict(split_filename='VOC2007/ImageSets/Main/train.txt', ann_filename='VOC2007/Annotations/%s.xml', img_dir='VOC2007/JPEGImages'), val=dict(split_filename='VOC2007/ImageSets/Main/val.txt', ann_f... |
class SplinterPreTrainedModel(metaclass=DummyObject):
    """Auto-generated placeholder for SplinterPreTrainedModel used when the
    'torch' backend is unavailable; any use raises an informative error."""
    # Backends that must be importable for the real class to be usable.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        # Raises unless torch is installed; arguments are otherwise ignored.
        requires_backends(self, ['torch'])
class Encoder(nn.Module):
def __init__(self, layer, layer_size, N, tie_layers=False):
super(Encoder, self).__init__()
if tie_layers:
self.layer = layer()
self.layers = [self.layer for _ in range(N)]
else:
self.layers = clones(layer, N)
self.norm = ... |
def test_integerindex_null_more():
f = ak.highlevel.Array([[0, None, 2], None, [3, 4], []]).layout
g1 = ak.highlevel.Array([[1, 2, None], None, [], [None]]).layout
g2 = ak.highlevel.Array([[], None, None, []]).layout
g3 = ak.highlevel.Array([[], [], [], []]).layout
assert (to_list(f[g1]) == [[None, ... |
class TestRouge(unittest.TestCase):
def setUp(self):
self.evaluator = rouge.TimelineRougeEvaluator()
self.ground_truth = timelines.GroundTruth([timelines.Timeline({datetime.date(2010, 1, 1): ['timeline summarization .'], datetime.date(2010, 1, 2): ['timeline summarization is awesome .', 'coreference... |
def read_config(method: str, config: Optional[str]) -> dict:
if (config is None):
return {}
with open(METHODS_CONFIGS_JSON, 'r', encoding='utf-8') as file:
all_configs = json.load(file)
if (method not in all_configs):
raise ValueError(f'No available config for {method} in {str(METHOD... |
def embed_texts(path, texts, tokenized_texts, vocab):
model = SentenceTransformer('clip-ViT-B-32')
(texts_s, texts_w, lengths) = ([], [], [])
for text in tqdm(texts):
try:
e = model.encode(text)
except:
e = model.encode('.'.join(text.split('.')[:(- 2)]))
texts... |
def main(cfg, comet=False):
cfg = Config(cfg)
if comet:
experiment = Experiment(api_key=cfg.api_key, project_name=cfg.project_name, workspace=cfg.workspace)
experiment.log_parameters(cfg)
else:
experiment = None
device = (torch.device(f'cuda:{cfg.gpu_id}') if (torch.cuda.is_avail... |
class TakeTrayOutOfOven(Task):
def init_task(self) -> None:
success_detector = ProximitySensor('success')
tray = Shape('tray')
self.register_graspable_objects([tray])
self.register_success_conditions([DetectedCondition(tray, success_detector, negated=True), NothingGrasped(self.robot.... |
def get_top_attrs(attributes, k):
attr_to_asins = defaultdict(list)
for (asin, attr_scores) in attributes.items():
top_attr_scoress = attr_scores[:k]
for (attr, score) in top_attr_scoress:
attr_to_asins[attr].append(asin)
total = len([asin for (asin, _) in attributes.items()])
... |
def download_pretrained_from_hf(model_id: str, filename: str='open_clip_pytorch_model.bin', revision=None, cache_dir: Union[(str, None)]=None):
    """Download `filename` for `model_id` from the Hugging Face Hub.

    Returns the local path of the cached file.
    """
    # Hard-require the huggingface_hub dependency before attempting the download.
    has_hf_hub(True)
    return hf_hub_download(model_id, filename, revision=revision, cache_dir=cache_dir)
(**njit_dict_no_parallel)
def get_index(value, array):
    """Locate the interval of sorted `array` containing `value`.

    Returns 0 when value <= array[0], len(array) - 1 when value > array[-1],
    and otherwise the first index i with value <= array[i + 1].
    """
    last = len(array) - 1
    if value <= array[0]:
        return 0
    if value > array[last]:
        return last
    idx = 0
    while value > array[idx + 1]:
        idx += 1
    return idx
def main():
parser = argparse.ArgumentParser(description='arg parser')
parser.add_argument('--cfg_file', type=str, default='config/semseg_nuscenes.yaml', help='specify the config for training')
parser.add_argument('--resume_path', type=str, default=None, help='provide a path to resume an incomplete training... |
def train(epoch, model, dataloader, optimizer, lr_scheduler, cfg, logger, writer):
model.train()
iter_time = AverageMeter()
data_time = AverageMeter()
meter_dict = {}
end = time.time()
if ((dataloader.sampler is not None) and cfg.dist):
dataloader.sampler.set_epoch(epoch)
for (i, bat... |
def getembeddings(srcpath, trgpath, compath, cutoff=50000):
ts = '/home/15CS10013/important-sai/ts12'
tsdata = (ts + '/tsdata')
compath = (tsdata + '/fk.lower.vec')
srcpath = (tsdata + '/fkdifficpart.lower.vec.id')
trgpath = (tsdata + '/fkeasypart.lower.vec.id')
vocabcom = data.read_embeddings(o... |
def remove_prefixes_summary(summary):
pat_period_line_break = '.*(\\.(\\n\\t)+[ ]?(\\n\\t)*).*'
if re.match(pat_period_line_break, summary):
to_replace = re.match(pat_period_line_break, summary).group(1)
summary = summary.replace(to_replace, '. ')
pat_line_break = '.*((\\n\\t)+[ ]?(\\n\\t)*)... |
class AlisaTaksStatus(Enum):
    """Status codes for an Alisa task.

    NOTE(review): the class name has a typo ("Taks") but renaming would break
    callers; values presumably mirror the upstream Alisa API — confirm. The
    gap at 7 appears intentional (no such upstream code visible here).
    """
    ALISA_TASK_WAITING = 1
    ALISA_TASK_RUNNING = 2
    ALISA_TASK_COMPLETED = 3
    ALISA_TASK_ERROR = 4
    ALISA_TASK_FAILOVER = 5
    ALISA_TASK_KILLED = 6
    ALISA_TASK_RERUN = 8
    ALISA_TASK_EXPIRED = 9
    ALISA_TASK_ALISA_RERUN = 10
    ALISA_TASK_ALLOCATE = 11
_model('model_parallel_transformer')
class ModelParallelTransformerModel(TransformerModel):
def build_embedding(cls, args, dictionary, embed_dim, path=None):
if (not has_megatron_submodule):
raise ImportError('\n\nPlease install the megatron submodule:\n\n git submodule update --init fairseq/mo... |
def test_branch_coverage_no_branch(subject_properties_mock, trace_mock):
    """Expect 0.0 branch coverage for the mocked trace with one registered predicate."""
    subject_properties_mock.existing_predicates[0] = MagicMock(PredicateMetaData)
    coverage = ff.compute_branch_coverage(trace_mock, subject_properties_mock)
    assert coverage == 0.0
_metric
def fid50k(opts):
    """Compute FID using 50k real and 50k generated samples; returns {'fid50k': value}."""
    # Use the full real dataset (no cap) when collecting real statistics.
    opts.dataset_kwargs.update(max_size=None)
    score = frechet_inception_distance.compute_fid(opts, max_real=50000, num_gen=50000)
    return {'fid50k': score}
def get_gpu_info(run_lambda):
if ((get_platform() == 'darwin') or (TORCH_AVAILABLE and hasattr(torch.version, 'hip') and (torch.version.hip is not None))):
if (TORCH_AVAILABLE and torch.cuda.is_available()):
return torch.cuda.get_device_name(None)
return None
smi = get_nvidia_smi()
... |
class SegmentationBase(Dataset):
def __init__(self, data_csv, data_root, segmentation_root, size=None, random_crop=False, interpolation='bicubic', n_labels=182, shift_segmentation=False):
self.n_labels = n_labels
self.shift_segmentation = shift_segmentation
self.data_csv = data_csv
s... |
def read_array(path):
with open(path, 'rb') as fid:
(width, height, channels) = np.genfromtxt(fid, delimiter='&', max_rows=1, usecols=(0, 1, 2), dtype=int)
fid.seek(0)
num_delimiter = 0
byte = fid.read(1)
while True:
if (byte == b'&'):
num_delimite... |
def _group_normalization_v1(x, beta, gamma, num_groups, channel_axis=1, batch_axis=0, eps=1e-05, output_stat=False):
_check_axis(len(x.shape), channel_axis)
cdim = x.shape[channel_axis]
if ((cdim % num_groups) > 0):
raise ValueError('Channel dim ({}) must be integer multiple of num_groups ({}).'.for... |
def make_read_row():
sdfg = SDFG('spmv_read_row')
begin = sdfg.add_state('begin')
entry = sdfg.add_state('entry')
end = sdfg.add_state('end')
body = sdfg.add_state('body')
sdfg.add_edge(begin, entry, InterstateEdge(assignments={'h': '0'}))
sdfg.add_edge(entry, body, InterstateEdge(condition=... |
def paint_mouse_ball():
    """Refresh the cursor marker and ball marker from current global state."""
    pos = window.get_cursor_pos()
    mouse_circle[0] = ti.Vector([pos[0], pos[1]])
    ball_circle[0] = ball_pos
def register_Ns3PcapFileWrapper_methods(root_module, cls):
cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
cls.add_constructor([])
cls.add_method('Fail', 'bool', [], is_const=True)
cls.add_method('Eof', 'bool', [], is_const=True)
cls.add_method('Clear', 'void', [])
cls.add_method(... |
.parametrize('ctx, func_name', ctxs)
.parametrize('seed', [313])
.parametrize('ishape, index, oshape', [((2,), [[1, 3]], (10,)), ((2,), [[(- 1), (- 3)]], (10,)), ((3,), [[1, 1, 0], [0, 1, 0]], (2, 2)), ((4,), [[4, 3, 1, 7]], (8,)), ((2, 4), [[0, 1], [2, 3]], (4, 4, 4)), ((2, 4, 4), [[0, 2]], (4, 4, 4)), ((2, 2, 2), [[0... |
def get_dataloader(args, unit_batch=False, no_randomness=False):
if unit_batch:
bsz = (1, 1)
else:
bsz = (args.batch_size_train, args.batch_size_test)
if no_randomness:
enable_shuffle = False
else:
enable_shuffle = True
if (args.dataset.lower() == 'mnist'):
tr... |
def get_kmer_list(k, upto, alphabet):
    """Build the k-mer list over `alphabet`.

    Includes lengths 1..k when `upto` is truthy, otherwise only length k.
    """
    start = 1 if upto else k
    lengths = list(range(start, k + 1))
    return make_upto_kmer_list(lengths, alphabet)
class GPT2Tokenizer(PreTrainedTokenizer):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(self, vocab_file, merges_file, errors='replace', unk_token='<|endoftext|>', bos_token='<|end... |
def normalize_path(filename):
    """Return a canonical form of `filename`: cygwin-patched, normalized,
    symlink-resolved, then case-normalized for the host platform."""
    patched = _cygwin_patch(filename)
    resolved = os.path.realpath(os.path.normpath(patched))
    return os.path.normcase(resolved)
class PATM(nn.Module):
def __init__(self, dim):
super().__init__()
self.fc_h = nn.Conv2d(dim, dim, 1, bias=False)
self.fc_w = nn.Conv2d(dim, dim, 1, bias=False)
self.fc_c = nn.Conv2d(dim, dim, 1, bias=False)
self.tfc_h = nn.Conv2d((2 * dim), dim, (1, 7), 1, (0, (7 // 2)), gro... |
def main(learning_rate=0.0005, batch_size=20, epochs=10, train_url='train-clean-100', test_url='test-clean', experiment=Experiment(api_key='dummy_key', disabled=True)):
hparams = {'n_cnn_layers': 2, 'n_rnn_layers': 2, 'rnn_dim': 512, 'n_class': 29, 'n_feats': 128, 'stride': 2, 'dropout': 0.5, 'learning_rate': learn... |
def parse_args():
parser = argparse.ArgumentParser(description='Convert Cityscapes annotations to TrainIds')
parser.add_argument('cityscapes_path', help='cityscapes data path')
parser.add_argument('--gt-dir', default='gtFine', type=str)
parser.add_argument('-o', '--out-dir', help='output path')
pars... |
.expansion
class ExpandSolveMKL(ExpandTransformation):
    """Expansion of the Solve library node targeting the Intel MKL backend."""
    # Environments the expanded SDFG requires at compile/link time.
    environments = [blas_environments.intel_mkl.IntelMKL]
    def expansion(node, parent_state, parent_sdfg, **kwargs):
        # Delegate to the shared getrs-based SDFG builder, selecting 'MKL'.
        return _make_sdfg_getrs(node, parent_state, parent_sdfg, 'MKL')
class OurRLAlgorithm(BaseRLAlgorithm, metaclass=abc.ABCMeta):
def __init__(self, trainer, exploration_env, evaluation_env, exploration_data_collector: PathCollector, evaluation_data_collector: PathCollector, offline_replay_buffer: ReplayBuffer, online_replay_buffer: ReplayBuffer, priority_replay_buffer: ReplayBuffe... |
def gen_colormap():
global color_mapping
with open(config_fn) as config_file:
config = json.load(config_file)
config_labels = config['labels']
colormap = []
id2name = {}
for i in range(0, len(config_labels)):
colormap = (colormap + config_labels[i]['color'])
id2name[i] = ... |
def extras(cfg: DictConfig) -> None:
if (not cfg.get('extras')):
log.warning('Extras config not found! <cfg.extras=null>')
return
if cfg.extras.get('ignore_warnings'):
log.info('Disabling python warnings! <cfg.extras.ignore_warnings=True>')
warnings.filterwarnings('ignore')
i... |
def model_load(framework, text_type, text_rep):
    """Load the model and tokenizer plus precomputed audio embeddings.

    Returns (model, audio_embs, tokenizer, msdid).
    """
    audio_embs, msdid = pre_extract_audio_embedding(framework, text_type, text_rep)
    model, tokenizer, _config = get_model(framework=framework, text_type=text_type, text_rep=text_rep)
    return (model, audio_embs, tokenizer, msdid)
def test_line_intersect():
assert (line_intersect((0, 0), (0, 1), (0, 0), (1, 0))[:2] == (0, 0))
assert (line_intersect((0, 0), (0, 1), (0, 0), (0, 1))[2] == 0)
assert (ray_segment_intersect(ray=((0, 0), 0), segment=((1, (- 1)), (1, 1))) == (1, 0))
assert (ray_segment_intersect(ray=((0, 0), math.pi), se... |
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, groups=1, width_per_group=64, replace_stride_with_dilation=None, norm_layer=None):
super(ResNet, self).__init__()
if (norm_layer is None):
norm_layer = nn.BatchNorm2d
self._norm... |
class DataFrame(object):
def __init__(self, columns, data):
assert (len(columns) == len(data)), 'columns length does not match data length'
lengths = [mat.shape[0] for mat in data]
assert (len(set(lengths)) == 1), 'all matrices in data must have same first dimension'
self.length = le... |
class Amber():
def __init__(self, types, specs=None):
self.type_dict = types
self.is_built = False
self.model_space = None
self.controller = None
self.model_fn = None
self.knowledge_fn = None
self.reward_fn = None
self.manager = None
self.env =... |
def test_metadata_routing_add():
router = MetadataRouter(owner='test').add(method_mapping='fit', est=ConsumingRegressor().set_fit_request(sample_weight='weights'))
assert (str(router) == "{'est': {'mapping': [{'callee': 'fit', 'caller': 'fit'}], 'router': {'fit': {'sample_weight': 'weights', 'metadata': None}, ... |
def cnn_with_max_pooling(input_var, filter_dims, num_filters, strides, name, pool_shapes, pool_strides, padding, hidden_nonlinearity=tf.nn.relu, hidden_w_init=tf.glorot_uniform_initializer(), hidden_b_init=tf.zeros_initializer()):
pool_strides = [1, pool_strides[0], pool_strides[1], 1]
pool_shapes = [1, pool_sh... |
def ListsToTensor(xs, vocab=None):
max_len = max((len(x) for x in xs))
ys = []
for x in xs:
if (vocab is not None):
y = (vocab.token2idx(x) + ([vocab.padding_idx] * (max_len - len(x))))
else:
y = (x + ([0] * (max_len - len(x))))
ys.append(y)
data = torch.L... |
(config_path=None, config_name='config')
def pretrain(cfg: PretrainConfig) -> None:
dist.init_process_group(backend='nccl', init_method='env://')
device_id = (dist.get_rank() % torch.cuda.device_count())
(is_rank_zero, rank, world_size) = ((dist.get_rank() == 0), dist.get_rank(), dist.get_world_size())
... |
class TokenClassifierOutput(ModelOutput):
    """Output container for token-classification models."""
    # Classification loss; None unless it was computed (presumably when labels
    # are supplied — confirm against the producing model).
    loss: Optional[torch.FloatTensor] = None
    # Per-token classification scores (pre-softmax logits).
    logits: torch.FloatTensor = None
    # Per-layer hidden states, when requested; otherwise None.
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    # Per-layer attention weights, when requested; otherwise None.
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class FilterModel(PretrainedModel):
def __init__(self, model_class, model_name_or_path, config, cache_dir, dim=768, side_dim=32):
super(FilterModel, self).__init__()
self.base = model_class.from_pretrained(model_name_or_path, from_tf=bool(('.ckpt' in model_name_or_path)), config=config, cache_dir=(c... |
def main(opts):
if (not os.path.exists(opts.save_path)):
os.makedirs(opts.save_path)
out_filepath = os.path.join(opts.save_path, opts.out_file)
if (os.path.splitext(out_filepath)[1] != '.tfrecords'):
out_filepath += '.tfrecords'
else:
(out_filename, ext) = os.path.splitext(out_fi... |
class DataLoader():
def __init__(self, input_src, batch_size, args, vocab=None, evaluation=False, conll_only=False, skip=None):
self.batch_size = batch_size
self.args = args
self.eval = evaluation
self.shuffled = (not self.eval)
if isinstance(input_src, str):
file... |
def train(opt):
opt.use_att = utils.if_use_att(opt)
loader = DataLoader(opt)
opt.vocab_size = loader.vocab_size
opt.seq_length = loader.seq_length
tf_summary_writer = (tf and SummaryWriter(opt.checkpoint_path))
infos = {}
histories = {}
if (opt.start_from is not None):
with open(... |
def _process_example(args):
(example_index, example) = args
example.question_text = example.question_text.replace('\n', ' ')
example.context_text = example.context_text.replace('\n', ' ')
tokenizer = params.tokenizer
def tokenize(text, add_prefix_space=False):
text = text.rstrip()
if... |
def normal_init(module, mean=0, std=1, bias=0):
    """Initialize `module.weight` from N(mean, std); set the bias (if any) to `bias`."""
    nn.init.normal_(module.weight, mean, std)
    has_bias = hasattr(module, 'bias') and module.bias is not None
    if has_bias:
        nn.init.constant_(module.bias, bias)
class ConjugateGradientOptimizer():
def __init__(self, cg_iters=10, reg_coeff=1e-05, subsample_factor=1.0, backtrack_ratio=0.8, max_backtracks=15, accept_violation=False, hvp_approach=None, num_slices=1):
self._cg_iters = cg_iters
self._reg_coeff = reg_coeff
self._subsample_factor = subsampl... |
class RPCServer(AppConfig):
def __init__(self) -> None:
super().__init__()
self.port = 1234
self.threads = 1
self.max_flows = 1234
self.max_bytes = 1024
def run_cmds(self, node: NodeConfig) -> tp.List[str]:
exe = ('echoserver_linux' if (not isinstance(node, MtcpNo... |
class Nag(GradientOptimizer):
def __init__(self, objective: OptimizationFunction, parametrization: Parametrization, learning_rate: float=0.01, gamma: float=0.9):
super().__init__()
self.alpha = learning_rate
self.objective = objective
self.param = parametrization
self.gamma =... |
class SkyplaneCLI():
def __init__(self, src_region_tag: str, dst_region_tag: str, args: Dict[(str, Any)], skyplane_config: Optional[SkyplaneConfig]=None):
(self.src_region_tag, self.dst_region_tag) = (src_region_tag, dst_region_tag)
self.args = args
(self.aws_config, self.azure_config, self.... |
def check_array_lengths(X, Y, W):
x_lengths = [x.shape[0] for x in X]
y_lengths = [y.shape[0] for y in Y]
w_lengths = [w.shape[0] for w in W]
set_x = set(x_lengths)
if (len(set_x) != 1):
raise Exception('All input arrays (x) should have the same number of samples.')
set_y = set(y_lengths... |
class NodeInstanceFilter(NodeFilter):
    """Filter that accepts exactly one node, matched by id."""

    def __init__(self, node: Node):
        self.node = node

    def filter(self, node: Node):
        # A node passes iff its id equals the reference node's id.
        return node.id == self.node.id
def wgan_discriminator(batch_local, batch_global, d_cnum, mask=None, reuse=False):
with tf.variable_scope('discriminator', reuse=reuse):
dlocal = wgan_local_discriminator(batch_local, d_cnum, reuse=reuse)
dglobal = wgan_global_discriminator(batch_global, d_cnum, reuse=reuse)
dout_local = tf.... |
class AdapterBertTransformerEncoderLayer(nn.Module):
def __init__(self, args):
super().__init__()
self.args = args
self.embed_dim = args.encoder_embed_dim
self.quant_noise = getattr(args, 'quant_noise_pq', 0)
self.quant_noise_block_size = (getattr(args, 'quant_noise_pq_block_... |
def evaluate_all_datasets(arch: Text, datasets: List[Text], xpaths: List[Text], splits: List[Text], config_path: Text, seed: int, raw_arch_config, workers, logger):
(machine_info, raw_arch_config) = (get_machine_info(), deepcopy(raw_arch_config))
all_infos = {'info': machine_info}
all_dataset_keys = []
... |
def assert_hf_src_format(src):
assert isinstance(src, dict)
dict_keys = list(src.keys())
assert all((isinstance(src[k], list) for k in dict_keys)), f'expected dict of lists, got: {[(k, type(src[k])) for k in dict_keys]}'
assert all(((len(src[k]) == len(src[dict_keys[0]])) for k in dict_keys)), f'expecte... |
def test_field_replace_var(field_mock):
    # After replace_variable_reference(var, var_2), the field reference's
    # source must point at the replacement variable.
    var = vr.VariableReference(MagicMock(), int)
    var_2 = vr.VariableReference(MagicMock(), int)
    ref = vr.FieldReference(var, field_mock)
    ref.replace_variable_reference(var, var_2)
    assert (ref.source == var_2)
def get_shape_from_obs_space(obs_space):
if (obs_space.__class__.__name__ == 'Box'):
obs_shape = obs_space.shape
elif (obs_space.__class__.__name__ == 'list'):
obs_shape = obs_space
elif (obs_space.__class__.__name__ == 'Dict'):
obs_shape = obs_space.spaces
else:
raise No... |
def add_ifc_config(cfg):
cfg.MODEL.IFC = CN()
cfg.MODEL.IFC.NUM_CLASSES = 80
cfg.INPUT.SAMPLING_FRAME_NUM = 5
cfg.INPUT.SAMPLING_FRAME_RANGE = 20
cfg.INPUT.SAMPLING_FRAME_SHUFFLE = False
cfg.INPUT.AUGMENTATIONS = []
cfg.MODEL.IFC.MASK_WEIGHT = 3.0
cfg.MODEL.IFC.DICE_WEIGHT = 3.0
cfg.... |
class BlobProto(_message.Message):
    """Protobuf message class for BlobProto.

    NOTE(review): this looks protoc-generated (reflection metaclass plus a
    descriptor binding) — do not edit by hand; regenerate from the .proto.
    """
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _BLOBPROTO
def test_json_memory_get(config: Config, memory_item: MemoryItem, mock_get_embedding):
index = JSONFileMemory(config)
assert (index.get('test') == None), 'Cannot test get() because initial index is not empty'
index.add(memory_item)
retrieved = index.get('test')
assert (retrieved is not None)
ass... |
class TestActivationCheckpointing(unittest.TestCase):
def _test_checkpoint_wrapper(self, device, log_memory_usage=False):
def get_loss_and_gnorm(model):
torch.manual_seed(1)
input = torch.rand(2, 16, 32).requires_grad_(True).to(device)
model.zero_grad()
loss =... |
class BaseNetwork(nn.Module):
def __init__(self):
super(BaseNetwork, self).__init__()
def print_network(self):
if isinstance(self, list):
self = self[0]
num_params = 0
for param in self.parameters():
num_params += param.numel()
print(('Network [%s]... |
def emulate_int8_tensor(w, scale=None, zero_point=None, bits=8):
if (scale is None):
obs = torch.quantization.observer.MinMaxObserver()
obs.to(device=w.device)
_ = obs(w)
(scale, zero_point) = obs.calculate_qparams()
scale = scale.cuda().type_as(w)
zero_point = zero_p... |
def _get_hashed_exception(prefix: str, message: str) -> type[CheckFailed]:
    """Return a CheckFailed subclass named `prefix` + SHA-1(message).

    Hashing the message yields a stable, collision-resistant class name per
    distinct failure message.
    """
    digest = sha1(message.encode('utf-8')).hexdigest()
    return get_exception(f'{prefix}{digest}')
def test_empty_like2():
    # NOTE(review): input is [N, M, 2] but the expected output shape is
    # [2, N, N] — looks deliberate for empty_like2's reshaping behavior,
    # but confirm against the empty_like2 implementation (the dtype must
    # be preserved in any case).
    A = np.ndarray([N, M, 2], dtype=np.complex64)
    out = empty_like2(A)
    assert (list(out.shape) == [2, N, N])
    assert (out.dtype == np.complex64)
def log_subprocess_output(i, p, ckpt_path, tag, start, end):
outfile = os.path.join(ckpt_path, 'test', ('%s_range_%s_%s.stdout' % (tag, start, end)))
logging_rank((('# ' + ('-' * 76)) + ' #'))
logging_rank(('stdout of subprocess %s with range [%s, %s]' % (i, (start + 1), end)))
logging_rank((('# ' + ('-... |
class TorchVisionModel(PretrainedModel):
def __init__(self, model_fn, tasks, model_args):
super(TorchVisionModel, self).__init__()
self.tasks = tasks
self.model_uncertainty = model_args.model_uncertainty
self.model = model_fn(pretrained=model_args.pretrained)
self.pool = nn.A... |
def re_key_value(prefix, key_str: str):
keys = key_str.split(' ')
segs = [(('.*' + prefix) + '.*')]
for key in keys[:(- 1)]:
if (key == ''):
continue
seg = '{}=(?P<{}>\\S+)'.format(key, key)
segs.append(seg)
seg = '{}=(?P<{}>.*)'.format(keys[(- 1)], keys[(- 1)])
s... |
class KitchenEnv(GymEnv):
SUBTASKS = ['microwave', 'kettle', 'slide cabinet', 'hinge cabinet', 'bottom burner', 'light switch', 'top burner']
def __init__(self, *args, **kwargs):
if (args[0]['task'] == 'misaligned'):
self.name = 'kitchen-mlsh-v0'
else:
self.name = 'kitche... |
class GeneralMulAttConvLayer(MessagePassing):
def __init__(self, in_channels, out_channels, improved=False, cached=False, bias=True, **kwargs):
super(GeneralMulAttConvLayer, self).__init__(aggr=cfg.gnn.agg, **kwargs)
self.heads = cfg.gnn.att_heads
self.in_channels = int(((in_channels // self... |
def build_model(args, state_dict):
(train_loader, test_loader, data_shape) = get_dataset(args)
hidden_dims = tuple(map(int, args.dims.split(',')))
strides = tuple(map(int, args.strides.split(',')))
if args.autoencode:
def build_cnf():
autoencoder_diffeq = layers.AutoencoderDiffEqNet(... |
_grad()
def generate_latent_ids(H, ae, train_loader, val_loader=None):
train_latent_ids = generate_latents_from_loader(H, ae, train_loader)
if (val_loader is not None):
val_latent_ids = generate_latents_from_loader(H, ae, val_loader)
else:
val_latent_ids = None
save_latents(H, train_late... |
def isend(tensor, dst, group=group.WORLD, tag=0):
_check_single_tensor(tensor, 'tensor')
if _rank_not_in_group(group):
return
if (group == GroupMember.WORLD):
_check_default_pg()
return _default_pg.send([tensor], dst, tag)
else:
group_dst_rank = _get_group_rank(group, dst... |
def read_predictions(submission_file):
    """Read a JSONL submission file and collect the prediction values.

    Each non-blank line must be a JSON object with a 'prediction' key.
    Returns the list of prediction values in file order.

    Raises json.JSONDecodeError on malformed lines and KeyError when a
    line lacks 'prediction'.
    """
    predictions = []
    # JSON is UTF-8 by spec; pin the encoding rather than relying on the
    # platform locale (which previously could mis-decode non-ASCII values).
    with open(submission_file, 'r', encoding='utf-8') as reader:
        for line in reader:
            line = line.strip()
            if line:  # skip blank lines
                predictions.append(json.loads(line)['prediction'])
    return predictions
def register_Ns3MmWaveMacCschedSapProviderCschedLcReleaseReqParameters_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::MmWaveMacCschedSapProvider::CschedLcReleaseReqParameters const &', 'arg0')])
cls.add_instance_attribute('m_logicalChannelIdentity', 'std::vector< unsigne... |
class X3DHead(nn.Module):
def __init__(self, dim_in, dim_inner, dim_out, num_classes, pool_size, dropout_rate=0.0, act_func='softmax', inplace_relu=True, eps=1e-05, bn_mmt=0.1, norm_module=nn.BatchNorm3d, bn_lin5_on=False):
super(X3DHead, self).__init__()
self.pool_size = pool_size
self.drop... |
def simCreateVisionSensor(options, intParams, floatParams, color):
    """Thin cffi wrapper over the C `simCreateVisionSensor` call.

    Passes arguments straight through and returns the C function's result
    after error checking.
    """
    if (color is None):
        # The C side expects NULL when no color override is given.
        color = ffi.NULL
    ret = lib.simCreateVisionSensor(options, intParams, floatParams, color)
    # _check_return presumably raises on error codes — confirm its contract.
    _check_return(ret)
    return ret
class NodeAttributeSpecification():
    """Removed API stub: construction always fails with migration guidance."""

    def __init__(self):
        # Deliberately unconstructable — feature preparation moved out of
        # this package; the error points users at the replacements.
        raise ValueError('this functionality has been removed; please use pandas or sklearn for feature preparation')
def simInvertMatrix(matrix):
    """Invert `matrix` via the C `simInvertMatrix` call and return the result
    as a Python list.

    `matrix` is a flat float sequence — presumably a transform matrix in the
    layout the C API expects; confirm against the simInvertMatrix docs.
    """
    # Copy the values into a C float array; the C call mutates it in place.
    c_matrix = ffi.new('float []', matrix)
    ret = lib.simInvertMatrix(c_matrix)
    _check_return(ret)
    return list(c_matrix)
class F1Benchmark():
def __init__(self, dataset):
self.dataset = dataset
def eval(self, eval_trackers=None):
if (eval_trackers is None):
eval_trackers = self.dataset.tracker_names
if isinstance(eval_trackers, str):
eval_trackers = [eval_trackers]
ret = {}
... |
def count_paths_with_label(fsa: Fsa, num_frames: int, label: str):
(_n, _t, count_blank_sym) = count_all_paths_with_label_in_frame(fsa=fsa, label=label)
n_t = sympy.Symbol('T', integer=True)
t1 = sympy.Symbol('t', integer=True)
count_blank_sym = count_blank_sym.subs(_n, n_t).subs(_t, (t1 - 1)).simplify(... |
class DecisionTreeAadWrapper(AadForest):
def __init__(self, x, y, max_depth=10, score_type=IFOR_SCORE_TYPE_CONST, ensemble_score=ENSEMBLE_SCORE_LINEAR, random_state=None, detector_type=AAD_IFOREST):
Aad.__init__(self, detector_type, ensemble_score, random_state)
self.max_depth = max_depth
se... |
def db():
    # Build an InterwikiDB from a bz2-recompressed copy of the wikidata
    # fixture, staged through a temporary file.
    with open(WIKIDATA_FIXTURE_FILE, 'rb') as f:
        data = bz2.compress(f.read())
    with tempfile.NamedTemporaryFile() as temp_file:
        temp_file.write(data)
        # flush + fsync so build() sees the full bytes on disk before the
        # NamedTemporaryFile is deleted on context exit.
        temp_file.flush()
        os.fsync(temp_file.fileno())
        return InterwikiDB.build(temp_file.name)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.