code stringlengths 101 5.91M |
|---|
class T1Dataset(torch.utils.data.Dataset):
def __init__(self, X, y, transform=None):
self.X = X
self.y = y
self.transform = transform
def __len__(self):
return len(self.X)
def __getitem__(self, idx):
image = io.imread(self.X[idx])
if (self.transform is not Non... |
def simulate_policy():
task = generate_task(task_generator_id='picking')
env = CausalWorld(task=task, enable_visualization=True, skip_frame=3, seed=0, max_episode_length=600)
env = GymEnvWrapper(env)
file = './itr_1097499.pkl'
data = torch.load(file)
agent_state_dict = data['agent_state_dict']
... |
class HasNNaNPred(FunPred):
    # Predicate descriptor: checks the 'hasNoNaN' fast-math property.
    # NOTE(review): semantics of FunPred fields inferred from structure —
    # confirm against the FunPred base class definition.
    sig = (FastMathInst,)  # instruction type(s) this predicate applies to
    code = 'hasNoNaN'  # name of the underlying predicate hook
    type_constraints = _none  # no extra type constraints for this predicate
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, nc, inplanes, planes, stride=1, downsample=None, groups=1, base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if (norm_layer is None):
norm_layer = nn.BatchNorm2d
width = (int((planes *... |
def get_model_and_data(data_path, dataset_name, model_name, model_path):
if (dataset_name == 'VOC'):
data_module = VOCDataModule(data_path, test_batch_size=1)
if (model_name == 'vgg16'):
model = VGG16ClassifierModel.load_from_checkpoint(model_path, num_classes=20, dataset=dataset_name)
... |
def sentnet_LSTM_gray(width, height, frame_count, lr, output=9):
network = input_data(shape=[None, width, height], name='input')
network = tflearn.lstm(network, 128, return_seq=True)
network = tflearn.lstm(network, 128)
network = tflearn.fully_connected(network, 9, activation='softmax')
network = tf... |
def test(epoch):
model.eval()
test_loss = 0
correct = 0
for (data, target) in test_loader:
if args.cuda:
(data, target) = (data.cuda(), target.cuda())
(data, target) = (Variable(data, volatile=True), Variable(target))
output = model(data)
test_loss += F.nll_lo... |
class DoWhileScope(ControlFlowScope):
    """Pretty-prints a do-while control-flow scope as pseudo source text."""

    # Underlying control-flow node wrapped by this scope.
    header: cf.DoWhileScope

    def as_string(self, indent: int=0):
        # Emit 'do:' at this indent, the body (from the base class) one level
        # in, then a trailing 'while <test>' footer at this indent.
        header = ((indent * INDENTATION) + 'do:\n')
        # NOTE(review): `self.header.test.as_string` is interpolated without
        # parentheses; if it is a method (as `as_string` is on this class)
        # this embeds a bound-method repr instead of the test's source text.
        # Confirm whether it is a property on cf nodes before changing.
        footer = ((indent * INDENTATION) + f'''while {self.header.test.as_string}
''')
        return ((header + super().as_string(indent)) + footer)
class DistilBertModelTest(CommonTestCases.CommonModelTester):
all_model_classes = ((DistilBertModel, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DistilBertForSequenceClassification) if is_torch_available() else None)
test_pruning = True
test_torchscript = True
test_resize_embeddings = True
... |
class DocumentState(object):
def __init__(self, key):
self.doc_key = key
self.sentence_end = []
self.token_end = []
self.tokens = []
self.subtokens = []
self.info = []
self.segments = []
self.real_segments = []
self.start_indices = []
s... |
def _check_inputs(laplace_rep_func, p, t, recon_dim, ilt_algorithm, use_sphere_projection, ilt_reconstruction_terms, options):
if (not isinstance(laplace_rep_func, nn.Module)):
raise RuntimeError('laplace_rep_func must be a descendant of torch.nn.Module')
if (not isinstance(p, Tensor)):
raise Ru... |
def write_adversarial_robustness_vnnlib(filename, initial_comment, input_domain, ground_truth, n_classes=10):
with open(filename, 'w') as f:
f.write(f'''; {initial_comment}
''')
f.write('\n')
linearized_domain = input_domain.view((- 1), 2)
for i in range(linearized_domain.shape[0]):
... |
def time_features(dates, timeenc=1, freq='h'):
if (timeenc == 0):
dates['month'] = dates.date.apply((lambda row: row.month), 1)
dates['day'] = dates.date.apply((lambda row: row.day), 1)
dates['weekday'] = dates.date.apply((lambda row: row.weekday()), 1)
dates['hour'] = dates.date.app... |
def test_psi_minus_phi_plus():
    """All 200 psi-minus/phi-plus scenarios must reproduce the psi-minus state."""
    for trial in range(200):
        k1, _k2, _k3, _k4, _a3 = create_scenario(psi_minus, phi_plus, trial)
        ordered_state = correct_order(k1.state, k1.keys)
        assert numpy.array_equal(ordered_state, psi_minus)
def output_classification(module_name, immediate_output_dict):
    """Return class probabilities for the first cached output of *module_name*.

    Looks up the module's stored outputs and applies a softmax over dim 1.
    """
    logits = immediate_output_dict[module_name][0]
    return F.softmax(logits, dim=1)
class TransformerEncoderLayer(nn.Module):
def __init__(self, d_model: int=512, num_heads: int=8, d_ff: int=2048, dropout_p: float=0.3) -> None:
super(TransformerEncoderLayer, self).__init__()
self.attention_prenorm = nn.LayerNorm(d_model)
self.feed_forward_prenorm = nn.LayerNorm(d_model)
... |
def ufunc_add_outer_where2(A: dace.int32[(2, 2, 2, 2, 2)], B: dace.int32[(2, 2, 2, 2, 2)], W: dace.bool_[(2, 1, 2)]):
    # Outer addition of A and B (output rank = rank(A) + rank(B)) with the
    # boolean mask W broadcast via the `where=` argument; exercises DaCe's
    # support for np.ufunc.outer combined with a `where` mask.
    return np.add.outer(A, B, where=W)
class FlaxRoFormerModel(metaclass=DummyObject):
    # Placeholder class generated for environments without Flax installed;
    # any use raises a helpful error instead of importing the real model.
    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        # Raises if the 'flax' backend is unavailable.
        requires_backends(self, ['flax'])
class DeblurDataset(Dataset):
def __init__(self, path, frames, future_frames, past_frames, crop_size=(256, 256), ds_type='train', centralize=True, normalize=True):
ds_name = 'gopro_ds'
self.datapath_blur = join(path, '{}_{}'.format(ds_name, ds_type))
self.datapath_gt = join(path, '{}_{}_gt'.... |
def warn_once(position, message, level=0):
if ((level < LEVEL) or (message in _warn_once_seen)):
return
warn = CompileWarning(position, message)
line = ('warning: %s\n' % warn)
if listing_file:
listing_file.write(line)
if echo_file:
echo_file.write(line)
_warn_once_seen[m... |
class SubsetComplementVisDial():
def __init__(self, config):
super().__init__()
self.ndcg = NDCG(is_direct_ranks=True)
self.dense_annotations_jsonpath = config.dense_annotations_jsonpath
self.model_preds_root = config.model_preds_root
self.models_list = self.get_model_type_li... |
class _SynchronizedBatchNorm(_BatchNorm):
def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True):
assert (ReduceAddCoalesced is not None), 'Can not use Synchronized Batch Normalization without CUDA support.'
super(_SynchronizedBatchNorm, self).__init__(num_f... |
_utils.test(arch=[ti.opengl, ti.vulkan])
def test_non_dense_snode():
n = 8
x = ti.field(dtype=ti.f32)
y = ti.field(dtype=ti.f32)
blk = ti.root.dense(ti.i, n)
blk.place(x)
blk.dense(ti.i, n).place(y)
with pytest.raises(RuntimeError, match='AOT: only supports dense field'):
m = ti.aot.... |
def scatter_gather(data):
if (not torch.distributed.is_initialized()):
return [data]
synchronize()
rank = torch.distributed.get_rank()
data_to_communicate = torch.empty(256, dtype=torch.uint8, device='cuda')
if (rank == 0):
tmp_dir = tempfile.mkdtemp()
_encode(data_to_communi... |
def get_annotations_from_ann_file(nlp, sentence, ann_file):
event_buffer = {}
span_buffer = {}
label_buffer = {}
argmod_buffer = {}
with open(ann_file) as f:
lines = f.readlines()
for (idx, line) in enumerate(lines):
if line.startswith('E'):
(tradeoff_id, ... |
class SuperTanh(SuperModule):
def __init__(self) -> None:
super(SuperTanh, self).__init__()
def abstract_search_space(self):
return spaces.VirtualNode(id(self))
def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
return self.forward_raw(input)
def forward_raw(self, ... |
class PredictRunner(object):
def __init__(self):
self.args = self.parse_args()
self.predictor = CopyRnnPredictor(model_info=self.args.model_path, vocab_info=self.args.vocab_path, beam_size=self.args.beam_size, max_src_length=self.args.max_src_len, max_target_len=self.args.max_target_len)
sel... |
class Encoder(object):
def __init__(self, cfg):
self.x_dim = [cfg.resolution, cfg.resolution, 1]
self.name = 'encoder_net'
self.has_use = False
self.dim = cfg.e_dim
self.ksize = cfg.e_ksize
self.out_dim = cfg.z_dim
self.viewpoints = cfg.viewpoints
self... |
def require_faiss(test_case):
    """Test decorator: skip *test_case* unless faiss is importable."""
    if is_faiss_available():
        return test_case
    return unittest.skip('test requires `faiss`')(test_case)
def goal_publisher():
rospy.init_node((((vehicle_type + '_') + vehicle_id) + '_ego_swarm_goal'))
goal_pub = rospy.Publisher((((vehicle_type + '_') + vehicle_id) + '/move_base_simple/goal'), PoseStamped, queue_size=1)
rate = rospy.Rate(20)
while (not rospy.is_shutdown()):
goal_point = PoseStamped... |
def uniform_int(random_state, lower, upper, number, log_scale=False):
if (not isinstance(lower, int)):
raise ValueError("lower must be of type 'int', got {0} instead".format(type(lower)))
if (not isinstance(upper, int)):
raise ValueError("upper must be of type 'int', got {0} instead".format(type... |
def create_harmonic_hparams(hparams_string=None, verbose=False):
hparams = tf.contrib.training.HParams(type=0, layers=3, blocks=2, dilation_channels=130, residual_channels=130, skip_channels=240, input_channel=60, condition_channel=364, output_channel=240, sample_channel=60, initial_kernel=10, kernel_size=2, bias=T... |
def read_prediction(pred_file):
    """Load one JSON object per line from *pred_file* and return them as a list."""
    print('Read prediction from', pred_file)
    with open(pred_file) as handle:
        predictions = [json.loads(raw_line) for raw_line in handle]
    print('Number of predictions', len(predictions))
    return predictions
class FederatedFlow(FLSpec):
def __init__(self, model=None, optimizer=None, rounds=3, **kwargs):
super().__init__(**kwargs)
if (model is not None):
self.model = model
self.optimizer = optimizer
else:
self.model = Net()
self.optimizer = optim.SG... |
def get_single_vectors_n_masks(word_embeddings, sequence):
sequence_embeddings = word_embeddings({'tokens': {'tokens': sequence['tokens']['tokens']}})
sequence = sequence['tokens']
if ('mask' in sequence):
sequence_mask = sequence['mask'].to(dtype=sequence_embeddings.dtype)
elif (len(sequence['t... |
def _a(alf, bet, i, j):
return ((((((sp.S(2) * (j + alf)) * (j + bet)) / (((((sp.S(2) * j) + alf) + bet) + 1) * (((sp.S(2) * j) + alf) + bet))) * delta((i + 1), j)) - ((((alf ** 2) - (bet ** 2)) / (((((sp.S(2) * j) + alf) + bet) + sp.S(2)) * (((sp.S(2) * j) + alf) + bet))) * delta(i, j))) + ((((sp.S(2) * (j + 1)) *... |
class RectBivariateSpline(BivariateSpline):
def __init__(self, x, y, z, bbox=([None] * 4), kx=3, ky=3, s=0):
(x, y) = (ravel(x), ravel(y))
if (not np.all((diff(x) > 0.0))):
raise ValueError('x must be strictly increasing')
if (not np.all((diff(y) > 0.0))):
raise Value... |
def default_matching_networks_support_encoder(feature_dimension: int) -> nn.Module:
    """Default Matching-Networks support-set encoder.

    A single-layer bidirectional LSTM whose input and hidden sizes both equal
    *feature_dimension*, operating batch-first.
    """
    return nn.LSTM(
        input_size=feature_dimension,
        hidden_size=feature_dimension,
        num_layers=1,
        batch_first=True,
        bidirectional=True,
    )
def build_head(cfg):
    """Instantiate a head module from *cfg*.

    ``cfg.type`` names the class inside ``models.head``; every other key in
    *cfg* is forwarded as a constructor keyword argument.
    """
    kwargs = {key: cfg[key] for key in cfg if key != 'type'}
    return models.head.__dict__[cfg.type](**kwargs)
def tabulate_events(logdir: str, variables: List[str]) -> pd.DataFrame:
all_runs = list()
count = 0
for run_dir in tqdm(os.listdir(logdir)):
if run_dir.startswith('.'):
continue
if (not os.path.isdir(os.path.join(logdir, run_dir))):
print(run_dir)
continue... |
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split *x* into sentences with nltk and re-join them newline-separated.

    Any literal '<n>' markers are stripped before sentence tokenization.

    Raises:
        AssertionError: if nltk is not installed.
    """
    # BUG FIX: re.sub returns a new string; the original discarded the result,
    # so '<n>' markers were never actually removed. Bind it back to x.
    x = re.sub('<n>', '', x)
    assert NLTK_AVAILABLE, 'nltk must be installed to separate newlines between sentences. (pip install nltk)'
    return '\n'.join(nltk.sent_tokenize(x))
.tensorflow
def test_pooling():
import tensorflow as tf
from dace.frontend.tensorflow import TFSession
size_in = [1, 112, 112, 3]
np.random.seed(0)
input_tensor = np.random.uniform(size=size_in).astype(np.float32)
input_placeholder = tf.placeholder(tf.float32, size_in)
ksize = [1, 3, 3, 1]
... |
class LinkingVariantConfigNode(ConfigNode):
    """Config-tree node for one linking variant; its children enumerate every
    dependency-inclusion dimension."""

    def __init__(self, parent, linking_variant):
        super(LinkingVariantConfigNode, self).__init__(parent, linking_variant)

    def get_children(self):
        children = []
        for dimension in DEPS_INCLUSION_DIMENSIONS:
            children.append(DependencyInclusionConfigNode(self, dimension))
        return children
(config_path='configs/', config_name='config.yaml')
def main(config: DictConfig):
    """Entry point: apply utility tweaks to *config*, optionally pretty-print
    it, then run training and return its result.

    Imports are deferred to keep CLI startup fast.
    """
    from src.train import train
    from src.utils import utils
    # Apply optional config post-processing (e.g. disabling warnings, debug mode).
    utils.extras(config)
    if config.get('print_config'):
        utils.print_config(config, resolve=True)
    return train(config)
def load_encode_dict(dataset):
    """Return the multi-character -> single-character SMILES encoding map.

    Args:
        dataset: either 'guacamol' or 'zinc'.

    Returns:
        dict mapping multi-character tokens (e.g. 'Br', 'Cl') to one-letter codes.

    Raises:
        ValueError: for any other dataset name. (The original fell through and
        raised an opaque UnboundLocalError instead.)
    """
    if dataset == 'guacamol':
        return {'Br': 'Y', 'Cl': 'X', 'Si': 'A', 'Se': 'Z', '': 'R', 'se': 'E'}
    if dataset == 'zinc':
        return {'Br': 'Y', 'Cl': 'X', 'Si': 'A', '': 'R'}
    raise ValueError("unknown dataset {0!r}; expected 'guacamol' or 'zinc'".format(dataset))
def generation_collate_fn(data, tokenizer):
all_input_ids = []
all_labels = []
for feat in data:
all_input_ids.append(feat.src_input_ids)
all_labels.append(feat.tgt_input_ids)
src_encoded = tokenizer.pad({'input_ids': all_input_ids}, return_tensors='pt')
tgt_encoded = tokenizer.pad({... |
def train(train_loader, model, criterion, optimizer, scaler, epoch, lr_schedule, args):
batch_time = AverageMeter('Time', ':6.2f')
data_time = AverageMeter('Data', ':6.2f')
mem = AverageMeter('Mem (GB)', ':6.1f')
metric_names = models.get_metric_names(args.model)
iters_per_epoch = (len(train_loader)... |
def parse_result(fields):
result = Result()
result.instruction_per_byte = float(fields.pop(0))
assert (fields.pop(0) == 'ins/byte')
fields.pop(0)
fields.pop(0)
result.speed_gbs = float(fields.pop(0))
assert (fields.pop(0) == 'GB/s')
fields.pop(0)
fields.pop(0)
result.instruction_... |
class MaskRCNNLossComputation(object):
def __init__(self, proposal_matcher, discretization_size):
self.proposal_matcher = proposal_matcher
self.discretization_size = discretization_size
def match_targets_to_proposals(self, proposal, target):
match_quality_matrix = boxlist_iou(target, pro... |
class MetricLogger(object):
def __init__(self, delimiter='\t'):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for (k, v) in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinst... |
(name='batcher', params=[_batcher_bs_100, _batcher_full_batch])
def _batcher_fixture(request: Any) -> DatasetTransformer:
    # Parametrized fixture body: returns whichever batcher configuration the
    # fixture decorator supplied for this run (batch-size-100 or full-batch,
    # per the params listed on the decorator).
    return request.param
def scale_by_learning_rate(learning_rate: ScalarOrSchedule):
    """Build an optax transform scaling updates by the negated learning rate.

    Accepts either a constant scalar or a schedule (a callable of step count).
    """
    if not callable(learning_rate):
        return optax.scale(-learning_rate)

    def negated_schedule(count):
        return -learning_rate(count)

    return optax.scale_by_schedule(negated_schedule)
def run_fn(node_rank: int, ip_list: List[str]) -> Optional[str]:
num_nodes = len(ip_list)
return f''' cd pytorch-distributed-resnet
python3 -m torch.distributed.launch --nproc_per_node=1 --nnodes={num_nodes} --node_rank={node_rank} --master_addr={ip_list[0]} --master_port=8008 resnet_ddp.py --num... |
_module()
class VQAEXDataset(MInstrDataset):
def __init__(self, *args, is_e_dataset: bool, has_annotation=True, **kwargs):
super().__init__(*args, **kwargs, placeholders=(IMAGE_PLACEHOLDER, QUESTION_PLACEHOLDER))
self.has_annotation = has_annotation
self.is_e_dataset = is_e_dataset
def _... |
('/quit', methods=['POST'])
def quit_app():
msg = None
image_url = request.get_json()['image_url']
curr_image_url = request.get_json()['curr_image_url']
image_name = image_url[7:]
if (curr_image_url != 'none'):
curr_image_name = curr_image_url[7:]
src = os.path.join(app.config['temp'... |
def test_ClusterGCN_activations():
(G, _) = create_graph_features()
generator = ClusterNodeGenerator(G)
cluster_gcn = ClusterGCN(layer_sizes=[2], generator=generator, activations=['relu'])
assert (cluster_gcn.activations == ['relu'])
cluster_gcn = ClusterGCN(layer_sizes=[2, 2], generator=generator, ... |
def get_distance(dist, v1, v2):
    """Look up the distance between *v1* and *v2* in the *dist* mapping;
    missing pairs are treated as unreachable (infinity)."""
    return dist.get((v1, v2), float('inf'))
def is_manylinux1_compatible():
    """Return True when running on a manylinux1-compatible Linux platform."""
    # Only these two platform tags can ever be manylinux1.
    if get_platform() not in {'linux_x86_64', 'linux_i686'}:
        return False
    # If the _manylinux override module exists and declares compatibility,
    # that answer is authoritative; a missing module or attribute falls
    # through to the glibc check below.
    try:
        import _manylinux
        return bool(_manylinux.manylinux1_compatible)
    except (ImportError, AttributeError):
        pass
    # Heuristic fallback: glibc >= 2.5 implies manylinux1 compatibility.
    return glibc.have_compatible_glibc(2, 5)
class TOMTrainer():
def __init__(self, gen, dis, dataloader_train, dataloader_val, gpu_id, log_freq, save_dir, n_step):
if torch.cuda.is_available():
self.device = torch.device(('cuda:' + str(gpu_id)))
else:
self.device = torch.device('cpu')
self.gen = gen.to(self.dev... |
def duplicate_command(ctx, param_hint):
ctx.obj.options_processed = False
error_strs = []
error_strs.append(('Error: Command %s specified multiple times.' % param_hint))
error_strs.append('The %s command may appear only one time.')
logging.error('\n'.join(error_strs))
raise click.BadParameter(('... |
class ReproducibleRandomSampler(RandomSampler):
def __init__(self, data_source, seed=, epoch=0, **kwargs):
if ('generator' in kwargs):
MSG = ('Cannot give a separate generator when using ' + 'ReproducibleRandomSampler')
raise ValueError(MSG)
super().__init__(data_source, **kw... |
def evaluate(args, data_loader, epoch, model):
total_lsd = 0
total_visqol = 0
lsd_count = 0
visqol_count = 0
total_cnt = 0
total_filenames = []
files_to_log = []
wandb_n_files_to_log = (args.wandb.n_files_to_log if ('wandb' in args) else args.wandb_n_files_to_log)
with torch.no_grad(... |
def resnet_v1_50(inputs, num_classes=None, is_training=True, global_pool=False, output_stride=None, reuse=None, scope='resnet_v1_50'):
blocks = [resnet_utils.Block('block1', bottleneck, (([(256, 64, 1)] * 2) + [(256, 64, 2)]))]
return resnet_v1(inputs, blocks, num_classes, is_training, global_pool=global_pool, ... |
def ring_network(ring_size: int, lookahead: int, stop_time: int, log_path: str):
tick = time()
if (not os.path.exists(log_path)):
os.mkdir(log_path)
CC_DELAY = .0
MEMO_SIZE = 50
RAW_FIDELITY = 0.9
ATTENUATION = 0.0002
SWAP_DEG_RATE = 1
tl = Timeline(stop_time=stop_time)
route... |
def loadids(test_files):
    """Yield every id stored under the IDS entry of *test_files*, across all keys."""
    for bucket in test_files[IDS].values():
        yield from bucket
def apply_mv_norm(features):
    """Mean/variance-normalize *features*; inputs with fewer than two rows
    are returned unchanged (the statistics would be degenerate)."""
    if features.size(0) < 2:
        return features
    mean, invstddev = calc_mean_invstddev(features)
    return (features - mean) * invstddev
def draggable_toolbox(*ids):
    """Build a hidden, absolutely-positioned draggable grid containing a
    single fixed-size (non-resizable) toolbox item."""
    id_m, id_toolbox = ids
    toolbox_layout = [{'i': id_toolbox, 'x': 10, 'y': 5, 'w': 3, 'h': 9, 'isResizable': False}]
    grid = ddrage.GridLayout(
        id=id_m,
        clearSavedLayout=True,
        children=[],
        verticalCompact=False,
        layout=toolbox_layout,
        **draggable_layout,
    )
    return html.Div(grid, style={'position': 'absolute', 'display': 'none'})
def visualize_mask_on_image(img, mask, save_path=None, add_edge=False, dark_background=False):
if (mask.max() > 1):
mask = (mask.astype(np.uint8) // 255)
if (len(mask.shape) == 2):
mask = np.expand_dims(mask, axis=2)
mask = np.tile(mask, (1, 1, 3))
cmap = np.array([255, 117, 44], dty... |
def test_string_primitive_statement_randomize_value(default_test_case):
    """Randomized string values must respect the configured length bound."""
    string_stmt = stmt.StringPrimitiveStatement(default_test_case)
    string_stmt.randomize_value()
    value_length = len(string_stmt.value)
    assert 0 <= value_length <= config.configuration.test_creation.string_length
def last_producer(ops, blob):
    """Return the index of the last op in *ops* whose outputs contain *blob*.

    Args:
        ops: sequence of operator objects exposing an ``output`` collection.
        blob: blob name to search for.

    Raises:
        ValueError: if no op in *ops* produces *blob*.
    """
    for i, op in reversed(list(enumerate(ops))):
        if blob in op.output:
            return i
    # BUG FIX: the original passed blob as a second ValueError argument
    # instead of formatting it into the message string.
    raise ValueError('Failed to find last producer of blob, %s' % blob)
class SchemeHomset_generic(HomsetWithBase):
Element = SchemeMorphism
def __reduce__(self):
return (SchemeHomset, (self.domain(), self.codomain(), self.homset_category(), self.base_ring(), False, False))
def __call__(self, *args, **kwds):
return Set_generic.__call__(self, *args, **kwds)
d... |
class TFBinding(flexs.Landscape):
def __init__(self, landscape_file: str):
super().__init__(name='TF_Binding')
data = pd.read_csv(landscape_file, sep='\t')
score = data['E-score']
norm_score = ((score - score.min()) / (score.max() - score.min()))
self.sequences = dict(zip(dat... |
def _cell_list(unit_type, num_units, num_layers, num_residual_layers, forget_bias, dropout, mode, num_gpus, base_gpu=0, single_cell_fn=None):
if (not single_cell_fn):
single_cell_fn = _single_cell
cell_list = []
for i in range(num_layers):
utils.print_out((' cell %d' % i), new_line=False)
... |
class NuDyckWords(Parent):
Element = NuDyckWord
def __init__(self, nu=()):
Parent.__init__(self, category=FiniteEnumeratedSets())
self._nu = to_word_path(nu)
if (self._nu is None):
raise ValueError('invalid nu supplied')
class options(GlobalOptions):
NAME = 'NuDyc... |
def create_spider_chart_plot(axis, data_to_plot, categories, accept_classes):
lables = [category.replace('_', '-') for category in categories]
vals = {cat: [cat_vals['auc'] for (x, cat_vals) in data_to_plot[cat].items() if (x in accept_classes)] for cat in categories}
for key in ['Center_Dist', 'Size_Simila... |
def load_model(model_type):
model_path = ISL_PATHS[model_type]
if (model_type == 'dpt_large'):
if (not os.path.exists(model_path)):
from basicsr.utils.download_util import load_file_from_url
load_file_from_url(remote_model_path, model_dir=annotator_ckpts_path)
model = DPT... |
class Partition6(nn.Module):
LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention... |
class CTCHead(nn.Module):
def __init__(self, in_dim, out_dim=4096, norm_last_layer=True, nlayers=3, hidden_dim=2048, bottleneck_dim=256):
super().__init__()
nlayers = max(nlayers, 1)
if (nlayers == 1):
self.mlp = nn.Linear(in_dim, bottleneck_dim)
else:
layers ... |
def observer_proc(points_queue, observations_queue):
pid = os.getpid()
while True:
point_to_observe = points_queue.get()
if (point_to_observe is None):
return
print(f'Process {pid}: Observer : observing data at point {point_to_observe}', flush=True)
new_observation = ... |
class StateMap(object):
new_machine = None
old_to_new_dict = None
new_to_old_dict = None
def __init__(self, new_machine):
self.new_machine = new_machine
self.old_to_new_dict = {}
self.new_to_old_dict = {}
def old_to_new(self, old_state_set):
key = self.make_key(old_st... |
def test_IndexedArray():
array = ak.Array([[0.0, 1.1, 2.2, 3.3], [], [4.4, 5.5, 6.6], None, [7.7], None, [8.8, 9.9, 10.0, 11.1, 12.2]])
assert (to_list(ak.operations.combinations(array, 2, replacement=False)) == [[(0.0, 1.1), (0.0, 2.2), (0.0, 3.3), (1.1, 2.2), (1.1, 3.3), (2.2, 3.3)], [], [(4.4, 5.5), (4.4, 6.... |
class NSFWMetric(Metric):
def __init__(self):
self._nsfw_detector: Optional[NSFWDetector] = None
def __repr__(self):
return 'NSFWMetric()'
def evaluate_generation(self, adapter_spec: AdapterSpec, request_state: RequestState, metric_service: MetricService, eval_cache_path: str) -> List[Stat]:... |
def compare_dict_difference(dict1, dict2, dict1_name='dict1', dict2_name='dict2', print_value_diff=True, verbose=False):
keys1 = set(dict1.keys())
keys2 = set(dict2.keys())
shared_keys = keys1.intersection(keys2)
keys1_unique = keys1.difference(shared_keys)
keys2_unique = keys2.difference(shared_key... |
def acoustic_preprocess(args, dim):
todo = list(Path(args.data_path).glob('*.wav'))
print(len(todo), 'audio files found in MOSI')
assert (args.feature_type in ['mel', 'linear', 'fbank']), 'Feature type unsupported'
output_dir = os.path.join(args.output_path, '_'.join(['mosi', (str(args.feature_type) + s... |
def lr_warmup(step):
if ((cfg['training_parameters']['use_warmup'] is True) and (i_iter <= cfg['training_parameters']['warmup_iterations'])):
alpha = (float(i_iter) / float(cfg['training_parameters']['warmup_iterations']))
return ((cfg['training_parameters']['warmup_factor'] * (1.0 - alpha)) + alpha... |
class GymEnv(Env, Serializable):
def __init__(self, env_name, record_video=True, video_schedule=None, log_dir=None, record_log=True, force_reset=False):
if (log_dir is None):
if (logger.get_snapshot_dir() is None):
logger.log('Warning: skipping Gym environment monitoring since sn... |
def get_rolled_and_unrolled_data(input_data, args):
opinionated_tags = ['JJ', 'JJR', 'JJS', 'RB', 'RBR', 'RBS', 'VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ']
all_rolled = []
all_unrolled = []
mixed_rolled = []
mixed_unrolled = []
unrolled = []
mixed = []
unrolled_ours = []
mixed_ours = []... |
def decode(codes, encoding):
    """Decode event *codes* back into a music object.

    The encoding's 'resolution' entry fixes the time grid used when
    reconstructing from the decoded note list.
    """
    time_resolution = encoding['resolution']
    note_list = decode_notes(codes, encoding)
    return reconstruct(note_list, time_resolution)
_args('v')
def relu(g, input):
if (input not in sym_help._quantized_ops):
from torch.onnx.symbolic_opset9 import relu
return relu(g, input)
kwargs = {'Y_scale_f': input.node()['Y_scale'], 'Y_zero_point_i': input.node()['Y_zero_point']}
output = g.op('_caffe2::Int8Relu', input, **kwargs)
... |
def one_hot_embedding(label, classes):
    """Return a float32 one-hot (or multi-hot) vector of length *classes*.

    Positions listed in *label* are set to 1.0; an empty label list yields an
    all-zero vector.
    """
    encoded = np.zeros(classes, dtype=np.float32)
    if len(label):
        encoded[label] = 1.0
    return encoded
_args('v', 'v', 'v', 'i', 'i', 'f', 'i', 'i', 'i')
def _lstm_full(g, input, hidden_v, weight_v, has_biases, num_layers, dropout, train, bidirectional, batch_first):
(hidden, weight) = (sym_help._unpack_list(hidden_v), sym_help._unpack_list(weight_v))
return _generic_rnn(g, 'LSTM', input, hidden, weight, has_bia... |
class LazyCompletionGradedAlgebraElement(LazyCauchyProductSeries):
def _format_series(self, formatter, format_strings=False):
P = self.parent()
cs = self._coeff_stream
v = cs._approximate_order
if isinstance(cs, Stream_exact):
if (not cs._constant):
m = cs... |
def _dict_to_filename(dict_):
if hasattr(dict_, 'items'):
return (('(' + '_'.join((('%s=%s' % (k, _dict_to_filename(v))) for (k, v) in dict_.items()))) + ')')
else:
return dict_ |
class TestProjections(TestCase):
def test_nullspace_and_least_squares_sparse(self):
A_dense = np.array([[1, 2, 3, 4, 0, 5, 0, 7], [0, 8, 7, 0, 1, 5, 9, 0], [1, 0, 0, 0, 0, 1, 2, 3]])
At_dense = A_dense.T
A = csc_matrix(A_dense)
test_points = ([1, 2, 3, 4, 5, 6, 7, 8], [1, 10, 3, 0, 1... |
class VarCopy(spacepy.datamodel.dmarray):
Allowed_Attributes = (spacepy.datamodel.dmarray.Allowed_Attributes + ['_cdf_meta'])
def __new__(cls, zVar):
obj = super(VarCopy, cls).__new__(cls, zVar[...], zVar.attrs.copy())
obj._cdf_meta = {k: getattr(zVar, k)() for k in ('compress', 'dv', 'nelems', ... |
def setup_s3():
print(('Creating S3 bucket at s3://%s' % S3_BUCKET_NAME))
s3_client = boto3.client('s3', aws_access_key_id=ACCESS_KEY, aws_secret_access_key=ACCESS_SECRET)
try:
s3_client.create_bucket(ACL='private', Bucket=S3_BUCKET_NAME)
except botocore.exceptions.ClientError as e:
if (... |
def acc_topk(logits, labels, topk=(1,)):
    """Top-k accuracy, in percent, for each k in *topk*.

    logits has shape (batch, classes); labels has shape (batch,). Returns a
    list with one scalar accuracy per requested k.
    """
    k_max = max(topk)
    # (k_max, batch) matrix of predicted class indices, best prediction first.
    ranked = lax.top_k(logits, k_max)[1].transpose()
    hits = ranked == labels.reshape(1, -1)
    batch_size = labels.shape[0]
    return [hits[:k].reshape(-1).sum(axis=0) * 100 / batch_size for k in topk]
.gpu
def test_gpu_localstorage():
    """Both GPU-map and local-storage transformations must apply to cudahello."""
    sdfg = cudahello.to_sdfg()
    applied = sdfg.apply_transformations(
        [GPUTransformMap, InLocalStorage], options=[{}, {'array': 'gpu_A'}]
    )
    assert applied == 2
    _test(sdfg)
def add_ResNet_convX_body(model, block_counts, freeze_at=2):
assert (freeze_at in [0, 2, 3, 4, 5])
p = model.Conv('data', 'conv1', 3, 64, 7, pad=3, stride=2, no_bias=1)
p = model.AffineChannel(p, 'res_conv1_bn', inplace=True)
p = model.Relu(p, p)
p = model.MaxPool(p, 'pool1', kernel=3, pad=1, stride... |
class CSVLogger(LoggerBase):
def experiment(self) -> dict[(str, object)]:
if (not hasattr(self, '_experiment')):
self._experiment = self.config
return self._experiment
_enabled
def log(self, metrics: dict[(str, object)]):
self.experiment.update(metrics)
_enabled
d... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.