code stringlengths 101 5.91M |
|---|
def drawArrows(_gui):
global fade
if (fade < fadeMax):
fade += 1
if (fade == 0):
updateArrows()
else:
arr = arrows.to_numpy()
vel = arr['vel'].reshape(1, (- 1))[0]
vel = ((((vel / vel.max()) * 221) + 17) * abs((fade / fadeMax)))
mean = vel.mean()
i... |
def plot_log_line(df, x, y, col, row, hue, name, ci=None, hue_order=model_names, title=None, xlabel=None, ylabel=None):
g = sns.relplot(data=df, x=x, y=y, col=col, row=row, hue=hue, kind='line', facet_kws={'sharey': False}, hue_order=hue_order, ci=ci, marker='o')
if (xlabel is None):
xlabel = x
if (... |
def all_gather_embeddings_labels(embeddings, labels):
if c_f.is_list_or_tuple(embeddings):
assert c_f.is_list_or_tuple(labels)
(all_embeddings, all_labels) = ([], [])
for i in range(len(embeddings)):
(E, L) = all_gather(embeddings[i], labels[i])
all_embeddings.append(... |
def _check_py_package(package):
try:
import_module(package)
except ImportError:
return False
else:
return True |
def execute_fixed_length_gru(xs_np, h0_np, w0_np, w_np, b_np, num_layers=1, dropout=0.0, bidirectional=False, training=True):
num_directions = (2 if bidirectional else 1)
seq_len = xs_np.shape[0]
batch_size = xs_np.shape[1]
hidden_size = h0_np.shape[3]
xs = nn.Variable.from_numpy_array(xs_np)
h0... |
def init_video_transform_dict(input_res=224, center_crop=256, randcrop_scale=(0.5, 1.0), color_jitter=(0, 0, 0), norm_mean=(0.485, 0.456, 0.406), norm_std=(0.229, 0.224, 0.225)):
print('Video Transform is used!')
normalize = NormalizeVideo(mean=norm_mean, std=norm_std)
tsfm_dict = {'train': transforms.Compo... |
def gradient_test(f: nn.Module, input_shape: List[int], max_iter: int=10, dtype: torch.dtype=torch.float64) -> Generator[(GradientTestResult, None, None)]:
def inner(p: nn.Parameter) -> GradientTestResult:
def loss(x, y):
try:
p.grad.zero_()
except AttributeError:
... |
class FunkyMagicMixin(object):
def funky_magic(self, outputs, good_indices, bad_indices):
filtered_targets = [([1.0] + ([0.0] * 99)) for _ in range(len(outputs))]
filtered_outputs = []
for (output, index, b_is) in zip(outputs, good_indices, bad_indices):
filtered_output = []
... |
def _seg_56():
return [(70163, 'V'), (70207, 'X'), (70272, 'V'), (70279, 'X'), (70280, 'V'), (70281, 'X'), (70282, 'V'), (70286, 'X'), (70287, 'V'), (70302, 'X'), (70303, 'V'), (70314, 'X'), (70320, 'V'), (70379, 'X'), (70384, 'V'), (70394, 'X'), (70400, 'V'), (70404, 'X'), (70405, 'V'), (70413, 'X'), (70415, 'V'),... |
def k_adjacency(A, k, with_self=False, self_factor=1):
assert isinstance(A, np.ndarray)
I = np.eye(len(A), dtype=A.dtype)
if (k == 0):
return I
Ak = (np.minimum(np.linalg.matrix_power((A + I), k), 1) - np.minimum(np.linalg.matrix_power((A + I), (k - 1)), 1))
if with_self:
Ak += (self... |
def get_lstm_cell():
    """Build the recurrent cell stack configured by FLAGS.

    Uses a BasicLSTMCell when ``FLAGS.lstm_cell == 'lstm'``, otherwise a
    GRUCell.  When ``FLAGS.num_layers > 1`` the cells are stacked with
    MultiRNNCell.

    Returns:
        A ``tf.nn.rnn_cell.RNNCell`` (single cell or MultiRNNCell stack).
    """
    def make_cell():
        # Factory: each layer must get its OWN cell object.  The original
        # ``[single_cell] * num_layers`` put the SAME instance in every
        # layer, which shares weights across layers and raises ValueError
        # in newer TF 1.x MultiRNNCell implementations.
        if FLAGS.lstm_cell == 'lstm':
            return tf.nn.rnn_cell.BasicLSTMCell(FLAGS.size)
        return tf.nn.rnn_cell.GRUCell(FLAGS.size)

    if FLAGS.num_layers > 1:
        return tf.nn.rnn_cell.MultiRNNCell(
            [make_cell() for _ in range(FLAGS.num_layers)])
    return make_cell()
class DynamicLossScaler():
def __init__(self, init_scale=(2 ** 32), scale_factor=2.0, scale_window=1000, min_scale=1, delayed_shift=1, consecutive_hysteresis=False):
self.cur_scale = init_scale
self.cur_iter = 0
self.last_overflow_iter = (- 1)
self.scale_factor = scale_factor
... |
def is_image_file(filename):
    """Return True when *filename* has a supported image extension.

    The check is case-insensitive and ignores surrounding whitespace.
    """
    # str.endswith accepts a tuple of suffixes, so one call replaces the
    # manual loop (which also shadowed the builtin ``type``).
    return filename.strip().lower().endswith(
        ('.jpg', '.bmp', '.png', '.jpeg', '.jfif'))
def _rename_path(path):
    """Rename *path* out of the way and return the new name.

    The new name is the old one suffixed with ``.OLD.<timestamp>`` so the
    original contents are preserved rather than deleted.
    """
    renamed = path + ('.OLD.%s' % time.time())
    log.warn('Renaming %s to %s', path, renamed)
    os.rename(path, renamed)
    return renamed
def run_naive_sgd(opt_beta, alpha, gamma, max_norm, min_eps, max_eps, sv_sens, data, compute_err_func, lamb):
max_step = int((math.log2((max_eps / min_eps)) + 1.0))
(test_thresh, test_eps) = compute_test_epsilon(alpha, gamma, sv_sens, (max_step + 1.0))
eps_list = np.array([(min_eps * (2.0 ** k)) for k in ra... |
def infer(valid_queue, model, log=True, _eval=True, weights_dict=None):
objs = ig_utils.AvgrageMeter()
top1 = ig_utils.AvgrageMeter()
top5 = ig_utils.AvgrageMeter()
(model.eval() if _eval else model.train())
with torch.no_grad():
for (step, (input, target)) in enumerate(valid_queue):
... |
class ComplExScore(nn.Block):
def __init__(self):
super(ComplExScore, self).__init__()
def edge_func(self, edges):
(real_head, img_head) = nd.split(edges.src['emb'], num_outputs=2, axis=(- 1))
(real_tail, img_tail) = nd.split(edges.dst['emb'], num_outputs=2, axis=(- 1))
(real_rel... |
_module(name=['PointCloud', 'pointcloud', 'point_cloud', 'pointcloud_renderer', 'PointCloudRenderer'])
class PointCloudRenderer(BaseRenderer):
def __init__(self, resolution: Tuple[(int, int)]=None, device: Union[(torch.device, str)]='cpu', output_path: Optional[str]=None, out_img_format: str='%06d.png', radius: Opt... |
def register_Ns3PfsFlowPerf_t_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::pfsFlowPerf_t const &', 'arg0')])
cls.add_instance_attribute('flowStart', 'ns3::Time', is_const=False)
cls.add_instance_attribute('lastAveragedThroughput', 'double', is_const=False)
cls.... |
class VeRi(BaseImageDataset):
dataset_dir = 'veri'
def __init__(self, root='./toDataset', verbose=True, **kwargs):
super(VeRi, self).__init__()
self.dataset_dir = osp.join(root, self.dataset_dir)
self.train_dir = osp.join(self.dataset_dir, 'image_train')
self.query_dir = osp.join... |
class Seq_DK_Dataset(th.utils.data.Dataset):
    """Dataset adapter exposing per-node DPK token sequences.

    Wraps a data object that provides ``get_DPK_tokens(node_id)`` and an
    ``n_nodes`` attribute; indexing by node id yields that node's tokens.
    """

    def __init__(self, data: Sequence):
        super().__init__()
        # Wrapped data source — assumed to expose get_DPK_tokens / n_nodes.
        self.d = data

    def __getitem__(self, node_id):
        return self.d.get_DPK_tokens(node_id)

    def __len__(self):
        return self.d.n_nodes
class kSplit(CombinatorialFreeModule):
def __init__(self, kBoundedRing):
CombinatorialFreeModule.__init__(self, kBoundedRing.base_ring(), kBoundedRing.indices(), category=KBoundedSubspaceBases(kBoundedRing, kBoundedRing.t), prefix=('ksp%d' % kBoundedRing.k))
self._kBoundedRing = kBoundedRing
... |
def xavier_uniform_(tensor: Tensor, gain: float = 1.0) -> Tensor:
    """Fill *tensor* in-place with Xavier/Glorot uniform values.

    Samples U(-a, a) with a = gain * sqrt(6 / (fan_in + fan_out)), and
    returns the tensor.
    """
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    # Standard deviation of the Glorot distribution; the uniform bound
    # that yields this std is sqrt(3) * std.
    std = gain * math.sqrt(2.0 / float(fan_in + fan_out))
    bound = math.sqrt(3.0) * std
    return _no_grad_uniform_(tensor, -bound, bound)
class SEBlock(nn.Module):
def __init__(self, nc, in_channels, reduce_channels):
super(SEBlock, self).__init__()
self.gap = GlobalAvgPool2d()
self.conv_reduce = nn.Sequential(ConvBN(nc, in_channels, reduce_channels, 1, disable_bn=True), NonLinear(nc, reduce_channels, NonLinearType.SWISH))
... |
class LDConditioner(nn.Module):
def __init__(self, input_dim, judge_dim, num_judges=None):
super().__init__()
self.input_dim = input_dim
self.judge_dim = judge_dim
self.num_judges = num_judges
assert (num_judges != None)
self.judge_embedding = nn.Embedding(num_judges,... |
class SpeechT5Tokenizer(metaclass=DummyObject):
    # Dummy stand-in generated for environments without the sentencepiece
    # backend; the real tokenizer lives elsewhere.
    _backends = ['sentencepiece']
    def __init__(self, *args, **kwargs):
        # Presumably raises an informative error when sentencepiece is not
        # installed — confirm against requires_backends' definition.
        requires_backends(self, ['sentencepiece'])
(scope='function')
def reference_decayed_abundance():
decay_index = pd.Index([1, 2, 26, 27, 28], name='atomic_number')
reference_decayed_abundance = pd.DataFrame([[0.0, 0.33, 0.3, 0.5, 0.4, 0.2], [0.98, 0.64, 0.6, 0.4, 0.55, 0.79], [0., 0., 0., 0., 0., 6.e-05], [0., 0., 0., 0., 0., 0.], [0., 0., 0., 0., 0., 0.]... |
def snapshot_pd(snapshot, tardis_snapshot_path, pandas_snapshot_extention):
refpath = tardis_snapshot_path.joinpath(SNAPSHOT_LOCATION)
class PandasSnapshotExtenstionRefdata(pandas_snapshot_extention):
def dirname(cls, *, test_location: 'PyTestLocation') -> str:
return str(Path(test_location.... |
def test_IndexedArray_deep_at():
content = ak.contents.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5]))
index1 = ak.index.Index32(np.array([1, 2, 3, 4], dtype=np.int32))
indexedarray1 = ak.contents.IndexedArray(index1, content)
index2 = ak.index.Index64(np.array([1, 2, 3], dtype=np.int64))
indexedarr... |
def modS_relations(syms):
if (not isinstance(syms, ManinSymbolList)):
raise TypeError('syms must be a ManinSymbolList')
tm = verbose()
rels = set()
for i in range(len(syms)):
(j, s) = syms.apply_S(i)
assert (j != (- 1))
if (i < j):
rels.add(((i, 1), (j, s)))
... |
class TriFingerRobot(object):
def __init__(self, action_mode, observation_mode, skip_frame, normalize_actions, normalize_observations, simulation_time, pybullet_client_full_id, pybullet_client_w_goal_id, pybullet_client_w_o_goal_id, revolute_joint_ids, finger_tip_ids, cameras=None, camera_indicies=np.array([0, 1, 2... |
def build_single_variable_quadratic():
    """Construct the quadratic objective x^2 - 4x + 1 in one variable.

    Returns:
        (objective, parameter, [2]): the expression tree, a DirectParam
        starting at 0 with bounds [-10, 10], and ``[2]`` (presumably the
        expected optimum of the quadratic — confirm against callers).
    """
    x = Variable(1)
    # x^2 - 4x + 1 assembled from the project's expression primitives.
    objective = Sum([Product([x, x]), Product([Constant(-4), x]), Constant(1)])
    start = DirectParam(np.array([0]), bounds=[-10, 10])
    return (objective, start, [2])
def init_bert_weights(module):
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=0.02)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if (isinstance(module, nn.Linear) and (module.bias is not None... |
def split_interstate_edges(sdfg: SDFG) -> None:
for e in sdfg.edges():
if (e.data.assignments and (not e.data.is_unconditional())):
tmpstate = sdfg.add_state()
sdfg.add_edge(e.src, tmpstate, InterstateEdge(condition=e.data.condition))
sdfg.add_edge(tmpstate, e.dst, Inters... |
class DocumentDatabase():
def __init__(self, reduce_memory=False):
if reduce_memory:
self.temp_dir = TemporaryDirectory()
self.working_dir = Path(self.temp_dir.name)
self.document_shelf_filepath = (self.working_dir / 'shelf.db')
self.document_shelf = shelve.op... |
def can_compile_class(cls):
    """Return True when every routine defined on *cls* has Python bytecode.

    Ignored classes, and classes whose routines lack ``__code__`` (e.g.
    builtins / C-level functions), are reported as non-compilable.
    """
    if is_ignored_fn(cls):
        return False
    routines = (
        getattr(cls, attr)
        for attr in cls.__dict__
        if inspect.isroutine(getattr(cls, attr, None))
    )
    return all(hasattr(fn, '__code__') for fn in routines)
def TestConv2dOperator(math_inst, alignment, tiling, arch, stride_supports=[StrideSupport.Strided, StrideSupport.Strided, StrideSupport.Strided], epilogue_functor=None, swizzling_functor=cutlass.IdentitySwizzle1, interleaved=False, **kwargs):
mixeds = [False, True, False]
conv_kinds = [cutlass.conv.Operator.fpr... |
def test(net, r, g, b, pokemonType, TEST_SAMPLES):
temp = torch.tensor(np.asarray([r, g, b]).astype(np.float32)).to(DEVICE)
result = []
for i in range(TEST_SAMPLES):
output = net.forward(temp)
a = output[0].data.cpu().numpy()
result.append((np.exp(a) / np.exp(a).sum()))
mean = np... |
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
for key in self.__dict__:
value = self.__dict__[key]
if isinstance(value, dict):
self.__dict__[key] = AttrDict(value)
... |
.parametrize('start_date', [pd.to_datetime('2020/01/10 08:00:00')])
.parametrize('end_date', [pd.to_datetime('2020/01/01 08:00:00')])
.parametrize('spatial_tessellation', [tessellation])
.parametrize('social_graph', ['random'])
.parametrize('n_agents', [5])
.parametrize('random_state', [2])
.parametrize('show_progress'... |
def _parse_list_num_ranges(s):
    """Parse a comma-separated list of numeric ranges, one result per range."""
    return [_parse_num_range(part) for part in s.split(',')]
class IntentParser(with_metaclass(ABCMeta, ProcessingUnit)):
def unit_name(cls):
return IntentParser.registered_name(cls)
def fit(self, dataset, force_retrain):
pass
def parse(self, text, intents, top_n):
pass
def get_intents(self, text):
pass
def get_slots(self, text... |
def gold_pipeline_path(test_name: str) -> str:
    """Return the path to the gold pipelined output for *test_name*.

    The path is anchored at the current working directory under the
    example tree: ``<cwd>/<EXAMPLE_PATH>/<name>/pipelined_<name>``.
    """
    return str(Path.cwd() / EXAMPLE_PATH / test_name / f'pipelined_{test_name}')
def corr(pred, true):
    """Mean per-column correlation-like score between *pred* and *true*.

    NOTE(review): the denominator is sqrt(sum((t-mt)^2 * (p-mp)^2)), not
    the usual product of per-series norms, so this intentionally matches
    the original metric rather than textbook Pearson correlation.
    """
    pred_dev = pred - pred.mean(0)
    true_dev = true - true.mean(0)
    numer = (true_dev * pred_dev).sum(0)
    denom = np.sqrt(((true_dev ** 2) * (pred_dev ** 2)).sum(0))
    return (numer / denom).mean(-1)
def download_blob(bucket_name, source_file_name, blob_name):
    """Download *blob_name* from GCS bucket *bucket_name* to a local file.

    Writes the blob's contents to *source_file_name* and prints a
    confirmation line.
    """
    client = storage.Client()
    blob = client.bucket(bucket_name).blob(blob_name)
    blob.download_to_filename(source_file_name)
    print('File {} downloaded to {}.'.format(blob, source_file_name))
.parametrize('round_number', range(ROUNDS_TO_TRAIN))
def test_get_collaborators_for_task(assigner, task_groups, round_number, authorized_cols):
    """Every task in the first group must map to exactly the authorized set."""
    for name in task_groups[0]['tasks']:
        selected = assigner.get_collaborators_for_task(name, round_number)
        assert set(selected) == set(authorized_cols)
def cross_entropy_loss(logits, labels, label_smoothing=0.0, dtype=jnp.float32):
num_classes = logits.shape[(- 1)]
labels = jax.nn.one_hot(labels, num_classes, dtype=dtype)
if (label_smoothing > 0):
labels = ((labels * (1 - label_smoothing)) + (label_smoothing / num_classes))
logp = jax.nn.log_so... |
def infer(model, query_loader, support_sample, args, logger, label_name):
model.eval()
support_image = [support_sample['image'][i].float().cuda() for i in range(support_sample['image'].shape[0])]
support_fg_mask = [support_sample['label'][[i]].float().cuda() for i in range(support_sample['image'].shape[0])]... |
('/get_base_fees/<lastN>', methods=('GET',))
def get_base_fees(lastN):
web3 = connect_to_geth(app.web3_url, app.consensus)
latest = web3.eth.getBlock('latest').number
start = ((latest - int(lastN)) + 1)
if (start <= 0):
start = 1
base_fees = {}
for bk in range(start, (latest + 1)):
... |
class TestSensitivityEvalWithNonSupportedOutputBase(BasePytorchTest):
def create_inputs_shape(self):
return [[1, 3, 16, 16]]
def representative_data_gen(self, n_iters=1):
input_shapes = self.create_inputs_shape()
for _ in range(n_iters):
(yield self.generate_inputs(input_shap... |
def random_brightness(image, max_delta, impl='simclrv2'):
if (impl == 'simclrv2'):
factor = tf.random_uniform([], tf.maximum((1.0 - max_delta), 0), (1.0 + max_delta))
image = (image * factor)
elif (impl == 'simclrv1'):
image = random_brightness(image, max_delta=max_delta)
else:
... |
def norm_flops_counter_hook(module, input, output):
    """Accumulate FLOPs for a normalization layer onto ``module.__flops__``.

    Counts one op per input element; affine norms (scale + shift) double it.
    """
    first_input = input[0]
    flops = np.prod(first_input.shape)
    has_affine = getattr(module, 'affine', False) or getattr(
        module, 'elementwise_affine', False)
    if has_affine:
        flops *= 2
    module.__flops__ += int(flops)
def check_if_bounds_is_locked(col: int, bounds_locked: list):
if (col in map(get_index, bounds_locked)):
conflict = [x for x in bounds_locked if (x.index == col)][0]
return Information(True, True, conflict.presolver, ((('DETECTED CONFLICT for bounds column ' + col.__str__()) + ' presolver ') + confl... |
def validate_file(download_url, download_path):
if (not os.path.isfile(download_path)):
return False
actual_size = urllib.request.urlopen(download_url, context=ssl.create_default_context(cafile=certifi.where())).length
download_size = os.path.getsize(download_path)
print('File: {}, \t downloaded... |
class BootstrCurriculum(TrainingCurriculum):
def __init__(self, args, dataset, tokenizer):
super().__init__(args, dataset, tokenizer)
self.bs_start = args.bootstrapping_start
self.bs_update_epochs = args.bootstrapping_update_epochs
self.advanced_collate_fn = partial(contrastive_colla... |
def unpack(stream, **kwargs):
    """Deprecated shim: read *stream* fully and delegate to ``unpackb()``."""
    warnings.warn("Direct calling implementation's unpack() is deprecated, Use msgpack.unpack() or unpackb() instead.", PendingDeprecationWarning)
    buf = stream.read()
    return unpackb(buf, **kwargs)
def calib_err(confidence, correct, p='2', beta=100):
idxs = np.argsort(confidence)
confidence = confidence[idxs]
correct = correct[idxs]
bins = [[(i * beta), ((i + 1) * beta)] for i in range((len(confidence) // beta))]
bins[(- 1)] = [bins[(- 1)][0], len(confidence)]
cerr = 0
total_examples =... |
def build_conv_layer(cfg: Optional[Dict], *args, **kwargs) -> nn.Module:
if (cfg is None):
cfg_ = dict(type='Conv2d')
else:
if (not isinstance(cfg, dict)):
raise TypeError('cfg must be a dict')
if ('type' not in cfg):
raise KeyError('the cfg dict must contain the ... |
class ResidualEdgeAttConvv1(nn.Module):
def __init__(self, dim_in, dim_out, bias=False, **kwargs):
super(ResidualEdgeAttConvv1, self).__init__()
self.model = ResidualEdgeAttConvv1Layer(dim_in, dim_out, bias=bias)
def forward(self, batch):
batch.node_feature = self.model(batch.node_featur... |
def blit_from_field_to_field(dst: template(), src: template(), offset: i32, size: i32):
dst_offset = static((dst.snode.ptr.offset if (len(dst.snode.ptr.offset) != 0) else 0))
src_offset = static((src.snode.ptr.offset if (len(src.snode.ptr.offset) != 0) else 0))
for i in range(size):
dst[((i + dst_of... |
def build_model(rnn_size=RNN_SIZE, num_layers=NUM_LAYERS, seg_length=SEGMENT_LENGTH, dropout=DROPOUT, weights_path=None, training=False):
input_melody_left = Input(shape=(seg_length, 130), name='input_melody_left')
melody_left = TimeDistributed(Dense(rnn_size, activation='relu'), name='melody_left_embedding')(i... |
def get_transforms_field(transforms):
    """Return *transforms* as an ndarray, converting field data if needed."""
    if isinstance(transforms, np.ndarray):
        return transforms
    converted = transforms.to_numpy()
    # NOTE(review): the cache is populated here but never consulted in this
    # function — confirm that readers elsewhere use transforms_ndarray_cache.
    transforms_ndarray_cache[transforms] = converted
    return converted
def compute_univariate(df: Union[(dd.DataFrame, pd.DataFrame)], col: Union[(str, LatLong)], cfg: Config, dtype: Optional[DTypeDef]) -> Intermediate:
(new_col_names, ndf) = gen_new_df_with_used_cols(df, col, None, None)
x = new_col_names[col]
if (x is None):
raise ValueError
frame = EDAFrame(ndf,... |
class SORE_filter():
def __init__(self, csv_path='data/narrowIE/tradeoffs_and_argmods.csv', sore_output_dir='SORE/data/processed_data/'):
self.csv_path = csv_path
self.sore_output_dir = sore_output_dir
def start(self, prefix, filter_settings, IDF_weights_path, SUBWORDUNIT, irrelevant_cluster_ids... |
class UnionCombinatorialClass(CombinatorialClass):
def __init__(self, left_cc, right_cc, name=None):
self.left_cc = left_cc
self.right_cc = right_cc
self._name = name
def __repr__(self) -> str:
if self._name:
return self._name
else:
return ('Union ... |
def test_mul_64_64():
    """extint_mul_64_64 must agree with Python's exact products."""
    with exc_iter(INT64_VALUES, INT64_VALUES) as pairs:
        for (x, y) in pairs:
            expected = x * y
            got = mt.extint_mul_64_64(x, y)
            # Only go through assert_equal on mismatch, keeping the
            # passing path free of assertion bookkeeping.
            if expected != got:
                assert_equal(got, expected)
class MAP(Metric):
_scala_udf_name = 'getMAPMetricValue'
def _get_metric_value_by_user(k, pred, ground_truth) -> float:
length = min(k, len(pred))
if ((len(ground_truth) == 0) or (len(pred) == 0)):
return 0
tp_cum = 0
result = 0
for i in range(length):
... |
def remove_scalar_reads(sdfg: sd.SDFG, array_names: Dict[(str, str)]):
for state in sdfg.nodes():
scalar_nodes = [n for n in state.nodes() if (isinstance(n, nodes.AccessNode) and (n.data in array_names))]
for node in scalar_nodes:
symname = array_names[node.data]
for out_edge... |
def test_timedelta64():
    """Regression test: Array.show with a timedelta and a custom formatter.

    NOTE(review): the expected string looks truncated (likely lost content
    between '[' and ' microseconds') — confirm against the original suite.
    """
    out = io.StringIO()
    delta = timedelta(days=1, hours=12, minutes=1, seconds=30)
    ak.Array([delta]).show(stream=out, formatter={'datetime': '<TD {}>'.format})
    assert out.getvalue() == '[ microseconds]\n'
class LogBERTConfig(Config):
pretrain_from_scratch: bool = True
model_name: str = 'bert-base-cased'
model_dirname: str = None
mlm_probability: float = 0.15
mask_ngram: int = 1
max_token_len: int = 384
evaluation_strategy: str = 'steps'
num_train_epochs: int = 20
learning_rate: float ... |
def register_Ns3WifiPhy_methods(root_module, cls):
cls.add_constructor([param('ns3::WifiPhy const &', 'arg0')])
cls.add_constructor([])
cls.add_method('AddSupportedChannelWidth', 'void', [param('uint16_t', 'channelwidth')])
cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virt... |
class PeriodicBC(Condition):
def __init__(self, name, regions, dofs, match, key='', times=None):
Condition.__init__(self, name=name, regions=regions, dofs=dofs, match=match, key=key, times=times)
def canonize_dof_names(self, dofs):
self.dofs[0] = _canonize(self.dofs[0], dofs)
self.dofs[1... |
.filterwarnings('ignore:.*method is good for exploring strategies.*')
def test_invalid_body_in_get(swagger_20):
swagger_20.validate_schema = True
operation = APIOperation(path='/foo', method='GET', definition=OperationDefinition({}, {}, 'foo', []), schema=swagger_20, body=PayloadAlternatives([OpenAPI20Body({'na... |
def logmelfilterbank(audio, sampling_rate, fft_size=1024, hop_size=256, win_length=None, window='hann', num_mels=80, fmin=None, fmax=None, eps=1e-10):
x_stft = librosa.stft(audio, n_fft=fft_size, hop_length=hop_size, win_length=win_length, window=window, pad_mode='reflect')
spc = np.abs(x_stft).T
fmin = (0 ... |
class DataCollatorForLanguageModeling():
def __init__(self, tokenizer, rap_no_grad=True, model_type='transformer'):
self.tokenizer = tokenizer
self.rap_no_grad = rap_no_grad
self.model_type = model_type
def __call__(self, examples):
batch = self._tensorize_batch([example['input_i... |
class TorchvisionBenchmark(Benchmark):
def __init__(self, device, distributed_backend, bucket_size, model):
super(TorchvisionBenchmark, self).__init__(device, distributed_backend, bucket_size)
self.model = model
def __str__(self):
return '{} with batch size {}'.format(self.model, self.ba... |
_grad()
def test(loader):
model.eval()
loss_test = 0
out_log = []
for data in loader:
data = data.to(device)
(out, _, _) = model(data.x, data.adj, data.mask)
out_log.append([F.softmax(out, dim=1), data.y])
loss_test += (data.y.size(0) * F.nll_loss(out, data.y.view((- 1)))... |
def print_params(model):
    """Print the name and current value of every parameter in *model*."""
    for p in model.params():
        print(p.name)
        print(p.get_value())
def get_table_dict(table_data_path):
    """Load table metadata from a JSON file, keyed by database id.

    Args:
        table_data_path: path to a JSON file holding a list of table
            entries, each containing a 'db_id' field.

    Returns:
        dict mapping db_id -> its table entry.
    """
    # Context manager closes the file promptly; the original leaked the
    # handle via json.load(open(...)).
    with open(table_data_path) as f:
        data = json.load(f)
    return {item['db_id']: item for item in data}
def no_tf_warnings() -> Iterator[None]:
    """Generator context that silences TF warnings for its duration.

    Forces the TF log-level env var to the no-warnings value, then restores
    the previous value on exit.
    """
    previous = os.environ.get(TF_LOG_LEVEL_KEY, TF_LOG_LEVEL_NO_WARNINGS_VALUE)
    os.environ[TF_LOG_LEVEL_KEY] = TF_LOG_LEVEL_NO_WARNINGS_VALUE
    try:
        yield
    finally:
        # Restore even when the managed block raises — the original
        # skipped restoration on exceptions, leaking the override.
        os.environ[TF_LOG_LEVEL_KEY] = previous
class RandomStructured(BasePruningMethod):
PRUNING_TYPE = 'structured'
def __init__(self, amount, dim=(- 1)):
_validate_pruning_amount_init(amount)
self.amount = amount
self.dim = dim
def compute_mask(self, t, default_mask):
_validate_structured_pruning(t)
_validate_p... |
class LearningNodeMC(LearningNode):
def update_stats(self, y, weight):
try:
self.stats[y] += weight
except KeyError:
self.stats[y] = weight
self.stats = dict(sorted(self.stats.items()))
def learn_one(self, X, y, *, weight=1.0, tree=None):
super().learn... |
_module()
class DeepFashionDataset(CocoDataset):
CLASSES = ('top', 'skirt', 'leggings', 'dress', 'outer', 'pants', 'bag', 'neckwear', 'headwear', 'eyeglass', 'belt', 'footwear', 'hair', 'skin', 'face')
PALETTE = [(0, 192, 64), (0, 64, 96), (128, 192, 192), (0, 64, 64), (0, 192, 224), (0, 192, 192), (128, 192, 6... |
class ImageClassifierCLI(CLI):
def add_arguments_to_parser(self, parser: LightningArgumentParser) -> None:
super().add_arguments_to_parser(parser)
parser.link_arguments('data.num_classes', 'model.num_classes', apply_on='instantiate')
parser.link_arguments('data.image_shape', 'model.image_sha... |
def _populate_unbound(kwds, unbound_symbols, locals=None, globals=None):
for symbol in unbound_symbols:
if (symbol not in kwds):
if ((locals is None) or (globals is None)):
calling_frame = inspect.currentframe().f_back.f_back.f_back
if (locals is None):
... |
class RagModel():
    # Dummy stand-in used when PyTorch is unavailable; the real model is
    # defined elsewhere.
    def __init__(self, *args, **kwargs):
        # Presumably raises an informative error when torch is missing —
        # confirm against requires_pytorch's definition.
        requires_pytorch(self)
    def from_pretrained(self, *args, **kwargs):
        requires_pytorch(self)
def compute_function(*, target: Target) -> Callable[([NativeFunction], Optional[str])]:
_native_function
def go(f: NativeFunction) -> Optional[str]:
if f.manual_kernel_registration:
return None
if (Variant.function not in f.variants):
return None
name = cpp.name(f... |
def get_gcda_files() -> List[str]:
folder_has_gcda = os.path.join(get_pytorch_folder(), 'build')
if os.path.isdir(folder_has_gcda):
output = subprocess.check_output(['find', folder_has_gcda, '-iname', '*.gcda'])
output = output.decode('utf-8').split('\n')
return output
else:
... |
def _handle_ns(packageName, path_item):
importer = get_importer(path_item)
if (importer is None):
return None
loader = importer.find_module(packageName)
if (loader is None):
return None
module = sys.modules.get(packageName)
if (module is None):
module = sys.modules[packag... |
_grad()
def evaluate_real(data_loader, model, device, real_labels, ds=False, bf16=False):
criterion = torch.nn.CrossEntropyLoss()
metric_logger = utils.MetricLogger(delimiter=' ')
header = 'Test:'
model.eval()
for batch in metric_logger.log_every(data_loader, 10, header):
images = batch[0]
... |
def test_simple_tensor_ops(backend):
tb = pyhf.tensorlib
assert (tb.tolist((tb.astensor([1, 2, 3]) + tb.astensor([4, 5, 6]))) == [5, 7, 9])
assert (tb.tolist((tb.astensor([1]) + tb.astensor([4, 5, 6]))) == [5, 6, 7])
assert (tb.tolist((tb.astensor([1, 2, 3]) - tb.astensor([4, 5, 6]))) == [(- 3), (- 3), ... |
.parametrize('ctx, func_name', ctxs)
.parametrize('seed', [520])
.parametrize('test', [True])
.parametrize('w_bias', [True])
.parametrize('channel_last', [True, False])
.parametrize('graph_ref, graph_act, folding, self_folding, rec_lays, rec_pos, skip_lays', [(small_nonqnn_to_recording_resnet,... |
def conv_layer2(U, params):
    """Apply the two-qubit gate *U* with *params* across the layer-2 wiring.

    NOTE(review): the wire pairs appear to follow a QCNN second-layer
    topology — confirm against the circuit definition.
    """
    for pair in ([0, 6], [0, 2], [4, 6], [2, 4]):
        U(params, wires=pair)
def main(unused_argv):
df = load_annotations(filename=FLAGS.annotation_file, n_top=FLAGS.n_top, n_audios_per_shard=FLAGS.n_audios_per_shard)
if (not tf.gfile.IsDirectory(FLAGS.output_dir)):
tf.logging.info('Creating output directory: %s', FLAGS.output_dir)
tf.gfile.MakeDirs(FLAGS.output_dir)
... |
class Sampler():
def __init__(self, ratings, users, items):
np.random.seed(42)
self._ratings = ratings
self._users = users
self._items = items
def step(self, events: int):
r_int = np.random.randint
n_users = len(self._users)
n_items = len(self._items)
... |
class ChineseCLIPImageProcessor(BaseImageProcessor):
model_input_names = ['pixel_values']
def __init__(self, do_resize: bool=True, size: Dict[(str, int)]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, do_center_crop: bool=True, crop_size: Dict[(str, int)]=None, do_rescale: bool=True, rescale_fac... |
def register_Ns3EpcX2_methods(root_module, cls):
cls.add_constructor([param('ns3::EpcX2 const &', 'arg0')])
cls.add_constructor([])
cls.add_method('AddX2Interface', 'void', [param('uint16_t', 'enb1CellId'), param('ns3::Ipv4Address', 'enb1X2Address'), param('uint16_t', 'enb2CellId'), param('ns3::Ipv4Address'... |
def _seg_55():
return [(68681, 'X'), (68736, 'M', u''), (68737, 'M', u''), (68738, 'M', u''), (68739, 'M', u''), (68740, 'M', u''), (68741, 'M', u''), (68742, 'M', u''), (68743, 'M', u''), (68744, 'M', u''), (68745, 'M', u''), (68746, 'M', u''), (68747, 'M', u''), (68748, 'M', u''), (68749, 'M', u''), (68750, 'M', ... |
_utils.test(require=ti.extension.assertion, debug=True, gdb_trigger=False)
def test_cpu_debug_snode_reader_out_of_bound_negative():
    # Reading a negative index from a debug-mode field must trip the bounds
    # check, surfaced to Python as an AssertionError.
    x = ti.field(ti.f32, shape=3)
    with pytest.raises(AssertionError):
        a = x[(- 1)]
def concatenate(datasets: Sequence[LAMLDataset]) -> LAMLDataset:
(conc, klass) = get_common_concat([ds for ds in datasets if (ds is not None)])
if (klass is not None):
n = 0
for (n, ds) in enumerate(datasets):
if (type(ds) is klass):
break
datasets = ([dataset... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.