code stringlengths 101 5.91M |
|---|
def rand_binomial_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes, n=1, p=0.5, shape=None, seed=-1):
    """Backward pass for a random-binomial sampling op.

    Sampling is non-differentiable, so every gradient slot is None.
    Only the lengths of ``grad_inputs`` and ``inputs`` matter; the remaining
    parameters exist for signature compatibility with the forward op.

    Fix: ``shape`` previously defaulted to a mutable list (``[]``); it is
    unused in the body, so ``None`` is the safe, equivalent default.
    """
    return [None] * (len(grad_inputs) + len(inputs))
def filter_answer_by_class(classes, answers_ids):
classes_ids = [_id for e in classes for _id in e]
kg = HDTDocument((hdt_path + hdt_file))
a_ids = [_id for e in answers_ids for _id in e]
a_ids = kg.filter_types(a_ids, classes_ids)
kg.remove()
a_ids = [_id for _a_ids in a_ids for _id in _a_ids]
... |
def test_clean_default(df_urls: pd.DataFrame) -> None:
df_clean = clean_url(df_urls, column='messy_url')
df_check = df_urls.copy()
df_check['messy_url_details'] = [np.nan, {'scheme': ' 'host': 'www.facebookee.com', 'messy_url_clean': ' 'queries': {'auth': 'facebookeeauth', 'token': 'iwusdkc', 'not_token': '... |
def _fit_model(sizes: np.ndarray, successes: np.ndarray) -> Callable:
w_eff_guess = (1 / np.max(sizes))
norm_guess = (1 / singling_out_probability_integral(n=np.max(sizes), w_min=0, w_max=w_eff_guess))
(popt, _) = curve_fit(_model, xdata=sizes, ydata=successes, bounds=(0, (1, np.inf)), p0=(w_eff_guess, norm... |
_node(optplan.Epsilon)
def create_epsilon(params: optplan.Epsilon, work: workspace.Workspace) -> Epsilon:
    """Build an Epsilon function node from its optplan description.

    Resolves the structure variable and the simulation space from the
    workspace, then wires them into a new Epsilon instance.
    """
    structure = work.get_object(workspace.VARIABLE_NODE)
    sim_space = work.get_object(params.simulation_space)
    return Epsilon(input_function=structure, wlen=params.wavelength, simspace=sim_space)
class IterationCounter():
def __init__(self, opt, dataset_size):
self.opt = opt
self.dataset_size = dataset_size
self.first_epoch = 1
self.total_epochs = (opt.niter + opt.niter_decay)
self.epoch_iter = 0
self.iter_record_path = os.path.join(self.opt.checkpoints_dir, s... |
class Generator(chainer.Chain):
def __init__(self, args, pretrained_model=None):
super(Generator, self).__init__()
with self.init_scope():
self.encoder = Encoder(args, pretrained_model=pretrained_model)
self.decoder = Decoder(args)
def __call__(self, x):
h = self.... |
def adjust_learning_rate(optimizer, init_lr, epoch, factor, every):
    """Linearly decay the optimizer's learning rate by init_lr/every per call.

    The rate is clamped at zero.  ``epoch`` and ``factor`` are accepted but
    not used by this schedule (kept for signature compatibility).
    """
    decrement = init_lr / every
    current = optimizer.param_groups[0]['lr']
    new_lr = max(current - decrement, 0)
    for group in optimizer.param_groups:
        group['lr'] = new_lr
class BeamSearch(object):
def __init__(self, model, beam_size, start_token, end_token, max_steps):
self._model = model
self._beam_size = beam_size
self._start_token = start_token
self._end_token = end_token
self._max_steps = max_steps
def BeamSearch(self, sess, enc_inputs... |
class InputExamples(object):
    """One dialogue example: context utterances, a candidate response, its
    label, and the associated sequence lengths."""

    def __init__(self, utterances, response, label, seq_lengths):
        # seq_lengths packs (dialog length, response length) in that order.
        self.utterances = utterances
        self.response = response
        self.label = label
        self.dialog_len, self.response_len = seq_lengths[0], seq_lengths[1]
def make_notice(comment_style: CommentStyle, ctime_year: str) -> List[str]:
lines = []
if (comment_style == CommentStyle.C_STYLE):
lines.append((('/*' + ('*' * 78)) + '\n'))
line_start = ' '
elif (comment_style == CommentStyle.CPP_STYLE):
line_start = '//'
elif (comment_style ... |
def _sync_params_avg(*, module: torch.nn.Module, sync_on_cpu: bool=False):
import torch.distributed as dist
if (dist.get_backend() == 'gloo'):
reduce_op = dist.ReduceOp.SUM
elif hasattr(dist.ReduceOp, 'AVG'):
reduce_op = dist.ReduceOp.AVG
else:
reduce_op = dist.ReduceOp.SUM
o... |
def curves_with_j_1728(K):
if (not K.is_finite()):
raise ValueError('field must be finite')
p = K.characteristic()
if (p == 2):
return curves_with_j_0_char2(K)
if (p == 3):
return curves_with_j_0_char3(K)
q = K.cardinality()
if ((q % 4) == 3):
return [EllipticCurv... |
def amsterdam_test_allways(listener=False):
data = [('', 0, [(300.0, 45.0, 100.0), (300.0, 100.0, 100.0)]), ('light', 0, [(300.0, 45.0, 100.0), (300.0, 100.0, 100.0)]), ('pinkish', 0, [(300.0, 45.0, 100.0), (300.0, 100.0, 100.0)]), ('purple', 0, [(300.0, 45.0, 100.0), (300.0, 100.0, 100.0)]), ('light purple', 0, [(... |
def test(args):
path = osp.join(osp.dirname(osp.realpath(__file__)), 'data/ShapeNet')
pre_transform = Compose((T.NormalizeScale(), T.GeodesicFPS(args.num_points)))
transform = Compose((T.RandomScale(((2 / 3), (3 / 2))), T.RandomTranslateGlobal(0.1)))
test_dataset = ShapeNet(path, categories=args.class_c... |
class PaviLoggerHook(LoggerHook):
def __init__(self, url, username=None, password=None, instance_id=None, config_file=None, interval=10, ignore_last=True, reset_flag=True):
self.pavi = PaviClient(url, username, password, instance_id)
self.config_file = config_file
super(PaviLoggerHook, self)... |
def register_Ns3RadvdHelper_methods(root_module, cls):
cls.add_constructor([param('ns3::RadvdHelper const &', 'arg0')])
cls.add_constructor([])
cls.add_method('AddAnnouncedPrefix', 'void', [param('uint32_t', 'interface'), param('ns3::Ipv6Address', 'prefix'), param('uint32_t', 'prefixLength')])
cls.add_m... |
def scores(label_trues, label_preds, num_classes=21):
hist = np.zeros((num_classes, num_classes))
for (lt, lp) in zip(label_trues, label_preds):
hist += _fast_hist(lt.flatten(), lp.flatten(), num_classes)
acc = (np.diag(hist).sum() / hist.sum())
acc_cls = (np.diag(hist) / hist.sum(axis=1))
a... |
class SalObjDataset_var_unlabel(data.Dataset):
def __init__(self, image_root, gt_root, depth_root, trainsize):
self.trainsize = trainsize
self.images = [(image_root + f) for f in os.listdir(image_root) if f.endswith('.png')]
self.gts = [(gt_root + f) for f in os.listdir(gt_root) if (f.endswi... |
class IndexMapper(object):
special_tokens = {PAD_TOKEN: PAD, UNK_TOKEN: UNK}
special_tokens_segment = {PAD_TOKEN: PAD, UNK_TOKEN: UNK, BOS_TOKEN: BOS, EOS_TOKEN: EOS}
def __init__(self, vocab_file=None, threshold=(- 1), segment_func=(lambda line: line.lower().strip().split()), segment_infix='_token', suffix... |
def test_mae_backbone():
with pytest.raises(TypeError):
model = MAE()
model.init_weights(pretrained=0)
with pytest.raises(TypeError):
model = MAE(img_size=512.0)
with pytest.raises(TypeError):
model = MAE(out_indices=1.0)
with pytest.raises(AssertionError):
MAE(im... |
.parametrize('has_colscale', [True, False])
.parametrize('has_residual', [True, False])
.parametrize('dropout_p', [0.37, 0.0])
.parametrize('weight_dtype', [torch.float32, torch.float16])
.parametrize('input_dtype,residual_dtype', ([(torch.float16, torch.float16), (torch.float16, torch.float32), (torch.float32, torch.f... |
def spec_normsys():
with open('validation/data/2bin_histosys_example2.json', encoding='utf-8') as spec_file:
source = json.load(spec_file)
spec = {'channels': [{'name': 'singlechannel', 'samples': [{'name': 'signal', 'data': source['bindata']['sig'], 'modifiers': [{'name': 'mu', 'type': 'normfactor', 'd... |
def targeted_coref_evaluation(config: DictConfig, model: EntityRankingModel, data_iter_map: Dict, dataset: str, split='test') -> Dict:
log_dir = path.join(config.paths.model_dir, dataset)
if (not path.exists(log_dir)):
os.makedirs(log_dir)
log_file = path.join(log_dir, (split + '.log.jsonl'))
wi... |
def test_arraymap_call():
    """The forward/inverse maps from relabel_sequential must round-trip."""
    labels = np.array([1, 1, 5, 5, 8, 99, 42, 0], dtype=np.intp)
    relabeled, forward_map, inverse_map = relabel_sequential(labels)
    # Applying the forward map reproduces the relabeled output...
    testing.assert_array_equal(relabeled, forward_map(labels))
    # ...and the inverse map recovers the original labels.
    testing.assert_array_equal(labels, inverse_map(relabeled))
def _layerinfo(file):
layers = []
read = file.read
for i in range(abs(i16(read(2)))):
y0 = i32(read(4))
x0 = i32(read(4))
y1 = i32(read(4))
x1 = i32(read(4))
info = []
mode = []
types = list(range(i16(read(2))))
if (len(types) > 4):
... |
def _gaussian_kernel(x: torch.Tensor, y: torch.Tensor, sigma: float) -> torch.Tensor:
return ((- ((x - y) ** 2).sum(dim=3)) / (2 * sigma)).exp() |
def parse(inFilename):
    """Parse a doxygen XML file into a DoxygenType object tree.

    Reads the document with minidom and builds the generated superclass
    object model from its root element.
    """
    document = minidom.parse(inFilename)
    root_obj = supermod.DoxygenType.factory()
    root_obj.build(document.documentElement)
    return root_obj
def create_annotation_coco_format(min_x, min_y, width, height, score, image_id, category_id, annotation_id, args):
bbox = (min_x, min_y, width, height)
area = (width * height)
print((category_id + 1))
if (args.type == 'GT'):
annotation = {'id': annotation_id, 'image_id': image_id, 'bbox': bbox, ... |
def seq_hidden_masking_before_pooling(seq_hidden_input, len_input):
    """Mask padded timesteps with float32's minimum value.

    Using ``tf.float32.min`` (rather than 0) as the mask value ensures a
    subsequent max-pooling never selects a padding position.
    NOTE(review): delegates to ``seq_hidden_masking`` defined elsewhere in
    this file — masking semantics come from that helper.
    """
    return seq_hidden_masking(seq_hidden_input, len_input, mask_value=tf.float32.min)
def load_trees(dirpath):
(const_trees, dep_trees, toks) = ([], [], [])
with open(os.path.join(dirpath, 'parents.txt')) as parentsfile, open(os.path.join(dirpath, 'dparents.txt')) as dparentsfile, open(os.path.join(dirpath, 'sents.txt')) as toksfile:
(parents, dparents) = ([], [])
for line in par... |
class CactusGroup(UniqueRepresentation, Group):
def __init__(self, n):
self._n = n
ell = len(str(n))
names = ['s{}{}'.format((('0' * (ell - len(str(i)))) + str(i)), (('0' * (ell - len(str(j)))) + str(j))) for i in range(1, (self._n + 1)) for j in range((i + 1), (self._n + 1))]
cat = ... |
class SlateStandardIPS(BaseSlateInverseProbabilityWeighting):
estimator_name: str = 'sips'
def estimate_policy_value(self, slate_id: np.ndarray, reward: np.ndarray, position: np.ndarray, pscore: np.ndarray, evaluation_policy_pscore: np.ndarray, **kwargs) -> float:
check_sips_inputs(slate_id=slate_id, re... |
def variableNameGenerator() -> Iterator[str]:
    """Return an endless iterator of fresh temporary names: 't_0', 't_1', ...

    Fix: the previous version wrapped an already-iterable generator in a
    redundant ``iter()`` call and used an awkward ``-1``/pre-increment
    counter; this version counts up directly.
    """
    def _names() -> Iterator[str]:
        index = 0
        while True:
            yield f't_{index}'
            index += 1
    return _names()
def get_norm_layer(norm_type='instance'):
if (norm_type == 'batch'):
norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
elif (norm_type == 'instance'):
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=True)
elif (norm_type == 'none'):
norm_... |
def run_thread_post_mapping(iteration_dir, design_file, opt):
opt_dir = os.path.join(iteration_dir, opt)
(opt_file, delay, area) = run_post_mapping(opt_dir, opt, design_file, library_file)
log(((((('Optimization: ' + opt) + ' -> delay: ') + str(delay)) + ', area: ') + str(area)))
return (opt, opt_file, ... |
def generate_data(thread_count, iteration_count, thread_dictionary, cpu_timing_dictionary, tardis_file, thread_file, cpu_file):
tardis_config = Configuration.from_yaml(tardis_file)
for threads in range(1, (thread_count + 1)):
for i in range(1, (iteration_count + 1)):
if check_existance(threa... |
def _to_pattern(arg):
    """Coerce ``arg`` to a pattern: return it unchanged if it already is one,
    otherwise wrap it in a MultiPattern."""
    return arg if is_pattern(arg) else MultiPattern(arg)
class SegNet(nn.Module):
def __init__(self, num_classes):
super(SegNet, self).__init__()
print('SegNet...')
self.num_classes = num_classes
self.conv3d = nn.Conv3d(in_channels=hyp.feat3D_dim, out_channels=self.num_classes, kernel_size=1, stride=1, padding=0).cuda()
def compute_los... |
def test_convert_crop_cam_to_orig_img():
pred_cam = np.ones([10, 10, 10, 3])
bbox = np.ones([10, 10, 5])
(img_width, img_height) = (224, 224)
convert_crop_cam_to_orig_img(pred_cam, bbox, img_width, img_height, bbox_format='xyxy')
convert_crop_cam_to_orig_img(pred_cam, bbox, img_width, img_height, bb... |
class SqueezeBertPreTrainedModel():
    """Import-error placeholder for SqueezeBertPreTrainedModel.

    Instantiated when PyTorch is unavailable; every entry point raises the
    missing-backend error via ``requires_pytorch``.
    """

    def __init__(self, *init_args, **init_kwargs):
        requires_pytorch(self)

    def from_pretrained(self, *load_args, **load_kwargs):
        requires_pytorch(self)
def mesh_grid(kernel_size):
    """Build a kernel_size x kernel_size coordinate grid centred near zero.

    Returns:
        xy: (kernel_size, kernel_size, 2) array pairing (x, y) per cell.
        xx, yy: the raw meshgrid coordinate arrays.
    """
    axis = np.arange((-kernel_size) // 2 + 1.0, kernel_size // 2 + 1.0)
    xx, yy = np.meshgrid(axis, axis)
    # Pair x and y per grid cell; equivalent to the hstack/reshape dance.
    xy = np.stack((xx, yy), axis=-1)
    return xy, xx, yy
class TFSegformerOverlapPatchEmbeddings(tf.keras.layers.Layer):
def __init__(self, patch_size, stride, hidden_size, **kwargs):
super().__init__(**kwargs)
self.padding = tf.keras.layers.ZeroPadding2D(padding=(patch_size // 2))
self.proj = tf.keras.layers.Conv2D(filters=hidden_size, kernel_siz... |
class IterativeCSVWriter():
def __init__(self, outstream, data_fields, defaults={}):
self._outstream = outstream
self.fields = (['ID', 'duration'] + self._expand_data_fields(data_fields))
self.defaults = defaults
self._outstream.write(','.join(self.fields))
def set_default(self, ... |
.service(data={'title': 'Bad request', 'status': 400, 'detail': 'Please, upgrade your CLI'}, status=400, method='POST', path='/reports/upload/')
.openapi_version('3.0')
def test_client_error_on_upload(cli, schema_url, service, snapshot_cli):
assert (cli.run(schema_url, 'my-api', f'--schemathesis-io-token={service.t... |
def test_data_parallel_communicator(comm_nccl_opts):
pytest.skip('Always skip. Deprecated.')
if (comm_nccl_opts is None):
pytest.skip('Communicator test is disabled. You can turn it on by an option `--test-communicator`.')
from nnabla.ext_utils import get_extension_context
comm = comm_nccl_opts.... |
def test_pop():
el = generate_event_list_with_random_time(0, 10)
last_ts = (- float('inf'))
while (not el.isempty()):
top_event = el.pop()
assert (top_event.time >= last_ts)
last_ts = top_event.time
random.seed(0)
priorities = list(random.randint(MIN_TS, MAX_TS, 10))
el =... |
def NormalizeToCurrentPlatform(test_output):
    """Rewrite expected test output to the current platform's formatting.

    Only Windows needs rewriting; other platforms return the text untouched.
    """
    if not IS_WINDOWS:
        return test_output
    # Strip ANSI color escape sequences.
    test_output = re.sub('\x1b\\[(0;3\\d)?m', '', test_output)
    # Normalize failure markers to the error style.
    test_output = re.sub(': Failure\\n', ': error: ', test_output)
    # Convert file(line): references to file:line: form.
    test_output = re.sub('((\\w|\\.)+)\\((\\d+)\\):', '\\1:\\3:', test_output)
    return test_output
def _makenamedict(module='numpy'):
module = __import__(module, globals(), locals(), [])
thedict = {module.__name__: module.__dict__}
dictlist = [module.__name__]
totraverse = [module.__dict__]
while True:
if (len(totraverse) == 0):
break
thisdict = totraverse.pop(0)
... |
def _sympysage_besseli(self):
    """Convert a SymPy ``besseli`` expression to Sage's ``bessel_I``."""
    from sage.functions.bessel import bessel_I
    order, argument = self.args[0], self.args[1]
    return bessel_I(order._sage_(), argument._sage_())
def _dtype_from_pep3118(spec):
    """Construct a dtype from a PEP 3118 buffer-format string.

    Wraps the recursive worker, discarding the alignment it reports.
    """
    dtype, _align = __dtype_from_pep3118(_Stream(spec), is_subdtype=False)
    return dtype
class LfExecutor(object):
def __init__(self, sketch_executor=None, kb_mode='offline', use_op_type_constraint=False):
self.skt_exe = (sketch_executor or SketchExecutor())
kb = KB(kb_mode)
self._parser = Parser(kb, use_op_type_constraint=use_op_type_constraint)
self._parser.load_child2... |
.skipif((not has_mpl), reason='matplotlib not installed')
def test_load_preferred_plugins_imread():
from skimage.io._plugins import pil_plugin, matplotlib_plugin
with protect_preferred_plugins():
manage_plugins.preferred_plugins['imread'] = ['pil']
manage_plugins.reset_plugins()
(plug, f... |
class LayoutBuilderType(numba.types.Type):
def _init(self, parameters):
self._parameters = parameters
def parameter(self, name):
if (name in self._parameters):
return numba.types.StringLiteral(self._parameters[name])
else:
raise NumbaTypeError(f'LayoutBuilder.para... |
class KLEJTask(BaseTask):
def read(self, data_path: str, split: str) -> Iterable[DataExample]:
input_path = os.path.join(data_path, self.spec().task_path(), (split + '.tsv'))
has_target = True
if ((split == 'test') and (not os.path.exists(input_path))):
input_path = os.path.join(... |
def perhaps_convert_float(param, total):
    """Interpret a float ``param`` as a fraction of ``total``.

    Floats are converted to an absolute integer count (truncated); any other
    value is returned unchanged.
    """
    if isinstance(param, float):
        return int(param * total)
    return param
def sepia(white='#fff0c0'):
    """Build an RGB sepia palette whose white point is the given color.

    Each channel gets a linear lookup table from 0 up to that channel's
    value in ``white``.
    """
    channels = ImageColor.getrgb(white)
    luts = [make_linear_lut(0, level) for level in channels]
    return ImagePalette('RGB', luts[0] + luts[1] + luts[2])
def warp_and_crop_face(src_img, facial_pts, reference_pts=None, crop_size=(96, 112), align_type='smilarity'):
if (reference_pts is None):
if ((crop_size[0] == 96) and (crop_size[1] == 112)):
reference_pts = REFERENCE_FACIAL_POINTS
else:
default_square = False
inne... |
def get_base_type(type: dace.typeclass) -> dace.typeclass:
    """Strip one level of vector or pointer wrapping from a dace type.

    Plain scalar types are returned unchanged.
    """
    if isinstance(type, dtypes.vector):
        return type.vtype
    if isinstance(type, dtypes.pointer):
        return type.base_type
    return type
def gaussian(x, mu, sigma):
    """Scaled Gaussian bell around ``mu``, clamped to [1e-10, 1.0].

    The clamp keeps downstream log/division numerically safe.
    NOTE(review): GAUSSIAN_SCALER is a module-level constant defined
    elsewhere in this file.
    """
    exponent = -((x - mu) ** 2) / (2.0 * sigma ** 2)
    bell = torch.exp(exponent)
    return torch.clamp(GAUSSIAN_SCALER / sigma * bell, 1e-10, 1.0)
class ResMLP(BaseModel):
def __init__(self, dropout: float, num_residuals_per_block: int, num_blocks: int, num_classes: int, num_initial_features: int, add_residual: bool=True, add_IC: bool=True):
super().__init__()
blocks = []
for i in range(num_blocks):
blocks.extend(self._crea... |
def _recurse_union_any(layout: ak.contents.UnionArray, type_: ak.types.Type) -> ak.contents.Content:
    """Dispatch union-array conversion on whether the target type is itself a union."""
    handler = (
        _recurse_union_union
        if isinstance(type_, ak.types.UnionType)
        else _recurse_union_non_union
    )
    return handler(layout, type_)
def test_in_local_sampler(policy, envs):
true_workers = WorkerFactory(seed=100, n_workers=N_TRAJ, max_path_length=MAX_PATH_LENGTH)
true_sampler = LocalSampler.from_worker_factory(true_workers, policy, envs)
vec_workers = WorkerFactory(seed=100, n_workers=1, worker_class=VecWorker, worker_args=dict(n_envs=N_... |
def validate(model, data, device):
model.eval()
(count, correct) = (0, 0)
with torch.no_grad():
for batch in tqdm(data, total=len(data)):
(question, choices, keys, values, answer) = [x.to(device) for x in batch]
logit = model(question, keys, values)
predict = logi... |
class GCN(torch.nn.Module):
def __init__(self, args):
super(GCN, self).__init__()
self.args = args
self.layers = torch.nn.ModuleList([])
for i in range((args['num_layers'] + 1)):
dim_input = (args['num_features'] if (i == 0) else args['hidden_dim'])
conv = GCN... |
def get_data_dim(layer):
if ((layer_type(layer) == 'data') or (layer_type(layer) == 'imagedata')):
try:
scale = layer.transform_param.scale
if (scale <= 0):
scale = 1
except AttributeError:
pass
try:
side = (layer.transform_para... |
def copy_vision_model_and_projection(hf_model, pt_model):
hf_model.visual_projection.weight.data = pt_model.visual.proj.data.T
copy_linear(hf_model.vision_model.pre_layernorm, pt_model.visual.ln_pre)
copy_linear(hf_model.vision_model.post_layernorm, pt_model.visual.ln_post)
hf_model.vision_model.embeddi... |
def test_render_runner():
if torch.cuda.is_available():
device_name = 'cuda:0'
else:
device_name = 'cpu'
device = torch.device(device_name)
meshes = ico_sphere(3, device)
meshes.textures = TexturesVertex(verts_features=torch.ones_like(meshes.verts_padded()).to(device))
(K, R, T) ... |
def fc(x, c):
num_units_in = x.get_shape()[1]
num_units_out = c['fc_units_out']
weights_initializer = tf.truncated_normal_initializer(stddev=FC_WEIGHT_STDDEV)
weights = _get_variable('weights', shape=[num_units_in, num_units_out], initializer=weights_initializer, weight_decay=FC_WEIGHT_STDDEV)
biase... |
def calculate_delta(threshold: np.ndarray, n_bits: int=8, signed: bool=False) -> np.ndarray:
return (threshold / (2 ** (n_bits - int(signed)))) |
class TestModelFromUniformDensity():
(autouse=True)
def setup(self, example_configuration_dir, atomic_dataset):
self.config = Configuration.from_yaml((example_configuration_dir / 'tardis_configv1_uniform_density.yml'))
self.simulation_state = SimulationState.from_config(self.config, atom_data=at... |
.spark
def test_works(log, model):
log = log.withColumn('relevance', sf.when((sf.col('relevance') < 3), 0).otherwise(1))
dataset = create_dataset(log)
model.fit(dataset)
model.item_popularity.count() |
def evaluation(dev_dataloader, device, model):
eval_loss = 0
nb_eval_steps = 0
preds = []
gold_label_ids = []
for (input_ids, input_mask, segment_ids, label_ids) in dev_dataloader:
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.t... |
def get_all_splits(K: int, clusters, id_to_node: Dict[(int, Node)], to_unify: Dict[(int, List[Union[(List, Any)]])], C: int, reminder_policy: ReminderPolicy=ReminderPolicy.ToLast):
all_splits = []
assert (len(clusters.cluster.unique()) == C)
clusters = [list(clusters.groupby('cluster').get_group(c).sort_val... |
class ExampleDataset(Dataset):
    """Minimal single-item dataset returning a constant tensor (for tests)."""

    def __getitem__(self, idx):
        # The index is ignored; every item is the same fixed tensor dict.
        return dict(imgs=torch.tensor([1]))

    def __len__(self):
        return 1
def generate_label_plain(inputs, imsize):
pred_batch = []
for input in inputs:
input = input.view(1, 19, imsize, imsize)
pred = np.squeeze(input.data.max(1)[1].cpu().numpy(), axis=0)
pred_batch.append(pred)
pred_batch = np.array(pred_batch)
pred_batch = torch.from_numpy(pred_batc... |
class BaseBackbone(nn.Module, metaclass=ABCMeta):
def init_weights(self, pretrained=None, patch_padding='pad'):
if isinstance(pretrained, str):
logger = logging.getLogger()
load_checkpoint(self, pretrained, strict=False, logger=logger, patch_padding=patch_padding)
elif (pretr... |
def master_only(func):
(func)
def wrapper(*args, **kwargs):
if is_main_process():
return func(*args, **kwargs)
return wrapper |
def add_update_args(fn):
(fn)
def wrapper(*args, **kwargs):
ret = fn(*args, **kwargs)
ret = update_args(ret)
return ret
return wrapper |
class LfsEnableCommand():
def __init__(self, args):
self.args = args
def run(self):
warnings.warn('Managing repositories through transformers-cli is deprecated. Please use `huggingface-cli` instead.')
local_path = os.path.abspath(self.args.path)
if (not os.path.isdir(local_path))... |
class TrainOptions(BaseOptions):
def __init__(self):
super().__init__()
self.parser = self.add_options_train(self.parser)
def add_options_train(self, parser):
parser.add_argument('--lr', type=float, default=0.0002, help='Learning rate.')
parser.add_argument('--start-epoch', type=... |
class sage__numerical__mip(PythonModule):
    """Feature describing the sage.numerical.mip module (sagemath_polyhedra spkg)."""

    def __init__(self):
        super().__init__('sage.numerical.mip', spkg='sagemath_polyhedra')
class AsymptoticCalculator(BaseCalculator):
UNBINNED_TO_BINNED_LOSS = {}
try:
from zfit.loss import UnbinnedNLL, BinnedNLL, ExtendedUnbinnedNLL, ExtendedBinnedNLL
except ImportError:
pass
else:
UNBINNED_TO_BINNED_LOSS[UnbinnedNLL] = BinnedNLL
UNBINNED_TO_BINNED_LOSS[Exten... |
def rand_log_uniform(shape, min_value, max_value, device='cpu', dtype=torch.float32):
    """Sample values whose logarithm is uniform on [log(min_value), log(max_value)].

    Draws uniform samples, maps them onto the log-range, then exponentiates.
    """
    log_min = math.log(min_value)
    log_max = math.log(max_value)
    uniform = torch.rand(shape, device=device, dtype=dtype)
    return torch.exp(uniform * (log_max - log_min) + log_min)
def short_vector_list_up_to_length(self, len_bound, up_to_sign_flag=False):
if (not self.is_positive_definite()):
raise ValueError('Quadratic form must be positive definite in order to enumerate short vectors')
from sage.libs.pari.all import pari
if (len_bound <= 0):
return []
V = FreeMo... |
def get(media_type: str) -> (type[Serializer] | None):
if is_json_media_type(media_type):
media_type = 'application/json'
if is_plain_text_media_type(media_type):
media_type = 'text/plain'
if is_xml_media_type(media_type):
media_type = 'application/xml'
return SERIALIZERS.get(med... |
class TaskEnvironment(object):
def __init__(self, pyrep: PyRep, robot: Robot, scene: Scene, task: Task, action_mode: ActionMode, dataset_root: str, obs_config: ObservationConfig, static_positions: bool=False, attach_grasped_objects: bool=True, shaped_rewards: bool=False):
self._pyrep = pyrep
self._r... |
def recovery_le(pb, corrs, macro):
out = {}
dim = corrs['corrs_le']['u_00'].shape[1]
mic_u = (- compute_micro_u(corrs['corrs_le'], macro['strain'], 'u', dim))
out['u_mic'] = Struct(name='output_data', mode='vertex', data=mic_u)
(stress_Y, strain_Y) = compute_stress_strain_u(pb, 'i', 'Y', 'mat.D', 'u... |
def write(file_path, text):
    """Write the given lines to ``file_path`` as UTF-8, joined with newlines.

    Fix: replaces the legacy ``codecs.open`` with the built-in ``open`` and
    an explicit ``encoding`` argument — the modern, equivalent idiom.
    """
    with open(file_path, 'w', encoding='utf-8') as text_f:
        text_f.write('\n'.join(text))
class SubMaskedArray(MaskedArray):
    """MaskedArray subclass recording an extra ``info`` entry in ``_optinfo``."""

    def __new__(cls, info=None, **kwargs):
        instance = super().__new__(cls, **kwargs)
        instance._optinfo['info'] = info
        return instance
def test_none_convertibles():
myfunc = None
def myprog(A: dace.float64[20]):
if (myfunc is not None):
return myfunc(A)
return A
A = np.random.rand(20)
assert np.allclose(myprog(A), A)
def modifier(a):
return (a + 1)
myfunc = modifier
assert np.allclose(myp... |
def do_annotate(args):
args.props = (dict(args.props) if args.props else {})
if args.sentence_mode:
args.props['ssplit.isOneSentence'] = True
with corenlp.CoreNLPClient(annotators=args.annotators, properties=args.props, be_quiet=(not args.verbose_server)) as client:
for line in args.input:
... |
class BenchmarkThreadExceptions(Exception):
    """Aggregate exception collecting the errors raised by benchmark threads."""

    def __init__(self, exceptions):
        super().__init__()
        # The individual exceptions gathered from worker threads.
        self.exceptions = exceptions
class PytorchImplementation(FrameworkImplementation):
def __init__(self):
super().__init__()
def constants(self):
return pytorch_constants
def to_numpy(self, tensor: torch.Tensor) -> np.ndarray:
return torch_tensor_to_numpy(tensor)
def to_tensor(self, tensor: Any) -> torch.Tensor... |
class InstanceNormBenchmark(op_bench.TorchBenchmarkBase):
def init(self, dims):
self.X = ((torch.rand(*dims) - 0.5) * 256)
num_channels = dims[1]
self.weight = torch.rand(num_channels, dtype=torch.float)
self.bias = torch.rand(num_channels, dtype=torch.float)
self.eps = 1e-05... |
def fpMod_using_fpRem(x, y, ctx=None):
    """Floating-point modulus expressed via z3's fpRem.

    fpRem's result can be negative (IEEE remainder is symmetric around
    zero), whereas a modulus should be non-negative before the final sign
    fix; the two If's below correct for that.
    """
    y = z3.fpAbs(y)
    z = z3.fpRem(z3.fpAbs(x), y, ctx)
    # If the remainder of |x| by |y| is negative, shift it up by |y|.
    r = z3.If(z3.fpIsNegative(z), (z + y), z, ctx)
    # Give the result the same sign as x by negating when the signs disagree.
    return z3.If(z3.Not((z3.fpIsNegative(x) == z3.fpIsNegative(r)), ctx), z3.fpNeg(r), r, ctx)
def partial_fused_bn(x, mean, var, gain=None, bias=None, eps=1e-05):
    """Apply the affine part of batch norm using precomputed statistics.

    Computes x * scale - (mean * scale - bias), where
    scale = gain / sqrt(var + eps) (gain and bias are optional).
    """
    inv_std = torch.rsqrt(var + eps)
    scale = inv_std if gain is None else inv_std * gain
    offset = mean * scale
    if bias is not None:
        offset = offset - bias
    return x * scale - offset
def train(opt):
model = CycleGANModel(opt)
dataset = CDFdata.get_loader(opt)
(img_logs, weight_logs) = init_logs(opt)
for epoch_id in range(opt.epoch_size):
for (batch_id, data) in enumerate(dataset):
model.set_input(data)
model.optimize_parameters()
if ((batc... |
def test_make_args():
x = Symbol('x')
y = Symbol('y')
z = Symbol('z')
assert (Add.make_args(x) == (x,))
assert (Mul.make_args(x) == (x,))
assert (Add.make_args(((x * y) * z)) == (((x * y) * z),))
assert (Mul.make_args(((x * y) * z)) == ((x * y) * z).args)
assert (Add.make_args(((x + y) +... |
def conv3x3_EW(in_planes, out_planes, stride=1):
    """3x3 element-wise convolution with padding 1 and no bias."""
    return Conv2d_EW(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.