code stringlengths 101 5.91M |
|---|
class StableDiffusionInpaintPipeline(DiffusionPipeline):
def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[(DDIMScheduler, PNDMScheduler)], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPFeatureExtractor):... |
def test_ones_dtype(backend, caplog):
    """Asking pyhf for ones with an unknown dtype must raise KeyError and log it."""
    with caplog.at_level(logging.INFO, 'pyhf.tensor'):
        with pytest.raises(KeyError):
            assert pyhf.tensorlib.ones([1, 2, 3], dtype='long')
        # The backend should have logged a diagnostic before raising.
        assert 'Invalid dtype' in caplog.text
def register_Ns3SystemWallClockMs_methods(root_module, cls):
cls.add_constructor([param('ns3::SystemWallClockMs const &', 'arg0')])
cls.add_constructor([])
cls.add_method('End', 'int64_t', [])
cls.add_method('GetElapsedReal', 'int64_t', [], is_const=True)
cls.add_method('GetElapsedSystem', 'int64_t'... |
def content_type_conformance(response: GenericResponse, case: Case) -> (bool | None):
from .schemas import BaseOpenAPISchema
if (not isinstance(case.operation.schema, BaseOpenAPISchema)):
return True
documented_content_types = case.operation.schema.get_content_types(case.operation, response)
if ... |
# NOTE(review): the tuple below looks like the argument list of a stripped
# decorator (likely @unittest.skipIf(...)); as written it is a bare expression
# that calls device_cc() at import time and discards the result — confirm
# against the upstream source.
((device_cc() < 90), 'Device compute capability is insufficient for SM90 tests.')
class GemmS8Sm90(unittest.TestCase):
    # Placeholder test case for SM90 int8 GEMM; no test methods in this view.
    pass
def model_class(model_cls, cfg=None, make=True, conv=default_conv):
    """Instantiate ``model_cls`` via its ``get_kwargs`` hook, or return the class.

    When ``make`` is true and the class exposes ``get_kwargs``, an instance is
    built from the kwargs derived from ``cfg`` (forwarding ``conv``); otherwise
    the class object itself is handed back unchanged.
    """
    can_build = make and hasattr(model_cls, 'get_kwargs')
    if not can_build:
        return model_cls
    kwargs = model_cls.get_kwargs(cfg, conv=conv)
    return model_cls(**kwargs)
class ResidualBlock(nn.Sequential):
def __init__(self, in_planes, out_planes, dprob, stride=1):
super(ResidualBlock, self).__init__()
self.bn = nn.Sequential(nn.BatchNorm2d(in_planes), nn.ReLU(inplace=True))
self.conv = nn.Sequential(nn.Conv2d(in_planes, out_planes, kernel_size=3, padding=1,... |
def _filter_batch(np_batch):
for (k, v) in np_batch.items():
if (v.dtype == np.bool):
(yield (k, v.astype(int)))
else:
(yield (k, v)) |
def test_loop_inlining_do_for():
sdfg = dace.SDFG('inlining')
state0 = sdfg.add_state('state0', is_start_block=True)
loop1 = LoopRegion(label='loop1', condition_expr='i < 10', loop_var='i', initialize_expr='i = 0', update_expr='i = i + 1', inverted=True)
sdfg.add_node(loop1)
state1 = loop1.add_state... |
class MultiRNNCell(RNNCell):
def __init__(self, cells, residual_output_layers=None, **kwargs):
super(MultiRNNCell, self).__init__(**kwargs)
self.cells = cells
if (residual_output_layers is None):
self.residual_output_layers = []
else:
self.residual_output_laye... |
class Swin2SRPreTrainedModel(metaclass=DummyObject):
    """Import-guard placeholder for Swin2SRPreTrainedModel.

    Instantiation delegates to ``requires_backends``, which raises an
    informative error when the 'torch' backend is not installed.
    """

    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Arguments are accepted and ignored; the call only validates backends.
        requires_backends(self, ['torch'])
def VGGA(order):
model = cnn.CNNModelHelper(order, name='vgg-a', use_cudnn=True, cudnn_exhaustive_search=True)
conv1 = model.Conv('data', 'conv1', 3, 64, 3, ('XavierFill', {}), ('ConstantFill', {}), pad=1)
relu1 = model.Relu(conv1, 'conv1')
pool1 = model.MaxPool(relu1, 'pool1', kernel=2, stride=2)
c... |
def parse_voc_xml(node):
voc_dict = {}
children = list(node)
if children:
def_dic = defaultdict(list)
for dc in map(parse_voc_xml, children):
for (ind, v) in dc.items():
def_dic[ind].append(v)
voc_dict = {node.tag: {ind: (v[0] if (len(v) == 1) else v) for ... |
def test_rpad_list_array():
content = ak.contents.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]))
starts = ak.index.Index64(np.array([0, 3, 4, 5, 8]))
stops = ak.index.Index64(np.array([3, 3, 6, 8, 9]))
array = ak.contents.ListArray(starts, stops, content)
assert (to_list(ar... |
def register_Ns3OfdmDcdChannelEncodings_methods(root_module, cls):
cls.add_constructor([param('ns3::OfdmDcdChannelEncodings const &', 'arg0')])
cls.add_constructor([])
cls.add_method('GetBaseStationId', 'ns3::Mac48Address', [], is_const=True)
cls.add_method('GetChannelNr', 'uint8_t', [], is_const=True)
... |
def init_uniform(module):
    """Initialize *module* in place: Xavier-uniform weight, zero bias.

    A parameter that is ``None`` is left untouched. Returns ``None``.
    """
    if module.weight is not None:
        nn.init.xavier_uniform_(module.weight)
    if module.bias is not None:
        nn.init.zeros_(module.bias)
class LineByLineWithSOPTextDataset():
    """Import-guard placeholder: the real dataset class needs PyTorch installed."""

    def __init__(self, *args, **kwargs):
        # Raises a helpful error when PyTorch is absent; all arguments ignored.
        requires_pytorch(self)
class KaldiInitializerConfig(FairseqDataclass):
data_dir: str = MISSING
fst_dir: Optional[str] = None
in_labels: str = MISSING
out_labels: Optional[str] = None
wav2letter_lexicon: Optional[str] = None
lm_arpa: str = MISSING
kaldi_root: str = MISSING
blank_symbol: str = '<s>'
silence_... |
def get_requires_for_build_sdist(config_settings=None):
    """PEP 517 hook: return build requirements for an sdist (always setuptools)."""
    normalized = _fix_config(config_settings)
    return _get_build_requires(normalized, requirements=['setuptools'])
def main():
args = parse_args()
mmcv.check_file_exist(args.prediction_path)
cfg = Config.fromfile(args.config)
cfg = replace_cfg_vals(cfg)
update_data_root(cfg)
if (args.cfg_options is not None):
cfg.merge_from_dict(args.cfg_options)
cfg.data.test.test_mode = True
cfg.data.test.p... |
def extract_keywords_len(prompt):
    """Return the number of keywords RAKE extracts from *prompt*."""
    return len(Rake.run(prompt))
def test_smart_ptr_from_default():
    """Loading a shared_ptr from a default-holder instance must fail loudly."""
    instance = m.HeldByDefaultHolder()
    with pytest.raises(RuntimeError) as excinfo:
        m.HeldByDefaultHolder.load_shared_ptr(instance)
    # Fix: assert against the exception *value*, not the ExceptionInfo wrapper —
    # str(excinfo) renders the wrapper object, not (reliably) the message text.
    assert 'Unable to load a custom holder type from a default-holder instance' in str(excinfo.value)
def average_checkpoints(checkpoint_list, recoverable_name, parameter_loader=torch.load, averager=average_state_dicts, device=None):
try:
parameter_iterator = (parameter_loader(ckpt.paramfiles[recoverable_name], map_location=device) for ckpt in checkpoint_list)
except TypeError:
parameter_iterato... |
def is_inline(elem):
    """Return True when *elem*'s node type ``t`` is one of the handled inline-ish types."""
    inline_types = ('heading', 'emph', 'strong', 'link', 'image', 'custom_inline')
    return elem.t in inline_types
class BatchLoader():
def __init__(self, with_label=True):
self.with_label = with_label
self.go_token = '<GO>'
self.pad_token = '<PAD>'
self.unk_token = '<UNK>'
with open(FLAGS.DATA_PATH, 'rb') as f:
data = pkl.load(f)
if self.with_label:
with o... |
def test_ByteMaskedArray_RecordArray_NumpyArray():
v1 = json.loads('{"class":"ByteMaskedArray","mask":"i8","content":{"class":"RecordArray","contents":{"nest":{"class":"NumpyArray","inner_shape":[],"itemsize":8,"format":"d","primitive":"float64","parameters":{},"form_key":null}},"parameters":{},"form_key":null},"va... |
def format_floats_for_csv(l):
    """Return a new list with every number rounded to two decimals.

    Rounding goes through ``'%.2f'`` string formatting (printf semantics), as
    in the original; the input sequence is not modified.
    """
    return [float('%.2f' % num) for num in l]
def bio_to_segments(bio):
segments = []
segment = {'start': None, 'end': None}
for (i, val) in enumerate(bio):
if (segment['start'] is None):
if (val == BIO['B']):
segment['start'] = i
else:
if (val in [BIO['B'], BIO['O']]):
segment['en... |
def cifar_tf_preprocess(random_crop=True, random_flip=True, whiten=True):
image_size = 32
inp = tf.placeholder(tf.float32, [image_size, image_size, 3])
image = inp
if random_crop:
log.info('Apply random cropping')
image = tf.image.resize_image_with_crop_or_pad(inp, (image_size + 4), (ima... |
def register_Ns3EpcS1apSapMmeProvider_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::EpcS1apSapMmeProvider const &', 'arg0')])
cls.add_method('SendInitialContextSetupRequest', 'void', [param('uint64_t', 'mmeUeS1Id'), param('uint16_t', 'enbUeS1Id'), param('std::list< ns3:... |
def read_words(filename):
    """Read *filename* (UTF-8) and return the first whitespace token of each line.

    Blank (whitespace-only) lines map to the empty string, so the returned
    list has exactly one entry per input line.
    """
    words = []
    with open(filename, encoding='utf-8') as fin:
        for line in fin:
            stripped = line.strip()
            words.append(stripped.split()[0] if stripped else '')
    return words
class NearConstantInputWarning(DegenerateDataWarning):
    """Warning that the input data are nearly constant, so results may be unreliable."""

    def __init__(self, msg=None):
        # Fall back to the stock message when the caller supplies none.
        default = 'All values in data are nearly equal; results may not be reliable.'
        self.args = (msg if msg is not None else default,)
def onnx_inference(inputs: dict, onnx_file: str, dump_all: bool=True) -> dict:
import onnx
import onnxruntime
def generate_onnx_with_all(onnx_file: str):
output_keys = []
model = onnx.load(onnx_file)
no_list = ['Cast', 'Constant', 'Dropout', 'Loop']
for x in model.graph.node:... |
def register_Ns3Vector3DValue_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::Vector3D const &', 'value')])
cls.add_constructor([param('ns3::Vector3DValue const &', 'arg0')])
cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)... |
def add_plot_parser(subparsers):
parser_plt = subparsers.add_parser('plot_curve', help='parser for plotting curves')
parser_plt.add_argument('json_logs', type=str, nargs='+', help='path of train log in json format')
parser_plt.add_argument('--keys', type=str, nargs='+', default=['top1_acc'], help='the metri... |
def get_const_div_inv(expr: Expression, simplifier: Optional[LeanExprSimplifier]) -> Optional[Tuple[(int, bool)]]:
if ((not isinstance(expr, ExprOperator)) or (not (expr.op == '/')) or (simplifier is None)):
return None
s_expr = simplifier.visit(expr)
if isinstance(s_expr, ExprConst):
return... |
def store_data_in_csv(timing_entries):
    """Write one CSV row per timing entry to ``FLAGS.csv_file``.

    Each row is: info string, timestamp, number of batches, mean, standard
    deviation.

    Fix: on Python 3, ``csv.writer`` requires a text-mode file opened with
    ``newline=''``; the original ``'wb'`` mode raises
    ``TypeError: a bytes-like object is required``.
    """
    with open(FLAGS.csv_file, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        for timing_entry in timing_entries:
            writer.writerow([timing_entry.info_string, timing_entry.timestamp,
                             timing_entry.num_batches, timing_entry.mean,
                             timing_entry.sd])
class MaxVal(LoopBasedReplacement):
class Transformation(MinMaxValTransformation):
def __init__(self, ast):
super().__init__(ast)
def _result_init_value(self, array: ast_internal_classes.Array_Subscript_Node):
var_decl = self.scope_vars.get_var(array.parent, array.name.name)
... |
def rese_block(x, num_features, weight_decay, amplifying_ratio):
if (num_features != x.shape[(- 1)].value):
shortcut = Conv1D(num_features, kernel_size=1, padding='same', use_bias=True, kernel_regularizer=l2(weight_decay), kernel_initializer='glorot_uniform')(x)
shortcut = BatchNormalization()(short... |
def create_script_module_impl(nn_module, concrete_type, stubs_fn):
cpp_module = torch._C._create_module_with_type(concrete_type.jit_type)
method_stubs = stubs_fn(nn_module)
property_stubs = get_property_stubs(nn_module)
def init_fn(script_module):
for (name, (attr_type, is_param)) in concrete_ty... |
_REGISTRY.register()
class OccludedREID(ImageDataset):
dataset_name = 'occludereid'
def __init__(self, root='datasets'):
self.root = root
self.query_dir = osp.join(self.root, 'OccludedREID/query')
self.gallery_dir = osp.join(self.root, 'OccludedREID/gallery')
(query, gallery) = p... |
class docParamListTypeSub(supermod.docParamListType):
    """Thin subclass hook over the generated ``docParamListType`` (generateDS pattern)."""

    def __init__(self, kind=None, parameteritem=None):
        # Delegate straight to the generated base-class initializer.
        super().__init__(kind, parameteritem)
class Fact():
    """A ground atom asserted as a fact; renders as ``<atom>.``."""

    def __init__(self, atom):
        self.atom = atom

    def __str__(self):
        # Trailing period is the standard fact terminator (Datalog/Prolog style).
        return '%s.' % self.atom
class IdentityOp(mx.operator.CustomOp):
def __init__(self, logging_prefix='identity', input_debug=False, grad_debug=False):
super(IdentityOp, self).__init__()
self.logging_prefix = logging_prefix
self.input_debug = input_debug
self.grad_debug = grad_debug
def forward(self, is_tra... |
class FrozenBatchNorm2d(nn.Module):
_version = 3
def __init__(self, num_features, eps=1e-05):
super().__init__()
self.num_features = num_features
self.eps = eps
self.register_buffer('weight', torch.ones(num_features))
self.register_buffer('bias', torch.zeros(num_features)... |
def main():
    """Convert the Danish Arboretum TIGER treebank from its fixed source path."""
    source = 'extern_data/constituency/danish/W0084/arboretum.tiger/arboretum.tiger'
    # Result kept for parity with the original; conversion side effects matter here.
    treebank = convert_tiger_treebank(source)
def register_Ns3Ipv6FlowClassifierSortByCount_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::Ipv6FlowClassifier::SortByCount const &', 'arg0')])
cls.add_method('operator()', 'bool', [param('std::pair< ns3::Ipv6Header::DscpType, unsigned int >', 'left'), param('std::pair<... |
class MHSA_stage(nn.Module):
def __init__(self, dim, num_layers, num_heads, mlp_ratio, qkv_bias=True, qk_scale=None, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, norm_layer=nn.LayerNorm, crpe_window={3: 2, 5: 3, 7: 3}):
super(MHSA_stage, self).__init__()
self.cpe = ConvPosEnc(dim, k=3)
... |
def Ensemble(*args, **kwargs):
    """Deprecated top-level alias; use ``ensemble.Ensemble`` instead.

    Emits the standard deprecation warning, then forwards all arguments
    unchanged to the relocated implementation.
    """
    _top_level_deprecation_warning('Ensemble', 'ensemble')
    return ensemble.Ensemble(*args, **kwargs)
class ExampleBatchIterator(BatchIterator):
def __init__(self, total):
self.iterated = 0
self.total = total
super(ExampleBatchIterator, self).__init__(default_batch_size=30)
def next_batch(self, k):
batch = [(self.iterated + i) for i in range(k)]
batch = [b for b in batch ... |
_utils.test(require=ti.extension.adstack)
def test_polar_decompose_2D():
dim = 2
F_1 = ti.Matrix.field(dim, dim, dtype=ti.f32, shape=(), needs_grad=True)
F = ti.Matrix.field(dim, dim, dtype=ti.f32, shape=(), needs_grad=True)
loss = ti.field(dtype=ti.f32, shape=(), needs_grad=True)
def polar_decompos... |
class TSTNetNormal(nn.Module):
    """Two parallel ConvBn branches whose outputs are concatenated channel-wise.

    Fix: the original ``__init__`` never called ``super().__init__()``; an
    ``nn.Module`` subclass must initialize its base class before submodules
    are assigned, otherwise module/attribute registration fails at runtime.
    """

    def __init__(self):
        super().__init__()
        self.conv_bn_1 = ConvBn(1)
        self.conv_bn_2 = ConvBn(1)

    def call(self, x1, x2):
        """Run each input through its own branch and concatenate along axis 1."""
        y1 = self.conv_bn_1(x1)
        y2 = self.conv_bn_2(x2)
        return F.concatenate(y1, y2, axis=1)
class HashableDict(dict):
def __eq__(self, other):
if isinstance(other, self.__class__):
return ((frozenset(self), frozenset(self.values())) == (frozenset(other), frozenset(other.values())))
return NotImplemented
def __ne__(self, other):
r = self.__eq__(other)
if (r i... |
class SubTensor(torch.Tensor):
    """Tensor subclass that routes torch functions through ``HANDLED_FUNCTIONS_SUB``."""

    def __torch_function__(self, func, types, args=(), kwargs=None):
        # Only functions with a registered override are supported here.
        if func not in HANDLED_FUNCTIONS_SUB:
            return NotImplemented
        call_kwargs = {} if kwargs is None else kwargs
        return HANDLED_FUNCTIONS_SUB[func](*args, **call_kwargs)
def train(args):
if args['use_dictionary']:
(lexicon, args['num_dict_feat']) = load_lexicon(args)
dictionary = create_dictionary(lexicon)
args['feat_dim'] += (args['num_dict_feat'] * 2)
else:
args['num_dict_feat'] = 0
lexicon = None
dictionary = None
mwt_dict ... |
class FindStatMapQuery(FindStatMap):
def __init__(self, data=None, values_of=None, distribution_of=None, domain=None, codomain=None, known_terms=None, function=None, depth=FINDSTAT_DEFAULT_DEPTH, debug=False):
self._first_terms = data
if ((data is not None) and (known_terms is None)):
se... |
def filter_tracks_by_time_breaks(detections, max_time_break_ratio_car, max_time_break_ratio_person):
tracks = detections_to_tracks(detections)
filtered_dets = []
for t in tracks:
if ((t[0].class_id == CAR_CLASS_ID) and ((float(compute_nbr_time_breaks(t)) / float(len(t))) > max_time_break_ratio_car))... |
def load_and_cache_examples(args, tokenizer, evaluate=False, split='val'):
if (not evaluate):
file_path = args.train_data_file
elif (split == 'val'):
file_path = args.eval_data_file
elif (split == 'test'):
file_path = args.test_data_file
else:
raise TypeError('split value... |
class HELEN(Dataset):
CLASSES = ['background', 'skin', 'l-brow', 'r-brow', 'l-eye', 'r-eye', 'nose', 'u-lip', 'i-mouth', 'l-lip', 'hair']
PALETTE = torch.tensor([[0, 0, 0], [127, 0, 0], [254, 0, 0], [0, 84, 0], [169, 0, 50], [254, 84, 0], [255, 0, 84], [0, 118, 220], [84, 84, 0], [0, 84, 84], [84, 50, 0]])
... |
class WrappedOptimizerBase(OptimizerBase, IterativeMixin):
    """Optimizer base that counts iterations by wrapping the user callback."""

    def set_callback(self, callback):
        """Install *callback*, wrapped so every invocation bumps ``self.iteration``."""
        def counting_callback(*args):
            # ``self`` is a closure variable here; no rebinding happens, so the
            # original ``nonlocal self`` declaration was unnecessary.
            self.iteration += 1
            callback(*args)
        super().set_callback(counting_callback)
def strToHeading(string, level=0):
lens = [len(v) for v in string.split()]
if (level in [0, (- 2)]):
mark = '='
elif (level in [1, (- 1)]):
mark = '-'
elif (level == 2):
mark = '~'
else:
raise ValueError('Bad level given')
marks = mark.join([(mark * v) for v in le... |
.hypothesis_nested
.operations('custom_format')
def test_before_process_path_hook(wsgi_app_schema):
_app_schema.hook
def before_process_path(context, path, methods):
methods['get']['parameters'][0]['name'] = 'foo'
methods['get']['parameters'][0]['enum'] = ['bar']
strategy = wsgi_app_schema['... |
class Min(Module):
def __init__(self, dimension=0):
super(Min, self).__init__()
self.dimension = dimension
self._output = None
self._indices = None
def _getPositiveDimension(self, input):
dimension = self.dimension
if (dimension < 0):
dimension = (inpu... |
def create_dataset():
time1 = time.time()
if (cfg.dataset.format in ['OGB']):
(graphs, splits) = load_dataset()
else:
graphs = load_dataset()
time2 = time.time()
min_node = filter_graphs()
dataset = GraphDataset(graphs, task=cfg.dataset.task, edge_train_mode=cfg.dataset.edge_trai... |
def summarize_full_df(full_df: pd.DataFrame) -> pd.DataFrame:
algs = [col[len('sMAPE'):] for col in full_df.columns if (col.startswith('sMAPE') and (not full_df[col].isna().any()))]
summary_df = pd.DataFrame({alg.lstrip('_'): [] for alg in algs})
(mean_smape, med_smape, mean_rmse, med_rmse) = [[] for _ in r... |
def _format_score(score: Dict[str, float]) -> str:
    """Render a metric-name -> value mapping as a single display string.

    Empty mapping -> 'None'; a single entry -> just the formatted value;
    otherwise space-separated "short-name value" pairs in sorted key order,
    where the short name is the text after the (up to two) leading ':'.
    """
    if not score:
        return 'None'
    if len(score) == 1:
        (only_value,) = score.values()
        return _format_value(only_value)
    parts = []
    for key in sorted(score):
        short_name = key.split(':', 2)[-1]
        parts.append('%s %s' % (short_name, _format_value(score[key])))
    return ' '.join(parts)
def register_Ns3LteRrcSapRrcConnectionSetup_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::LteRrcSap::RrcConnectionSetup const &', 'arg0')])
cls.add_instance_attribute('radioResourceConfigDedicated', 'ns3::LteRrcSap::RadioResourceConfigDedicated', is_const=False)
cls... |
def bbox_target(pos_bboxes_list, neg_bboxes_list, gt_labels, cfg):
(labels, label_weights) = ([], [])
pos_weight = (1.0 if (cfg.pos_weight <= 0) else cfg.pos_weight)
assert (len(pos_bboxes_list) == len(neg_bboxes_list) == len(gt_labels))
length = len(pos_bboxes_list)
for i in range(length):
... |
class PinSAGE(BasicModel):
def __init__(self, emb_dim: int, num_layers: int, item_encoder: torch.nn.Module, num_users: Optional[int]=None):
super(PinSAGE, self).__init__()
self.emb_dim = emb_dim
self.num_layers = num_layers
self.item_encoder = item_encoder
self.f = nn.Sigmoid... |
class SdfgLocation():
    """Address of a node set inside an SDFG: (sdfg id, state id, node ids)."""

    def __init__(self, sdfg_id, state_id, node_ids):
        self.sdfg_id = sdfg_id
        self.state_id = state_id
        self.node_ids = node_ids

    def printer(self):
        """Print the location as 'SDFG <sdfg>:<state>:<nodes>'."""
        location = 'SDFG {}:{}:{}'.format(self.sdfg_id, self.state_id, self.node_ids)
        print(location)
class FullyRandomProtocol(ProtocolBase):
def __init__(self, name, variable_space='space_a_b'):
super().__init__(name)
self._variable_space = variable_space
def get_intervention(self, episode, timestep):
if (timestep == 0):
if (self._variable_space == 'space_a_b'):
... |
def compute_similarities(args, tag_sims, train_csv, rootpath, DEVICE, SIMS, training_log, placement_node, parent):
cnd_drop_n = (args.dataset == constants.CAM16)
cnd_drop_n &= (args.al_type != constants.AL_WSL)
if (args.al_type != constants.AL_LP):
return 0
current_dir = dirname(abspath(__file__... |
.gpu
def test_persistent_fusion():
(sdfg, s_init) = _make_sdfg()
sdfg.apply_gpu_transformations(validate=False, simplify=False)
content_nodes = (set(sdfg.nodes()) - {sdfg.start_state, sdfg.sink_nodes()[0], s_init})
subgraph = SubgraphView(sdfg, content_nodes)
transform = GPUPersistentKernel()
tr... |
def patch_all():
distutils.core.Command = setuptools.Command
has_issue_12885 = (sys.version_info <= (3, 5, 3))
if has_issue_12885:
distutils.filelist.findall = setuptools.findall
needs_warehouse = ((sys.version_info < (2, 7, 13)) or ((3, 0) < sys.version_info < (3, 3, 7)) or ((3, 4) < sys.versio... |
.parametrize('agg_mode', ['last', 'mean'])
def test_flair_embeddings(agg_mode, flair_lm):
batch_tokenized_text = [['I', 'like', 'it', '.'], ['Do', 'you', 'love', 'me', '?'], ['Sure', '!'], ['Future', 'it', 'out']]
flair_emb = flair.embeddings.FlairEmbeddings(flair_lm)
flair_sentences = [flair.data.Sentence(... |
def read_data_from_csv_file(fileName_train, fileName_test, max_num_problems):
inputs = []
targets = []
rows = []
max_skill_num = 0
tuple_rows = []
train_rows = []
test_rows = []
with open(fileName_train, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row... |
def get_default_depparse_package(lang, ud_package):
    """Choose the depparse package suffix for *lang*.

    Preference order: charlm variant when a charlm package exists for the
    language, else the no-pretrain variant for languages without pretrained
    embeddings, else the no-charlm variant.
    """
    if get_depparse_charlm_package(lang, ud_package) is not None:
        return ud_package + '_charlm'
    if lang in no_pretrain_languages:
        return ud_package + '_nopretrain'
    return ud_package + '_nocharlm'
class DummyTask(LegacyFairseqTask):
def __init__(self, args):
super().__init__(args)
self.dictionary = get_dummy_dictionary()
if getattr(self.args, 'ctc', False):
self.dictionary.add_symbol('<ctc_blank>')
self.src_dict = self.dictionary
self.tgt_dict = self.dictio... |
class OPTLoraInt8(CausalLoraInt8Model):
    """OPT causal LM with LoRA adapters, quantized to int8."""

    # Registry key identifying this model configuration.
    config_name: str = 'opt_lora_int8'

    def __init__(self, weights_path: Optional[str]=None):
        # Bind the matching engine configuration; weights load from the
        # optional path via the base class.
        super().__init__(OPTLoraInt8Engine.config_name, weights_path)
def warmup_lr_schedule(optimizer, step, max_step, init_lr, max_lr):
    """Linearly warm the learning rate from ``init_lr`` toward ``max_lr``.

    The rate is clamped at ``max_lr`` once ``step`` passes ``max_step``;
    ``max(max_step, 1)`` guards against division by zero. Every param group
    of *optimizer* is updated in place.
    """
    lr = min(max_lr, init_lr + (max_lr - init_lr) * step / max(max_step, 1))
    for group in optimizer.param_groups:
        group['lr'] = lr
def cross_entropy(input, target, weight=None, size_average=None, ignore_index=(- 100), reduce=None, reduction='elementwise_mean'):
if ((size_average is not None) or (reduce is not None)):
reduction = _Reduction.legacy_get_string(size_average, reduce)
return nll_loss(log_softmax(input, 1), target, weight... |
def load_shanten_cache():
    """Load the precomputed shanten table (JSON next to ``DIR``) as a uint32 JAX array."""
    cache_path = os.path.join(DIR, 'shanten_cache.json')
    with open(cache_path) as f:
        table = json.load(f)
    return jnp.array(table, dtype=jnp.uint32)
def decode_segmap(image, objects, nc=21):
    """Binary mask: 255 where *image*'s label is in *objects*, else 0 (uint8).

    ``nc`` (number of classes) is accepted for API compatibility but is not
    used by this implementation.
    """
    mask = np.zeros_like(image).astype(np.uint8)
    for label in objects:
        mask[image == label] = 255
    return np.array(mask)
def is_active_bc(bc, ts=None, functions=None):
if ((bc.times is None) or (ts is None)):
active = True
elif isinstance(bc.times, list):
for tt in bc.times:
if (tt[0] <= ts.time < tt[1]):
active = True
break
else:
active = False
e... |
def build_transformer_layer_sequence(cfg, default_args=None):
    """Build a transformer layer sequence from *cfg* via the registry.

    Thin convenience wrapper around ``build_from_cfg`` bound to the
    ``TRANSFORMER_LAYER_SEQUENCE`` registry.
    """
    return build_from_cfg(cfg, TRANSFORMER_LAYER_SEQUENCE, default_args)
class DocumentQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
def setUp(self):
self.tool = load_tool('document-question-answering')
self.tool.setup()
self.remote_tool = load_tool('document-question-answering', remote=True)
def test_exact_match_arg(self):
dataset ... |
class RepConv(nn.Module):
def __init__(self, c1, c2, k=3, s=1, p=None, g=1, act=True, deploy=False):
super(RepConv, self).__init__()
self.deploy = deploy
self.groups = g
self.in_channels = c1
self.out_channels = c2
assert (k == 3)
assert (autopad(k, p) == 1)
... |
def _seg_5():
return [(525, 'V'), (526, 'M', u'o'), (527, 'V'), (528, 'M', u'r'), (529, 'V'), (530, 'M', u'r'), (531, 'V'), (532, 'M', u'u'), (533, 'V'), (534, 'M', u'u'), (535, 'V'), (536, 'M', u's'), (537, 'V'), (538, 'M', u't'), (539, 'V'), (540, 'M', u''), (541, 'V'), (542, 'M', u'h'), (543, 'V'), (544, 'M', u'... |
_utils.test(arch=get_host_arch_list())
def test_classfunc():
_oriented
class Foo():
def __init__(self):
self.val = ti.Matrix.field(n=3, m=3, dtype=ti.f32, shape=3)
def add_mat(self, a, b):
return (a + b)
def fill(self):
self.val[0] = self.add_mat(self.... |
def valsartan_smarts() -> GoalDirectedBenchmark:
sitagliptin_smiles = 'NC(CC(=O)N1CCn2c(nnc2C(F)(F)F)C1)Cc1cc(F)c(F)cc1F'
valsartan_smarts = 'CN(C=O)Cc1ccc(c2ccccc2)cc1'
specification = uniform_specification(1, 10, 100)
return GoalDirectedBenchmark(name='Valsartan SMARTS', objective=smarts_with_other_ta... |
class HamNoSysTokenizer(BaseTokenizer):
def __init__(self, starting_index=None, **kwargs):
self.font_path = Path(__file__).parent.joinpath('HamNoSysUnicode.ttf')
with TTFont(self.font_path) as font:
tokens = [chr(key) for key in font['cmap'].getBestCmap().keys()]
super().__init__... |
class TensorBoardOutputFormat(KVWriter):
def __init__(self, dir):
os.makedirs(dir, exist_ok=True)
self.dir = dir
path = osp.join(osp.abspath(dir), datetime.now().strftime('%b%d_%H-%M-%S'))
from tensorboardX import SummaryWriter
self.writer = SummaryWriter(log_dir=path)
de... |
def soft_dice_coef(target, prediction, axis=(1, 2, 3), smooth=0.0001):
intersection = tf.reduce_sum((target * prediction), axis=axis)
union = tf.reduce_sum((target + prediction), axis=axis)
numerator = ((tf.constant(2.0) * intersection) + smooth)
denominator = (union + smooth)
coef = (numerator / de... |
class TrivialMapPseudoInitEliminationTest(unittest.TestCase):
def test_can_be_applied(self):
graph = trivial_map_pseudo_init_sdfg()
count = graph.apply_transformations(TrivialMapElimination, validate=False, validate_all=False)
graph.validate()
graph.view()
self.assertGreater(... |
class DatasetReader(Registrable):
def __init__(self, lazy: bool=False) -> None:
self.lazy = lazy
def read(self, file_path: str) -> Iterable[Instance]:
lazy = getattr(self, 'lazy', None)
if (lazy is None):
logger.warning('DatasetReader.lazy is not set, did you forget to call t... |
class FormatControl(object):
__slots__ = ['no_binary', 'only_binary']
def __init__(self, no_binary=None, only_binary=None):
if (no_binary is None):
no_binary = set()
if (only_binary is None):
only_binary = set()
self.no_binary = no_binary
self.only_binary ... |
def init_hf_modules():
    """Make ``HF_MODULES_CACHE`` an importable package, idempotently.

    Adds the cache directory to ``sys.path`` (once), creates it if needed,
    drops an empty ``__init__.py`` inside, and invalidates import caches so
    the new package becomes visible immediately.
    """
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_marker = Path(HF_MODULES_CACHE) / '__init__.py'
    if not init_marker.exists():
        init_marker.touch()
    # A new module under an already-searched path entry needs a cache flush.
    importlib.invalidate_caches()
def mix(request, nb_frames, nb_channels, nb_bins):
    """Random tensor shaped like a complex spectrogram: (frames, bins, channels, 2)."""
    shape = (nb_frames, nb_bins, nb_channels, 2)
    return torch.rand(shape)
_start_docstrings('XLM-RoBERTa Model with a token classification head on top (a linear layer on top of\n the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. ', XLM_ROBERTA_START_DOCSTRING)
class TFXLMRobertaForTokenClassification(TFRobertaForTokenClassification):
config_class = XLMRobertaCon... |
def escapeRegexp(string):
    """Backslash-escape regex metacharacters in *string* so it matches literally.

    Fix: the backslash itself must be escaped, and escaped FIRST — otherwise
    a pre-existing backslash in the input would survive unescaped (corrupting
    the pattern), or the backslashes introduced for the other metacharacters
    would be doubled again.
    """
    specialCharacters = ('\\', '.', '^', '$', '*', '+', '?', '{', '}', '[', ']', '|', '(', ')', '-')
    for char in specialCharacters:
        string = string.replace(char, '\\' + char)
    return string
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.