class Bounds(object):
    def __init__(self, center, size):
        self.center = center
        self.size = size

    def to_dict(self):
        return {'center': self.center, 'size': self.size}
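A minimal usage sketch for the Bounds container above; the center/size vectors are illustrative only, not from the original project.

# Hypothetical usage; any sequences work for center and size.
b = Bounds(center=[0.0, 0.0, 0.0], size=[1.0, 2.0, 3.0])
print(b.to_dict())  # {'center': [0.0, 0.0, 0.0], 'size': [1.0, 2.0, 3.0]}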
def eval_mode2(mode, measurements, label_file):
    run, qrels = eval_mode(mode, measurements, label_file)
    output_dir = '/mnt/c/Users/salthamm/Documents/phd/data/caselaw/bm25/eval/{}'.format(mode[1])
    output_file = 'eval_bm25_{}_{}_aggregate_{}.txt'.format(mode[0], mode[1], mode[2])
    ranking_eval2(qrels, ru...
def reduce(inputs: Sequence[torch.Tensor],
           output: Optional[Union[torch.Tensor, Sequence[torch.Tensor]]] = None,
           root: int = 0,
           op: int = SUM,
           streams: Optional[Sequence[torch.cuda.Stream]] = None,
           comms=None,
           *,
           outputs: Optional[Sequence[torch.Tensor]] = None) -> None:
    _check_sequence_type(inputs)
    _output: torch.Tens...
def run_export_coarse(args, cfg, device, save_path=None):
    verbose = args.block_num <= 1
    if verbose:
        print('Export coarse visualization...')
    with torch.no_grad():
        ckpt_path = os.path.join(cfg.basedir, cfg.expname, 'coarse_last.tar')
        model, _, _ = load_existing_model(args, cfg, cfg...
def entryset(project_path):
    with open(os.path.join(project_path, 'setup.py'), 'r') as f:
        lines = f.readlines()
    marker_index = None
    for i, line in enumerate(lines):
        flag = '### Command Entries Will Be Set From Here. Do Not Delete This Line! ###'
        if flag in line:
            marker...
class RandomSamples(object):
    def __init__(self, h, w, percentage=0.5):
        self.idx = torch.randperm(h * w)[:int(h * w * percentage)]

    def __call__(self, tensor):
        return tensor.view(-1, tensor.shape[-1])[self.idx]
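A quick sketch of how RandomSamples subsamples pixels, assuming an H x W x C tensor; the shapes here are made up for illustration.

import torch

sampler = RandomSamples(h=4, w=4, percentage=0.5)  # keeps 8 of the 16 pixels
img = torch.randn(4, 4, 3)                         # H x W x C
subset = sampler(img)                              # flattened to (16, 3), then indexed
print(subset.shape)                                # torch.Size([8, 3])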
class ExhaustiveBFSController(Controller):
    def __init__(self, grid_size=0.25, fov=90.0, grid_file=None, graph_file=None,
                 metadata_file=None, images_file=None, seg_file=None, class_file=None,
                 depth_file=None, debug_mode=True, grid_assumption=False,
                 local_executable_path=None,
                 actions=['MoveAhead', 'RotateLeft', 'Rot...
def zipdir(path, ziph, include_format):
    for root, dirs, files in os.walk(path):
        for file in files:
            if os.path.splitext(file)[-1] in include_format:
                filename = os.path.join(root, file)
                arcname = os.path.relpath(os.path.join(root, file), os.path.join(path, '....
class SimpleProgressBar():
    def __init__(self, header):
        self.header = header
        self.atIndex = None

    def update(self, percent, message):
        if self.atIndex is None:
            sys.stdout.write(self.header)
            self.atIndex = 0
        next = int(percent * 50)
        if (next == sel...
def _is_utf(encoding):
    try:
        # The probe string was lost in extraction; tqdm's original uses
        # u'\u2588\u2589' here.
        u'\u2588\u2589'.encode(encoding)
    except UnicodeEncodeError:
        return False
    except Exception:
        try:
            return encoding.lower().startswith('utf-') or ('U8' == encoding)
        except:
            return False
    else:
        return True
def get_statestore_config_from_file(init):
    with open(init, 'r') as file:
        try:
            settings = dict(yaml.safe_load(file))
            return settings
        except yaml.YAMLError as e:
            raise e
def build_data(path='./data/WN18RR/', is_unweigted=False, directed=True):
    entity2id = read_entity_from_id(path + 'entity2id.txt')
    relation2id = read_relation_from_id(path + 'relation2id.txt')
    train_triples, train_adjacency_mat, unique_entities_train = load_data(
        os.path.join(path, 'train.txt'), entity2...
class PathTableau(ClonableArray, metaclass=InheritComparisonClasscallMetaclass):
    # The decorator name was truncated in extraction; in Sage this method is
    # declared with @abstract_method. The body placeholder is ours.
    @abstract_method
    def local_rule(self, i):
        ...

    def size(self):
        return len(self)

    def initial_shape(self):
        return self[0]

    def final_shape(self):
        return self[-1]

    def promotion(self):
        with self.clon...
def make_evaluator(opt, *args, **kwargs):
    return ConceptNetGenerationEvaluator(opt, *args, **kwargs)
class BgpAttackerInjectorHook(Hook):
    __component: BgpAttackerComponent

    def __init__(self, component: 'BgpAttackerComponent'):
        self.__component = component

    def getName(self) -> str:
        return 'BgpAttackerInjectorAs{}'.format(self.__component.getHijackerAsn())

    def getTargetLayer(self) -> str:...
def _unquote_to_bytes(string, unsafe=''):
    if isinstance(string, text_type):
        string = string.encode('utf-8')
    if isinstance(unsafe, text_type):
        unsafe = unsafe.encode('utf-8')
    unsafe = frozenset(bytearray(unsafe))
    groups = iter(string.split(b'%'))
    result = bytearray(next(groups, b''))
    ...
def test_old_bounds_to_new():
    bounds = ([1, 2], (None, 3), (-1, None))
    lb_true = np.array([1, -np.inf, -1])
    ub_true = np.array([2, 3, np.inf])
    lb, ub = old_bound_to_new(bounds)
    assert_array_equal(lb, lb_true)
    assert_array_equal(ub, ub_true)
    bounds = [(-np.inf, np.inf), (np.arra...
class PongScene(Scene):
    multiplayer = False
    players_count = 1
    VIDEO_W = 600
    VIDEO_H = 400

    def __init__(self):
        Scene.__init__(self, gravity=9.8, timestep=0.0165 / 4, frame_skip=4)
        self.score_left = 0
        self.score_right = 0
        self.ball_x = 0

    def actor_introduce(self, ...
@torch.no_grad()  # decorator prefix truncated in extraction; restored to the usual form
def extract_features(args, loader, inception, device):
    pools, logits = [], []
    for data in tqdm(loader):
        if isinstance(data, torch.Tensor):
            img = data
        else:
            img = data['image']
        if img.shape[1] != 3:
            img = img.expand(-1, 3, -1, -1)...
class AlbertForSequenceClassification(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class BlenderbotSmallModel(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
@pytest.mark.parametrize('optimizer_name', ['fail', b'fail'])  # decorator prefix truncated; restored
def test_supported_optimizers(optimizer_name):
    with pytest.raises(pyhf.exceptions.InvalidOptimizer):
        pyhf.set_backend(pyhf.tensorlib, optimizer_name)
class ConvBlock(nn.Module):
    def __init__(self, in_c, out_c, k, s=1, p=0, g=1):
        super(ConvBlock, self).__init__()
        self.conv = nn.Conv2d(in_c, out_c, k, stride=s, padding=p, bias=False, groups=g)
        self.bn = nn.BatchNorm2d(out_c)

    def forward(self, x):
        return F.relu6(self.bn(self.conv...
def np_ify(tensor_or_other):
    if isinstance(tensor_or_other, torch.autograd.Variable):
        return get_numpy(tensor_or_other)
    else:
        return tensor_or_other
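np_ify relies on a get_numpy helper defined elsewhere in its project; assuming that helper amounts to detach-to-NumPy, a self-contained sketch looks like this (the stand-in below is an assumption, not the project's code):

import torch

def get_numpy(t):  # stand-in for the project's helper (assumed behaviour)
    return t.detach().cpu().numpy()

x = torch.tensor([1.0, 2.0])
print(np_ify(x))       # array([1., 2.]) -- modern Tensors subclass Variable
print(np_ify([1, 2]))  # non-tensors pass through unchanged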
def rotate_transformation_matrix(transfromation_matrix, rotation, size):
    rotation = -rotation
    sn = tf.sin(rotation * (PI / 180))
    csn = tf.cos(rotation * (PI / 180))
    rot_matrix = tf.TensorArray(tf.float32, size=3, dynamic_size=False)
    rot_matrix = rot_matrix.write(0, value=[csn, -sn, 0])
    ...
def log_validation(vae, text_encoder, tokenizer, unet, args, accelerator, weight_dtype, epoch):
    logger.info('Running validation... ')
    pipeline = StableDiffusionPipeline.from_pretrained(
        args.pretrained_model_name_or_path,
        vae=accelerator.unwrap_model(vae),
        text_encoder=accelerator.unwrap_model(text_encoder),
        tok...
class Vgg19Model(model.Model):
    def __init__(self):
        super(Vgg19Model, self).__init__('vgg19', 224, 64, 0.005)

    def add_inference(self, cnn):
        _construct_vgg(cnn, [2, 2, 4, 4, 4])
class _SynchronizedBatchNorm(_BatchNorm):
    def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True):
        super(_SynchronizedBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine)
        self._sync_master = SyncMaster(self._data_parallel_master)
        self._is_parallel...
def generate_matrix(output_dir: Path, matrix_name: str, matrix: scipy.sparse.csr_matrix,
                    symforce_result_is_sparse: bool, i: int) -> None:
    symbols = sf.symbols(f'x:{N_SYMBOLS}')
    gen = unary_binary_expression_gen.UnaryBinaryExpressionGen(
        unary_ops=[op_probabilities.OpProbability('neg', lambda x: -x, 3)],
        bi...
def run_ransac(filename):
    folder_script = os.path.dirname(__file__)
    file_noisy_line = os.path.join(folder_script, './input/', filename)
    np_image = skimage.io.imread(file_noisy_line, as_gray=True)
    lst_all_points = Util.create_points_from_numpyimage(np_image)
    ransac_maxiterations = 12000
    ransac_mi...
@compare_numpy_output()  # decorator name truncated in extraction; restored per DaCe test conventions
def test_degenerate_reduction_explicit(A: dace.float64[20]):
    return np.sum(A, axis=())
def flatten_dict(d: dict, sep: str = '/', pre='') -> dict:
    return ({(pre + sep + k if pre else k): v
             for kk, vv in d.items()
             for k, v in flatten_dict(vv, sep, kk).items()}
            if isinstance(d, dict) else {pre: d})
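A worked example of the recursive flattening above: keys of nested dicts are joined with sep.

nested = {'a': {'b': 1, 'c': {'d': 2}}, 'e': 3}
print(flatten_dict(nested))
# {'a/b': 1, 'a/c/d': 2, 'e': 3}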
def Generate_Text_to_Text_Question(sample, q_number=3):
    passage1, passage2 = sample[0], sample[1]
    bridge_entities = get_bridge_entites(passage1, passage2)
    if len(bridge_entities) == 0:
        return None
    valid_triples = ask_questions_in_text(passage1, bridge_entities, 1)
    valid_triples += ask_...
def conv(x, channels, kernel=4, stride=2, pad=0, pad_type='zero', use_bias=True, scope='conv'):
    with tf.variable_scope(scope):
        if pad_type == 'zero':
            x = tf.pad(x, [[0, 0], [pad, pad], [pad, pad], [0, 0]])
        if pad_type == 'reflect':
            x = tf.pad(x, [[0, 0], [pad, pad], [pad,...
def add_arguments(parser):
    parser.add_argument('file', help='path to input star file')
    parser.add_argument('-o', '--output', help='path to write particle stack file')
    parser.add_argument('-t', '--threshold', type=float, default=-np.inf,
                        help='only take particles with scores >= this value (default: -inf)'...
class SqueezeNet(nn.Module):
    def __init__(self, version=1.0, num_classes=1000):
        super(SqueezeNet, self).__init__()
        if version not in [1.0, 1.1]:
            raise ValueError('Unsupported SqueezeNet version {version}: 1.0 or 1.1 expected'.format(version=version))
        self.num_classes = num_class...
def _impl(array, axis, highlevel, behavior, attrs):
    axis = regularize_axis(axis)
    with HighLevelContext(behavior=behavior, attrs=attrs) as ctx:
        layout = ctx.unwrap(array, allow_record=False, primitive_policy='error')
        if axis is None:
            if layout.is_option:
                layout = layout.drop_non...
def msssim(img1, img2, window_size=11, size_average=True, val_range=None, normalize=True):
    device = img1.device
    weights = torch.FloatTensor([0.0448, 0.2856, 0.3001, 0.2363, 0.1333]).to(device)
    levels = weights.size()[0]
    mssim = []
    mcs = []
    for _ in range(levels):
        sim, cs = ssim(img1, i...
class FlavaImageProcessor(metaclass=DummyObject):
    _backends = ['vision']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['vision'])
def init_dist_env_variables(args):
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
def register_Ns3QueueDiscItem_methods(root_module, cls):
    cls.add_constructor([param('ns3::Ptr< ns3::Packet >', 'p'),
                         param('ns3::Address const &', 'addr'),
                         param('uint16_t', 'protocol')])
    cls.add_method('GetAddress', 'ns3::Address', [], is_const=True)
    cls.add_method('GetProtocol', 'uint16_t', [], is_const=T...
def main():
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    # help text updated to match the actual default (it previously said 64)
    parser.add_argument('--batch-size', type=int, default=10000, metavar='N',
                        help='input batch size for training (default: 10000)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size...
def prepare_common_voice(data_folder, save_folder, train_tsv_file=None, dev_tsv_file=None,
                         test_tsv_file=None, accented_letters=False, language='en', skip_prep=False):
    if skip_prep:
        return
    if train_tsv_file is None:
        train_tsv_file = data_folder + '/train.tsv'
    else:
        train_tsv_file...
def test_errstate_c_basic():
    olderr = sc.geterr()
    with sc.errstate(domain='raise'):
        with assert_raises(sc.SpecialFunctionError):
            sc.spence(-1)
    assert_equal(olderr, sc.geterr())
class VecEnvExecutor():
    def __init__(self, envs, max_path_length):
        self.envs = envs
        self._action_space = envs[0].action_space
        self._observation_space = envs[0].observation_space
        self.ts = np.zeros(len(self.envs), dtype='int')
        self.max_path_length = max_path_length

    def ste...
def config_reader(config_path):
    json_data = open(config_path).read()
    config = json.loads(json_data)
    return config
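As written, config_reader leaves the file handle open until garbage collection. A context-manager variant closes it deterministically (a sketch under that observation; config_reader_safe is a hypothetical name, not from the original):

import json

def config_reader_safe(config_path):
    # Same result as config_reader, but the handle is closed on exit.
    with open(config_path) as f:
        return json.load(f)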
def measures_to_tokens(measures, soup, staff=None, note_name=True):
    divisions = 0
    tokens = []
    for measure in measures:
        tokens.append('bar')
        if staff is not None:
            notes = [n for n in measure.find_all('note')
                     if n.staff and int(n.staff.text) == staff]
        else:
            ...
class InvertedResidual(nn.Module):
    def __init__(self, inp, oup, stride, iw=0):
        super(InvertedResidual, self).__init__()
        if not (1 <= stride <= 3):
            raise ValueError('illegal stride value')
        self.stride = stride
        branch_features = oup // 2
        assert (self.stride != ...
def compute_valid_map(mdp, state, player_idx, terrain_type, obj_lst):
    player = state.players[player_idx]
    valid_map = np.zeros((len(mdp.terrain_mtx), len(mdp.terrain_mtx[0])), dtype=np.int32)
    for terrain in terrain_type:
        positions = list(mdp.terrain_pos_dict[terrain])
        for pos in positions:
            ...
class TestEnvironmentReset(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.dm = PyCUDADataManager(num_agents=5, num_envs=2, episode_length=2)
        self.fm = PyCUDAFunctionManager(num_agents=int(self.dm.meta_info('n_agents')),
                                       num_envs=int(self.dm.me...
def test(args):
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print('load test data')
    vocab_json = os.path.join(args.input_dir, 'vocab.json')
    test_pt = os.path.join(args.input_dir, 'test.pt')
    data = DataLoader(vocab_json, test_pt, 128, training=False)
    vocab = data.vocab
    kb = Data...
def ref_slice(x, start, stop, step):
    s = [slice(start[axis], stop[axis], step[axis]) for axis in range(len(start))]
    return x[tuple(s)]
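A worked example: ref_slice builds one slice object per axis, so the call below is equivalent to x[0:4:2, 1:3:1].

import numpy as np

x = np.arange(20).reshape(4, 5)
out = ref_slice(x, start=(0, 1), stop=(4, 3), step=(2, 1))
assert (out == x[0:4:2, 1:3:1]).all()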
def clean_defs(definitions, output_file, vocab):
    regouped_dictionary = defaultdict(list)
    with open(definitions) as f:
        for line in f:
            line = line.strip()
            ar = line.split()[1:]
            word, defs = ar[0], ar[1:]
            regouped_dictionary[word].append(defs)
    of = op...
def get_model(test):
    test = test.split('::')[0]
    if test.startswith('tests/models/'):
        test = test.split('/')[2]
    else:
        test = None
    return test
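The mapping get_model implements, illustrated with made-up test paths:

assert get_model('tests/models/bert/test_modeling_bert.py::BertModelTest') == 'bert'
assert get_model('tests/utils/test_file_utils.py') is None  # not under tests/models/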
def create_new_state_dict(current_state_dict):
    new_state_dict = OrderedDict()
    for k, v in current_state_dict.items():
        name = k[7:]  # strip the 7-character 'module.' prefix
        new_state_dict[name] = v
    return new_state_dict
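The k[7:] slice removes the 'module.' prefix that nn.DataParallel prepends to state-dict keys. A small illustration with dummy values:

from collections import OrderedDict

sd = OrderedDict([('module.conv.weight', 0), ('module.conv.bias', 1)])
print(create_new_state_dict(sd))
# OrderedDict([('conv.weight', 0), ('conv.bias', 1)])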
class KerasImplementation(FrameworkImplementation):
    def __init__(self):
        super().__init__()

    def constants(self):
        return keras_constants

    def model_reader(self, model: Model, representative_data_gen: Callable) -> Graph:
        return model_reader(model)

    def to_numpy(self, tensor: tf.Tensor...
def is_memoryviewslice_access(s):
    saved = [(s.sy, s.systring)]
    s.next()
    retval = False
    if s.systring == ':':
        retval = True
    elif s.sy == 'INT':
        saved.append((s.sy, s.systring))
        s.next()
        if s.sy == ':':
            retval = True
    for sv in saved[::-1]:
        ...
def clean_figi(df: Union[pd.DataFrame, dd.DataFrame], column: str, output_format: str = 'standard',
               split: bool = False, inplace: bool = False, errors: str = 'coerce',
               progress: bool = True) -> pd.DataFrame:
    if output_format not in {'compact', 'standard'}:
        raise ValueError(f'output_format {output_format} is inval...
@pytest.fixture(scope='session')  # decorator name truncated; pytest.fixture is the likely original
def is_older_subtests():
    version_string = metadata.version('pytest_subtests')
    return version.parse(version_string) < version.parse('0.6.0')
def _convert_from_string(data):
    for char in '[]':
        data = data.replace(char, '')
    rows = data.split(';')
    newdata = []
    count = 0
    for row in rows:
        trow = row.split(',')
        newrow = []
        for col in trow:
            temp = col.split()
            newrow.extend(map(ast.literal_e...
class EarlyStopCallback(Callback):
    def __init__(self, patience):
        super(EarlyStopCallback, self).__init__()
        self.patience = patience
        self.wait = 0

    def on_valid_end(self, eval_result, metric_key, optimizer, is_better_eval):
        if not is_better_eval:
            if (self.wait == self...
class Application(object):
    def __init__(self, timeout, quiet):
        import socket
        socket.setdefaulttimeout(timeout)
        self.quiet = quiet

    def print_fastest_mirror(self):
        print(MirrorList().fastest)

    def download_url(self, url, destination):
        Download(url, destination, progress=...
def train():
    opt = Options().parse()
    dataloader = load_data(opt)
    model = Ganomaly(opt, dataloader)
    model.train()
class SubmitInput(Input):
    input_type = 'submit'

    def __call__(self, field, **kwargs):
        kwargs.setdefault('value', field.label.text)
        return super(SubmitInput, self).__call__(field, **kwargs)
def vgg16_model(img_rows, img_cols, channel=1, num_classes=None):
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=(channel, img_rows, img_cols)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation='relu...
class GroupEvaluator(BaseEvaluator):
    # The (parser, is_train) signature without self suggests a @staticmethod
    # whose decorator was stripped in extraction.
    @staticmethod
    def modify_commandline_options(parser, is_train):
        parser.add_argument('--evaluation_metrics', default='none')
        opt, _ = parser.parse_known_args()
        evaluator_classes, _ = find_evaluator_classes(opt)
        for eval_class in evaluator_classes:
            ...
class BadTimeSignature(BadSignature):
    def __init__(self, message, payload=None, date_signed=None):
        BadSignature.__init__(self, message, payload)
        self.date_signed = date_signed
class GroupedEnvironmentWrapper(MultiAgentEnv):
    def __init__(self, env, cfg: Config):
        MultiAgentEnv.__init__(self)
        self.env = env
        self.groups = self.env.groups
        self.agent_id_to_group = self.env.agent_id_to_group
        self._unwrapped = self.env.env.unwrapped
        self.observatio...
def tgini(x):
    mad = torch.mean(torch.abs(x.reshape(-1, 1) - x.reshape(1, -1)))
    rmad = mad / torch.mean(x)
    g = 0.5 * rmad
    return g
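tgini computes the Gini coefficient: the mean absolute difference over all element pairs, halved and normalised by the mean. Two quick checks with values chosen for illustration:

import torch

print(tgini(torch.tensor([1.0, 1.0, 1.0, 1.0])))  # tensor(0.)     -- perfect equality
print(tgini(torch.tensor([0.0, 0.0, 0.0, 4.0])))  # tensor(0.7500) -- highly unequal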
class BasicBlock(nn.Module):
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.conv2...
class OSNet(nn.Module):
    def __init__(self, num_classes, blocks, layers, channels, feature_dim=512,
                 loss='softmax', IN=False, **kwargs):
        super(OSNet, self).__init__()
        num_blocks = len(blocks)
        assert num_blocks == len(layers)
        assert num_blocks == len(channels) - 1
        self.lo...
class GaussianNoiseLayer(nn.Module):
    def __init__(self):
        super(GaussianNoiseLayer, self).__init__()

    def forward(self, x):
        if not self.training:
            return x
        noise = Variable(torch.randn(x.size())).to(device)
        return x + noise
class InstallWithCmake(_install):
    _install_opts = _install.user_options
    user_options = list(global_user_options)
    user_options.extend(_install_opts)

    def initialize_options(self):
        _install.initialize_options(self)
        self.define = None
        self.symengine_dir = None
        self.generator ...
def cal_code_features(network, ex):
    batch_size = ex['batch_size']
    code_graphs = ex['code_graphs']
    if network.message_function == 'edge_mm':
        code_edge_vec = code_graphs['edge_features']
    else:
        code_edge_vec = network.edge_embed(code_graphs['edge_features'])
    code_node_mask = create_ma...
def weights_to_cpu(state_dict):
    state_dict_cpu = OrderedDict()
    for key, val in state_dict.items():
        state_dict_cpu[key] = val.cpu()
    state_dict_cpu._metadata = getattr(state_dict, '_metadata', OrderedDict())
    return state_dict_cpu
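Typical use is right before torch.save, so checkpoints can be loaded on CPU-only machines. A minimal sketch:

import torch
from collections import OrderedDict

model = torch.nn.Linear(2, 2)
cpu_sd = weights_to_cpu(model.state_dict())
assert all(v.device.type == 'cpu' for v in cpu_sd.values())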
class ThreadResultsConsumer(object):
    def __init__(self, display):
        self.display = display
        self.lock = threading.Lock()

    def update(self, test_index, test):
        self.lock.acquire()
        try:
            self.display.update(test)
        finally:
            self.lock.release()

    def task_f...
def checkKillSwitch():
    domain = 'www.iuqerfsodp9ifjaposdfjhgosurijfaewrwergwea.com'
    process = subprocess.run(f'ping -q -c1 -W1 {domain}', shell=True, capture_output=True)
    result = process.returncode
    if result != 2:
        print('KillSwitch is enabled', flush=True)
        return True
    else:
        ...
class basic_model(torch.nn.Module):
    def __init__(self):
        super(basic_model, self).__init__()
        self.conv1 = Conv2d(3, 3, kernel_size=1, stride=1)
        self.bn = BatchNorm2d(3)
        self.relu = ReLU()

    def forward(self, inp):
        x = self.conv1(inp)
        x = self.bn(x)
        x = self.r...
class BackendSimple(BackendBase):
    def _repr_(self):
        return 'simple'

    def supported_output(self):
        from sage.repl.rich_output.output_basic import OutputPlainText
        return set([OutputPlainText])

    def display_immediately(self, plain_text, rich_output):
        print(rich_output.text.get_str(...
def Angular_Hybrid_4(X, wires):
    qml.RY(X[0], wires=wires[0])
    qml.PauliX(wires=wires[0])
    qml.CRY(X[1], wires=[wires[0], wires[1]])
    qml.PauliX(wires=wires[0])
    qml.CRY(X[2], wires=[wires[0], wires[1]])
    qml.RY(X[3], wires=wires[2])
    qml.CNOT(wires=[wires[1], wires[2]])
    qml.RY(X[4], wires=wire...
class StereoDeblurLoader():
    def __init__(self):
        self.img_left_blur_path_template = cfg.DIR.IMAGE_LEFT_BLUR_PATH
        self.img_left_clear_path_template = cfg.DIR.IMAGE_LEFT_CLEAR_PATH
        self.img_right_blur_path_template = cfg.DIR.IMAGE_RIGHT_BLUR_PATH
        self.img_right_clear_path_template = cfg...
def input_data_generator(input_dir, target_dir, batch_size=1):
    img_list = os.listdir(input_dir)
    num_batches = len(img_list) // batch_size
    for i in range(num_batches + 1):
        input_list = []
        target_list = []
        img_num_per_batch = batch_size if i < num_batches else (len(img_list) % b...
def set_alpha_scale(model, alpha_scale):
    from ldm.modules.multimodal_attention import GatedCrossAttentionDense, GatedSelfAttentionDense
    from ldm.modules.diffusionmodules.multimodal_openaimodel import UNetModel
    alpha_scale_sp, alpha_scale_nsp, alpha_scale_image = alpha_scale
    for name, module in model...
def test_array_slice():
    array = ak.Array([[0, 1, 2], [4]], attrs=ATTRS)
    assert array.attrs is ATTRS
    assert array[0].attrs is ATTRS
    assert array[1:].attrs is ATTRS
# Decorator names were truncated in extraction; restored per Garage's
# benchmark-script conventions.
@click.command()
@click.option('--seed', default=1)
@click.option('--epochs', default=500)
@click.option('--batch_size', default=1024)
@wrap_experiment(snapshot_mode='all')
def mttrpo_metaworld_ml1_push(ctxt, seed, epochs, batch_size):
    set_seed(seed)
    env = GarageEnv(normalize(mwb.ML1.get_train_tasks('push-v1')))
    policy = GaussianMLPPolicy(env_spec=env.spec, hidden...
def save_model(model, model_path, parallel=False):
    if parallel:
        torch.save(model.module.state_dict(), model_path)
    else:
        torch.save(model.state_dict(), model_path)
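Saving model.module.state_dict() for DataParallel-wrapped models keeps checkpoint keys free of the 'module.' prefix. A usage sketch (file names are arbitrary):

import torch

net = torch.nn.Linear(4, 2)
save_model(net, 'plain.pt')                       # parallel=False
wrapped = torch.nn.DataParallel(net)              # works on CPU too; wraps net as .module
save_model(wrapped, 'wrapped.pt', parallel=True)  # saves net's own state_dict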
def test_regular_numpy():
    a1 = ak.from_json('[[0.0, 1.1], [2.2, 3.3]]')
    a2 = ak.Array(np.array([[4.4, 5.5], [6.6, 7.7], [8.8, 9.9]]))
    a1 = ak.to_regular(a1, axis=1)
    assert isinstance(a2.layout, ak.contents.NumpyArray)
    c = ak.concatenate([a1, a2])
    assert c.to_list() == [[0.0, 1.1], [2.2, 3.3], [...
def plot_likelihood_BN_limit(likelihood):
    df = check_likelihood_BN_limit(likelihood)
    fig, axs = plt.subplots(1, 3, figsize=(12, 4), sharex=True)
    axs[0].plot(df['mz_hat'], df['A_BN'], '-', label='$A \\quad BN$')
    axs[0].plot(df['mz_hat'], df['A_FG'], '--', label='$A \\quad FG$')
    axs[0].set(xlabel='$...
def get_label_buckets(*y: np.ndarray) -> Dict[Tuple[int, ...], np.ndarray]:
    buckets: DefaultDict[Tuple[int, int], List[int]] = defaultdict(list)
    y_flat = list(map(lambda x: to_int_label_array(x, flatten_vector=True), y))
    if len(set(map(len, y_flat))) != 1:
        raise ValueError('Arrays must a...
class TaskletFusion(pm.SingleStateTransformation):
    t1 = pm.PatternNode(nodes.Tasklet)
    data = pm.PatternNode(nodes.AccessNode)
    t2 = pm.PatternNode(nodes.Tasklet)

    # The cls parameter indicates a @classmethod whose decorator was stripped
    # in extraction.
    @classmethod
    def expressions(cls):
        return [sdutil.node_path_graph(cls.t1, cls.data, cls.t2),
                sdutil.node_path_graph(cls.t1, cls.t2)]

    def can_be_...
class PreTrainedTokenizerFast(PreTrainedTokenizer):
    def __init__(self, tokenizer: BaseTokenizer, **kwargs):
        if tokenizer is None:
            raise ValueError('Provided tokenizer cannot be None')
        self._tokenizer = tokenizer
        super().__init__(**kwargs)
        self.max_len_single_sentence = ...
def test_tweet_segmentation(tweet_segmenter):
    original_tweet = 'esto es #UnaGenialidad'
    expected_tweet = 'esto es Una Genialidad'
    hashtag_container, word_segmenter_output = tweet_segmenter.build_hashtag_container([original_tweet])
    tweet = list(tweet_segmenter.segmented_tweet_generator([original_tweet]...
def getBaselineScore(baseline='WEIGHTED'):
    true_result = getTrueResult()
    count = Counter(true_result)
    frac = float(count[1]) / (count[0] + count[1])
    if baseline == 'ALL':
        precision = frac
        recall = 1
        return (2 * precision * recall) / (precision + recall)
    elif (baseline...
class ImagNode(AtomicExprNode):
    type = PyrexTypes.c_double_complex_type

    def calculate_constant_result(self):
        self.constant_result = complex(0.0, float(self.value))

    def compile_time_value(self, denv):
        return complex(0.0, float(self.value))

    def analyse_types(self, env):
        self.type.c...
def find(name, path):
    for root, dirs, files in os.walk(path):
        if name in dirs:
            return os.path.join(root, name)
    return None
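find returns the first directory with the given name in os.walk order, or None. A usage sketch (the directory name here is illustrative):

import os

match = find('checkpoints', os.getcwd())
print(match if match else 'not found')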
def extra_args(parser):
    parser.add_argument('--split', type=str, default='test',
                        help='Split of data to use train | val | test')
    parser.add_argument('--source', '-P', type=str, default='64',
                        help='Source view(s) for each object. Alternatively, specify -L to viewlist file and leave this blank.')
    parser.add_a...
def test_metric():
    hyps = ['a ac abb d']
    refs = ['a ab abc d']
    assert isclose(cer(hyps, refs), 0.2)
    assert isclose(wer(hyps, refs), 0.5)
    assert isclose(per(hyps, refs), 0.5)
class GraphBuilderBase(object):
    def __init__(self):
        super(GraphBuilderBase, self).__init__()

    def transform(self, y):
        raise NotImplementedError('GraphBuilderBase::transform()')
# The bare field annotations (is_training has no value) suggest a @dataclass
# whose decorator was stripped in extraction.
@dataclass
class QueryInput():
    is_training: bool
    positions: torch.Tensor
    directions: torch.Tensor = None
    frame_numbers: torch.Tensor = None
    unique_frame_numbers: torch.Tensor = None
    camera_numbers: torch.Tensor = None
def fade_color(color: Tuple[int, int, int], step: int, total_number_of_steps: int) -> Tuple[int, int, int]:
    LOWEST_VALUE = 0.4
    if step == total_number_of_steps:
        return color
    hsv_color = colorsys.rgb_to_hsv(*color)
    increment = ((float(hsv_color[2]) / 255.0) - LOWEST_VALUE) / total_number_o...