Dataset preview. Column schema:

    text: string, lengths 5 to 22M
    id: string, lengths 12 to 177
    metadata: dict
    __index_level_0__: int64, values 0 to 1.37k

Each record below lists, in order: a truncated text preview, its id (source file path plus a chunk index), the metadata dict, and the row index.
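To make the schema concrete, here is a minimal sketch of loading and inspecting records of this shape with the Hugging Face datasets library. The file name code_chunks.jsonl is hypothetical, standing in for wherever this dump actually lives.

```python
# Sketch only: "code_chunks.jsonl" is a hypothetical stand-in for this dump.
from datasets import load_dataset

ds = load_dataset("json", data_files="code_chunks.jsonl", split="train")

row = ds[0]
print(row["id"])                       # e.g. "Bringing-Old-Photos-Back-to-Life/Global/train_mapping.py/0"
print(row["metadata"]["repo_id"])      # repository the chunk came from
print(row["metadata"]["token_count"])  # chunk size in tokens
print(len(row["text"]))                # character length (5 to 22M per the schema)

# The id is the source file path plus a chunk index:
file_path, chunk_idx = row["id"].rsplit("/", 1)
```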
# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. import time from collections import OrderedDict from options.train_options import TrainOptions from data.data_loader import CreateDataLoader from models.mapping_model import Pix2PixHDModel_Mapping import util.util as util from util.visualizer imp...
Bringing-Old-Photos-Back-to-Life/Global/train_mapping.py/0
{ "file_path": "Bringing-Old-Photos-Back-to-Life/Global/train_mapping.py", "repo_id": "Bringing-Old-Photos-Back-to-Life", "token_count": 2535 }
161
# TEXT ENCODER CONFIG text_model: 'gpt2' transformer_embed_dim: 768 freeze_text_encoder_weights: True # AUDIO ENCODER CONFIG audioenc_name: 'HTSAT' out_emb: 768 sampling_rate: 44100 duration: 7 fmin: 50 fmax: 8000 n_fft: 1024 hop_size: 320 mel_bins: 64 window_size: 1024 # PROJECTION SPACE CONFIG d_proj: 1024 temperat...
CLAP/msclap/configs/config_clapcap.yml/0
{ "file_path": "CLAP/msclap/configs/config_clapcap.yml", "repo_id": "CLAP", "token_count": 240 }
162
.. role:: hidden :class: hidden-section .. _Criterions: Criterions ========== Criterions compute the loss function given the model and batch, roughly:: loss = criterion(model, batch) .. automodule:: fairseq.criterions :members: .. autoclass:: fairseq.criterions.FairseqCriterion :members: :undoc-...
COCO-LM/fairseq/docs/criterions.rst/0
{ "file_path": "COCO-LM/fairseq/docs/criterions.rst", "repo_id": "COCO-LM", "token_count": 284 }
163
Tutorial: Classifying Names with a Character-Level RNN ====================================================== In this tutorial we will extend fairseq to support *classification* tasks. In particular we will re-implement the PyTorch tutorial for `Classifying Names with a Character-Level RNN <https://pytorch.org/tutoria...
COCO-LM/fairseq/docs/tutorial_classifying_names.rst/0
{ "file_path": "COCO-LM/fairseq/docs/tutorial_classifying_names.rst", "repo_id": "COCO-LM", "token_count": 6519 }
164
#!/bin/bash if [ $# -ne 5 ]; then echo "usage: $0 [dataset=wmt14/full] [langpair=en-de] [databin] [bpecode] [model]" exit fi DATASET=$1 LANGPAIR=$2 DATABIN=$3 BPECODE=$4 MODEL=$5 SRCLANG=$(echo $LANGPAIR | cut -d '-' -f 1) TGTLANG=$(echo $LANGPAIR | cut -d '-' -f 2) BPEROOT=examples/backtranslation/subwor...
COCO-LM/fairseq/examples/backtranslation/sacrebleu.sh/0
{ "file_path": "COCO-LM/fairseq/examples/backtranslation/sacrebleu.sh", "repo_id": "COCO-LM", "token_count": 450 }
165
#!/bin/bash # Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. SPM_ENCODE=flores/scripts/spm_encode.py DATA=data_tmp SPM_MODEL=criss_checkpoints/sentence.bpe.model DICT=criss...
COCO-LM/fairseq/examples/criss/download_and_preprocess_flores_test.sh/0
{ "file_path": "COCO-LM/fairseq/examples/criss/download_and_preprocess_flores_test.sh", "repo_id": "COCO-LM", "token_count": 719 }
166
# Jointly Learning to Align and Translate with Transformer Models (Garg et al., 2019) This page includes instructions for training models described in [Jointly Learning to Align and Translate with Transformer Models (Garg et al., 2019)](https://arxiv.org/abs/1909.02074). ## Training a joint alignment-translation mode...
COCO-LM/fairseq/examples/joint_alignment_translation/README.md/0
{ "file_path": "COCO-LM/fairseq/examples/joint_alignment_translation/README.md", "repo_id": "COCO-LM", "token_count": 1128 }
167
#!/usr/bin/env bash # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. CWD=`pwd` INSTALL_PATH=$CWD/tokenizers/thirdparty MOSES=$INSTALL_PATH/mosesdecoder if [ ! -d $MOSES ]; then echo 'Cl...
COCO-LM/fairseq/examples/m2m_100/install_dependecies.sh/0
{ "file_path": "COCO-LM/fairseq/examples/m2m_100/install_dependecies.sh", "repo_id": "COCO-LM", "token_count": 1106 }
168
from typing import NamedTuple, List from urllib.parse import urlparse import os, sys import subprocess from subprocess import check_call, check_output import glob import wget import re import multiprocessing as mp from functools import partial import pathlib from collections import OrderedDict WORKDIR_ROOT = os.envir...
COCO-LM/fairseq/examples/multilingual/data_scripts/download_wmt19_and_before.py/0
{ "file_path": "COCO-LM/fairseq/examples/multilingual/data_scripts/download_wmt19_and_before.py", "repo_id": "COCO-LM", "token_count": 18741 }
169
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os from contextlib import redirect_stdout from fairseq import options from fairseq_cli import generate from examples.noisychannel imp...
COCO-LM/fairseq/examples/noisychannel/rerank_score_bw.py/0
{ "file_path": "COCO-LM/fairseq/examples/noisychannel/rerank_score_bw.py", "repo_id": "COCO-LM", "token_count": 2012 }
170
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # This file defines example configuration arguments for quantizing # a transformer model with product quantization # Number of Centroids for ...
COCO-LM/fairseq/examples/quant_noise/transformer_quantization_config.yaml/0
{ "file_path": "COCO-LM/fairseq/examples/quant_noise/transformer_quantization_config.yaml", "repo_id": "COCO-LM", "token_count": 401 }
171
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn.functional as F from fairseq import utils from fairseq.criterions import LegacyFairseqCriterion, reg...
COCO-LM/fairseq/examples/roberta/wsc/wsc_criterion.py/0
{ "file_path": "COCO-LM/fairseq/examples/roberta/wsc/wsc_criterion.py", "repo_id": "COCO-LM", "token_count": 2907 }
172
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. class SubwordSplitter(object): def process_line(self, string): raise NotImplementedError def split(self, string): ra...
COCO-LM/fairseq/examples/simultaneous_translation/eval/agents/word_splitter.py/0
{ "file_path": "COCO-LM/fairseq/examples/simultaneous_translation/eval/agents/word_splitter.py", "repo_id": "COCO-LM", "token_count": 1005 }
173
# @package hydra.sweeper _target_: hydra_plugins.hydra_ax_sweeper.ax_sweeper.AxSweeper max_batch_size: null ax_config: max_trials: 100 early_stop: minimize: true max_epochs_without_improvement: 10 epsilon: 1.0e-05 experiment: name: ${dataset.gen_subset} objective_name: wer minimize: true ...
COCO-LM/fairseq/examples/speech_recognition/hydra/conf/hydra/sweeper/ax.yaml/0
{ "file_path": "COCO-LM/fairseq/examples/speech_recognition/hydra/conf/hydra/sweeper/ax.yaml", "repo_id": "COCO-LM", "token_count": 271 }
174
[[Back]](..) # S2T Example: Speech Translation (ST) on Multilingual TEDx [Multilingual TEDx](https://arxiv.org/abs/2102.01757) is a multilingual corpus for speech recognition and speech translation. The data is derived from TEDx talks in 8 source languages with translations to a subset of 5 target languages. ## Data P...
COCO-LM/fairseq/examples/speech_to_text/docs/mtedx_example.md/0
{ "file_path": "COCO-LM/fairseq/examples/speech_to_text/docs/mtedx_example.md", "repo_id": "COCO-LM", "token_count": 3958 }
175
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Scoring script for computing pairwise BLEU and multi-ref BLEU over a set of candidate hypotheses. See `"Mixture Mod...
COCO-LM/fairseq/examples/translation_moe/score.py/0
{ "file_path": "COCO-LM/fairseq/examples/translation_moe/score.py", "repo_id": "COCO-LM", "token_count": 3146 }
176
# WMT 19 This page provides pointers to the models of Facebook-FAIR's WMT'19 news translation task submission [(Ng et al., 2019)](https://arxiv.org/abs/1907.06616). ## Pre-trained models Model | Description | Download ---|---|--- `transformer.wmt19.en-de` | En->De Ensemble | [download (.tar.gz)](https://dl.fbaipubli...
COCO-LM/fairseq/examples/wmt19/README.md/0
{ "file_path": "COCO-LM/fairseq/examples/wmt19/README.md", "repo_id": "COCO-LM", "token_count": 1900 }
177
/** * Copyright 2017-present, Facebook, Inc. * All rights reserved. * * This source code is licensed under the license found in the * LICENSE file in the root directory of this source tree. */ /* This code is partially adopted from https://github.com/1ytic/pytorch-edit-distance */ #include "edit_dist.h" #incl...
COCO-LM/fairseq/fairseq/clib/libnat_cuda/binding.cpp/0
{ "file_path": "COCO-LM/fairseq/fairseq/clib/libnat_cuda/binding.cpp", "repo_id": "COCO-LM", "token_count": 680 }
178
# @package _group_ quantize_targets: true extractor_mode: layer_norm layer_norm_first: true final_dim: 768 latent_temp: [2.0,0.1,0.999995] encoder_layerdrop: 0.0 dropout_input: 0.0 dropout_features: 0.0 dropout: 0.0 attention_dropout: 0.0 conv_bias: true encoder_layers: 24 encoder_embed_dim: 1024 encoder_ffn_embed_di...
COCO-LM/fairseq/fairseq/config/model/wav2vec2/wav2vec2_large.yaml/0
{ "file_path": "COCO-LM/fairseq/fairseq/config/model/wav2vec2/wav2vec2_large.yaml", "repo_id": "COCO-LM", "token_count": 163 }
179
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. import math import os import torch import torch.nn.functional as F from fairseq import metrics, utils from fairseq.criterions import FairseqCriterion, register_criterion from fairseq.data.squad import SquadResult, compute_predictions_logits, ...
COCO-LM/fairseq/fairseq/criterions/squad_criterion.py/0
{ "file_path": "COCO-LM/fairseq/fairseq/criterions/squad_criterion.py", "repo_id": "COCO-LM", "token_count": 2600 }
180
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from . import BaseWrapperDataset class ColorizeDataset(BaseWrapperDataset): """ Adds 'colors' property to net input that i...
COCO-LM/fairseq/fairseq/data/colorize_dataset.py/0
{ "file_path": "COCO-LM/fairseq/fairseq/data/colorize_dataset.py", "repo_id": "COCO-LM", "token_count": 333 }
181
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field from typing import Optional from fairseq.data.encoders import register_bpe from fairseq.dataclass im...
COCO-LM/fairseq/fairseq/data/encoders/hf_bert_bpe.py/0
{ "file_path": "COCO-LM/fairseq/fairseq/data/encoders/hf_bert_bpe.py", "repo_id": "COCO-LM", "token_count": 716 }
182
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from typing import Dict, List, Tuple import numpy as np import torch from fairseq.data import Dictionary, FairseqDataset, data_ut...
COCO-LM/fairseq/fairseq/data/legacy/masked_lm_dataset.py/0
{ "file_path": "COCO-LM/fairseq/fairseq/data/legacy/masked_lm_dataset.py", "repo_id": "COCO-LM", "token_count": 5552 }
183
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch from fairseq.data import data_utils class WordNoising(object): """Generate a noisy version of a sentence...
COCO-LM/fairseq/fairseq/data/noising.py/0
{ "file_path": "COCO-LM/fairseq/fairseq/data/noising.py", "repo_id": "COCO-LM", "token_count": 5932 }
184
from .squad_extractor import SquadExample, SquadFeature, read_squad_examples, squad_convert_examples_to_features from .basic_tokenizer import BasicTokenizer from .squad_metrics import SquadResult, compute_predictions_logits, squad_evaluate
COCO-LM/fairseq/fairseq/data/squad/__init__.py/0
{ "file_path": "COCO-LM/fairseq/fairseq/data/squad/__init__.py", "repo_id": "COCO-LM", "token_count": 67 }
185
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from .distributed_timeout_wrapper import DistributedTimeoutWrapper from .fully_sharded_data_parallel import fsdp_enable_wrap, fsdp_wrap, Fully...
COCO-LM/fairseq/fairseq/distributed/__init__.py/0
{ "file_path": "COCO-LM/fairseq/fairseq/distributed/__init__.py", "repo_id": "COCO-LM", "token_count": 238 }
186
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.models import ( Fai...
COCO-LM/fairseq/fairseq/models/lightconv.py/0
{ "file_path": "COCO-LM/fairseq/fairseq/models/lightconv.py", "repo_id": "COCO-LM", "token_count": 18801 }
187
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn.functional as F from fairseq import utils from fairseq.iterative_refinement_generator import DecoderOut from fair...
COCO-LM/fairseq/fairseq/models/nat/nonautoregressive_transformer.py/0
{ "file_path": "COCO-LM/fairseq/fairseq/models/nat/nonautoregressive_transformer.py", "repo_id": "COCO-LM", "token_count": 8210 }
188
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. import logging import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.modules import ( LayerNorm, ) class PoolerLogits(nn.Module): """ Compute SQuAD start logits from sequence hid...
COCO-LM/fairseq/fairseq/models/squad/squad_head.py/0
{ "file_path": "COCO-LM/fairseq/fairseq/models/squad/squad_head.py", "repo_id": "COCO-LM", "token_count": 1742 }
189
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import torch import torch.nn.functional as F logger = logging.getLogger(__name__) def _cross_entropy_pytorch(logits, targe...
COCO-LM/fairseq/fairseq/modules/cross_entropy.py/0
{ "file_path": "COCO-LM/fairseq/fairseq/modules/cross_entropy.py", "repo_id": "COCO-LM", "token_count": 1287 }
190
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch class GradMultiply(torch.autograd.Function): @staticmethod def forward(ctx, x, scale): ctx.scale = scale ...
COCO-LM/fairseq/fairseq/modules/grad_multiply.py/0
{ "file_path": "COCO-LM/fairseq/fairseq/modules/grad_multiply.py", "repo_id": "COCO-LM", "token_count": 160 }
191
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.nn as nn from .learned_positional_embedding import LearnedPositionalEmbedding from .sinusoidal_positional_embedding import Sinus...
COCO-LM/fairseq/fairseq/modules/positional_embedding.py/0
{ "file_path": "COCO-LM/fairseq/fairseq/modules/positional_embedding.py", "repo_id": "COCO-LM", "token_count": 509 }
192
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn import torch.nn.functional as F from ..ops import emulate_int class IntEmbedding(nn.Module): """ ...
COCO-LM/fairseq/fairseq/modules/quantization/scalar/modules/qemb.py/0
{ "file_path": "COCO-LM/fairseq/fairseq/modules/quantization/scalar/modules/qemb.py", "repo_id": "COCO-LM", "token_count": 2331 }
193
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import torch logger = logging.getLogger(__name__) class NanDetector: """ Detects the first NaN or Inf in forward a...
COCO-LM/fairseq/fairseq/nan_detector.py/0
{ "file_path": "COCO-LM/fairseq/fairseq/nan_detector.py", "repo_id": "COCO-LM", "token_count": 1799 }
194
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """isort:skip_file""" import importlib import os from fairseq import registry from fairseq.optim.lr_scheduler.fairseq_lr_scheduler import ( ...
COCO-LM/fairseq/fairseq/optim/lr_scheduler/__init__.py/0
{ "file_path": "COCO-LM/fairseq/fairseq/optim/lr_scheduler/__init__.py", "repo_id": "COCO-LM", "token_count": 392 }
195
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from fairseq.modules.quantization import pq, quantization_options, scalar from omegaconf import DictConfig logger = logging....
COCO-LM/fairseq/fairseq/quantization_utils.py/0
{ "file_path": "COCO-LM/fairseq/fairseq/quantization_utils.py", "repo_id": "COCO-LM", "token_count": 2488 }
196
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import itertools import logging import os import numpy as np from fairseq import tokenizer, utils from fairseq.data import ConcatDataset, Dic...
COCO-LM/fairseq/fairseq/tasks/legacy_masked_lm.py/0
{ "file_path": "COCO-LM/fairseq/fairseq/tasks/legacy_masked_lm.py", "repo_id": "COCO-LM", "token_count": 2454 }
197
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import re SPACE_NORMALIZER = re.compile(r"\s+") def tokenize_line(line): line = SPACE_NORMALIZER.sub(" ", line) line = line.strip(...
COCO-LM/fairseq/fairseq/tokenizer.py/0
{ "file_path": "COCO-LM/fairseq/fairseq/tokenizer.py", "repo_id": "COCO-LM", "token_count": 116 }
198
#ifndef TORCH_CHECK #define TORCH_CHECK AT_CHECK #endif #ifdef VERSION_GE_1_3 #define DATA_PTR data_ptr #else #define DATA_PTR data #endif
COCO-LM/fairseq/fused_ops/csrc/compat.h/0
{ "file_path": "COCO-LM/fairseq/fused_ops/csrc/compat.h", "repo_id": "COCO-LM", "token_count": 59 }
199
pip install --user --editable . pip install --user sentencepiece if [ -d fused_ops ] then pip install --user --editable fused_ops fi
COCO-LM/fairseq/install.sh/0
{ "file_path": "COCO-LM/fairseq/install.sh", "repo_id": "COCO-LM", "token_count": 47 }
200
-- Copyright (c) Facebook, Inc. and its affiliates. -- -- This source code is licensed under the MIT license found in the -- LICENSE file in the root directory of this source tree. -- -- Usage: convert_model.lua <model_epoch1.th7> require 'torch' local fairseq = require 'fairseq' model = torch.load(arg[1]) function f...
COCO-LM/fairseq/scripts/convert_model.lua/0
{ "file_path": "COCO-LM/fairseq/scripts/convert_model.lua", "repo_id": "COCO-LM", "token_count": 1368 }
201
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import torch from torch import nn from fairseq.distributed import ModuleProxyWrapper from .utils import objects_are_equal ...
COCO-LM/fairseq/tests/distributed/test_module_proxy_wrapper.py/0
{ "file_path": "COCO-LM/fairseq/tests/distributed/test_module_proxy_wrapper.py", "repo_id": "COCO-LM", "token_count": 817 }
202
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import torch from fairseq.data import Dictionary from fairseq.modules import CharacterTokenEmbedder class TestCharacterToke...
COCO-LM/fairseq/tests/test_character_token_embedder.py/0
{ "file_path": "COCO-LM/fairseq/tests/test_character_token_embedder.py", "repo_id": "COCO-LM", "token_count": 757 }
203
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import tempfile import unittest import torch from fairseq.data.dictionary import Dictionary from fairseq.models.lstm import L...
COCO-LM/fairseq/tests/test_lstm_jitable.py/0
{ "file_path": "COCO-LM/fairseq/tests/test_lstm_jitable.py", "repo_id": "COCO-LM", "token_count": 1747 }
204
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import json import os import random import sys from io import StringIO import torch import torch.nn.functional as F from fair...
COCO-LM/fairseq/tests/utils.py/0
{ "file_path": "COCO-LM/fairseq/tests/utils.py", "repo_id": "COCO-LM", "token_count": 10743 }
205
#!/bin/bash pip install --user bcolz mxnet tensorboardX matplotlib easydict opencv-python einops --no-cache-dir -U | cat pip install --user scikit-image imgaug PyTurboJPEG --no-cache-dir -U | cat pip install --user scikit-learn --no-cache-dir -U | cat pip install torch==1.7.1+cu110 torchvision==0.8.2+cu110 -f https://...
CSWin-Transformer/segmentation/install_req.sh/0
{ "file_path": "CSWin-Transformer/segmentation/install_req.sh", "repo_id": "CSWin-Transformer", "token_count": 225 }
206
default_language_version: python: python3 ci: autofix_prs: true autoupdate_commit_msg: "[pre-commit.ci] pre-commit suggestions" autoupdate_schedule: quarterly repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.3.0 hooks: # list of supported hooks: https://pre-commit.com/hook...
ClimaX/.pre-commit-config.yaml/0
{ "file_path": "ClimaX/.pre-commit-config.yaml", "repo_id": "ClimaX", "token_count": 616 }
207
datadir: /data/CMIP6/AWI-ESM name: 10m_v_component_of_wind cmip_name: vas era_name: v10 run: r1i1p1f1 res: - 1.40625 # - 5.625
ClimaX/snakemake_configs/AWI-ESM/config_10m_v_component_of_wind.yml/0
{ "file_path": "ClimaX/snakemake_configs/AWI-ESM/config_10m_v_component_of_wind.yml", "repo_id": "ClimaX", "token_count": 71 }
208
datadir: /data/CMIP6/HAMMOZ name: geopotential cmip_name: zg era_name: z run: r1i1p1f1 version: v20190628 res: - 1.40625 # - 5.625
ClimaX/snakemake_configs/HAMMOZ/config_geopotential.yml/0
{ "file_path": "ClimaX/snakemake_configs/HAMMOZ/config_geopotential.yml", "repo_id": "ClimaX", "token_count": 70 }
209
datadir: /data/CMIP6/TaiESM1 server_prefix: https://esgf.ceda.ac.uk/thredds/fileServer/esg_cmip6/CMIP6/CMIP name: geopotential cmip_name: zg era_name: z run: r1i1p1f1 res: - 1.40625 # - 5.625
ClimaX/snakemake_configs/TaiESM1/config_geopotential.yml/0
{ "file_path": "ClimaX/snakemake_configs/TaiESM1/config_geopotential.yml", "repo_id": "ClimaX", "token_count": 102 }
210
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. import os from climax.global_forecast.datamodule import GlobalForecastDataModule from climax.global_forecast.module import GlobalForecastModule from pytorch_lightning.cli import LightningCLI def main(): # Initialize Lightning with the mode...
ClimaX/src/climax/global_forecast/train.py/0
{ "file_path": "ClimaX/src/climax/global_forecast/train.py", "repo_id": "ClimaX", "token_count": 579 }
211
import glob import os import click import numpy as np import xarray as xr from tqdm import tqdm from climax.utils.data_utils import DEFAULT_PRESSURE_LEVELS, NAME_TO_VAR def extract_one_year(path, year, variables, len_to_extract, np_vars, normalize_mean, normalize_std): for var in variables: ps = glob.glo...
ClimaX/src/data_preprocessing/nc2np_equally_cmip6.py/0
{ "file_path": "ClimaX/src/data_preprocessing/nc2np_equally_cmip6.py", "repo_id": "ClimaX", "token_count": 4993 }
212
# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. import torch from models.networks.base_network import BaseNetwork from models.networks.loss import * from models.networks.discriminator import * from models.networks.generator import * from models.networks.ContextualLoss import * from models.netw...
CoCosNet-v2/models/networks/__init__.py/0
{ "file_path": "CoCosNet-v2/models/networks/__init__.py", "repo_id": "CoCosNet-v2", "token_count": 679 }
213
# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. import torch import torch.nn.functional as F import models.networks as networks import util.util as util class Pix2PixModel(torch.nn.Module): @staticmethod def modify_commandline_options(parser, is_train): networks.modify_comman...
CoCosNet/models/pix2pix_model.py/0
{ "file_path": "CoCosNet/models/pix2pix_model.py", "repo_id": "CoCosNet", "token_count": 9946 }
214
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. from .utils import (remove_comments_and_docstrings, tree_to_token_index, index_to_code_token, tree_to_variable_index) from .DFG import DFG_python,DFG_java,DFG_ruby,DFG_go,DFG_php,DFG_javas...
CodeBERT/CodeReviewer/code/evaluator/CodeBLEU/parser/__init__.py/0
{ "file_path": "CodeBERT/CodeReviewer/code/evaluator/CodeBLEU/parser/__init__.py", "repo_id": "CodeBERT", "token_count": 155 }
215
import os import torch import logging import argparse import random import numpy as np from tqdm import tqdm import multiprocessing import time from itertools import cycle from torch.utils.data import DataLoader, SequentialSampler, RandomSampler from torch.utils.data.distributed import DistributedSampler from transform...
CodeBERT/CodeReviewer/code/run_test_cls.py/0
{ "file_path": "CodeBERT/CodeReviewer/code/run_test_cls.py", "repo_id": "CodeBERT", "token_count": 1766 }
216
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. import logging import sys from sklearn.metrics import recall_score,precision_score,f1_score def read_answers(filename): answers={} with open(filename) as f: for line in f: line=line.strip() idx1,idx2,label...
CodeBERT/GraphCodeBERT/clonedetection/evaluator/evaluator.py/0
{ "file_path": "CodeBERT/GraphCodeBERT/clonedetection/evaluator/evaluator.py", "repo_id": "CodeBERT", "token_count": 729 }
217
# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. import torch import torch.nn as nn import torch from torch.autograd import Variable import copy import torch.nn.functional as F from torch.nn import CrossEntropyLoss, MSELoss class Model(nn.Module): def __init__(self, encoder,config,...
CodeBERT/UniXcoder/downstream-tasks/clone-detection/POJ-104/model.py/0
{ "file_path": "CodeBERT/UniXcoder/downstream-tasks/clone-detection/POJ-104/model.py", "repo_id": "CodeBERT", "token_count": 751 }
218
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. from concurrent.futures import as_completed, ProcessPoolExecutor import numpy as np import scipy import tqdm import os import copy import functools from utils import Tools, FilePathBuilder, CONSTANTS class SimilarityScore: @staticmethod ...
CodeT/RepoCoder/search_code.py/0
{ "file_path": "CodeT/RepoCoder/search_code.py", "repo_id": "CodeT", "token_count": 2776 }
219
#!/usr/bin/env bash uninstall() { # Path to Codex CLI source local CODEX_CLI_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && cd .. && pwd )" # Path to OpenAI API settings local OPENAI_RC_FILE="$CODEX_CLI_PATH/src/openaiapirc" # Path to Bash settings loaded when a Bash session starts local BAS...
Codex-CLI/scripts/bash_cleanup.sh/0
{ "file_path": "Codex-CLI/scripts/bash_cleanup.sh", "repo_id": "Codex-CLI", "token_count": 306 }
220
#!/usr/bin/env python # -*- coding: utf-8 -*- """ File: test_large_face_list_face.py Description: Unittests for Large Face List Face section of the Cognitive Face API. """ import unittest import cognitive_face as CF from . import util class TestFaceList(unittest.TestCase): """Unittests for Large Face List ...
Cognitive-Face-Python/cognitive_face/tests/test_large_face_list_face.py/0
{ "file_path": "Cognitive-Face-Python/cognitive_face/tests/test_large_face_list_face.py", "repo_id": "Cognitive-Face-Python", "token_count": 909 }
221
#!/usr/bin/env python # -*- coding: utf-8 -*- """ File: panel_find_similar.py Description: Find Similar Panel for Python SDK sample. """ import os import uuid import wx import wx.lib.scrolledpanel as scrolled import util import model from view import base class FindSimilarPanel(base.MyPanel): """FindSimilar Pa...
Cognitive-Face-Python/sample/view/panel_find_similar.py/0
{ "file_path": "Cognitive-Face-Python/sample/view/panel_find_similar.py", "repo_id": "Cognitive-Face-Python", "token_count": 4383 }
222
export CUDA_VISIBLE_DEVICES=6 python t5_run_train.py \ --model_name_or_path t5-base \ --subtask Mod \ --method ContrastExp \ --train_file pretrain_contrast \ --max_steps 100000 \ --save_steps 100000 \ --batch_size 8 \ --ebatch_size 16 \ --gas 1 \ --seed 1 \ --set set1
ContextualSP/abstraction_probing/code/t5_code/Mod_ContrastExp_pretrain.sh/0
{ "file_path": "ContextualSP/abstraction_probing/code/t5_code/Mod_ContrastExp_pretrain.sh", "repo_id": "ContextualSP", "token_count": 106 }
223
import argparse from data_utils import load_data, load_score_file from data_utils.metrics import calc_metrics from experiments.exp_def import TaskDefs parser = argparse.ArgumentParser() parser.add_argument( "--task_def", type=str, default="experiments/glue/glue_task_def.yml" ) parser.add_argument("--task", type=s...
ContextualSP/adaptershare/calc_metrics.py/0
{ "file_path": "ContextualSP/adaptershare/calc_metrics.py", "repo_id": "ContextualSP", "token_count": 697 }
224
import os import argparse import random from sys import path path.append(os.getcwd()) from experiments.common_utils import dump_rows from data_utils.task_def import DataFormat from data_utils.log_wrapper import create_logger logger = create_logger(__name__, to_disk=True, log_file="domain_prepro.log") def load_scitai...
ContextualSP/adaptershare/experiments/domain_adaptation/domain_prepro.py/0
{ "file_path": "ContextualSP/adaptershare/experiments/domain_adaptation/domain_prepro.py", "repo_id": "ContextualSP", "token_count": 2238 }
225
ner: data_format: Seqence dropout_p: 0.3 enable_san: False labels: - O - B-MISC - I-MISC - B-PER - I-PER - B-ORG - I-ORG - B-LOC - I-LOC - X - CLS - SEP metric_meta: - SeqEval n_class: 12 loss: SeqCeCriterion kd_loss: MseCriterion adv_loss: SymKlCriterion split_names: - t...
ContextualSP/adaptershare/experiments/ner/ner_task_def.yml/0
{ "file_path": "ContextualSP/adaptershare/experiments/ner/ner_task_def.yml", "repo_id": "ContextualSP", "token_count": 807 }
226
# coding=utf-8 # Copyright (c) Microsoft. All rights reserved. from copy import deepcopy import torch from torch.nn import Parameter from functools import wraps class EMA: def __init__(self, gamma, model): super(EMA, self).__init__() self.gamma = gamma self.shadow = {} self.model =...
ContextualSP/adaptershare/module/my_optim.py/0
{ "file_path": "ContextualSP/adaptershare/module/my_optim.py", "repo_id": "ContextualSP", "token_count": 1761 }
227
import argparse from data_utils import load_score_file from experiments.exp_def import TaskDefs parser = argparse.ArgumentParser() parser.add_argument( "--task_def", type=str, default="experiments/glue/glue_task_def.yml" ) parser.add_argument("--task", type=str) parser.add_argument( "--add_soft_label", ac...
ContextualSP/adaptershare/prepare_distillation_data.py/0
{ "file_path": "ContextualSP/adaptershare/prepare_distillation_data.py", "repo_id": "ContextualSP", "token_count": 554 }
228
# %% from collections import defaultdict import os import json from dataclasses import dataclass, field from enum import Enum from typing import List, Dict import pandas as pd # %% class NLBindingType(int, Enum): Null = 0 Table = 1 # table in DB Column = 2 # table column header Value = 3 # 1) Cell ...
ContextualSP/awakening_latent_grounding/scripts/binding_annotate.squall.py/0
{ "file_path": "ContextualSP/awakening_latent_grounding/scripts/binding_annotate.squall.py", "repo_id": "ContextualSP", "token_count": 1538 }
229
{ "random_seed": 42, "numpy_seed": 42, "pytorch_seed": 42, "dataset_reader": { "type": "rewrite", "lazy": false, "super_mode": "before", "joint_encoding": true, "extra_stop_words": [ "of", "about", "the", "any", "for" ] }, "model": { "type": "rewrite", "word_embedder": { "tokens"...
ContextualSP/incomplete_utterance_rewriting/configs/task.jsonnet/0
{ "file_path": "ContextualSP/incomplete_utterance_rewriting/configs/task.jsonnet", "repo_id": "ContextualSP", "token_count": 604 }
230
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. # Author: Qian Liu (SivilTaram) # Original Repo: https://github.com/microsoft/ContextualSP from typing import List from typing import Tuple import nltk from allennlp.training.metrics.metric import Metric from nltk.translate.bleu_score import cor...
ContextualSP/incomplete_utterance_rewriting/src/data_utils.py/0
{ "file_path": "ContextualSP/incomplete_utterance_rewriting/src/data_utils.py", "repo_id": "ContextualSP", "token_count": 7736 }
231
# coding: utf-8 import os import json import logging import pickle as pkl import numpy as np from parsers.parser import Parser, IRNetSpiderParser from src.utils.algo_utils import BipartiteGraphSolver from src.utils.visualize_utils import draw_attention_hotmap from src.components.human_simulator import HumanSimulato...
ContextualSP/interactive_text_to_sql/src/interactive_sql_corrector.py/0
{ "file_path": "ContextualSP/interactive_text_to_sql/src/interactive_sql_corrector.py", "repo_id": "ContextualSP", "token_count": 4853 }
232
import json import sys import copy from itertools import combinations, permutations import math import argparse from random import shuffle from remove_same import big_file_remove_same import os parser = argparse.ArgumentParser() parser.add_argument("--dataset_prefix", type=str, default='alchemy', help="dataset prefix"...
ContextualSP/lemon/corpus_generation/corpus_generation_split_newformat.py/0
{ "file_path": "ContextualSP/lemon/corpus_generation/corpus_generation_split_newformat.py", "repo_id": "ContextualSP", "token_count": 912 }
233
from collections import Mapping from os.path import join import logging from git import Repo, exc as git_exc from gtd.io import IntegerDirectories, Workspace from gtd.log import SyncedMetadata from gtd.utils import Config, cached_property class ExperimentWorkspace(Workspace): def __init__(self, root): s...
ContextualSP/lemon/executor/gtd/ml/experiment.py/0
{ "file_path": "ContextualSP/lemon/executor/gtd/ml/experiment.py", "repo_id": "ContextualSP", "token_count": 2133 }
234
from unittest import TestCase import os import numpy as np import pytest import tensorflow as tf from numpy.testing import assert_array_equal, assert_array_almost_equal from tensorflow.python.framework.errors import InvalidArgumentError from gtd.ml.utils import TensorDebugger, clean_session, expand_dims_for_broadcast...
ContextualSP/lemon/executor/gtd/tests/ml/test_utils.py/0
{ "file_path": "ContextualSP/lemon/executor/gtd/tests/ml/test_utils.py", "repo_id": "ContextualSP", "token_count": 4134 }
235
"""Store system evaluation results (e.g., accuracy).""" from collections import OrderedDict from codecs import open from math import sqrt import json import numpy as np import os from scipy.stats import norm class NumberSequenceStat(object): """Stores statistics of a sequence of numbers. This is a reimplemen...
ContextualSP/lemon/executor/strongsup/evaluation.py/0
{ "file_path": "ContextualSP/lemon/executor/strongsup/evaluation.py", "repo_id": "ContextualSP", "token_count": 5294 }
236
from prettytable import PrettyTable from strongsup.results.recipe import RLongCookbook class TableDrawer(object): """Given a list of Entries, draws tables based on some criteria. Args: entries (list[Entry]): the entries name (string): the name of this table (typically the dataset from which ...
ContextualSP/lemon/executor/strongsup/results/table_drawer.py/0
{ "file_path": "ContextualSP/lemon/executor/strongsup/results/table_drawer.py", "repo_id": "ContextualSP", "token_count": 2195 }
237
import csv import os from codecs import open from strongsup.example import Context, Example from strongsup.example_factory import ExampleFactory from strongsup.predicate import Predicate from strongsup.utils import EOU from strongsup.tables.value import to_value_list from strongsup.tables.utils import tsv_unescape, ts...
ContextualSP/lemon/executor/strongsup/tables/example_factory.py/0
{ "file_path": "ContextualSP/lemon/executor/strongsup/tables/example_factory.py", "repo_id": "ContextualSP", "token_count": 1323 }
238
import pytest from strongsup.tables.predicates_computer import ( similarity_ratio, ) class TestEditDistance(object): CASES = [ ('superman', 'superman', 0), ('kitten', 'sitting', 5), ('industry', 'interest', 8), ('to ardo', 'from ardo', 4), ...
ContextualSP/lemon/executor/strongsup/tests/tables/test_predicates_computer.py/0
{ "file_path": "ContextualSP/lemon/executor/strongsup/tests/tables/test_predicates_computer.py", "repo_id": "ContextualSP", "token_count": 557 }
239
python lemon/run_model_finetune.py \ --dataset-dir lemon_data/dataset/DATASET_PREFIX/bin_large \ --exp-dir OUTPUT_PATH \ --model-path PRE_TRAINED_MODEL_PATH \ --model-arch bart_large \ --total-num-update 10000 \ --batch-size 64 \ --gradient-accumulation 1 \ --warmup-steps 1500 \ --le...
ContextualSP/lemon/finetune.sh/0
{ "file_path": "ContextualSP/lemon/finetune.sh", "repo_id": "ContextualSP", "token_count": 146 }
240
## eQASC Evaluator This script evaluates predictions for eQASC predictions against ground truth annotations and produces metrics. Hint: If you are in a hurry and want to simply evaluate your predictions, run the evaluator in Docker. ## Usage The program [evaluator.py](allennlp_reasoning_explainqa/evaluator/evaluato...
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/eqasc/code/README.md/0
{ "file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/eqasc/code/README.md", "repo_id": "ContextualSP", "token_count": 1610 }
241
{"chain_id":"304SM51WA33FD6TGROJ4OS4ZWAQSB9_1_1","score":0.5} {"chain_id":"304SM51WA33FD6TGROJ4OS4ZWAQSB9_1_10","score":0.5} {"chain_id":"304SM51WA33FD6TGROJ4OS4ZWAQSB9_1_2","score":0.5} {"chain_id":"304SM51WA33FD6TGROJ4OS4ZWAQSB9_1_3","score":0.5} {"chain_id":"304SM51WA33FD6TGROJ4OS4ZWAQSB9_1_4","score":0.5} {"chain_i...
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/eqasc/data/dummy_predictions_dev.jsonl/0
{ "file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/eqasc/data/dummy_predictions_dev.jsonl", "repo_id": "ContextualSP", "token_count": 353770 }
242
## ProPara datasets This directory contains dev, train and test datasets. * [dev](dev/) contains the dev dataset for developing your predictor * [train](train/) contains the training dataset for evaluating your predictor during development * [test](test/) contains the test dataset for evaluation on the [ProP...
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/data/README.md/0
{ "file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/data/README.md", "repo_id": "ContextualSP", "token_count": 147 }
243
# ProPara Evaluator ## Example ``` % export PYTHONPATH=. % python3 evaluator.py --predictions testfiles-1/predictions.tsv --answers testfiles-1/answers.tsv --output /tmp/metrics.json ================================================= Question Avg. Precision Avg. Recall Avg. F1 -----------------------------------...
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/README.md/0
{ "file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/README.md", "repo_id": "ContextualSP", "token_count": 6079 }
244
import unittest from collections import OrderedDict from process.action_file import ActionFile from process.constants import NO_ACTION as NO_ACT from process.constants import NO_LOCATION as NO_LOC, CREATE, DESTROY, MOVE class TestSummarize(unittest.TestCase): def test_load(self): # Spot-check values load...
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/process/test_action_file.py/0
{ "file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/process/test_action_file.py", "repo_id": "ContextualSP", "token_count": 1222 }
245
from typing import List, Set from text.stemmer import PorterStemmer # Extract term sets from a phrase containing " AND " and " OR " tokens. A phrase like "foo OR bar AND fnord OR gnarf" # is turned into a list of term sets like [{"foo", "bar"}, {"fnord", "gnarf"}] to match to another phrase's term sets. def extract_...
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/text/terms.py/0
{ "file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/text/terms.py", "repo_id": "ContextualSP", "token_count": 746 }
246
# TRACIE * [evaluator](evaluator/) is the program used by the AI2 Leaderboard to evaluate submitted predictions. * `data` holds the publicly available train and test sets (with hidden test labels), along with example prediction files (for testing the evaluator). ## Example usage To evaluate your predictions, run the...
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/tracie/README.md/0
{ "file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/tracie/README.md", "repo_id": "ContextualSP", "token_count": 235 }
247
python build_ver_train.py --num_examples 10000 --local_rank 0 --start_index 0 & python build_ver_train.py --num_examples 10000 --local_rank 1 --start_index 10000 & python build_ver_train.py --num_examples 10000 --local_rank 2 --start_index 20000 & python build_ver_train.py --num_examples 10000 --local_rank 3 --start_in...
ContextualSP/logigan/corpus_construction/elastic_search/run_ver.sh/0
{ "file_path": "ContextualSP/logigan/corpus_construction/elastic_search/run_ver.sh", "repo_id": "ContextualSP", "token_count": 1350 }
248
from tqdm import tqdm, trange from transformers import AutoModelForSequenceClassification , AutoTokenizer import torch.nn as nn import argparse import copy from copy import deepcopy from torch.utils.data import Dataset, DataLoader, IterableDataset import os, sys, time import json import string import re from collection...
ContextualSP/logigan/pre-training/nli_es.py/0
{ "file_path": "ContextualSP/logigan/pre-training/nli_es.py", "repo_id": "ContextualSP", "token_count": 1489 }
249
import json from collections import defaultdict import re FILTER_PRED = ["people.person.spouse_s/ns:people.marriage.spouse|ns:fictional_universe.fictional_character.married_to/ns:fictional_universe.marriage_of_fictional_characters.spouses", "people.person.sibling_s/ns:people.sibling_relationship.sibling|ns:fictional_u...
ContextualSP/poset_decoding/evaluate.py/0
{ "file_path": "ContextualSP/poset_decoding/evaluate.py", "repo_id": "ContextualSP", "token_count": 2453 }
250
matchzoo ======== .. toctree:: :maxdepth: 4 matchzoo
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/docs/source/modules.rst/0
{ "file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/docs/source/modules.rst", "repo_id": "ContextualSP", "token_count": 27 }
251
from matchzoo.engine.base_callback import BaseCallback class LambdaCallback(BaseCallback): """ LambdaCallback. Just a shorthand for creating a callback class. See :class:`matchzoo.engine.base_callback.BaseCallback` for more details. Example: >>> import matchzoo as mz >>> from matchz...
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/dataloader/callbacks/lambda_callback.py/0
{ "file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/dataloader/callbacks/lambda_callback.py", "repo_id": "ContextualSP", "token_count": 634 }
252
"""FastText embedding data loader.""" from pathlib import Path import matchzoo as mz _fasttext_embedding_url = "https://dl.fbaipublicfiles.com/fasttext/vectors" \ "-wiki/wiki.{}.vec" def load_fasttext_embedding(language: str = 'en') -> mz.embedding.Embedding: """ Return the pretra...
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/datasets/embeddings/load_fasttext_embedding.py/0
{ "file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/datasets/embeddings/load_fasttext_embedding.py", "repo_id": "ContextualSP", "token_count": 541 }
253
"""Base callback.""" import abc import numpy as np import matchzoo as mz class BaseCallback(abc.ABC): """ DataGenerator callback base class. To build your own callbacks, inherit `mz.data_generator.callbacks.Callback` and overrides corresponding methods. A batch is processed in the following wa...
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/engine/base_callback.py/0
{ "file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/engine/base_callback.py", "repo_id": "ContextualSP", "token_count": 357 }
254
"""Mean average precision metric for ranking.""" import numpy as np from matchzoo.engine.base_metric import ( BaseMetric, sort_and_couple, RankingMetric ) class MeanAveragePrecision(RankingMetric): """Mean average precision metric.""" ALIAS = ['mean_average_precision', 'map'] def __init__(self, thr...
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/metrics/mean_average_precision.py/0
{ "file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/metrics/mean_average_precision.py", "repo_id": "ContextualSP", "token_count": 670 }
255
"""An implementation of DRMMTKS Model.""" import typing import torch import torch.nn as nn import torch.nn.functional as F from matchzoo.engine.param_table import ParamTable from matchzoo.engine.base_callback import BaseCallback from matchzoo.engine.param import Param from matchzoo.engine.base_model import BaseModel ...
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/models/drmmtks.py/0
{ "file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/models/drmmtks.py", "repo_id": "ContextualSP", "token_count": 2065 }
256
"""DenseNet module.""" import typing import torch import torch.nn as nn class DenseBlock(nn.Module): """Dense block of DenseNet.""" def __init__( self, in_channels, growth_rate: int = 20, kernel_size: tuple = (2, 2), layers_per_dense_block: int = 3 ): """I...
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/modules/dense_net.py/0
{ "file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/modules/dense_net.py", "repo_id": "ContextualSP", "token_count": 1889 }
257
from .unit import Unit from .digit_removal import DigitRemoval from .frequency_filter import FrequencyFilter from .lemmatization import Lemmatization from .lowercase import Lowercase from .matching_histogram import MatchingHistogram from .ngram_letter import NgramLetter from .punc_removal import PuncRemoval from .state...
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/preprocessors/units/__init__.py/0
{ "file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/preprocessors/units/__init__.py", "repo_id": "ContextualSP", "token_count": 226 }
258
import numpy as np from .unit import Unit class WordExactMatch(Unit): """ WordExactUnit Class. Process unit to get a binary match list of two word index lists. The word index list is the word representation of a text. Examples: >>> import pandas >>> input_ = pandas.DataFrame({ ...
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/preprocessors/units/word_exact_match.py/0
{ "file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/preprocessors/units/word_exact_match.py", "repo_id": "ContextualSP", "token_count": 903 }
259
"""Matchzoo version file.""" __version__ = '1.1.1'
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/version.py/0
{ "file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/version.py", "repo_id": "ContextualSP", "token_count": 22 }
260
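Every record above carries the same metadata shape (file_path, repo_id, token_count), which makes simple corpus accounting straightforward. A sketch, assuming ds was loaded as in the earlier snippet:

```python
# Aggregate chunk sizes per repository from the metadata column.
from collections import Counter

repo_tokens = Counter()
for row in ds:
    md = row["metadata"]
    repo_tokens[md["repo_id"]] += md["token_count"]

# Largest repositories in this slice, by token count.
for repo, tokens in repo_tokens.most_common(5):
    print(f"{repo}: {tokens} tokens")
```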