| python_code (string, 0–187k chars) | repo_name (string, 8–46 chars) | file_path (string, 6–135 chars) |
|---|---|---|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import logging
import numpy as np
import torch
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from detectron2.data.transforms import TransformGen
__all__ = ["DetrDatasetMapper"]
def ... | detr-master | d2/detr/dataset_mapper.py |
#! /usr/bin/env python
# Script to launch AllenNLP Beaker jobs.
import argparse
import os
import json
import random
import tempfile
import subprocess
import sys
# This has to happen before we import spacy (even indirectly), because for some crazy reason spacy
# thought it was a good idea to set the random seed on im... | allennlp-reading-comprehension-research-master | run_with_beaker.py |
allennlp-reading-comprehension-research-master | tests/__init__.py | |
#pylint: disable=unused-import
import pathlib
from allennlp.common.testing import ModelTestCase
from reading_comprehension.drop_models.augmented_qanet import AugmentedQANet
from reading_comprehension.data.drop_reader import DROPReader
class QANetModelTest(ModelTestCase):
PROJECT_ROOT = (pathlib.Path(__file__).pa... | allennlp-reading-comprehension-research-master | tests/test_aug_qanet.py |
def test_travis_integration():
# Remove this file once we have actual code and tests.
assert True
| allennlp-reading-comprehension-research-master | tests/test_travis.py |
allennlp-reading-comprehension-research-master | reading_comprehension/__init__.py | |
from allennlp.data.tokenizers import Token
def split_tokens_by_hyphen(tokens):
hyphens = ["-", "–", "~"]
new_tokens = []
def split_token_by_hyphen(token, hyphen):
split_tokens = []
char_offset = token.idx
for sub_str in token.text.split(hyphen):
if sub_str:
... | allennlp-reading-comprehension-research-master | reading_comprehension/utils.py |
import string
import re
from typing import Tuple, List, Union
from overrides import overrides
from allennlp.tools.squad_eval import metric_max_over_ground_truths
from allennlp.training.metrics.metric import Metric
from reading_comprehension.data.drop_official_evaluate import get_metrics as drop_em_and_f1
from reading_c... | allennlp-reading-comprehension-research-master | reading_comprehension/drop_metrics.py |
import sys
from allennlp.predictors import Predictor
from allennlp.models.archival import load_archive
from allennlp.common.util import import_submodules
# The path to the augmented qanet project dir
sys.path.append('../../')
import_submodules('reading_comprehension')
# This maps from the name of the task
# to the ... | allennlp-reading-comprehension-research-master | reading_comprehension/demo/models.py |
allennlp-reading-comprehension-research-master | reading_comprehension/demo/__init__.py | |
from typing import Any, Dict, List, Optional
import logging
import torch
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import TextFieldEmbedder
from allennlp.nn import util, InitializerApplicator, RegularizerApplicator
from allennlp.models.reading_comprehension.util ... | allennlp-reading-comprehension-research-master | reading_comprehension/drop_models/bert_rc_marginal.py |
from typing import Any, Dict, List, Optional
import torch
from torch.nn.functional import nll_loss
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import Highway
from allennlp.modules import Seq2SeqEncoder, TextFieldEmbedder
from allennlp.nn import util, InitializerApp... | allennlp-reading-comprehension-research-master | reading_comprehension/drop_models/passage_only.py |
allennlp-reading-comprehension-research-master | reading_comprehension/drop_models/__init__.py | |
from typing import Any, Dict, List, Optional
import torch
from allennlp.common.checks import check_dimensions_match
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.models.reading_comprehension.bidaf import BidirectionalAttentionFlow
from allennlp.modules import Highway
from al... | allennlp-reading-comprehension-research-master | reading_comprehension/drop_models/bidaf_marginal.py |
from typing import Any, Dict, List, Iterable, Optional
import logging
import torch
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.models.reading_comprehension.util import get_best_span
from allennlp.modules import Highway
from allennlp.nn.activations import Activation
from al... | allennlp-reading-comprehension-research-master | reading_comprehension/drop_models/augmented_qanet.py |
allennlp-reading-comprehension-research-master | reading_comprehension/data/__init__.py | |
# pylint: skip-file
import json
import sys
import argparse
import string
import numpy as np
import re
# Copied from: https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
... | allennlp-reading-comprehension-research-master | reading_comprehension/data/drop_official_evaluate.py |
import json
import logging
import itertools
import string
from typing import Dict, List, Union, Tuple, Any
from collections import defaultdict
from overrides import overrides
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.inst... | allennlp-reading-comprehension-research-master | reading_comprehension/data/drop_reader.py |
import os
import random
import signal
import sys
import time
import uuid
from typing import Dict, Iterable, List, Optional
import click
import petname
import rich
import yaml
from beaker import Beaker, CanceledCode, CurrentJobStatus, ExperimentSpec, TaskResources
from rich import pretty, print, traceback
VERSION = "1... | beaker-run-action-main | beaker_run.py |
from datetime import datetime
from pathlib import Path
from beaker_run import VERSION
def main():
changelog = Path("CHANGELOG.md")
with changelog.open() as f:
lines = f.readlines()
insert_index: int = -1
for i in range(len(lines)):
line = lines[i]
if line.startswith("## Unre... | beaker-run-action-main | scripts/prepare_changelog.py |
# encoding: utf-8
"""
Prepares markdown release notes for GitHub releases.
"""
import os
from typing import List, Optional
import packaging.version
TAG = os.environ["TAG"]
ADDED_HEADER = "### Added 🎉"
CHANGED_HEADER = "### Changed ⚠️"
FIXED_HEADER = "### Fixed ✅"
REMOVED_HEADER = "### Removed 👋"
def get_change... | beaker-run-action-main | scripts/release_notes.py |
# pylint: disable=wildcard-import
from my_library.dataset_readers import *
from my_library.models import *
from my_library.predictors import *
| allennlp-as-a-library-example-master | my_library/__init__.py |
from my_library.dataset_readers.semantic_scholar_papers import SemanticScholarDatasetReader
| allennlp-as-a-library-example-master | my_library/dataset_readers/__init__.py |
from typing import Dict
import json
import logging
from overrides import overrides
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import LabelField, TextField
from allennlp.data.instance import Instance
from allennlp.... | allennlp-as-a-library-example-master | my_library/dataset_readers/semantic_scholar_papers.py |
from my_library.predictors.paper_classifier_predictor import PaperClassifierPredictor
| allennlp-as-a-library-example-master | my_library/predictors/__init__.py |
from overrides import overrides
from allennlp.common.util import JsonDict
from allennlp.data import Instance
from allennlp.predictors.predictor import Predictor
@Predictor.register('paper-classifier')
class PaperClassifierPredictor(Predictor):
""""Predictor wrapper for the AcademicPaperClassifier"""
def predi... | allennlp-as-a-library-example-master | my_library/predictors/paper_classifier_predictor.py |
from my_library.models.academic_paper_classifier import AcademicPaperClassifier
| allennlp-as-a-library-example-master | my_library/models/__init__.py |
from typing import Dict, Optional
import numpy
from overrides import overrides
import torch
import torch.nn.functional as F
from allennlp.common.checks import ConfigurationError
from allennlp.data import Vocabulary
from allennlp.modules import FeedForward, Seq2VecEncoder, TextFieldEmbedder
from allennlp.models.model ... | allennlp-as-a-library-example-master | my_library/models/academic_paper_classifier.py |
allennlp-as-a-library-example-master | tests/dataset_readers/__init__.py | |
# pylint: disable=no-self-use,invalid-name
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.util import ensure_list
from my_library.dataset_readers import SemanticScholarDatasetReader
class TestSemanticScholarDatasetReader(AllenNlpTestCase):
def test_read_from_file(self):
reader... | allennlp-as-a-library-example-master | tests/dataset_readers/semantic_scholar_dataset_reader_test.py |
# pylint: disable=no-self-use,invalid-name,unused-import
from unittest import TestCase
from pytest import approx
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
# required so that our custom model + predictor + dataset reader
# will be registered by name
import my_library
... | allennlp-as-a-library-example-master | tests/predictors/predictor_test.py |
# pylint: disable=invalid-name,protected-access
from allennlp.common.testing import ModelTestCase
class AcademicPaperClassifierTest(ModelTestCase):
def setUp(self):
super(AcademicPaperClassifierTest, self).setUp()
self.set_up_model('tests/fixtures/academic_paper_classifier.json',
... | allennlp-as-a-library-example-master | tests/models/academic_paper_classifier_test.py |
allennlp-as-a-library-example-master | tests/models/__init__.py | |
from invoke import task
import boto3
import subprocess
import os
import glob
import tempfile
import platform
@task
def extract_store_nvidia_driver(context, cuda_url):
if platform.system() != "Linux":
raise Exception("CUDA driver extraction can only be run on Linu")
name = os.path.basename(cuda_url)
... | ai2thor-docker-main | tasks.py |
import ai2thor.controller
import ai2thor.platform
from pprint import pprint
if __name__ == '__main__':
controller = ai2thor.controller.Controller(platform=ai2thor.platform.CloudRendering, scene='FloorPlan28')
event = controller.step(action='RotateRight')
pprint(event.metadata['agent'])
| ai2thor-docker-main | example_agent.py |
import csv
import os
import pickle
csv.field_size_limit(2147483647)
from collections import Counter
class EHRDataset:
def __init__(self, train_path, dev_path, test_path, do_train=True, do_test=True):
        assert do_train or do_test, "if neither train nor test is enabled, which data should it load?"
self.train_data... | BEEP-main | outcome-prediction/data_loader.py |
import argparse
import random
import os
import pickle
import copy
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.metrics import roc_auc_score, f1_score, average_precision_score, \
RocCurveDisplay, PrecisionRecallDisplay, \
precision_score, recall_score, precision_... | BEEP-main | outcome-prediction/run_outcome_prediction.py |
import os
import math
import copy
import numpy as np
import torch
import torch.nn as nn
from dataclasses import dataclass, field
from transformers import BertForSequenceClassification
from transformers.models.longformer.modeling_longformer import LongformerSelfAttention
class BertLongSelfAttention(LongformerSelfAtten... | BEEP-main | outcome-prediction/outcome_models.py |
import argparse
import math
import random
import os
import pickle
import copy
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.metrics import roc_auc_score, f1_score
import setproctitle
from data_loader import EHRDataset
from transformers import AdamW, BertConfig, BertToke... | BEEP-main | outcome-prediction/run_outcome_prediction_hpo.py |
import argparse
import random
import os
import pickle
import copy
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.metrics import roc_auc_score, f1_score
import setproctitle
from data_loader import EHRDataset
from transformers import AdamW, BertConfig, BertTokenizer, BertF... | BEEP-main | outcome-prediction/run_outcome_prediction_baseline_hpo.py |
import time
import pickle
import csv
import math
import datetime
import os
import argparse
EMAILID = "matanhol@gmail.com"
TOOLNAME = ""
from Bio import Entrez
Entrez.email = EMAILID
# Function to retrieve articles from a specified database using a provided query string
# Query string can be a single word/phrase or ... | BEEP-main | literature-retrieval/enterz_outcome_specific_retreival.py |
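The comment above describes a standard Entrez search. A minimal sketch of such a call with Bio.Entrez (the helper name, `db="pubmed"`, and `retmax` are illustrative assumptions, not the script's actual values):

```python
from Bio import Entrez

def search_pubmed_sketch(query, retmax=20):
    # Hypothetical helper: db and retmax are assumed values.
    handle = Entrez.esearch(db="pubmed", term=query, retmax=retmax)
    record = Entrez.read(handle)
    handle.close()
    return record["IdList"]  # PubMed IDs matching the query
```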
import pickle
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
import math
from collections import Counter
import argparse
from scipy.sparse import coo_matrix, coo_array, vstack as sparse_vstack
import time
import psutil
import gc
def sparse_rank(lit_mentions_file, ehr_mentions_file, outcome... | BEEP-main | literature-retrieval/sparse_retriever.py |
import os.path
import pickle
import pip
# Initialize MeSH entity linker to link filtered mentions
import spacy
from scispacy.linking import EntityLinker
import glob
en_core_sci_md_url = "https://s3-us-west-2.amazonaws.com/ai2-s2-scispacy/releases/v0.4.0/en_core_sci_md-0.4.0.tar.gz"
try:
import en_core_sci_md
except... | BEEP-main | literature-retrieval/mention_linking.py |
import os
import pickle
import csv
import spacy.cli
from nltk import word_tokenize, sent_tokenize
from spacy.tokens.doc import Doc
from spacy.tokens import Span
from medspacy.context import ConTextComponent, ConTextRule
import glob
"""
try:
import en_core_web_sm
except:
print('downloading "en_core_web_sm"')
... | BEEP-main | literature-retrieval/mention_filtering.py |
'''
Code to run LM-based reranker over abstracts retrieved per query
Command: python text_reranker.py --retrieval_results <RETRIEVAL_PICKLE_FILE> --entities <ENTITY_PICKLE_FILE> --out_dir <OUT_DIR> --model_name_or_path <MODEL> --checkpoint <MODEL_CHECKPOINT>
'''
print("started text_reranker...")
import torch
print("is ... | BEEP-main | literature-retrieval/reranker/text_reranker.py |
import gc
import os
import csv
csv.field_size_limit(2147483647)
import pickle
import spacy
import scispacy
from scispacy.linking import EntityLinker
en_core_sci_sm_url = "https://s3-us-west-2.amazonaws.com/ai2-s2-scispacy/releases/v0.5.1/en_core_sci_sm-0.5.1.tar.gz"
try:
print("trying to load en_core_sci_sm")
... | BEEP-main | literature-retrieval/reranker/data_loader.py |
'''
Code to run LM-based reranker over abstracts retrieved per query
Command: python run_reranker.py --data <DATA_DIR> --entities <ENTITY_PICKLE_FILE> --out_dir <OUT_DIR> --model_name_or_path <MODEL> --do_train --do_test
'''
import argparse
import os
import random
import statistics
import pickle
import torch
import t... | BEEP-main | literature-retrieval/reranker/run_reranker_cv.py |
import os.path
import pickle
import argparse
import numpy as np
import os
def take_top_n_and_untie(input_file_path, ranking_type, out_dir, top_n):
os.makedirs(out_dir, exist_ok=True)
if ranking_type != "similarity":
print("note that the similarity score are some decreasing function of the distances,"... | BEEP-main | literature-retrieval/reranker/take_top_n.py |
import pickle
import argparse
import os
def merge(sparse_ranked_path, dense_ranked_path, out_path, top_n):
sparse_ranked = pickle.load(open(sparse_ranked_path, "rb"))
dense_ranked = pickle.load(open(dense_ranked_path, "rb"))
sparse_keys = set(sparse_ranked.keys())
dense_keys = set(dense_ranked.keys())
... | BEEP-main | literature-retrieval/reranker/merge_rankers.py |
import gc
import os
import csv
import pickle
import spacy
# seems unneeded, but without this import the program failed to find something
import scispacy
from scispacy.linking import EntityLinker
csv.field_size_limit(2147483647)
en_core_sci_sm_url = "https://s3-us-west-2.amazonaws.com/ai2-s2-scispacy/releases/v0.5.1/en_core... | BEEP-main | literature-retrieval/dense-retriever/data_loader_bireranker.py |
'''
Code to run LM-based reranker over abstracts retrieved per query
Command: python run_triplet_bireranker_cv.py --data <DATA_DIR> --entities <ENTITY_PICKLE_FILE> --out_dir <OUT_DIR> --model_name_or_path <MODEL> --do_train --do_test
'''
import argparse
import os
import random
import statistics
import pickle
import t... | BEEP-main | literature-retrieval/dense-retriever/run_triplet_bireranker_cv.py |
'''
Code to run LM-based reranker over abstracts retrieved per query
Command: python text_reranker.py --retrieval_results <RETRIEVAL_PICKLE_FILE> --entities <ENTITY_PICKLE_FILE> --out_dir <OUT_DIR> --model_name_or_path <MODEL> --checkpoint <MODEL_CHECKPOINT>
'''
import argparse
import gc
import os
import pickle
from s... | BEEP-main | literature-retrieval/dense-retriever/text_triplet_bireranker.py |
import os
import csv
import math
from nltk import sent_tokenize, word_tokenize
import pickle
class RawTextDataset:
def __init__(self, file, tag_type):
self.data = self.read_raw_text_files(file)
self.label_vocab = {'O': 0, 'B-PROB': 1, 'I-PROB': 2, 'B-TREAT': 3, 'I-TREAT': 4, 'B-TEST': 5, 'I-TEST': ... | BEEP-main | literature-retrieval/mention-extraction/data_loader.py |
import torch
import numpy as np
def tokenize_and_align_labels(tokenizer, examples):
tokenized_inputs = tokenizer(
examples,
padding='max_length',
truncation=True,
# We use this argument because the texts in our dataset are lists of words (with a label for each word).
is_spli... | BEEP-main | literature-retrieval/mention-extraction/utils.py |
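For context on what typically follows such a tokenizer call: fast HuggingFace tokenizers expose `word_ids()`, which maps each subtoken back to its source word so word-level labels can be propagated. A minimal sketch, assuming the conventional -100 ignore index (the helper name is hypothetical):

```python
def align_labels_sketch(tokenized_inputs, word_labels, batch_index=0):
    # Hypothetical helper: propagate word-level labels to subtokens;
    # special tokens ([CLS], [SEP], padding) get -100 so the loss skips them.
    aligned = []
    for word_id in tokenized_inputs.word_ids(batch_index=batch_index):
        if word_id is None:
            aligned.append(-100)
        else:
            aligned.append(word_labels[word_id])
    return aligned
```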
'''
Code for NER and PICO tagging. Model: Pretrained LM + linear layer
Run: python pico_trainer.py --data_dir <DATA DIR> --out_dir <OUTPUT DIR> --model_name_or_path <LM NAME> --task <pico/i2b2> --do_train --do_test
'''
import os
import argparse
import random
import numpy as np
from collections import Counter
import t... | BEEP-main | literature-retrieval/mention-extraction/pico_trainer.py |
'''
Code to dump NER or PICO tags for raw text. Model: Pretrained LM + linear layer
Run: python text_tagger.py --data <RAW TEXT CSV> --out_dir <OUTPUT DIR> --model_name_or_path <LM NAME> --checkpoint <MODEL WEIGHT FILE> --task <pico/i2b2>
'''
import pickle
import os
import argparse
import numpy as np
import torch
fro... | BEEP-main | literature-retrieval/mention-extraction/text_tagger.py |
import sys
import csv
import pickle
import os
admnote_folder = sys.argv[1]
note_texts = {}
for file in os.listdir(admnote_folder):
reader = csv.reader(open(os.path.join(admnote_folder, file)))
next(reader, None)
for row in reader:
note_texts[int(row[0])] = row[1]
pmv_labels = pickle.load(open('pm... | BEEP-main | data/generate_pmv_data.py |
import argparse
import json
from copy import deepcopy
from math import ceil
from random import shuffle
from commaqa.inference.utils import LIST_JOINER, EOQ_MARKER, INTERQ_MARKER, ANSWER_MARKER, \
SIMPQ_MARKER
def parse_arguments():
arg_parser = argparse.ArgumentParser(description='Solve a ReModeL dataset usi... | CommaQA-main | commaqa/dataset/generate_decompositions_from_chains.py |
CommaQA-main | commaqa/dataset/__init__.py | |
import argparse
import json
from copy import deepcopy
from math import ceil
from random import shuffle
from commaqa.configs.predicate_language_config import ModelQuestionConfig
from commaqa.dataset.utils import nonempty_answer
from commaqa.execution.operation_executer import OperationExecuter
from commaqa.execution.ut... | CommaQA-main | commaqa/dataset/generate_decomposition_predictions.py |
import argparse
import json
import os
import random
import re
import string
from math import ceil
from pathlib import Path
from shutil import copyfile
from typing import List
import _jsonnet
from tqdm import tqdm
from commaqa.configs.dataset_build_config import DatasetBuildConfig
from commaqa.dataset.utils import get... | CommaQA-main | commaqa/dataset/build_submodel_datasets.py |
import itertools
import re
pred_match = re.compile(r"(.*)\((.*)\)$")
def get_answer_indices(question_str):
    return [int(m.group(1)) for m in re.finditer(r"#(\d)", question_str)]
def get_question_indices(question_str):
    return [int(m.group(1)) for m in re.finditer(r"\$(\d)", question_str)]
def is_question_var(... | CommaQA-main | commaqa/dataset/utils.py |
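A quick usage sketch of the two extractors above, assuming the `#N` / `$N` placeholder convention implied by the regexes (the inputs are made up):

```python
print(get_answer_indices("Who directed #1 and acted in #2?"))  # -> [1, 2]
print(get_question_indices("is_greater($1 | $2)"))             # -> [1, 2]
```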
import argparse
import json
import logging
import os
import random
from math import ceil
from random import shuffle
from shutil import copyfile
from typing import List
import _jsonnet
from commaqa.configs.dataset_build_config import DatasetBuildConfig
from commaqa.execution.utils import build_models
logger = logging... | CommaQA-main | commaqa/dataset/build_dataset.py |
import torch
from transformers import AutoConfig, AutoTokenizer, AutoModelWithLMHead
from transformers.generation_utils import SampleEncoderDecoderOutput
import logging
logger = logging.getLogger(__name__)
class LMGenerator:
def __init__(self, model_path, device=None,
generation_args={}, encode... | CommaQA-main | commaqa/models/generator.py |
import json
import logging
from commaqa.dataset.utils import flatten_list, get_answer_indices, NOANSWER, \
valid_answer
logger = logging.getLogger(__name__)
class OperationExecuter:
def __init__(self, model_library, ignore_input_mismatch=False):
self.model_library = model_library
self.ignor... | CommaQA-main | commaqa/execution/operation_executer.py |
import json
import logging
import re
from json import JSONDecodeError
from commaqa.execution.model_executer import ModelExecutor
logger = logging.getLogger(__name__)
class MathModel(ModelExecutor):
def __init__(self, **kwargs):
self.func_regex = {
"is_greater\((.+) \| (.+)\)": self.greater_... | CommaQA-main | commaqa/execution/math_model.py |
MATH_MODEL = "math_special"
KBLOOKUP_MODEL = "kblookup"
| CommaQA-main | commaqa/execution/constants.py |
CommaQA-main | commaqa/execution/__init__.py | |
import logging
import re
from commaqa.configs.utils import execute_steps
from commaqa.dataset.utils import get_predicate_args, align_assignments, get_question_indices, \
valid_answer, NOANSWER
from commaqa.execution.constants import KBLOOKUP_MODEL
from commaqa.execution.operation_executer import OperationExecuter
... | CommaQA-main | commaqa/execution/model_executer.py |
import logging
from commaqa.execution.constants import MATH_MODEL
from commaqa.execution.kblookup import KBLookup
from commaqa.execution.math_model import MathModel
from commaqa.execution.model_executer import ModelExecutor
logger = logging.getLogger(__name__)
def build_models(pred_lang_config, complete_kb, ignore_... | CommaQA-main | commaqa/execution/utils.py |
import logging
from commaqa.dataset.utils import get_predicate_args
logger = logging.getLogger(__name__)
class KBLookup:
def __init__(self, kb):
self.kb = kb
def ask_question(self, question_predicate):
return self.ask_question_predicate(question_predicate)
def ask_question_predicate(se... | CommaQA-main | commaqa/execution/kblookup.py |
import random
from copy import deepcopy
from typing import List, Dict
from commaqa.configs.entities_config import EntitiesConfig
from commaqa.dataset.utils import get_predicate_args
class PredicateConfig:
def __init__(self, pred_json):
self.pred_name = pred_json[0]
self.args = pred_json[1]["args"... | CommaQA-main | commaqa/configs/predicate_config.py |
from commaqa.configs.entities_config import EntitiesConfig
from commaqa.configs.predicate_config import PredicateConfig
from commaqa.configs.predicate_language_config import PredicateLanguageConfig
from commaqa.configs.theory_config import TheoryConfig
class DatasetBuildConfig:
def __init__(self, input_json):
... | CommaQA-main | commaqa/configs/dataset_build_config.py |
import random
from math import ceil
from typing import Dict, Any, List
class EntitiesConfig:
def __init__(self, entities_json: Dict[str, List[str]]):
self.entity_type_map = entities_json
def subsample(self, num_ents):
new_ent_map = {}
for etype, elist in self.entity_type_map.items():
... | CommaQA-main | commaqa/configs/entities_config.py |
import json
import logging
import random
import string
from typing import Dict, List
from commaqa.configs.step_config import StepConfig
from commaqa.configs.utils import execute_steps
from commaqa.dataset.utils import dict_product, align_assignments, nonempty_answer, is_question_var
from commaqa.execution.model_execut... | CommaQA-main | commaqa/configs/theory_config.py |
CommaQA-main | commaqa/configs/__init__.py | |
import logging
from copy import deepcopy
from typing import List, Dict
from commaqa.configs.predicate_language_config import PredicateLanguageConfig
from commaqa.configs.step_config import StepConfig
from commaqa.dataset.utils import is_question_var, nonempty_answer
from commaqa.execution.operation_executer import Ope... | CommaQA-main | commaqa/configs/utils.py |
class StepConfig:
def __init__(self, step_json):
self.operation = step_json["operation"]
self.question = step_json["question"]
self.answer = step_json["answer"]
def to_json(self):
return self.__dict__
| CommaQA-main | commaqa/configs/step_config.py |
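A usage sketch for `StepConfig`, with a made-up step JSON of the shape the constructor expects:

```python
step = StepConfig({"operation": "select",        # hypothetical values
                   "question": "directed_by($1)",
                   "answer": "#1"})
print(step.to_json())
# {'operation': 'select', 'question': 'directed_by($1)', 'answer': '#1'}
```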
from commaqa.configs.step_config import StepConfig
from commaqa.dataset.utils import get_predicate_args
class ModelQuestionConfig:
def __init__(self, config_json):
self.steps = [StepConfig(x) for x in
config_json["steps"]] if "steps" in config_json else []
self.questions = co... | CommaQA-main | commaqa/configs/predicate_language_config.py |
from typing import Dict
from commaqa.inference.dataset_readers import HotpotQAReader, DatasetReader, DropReader
from commaqa.inference.participant_execution import ExecutionParticipant
from commaqa.inference.participant_qgen import LMGenParticipant, RandomGenParticipant
from commaqa.inference.participant_util import D... | CommaQA-main | commaqa/inference/constants.py |
CommaQA-main | commaqa/inference/__init__.py | |
import logging
import math
import random
import re
from itertools import product, permutations
from commaqa.inference.model_search import ParticipantModel
from commaqa.inference.utils import get_sequence_representation, stem_filter_tokenization, BLANK, \
stop_words_set
from commaqa.models.generator import LMGenera... | CommaQA-main | commaqa/inference/participant_qgen.py |
import os
from typing import List, Dict
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
stemmer = PorterStemmer()
stop_words_set = set(stopwords.words('english'))
QUESTION_MARKER = " Q: "
COMPQ_MARKER = " QC: "
SIMPQ_MARKER = " QS: "
INTERQ_MARKER = " QI: ... | CommaQA-main | commaqa/inference/utils.py |
import argparse
import json
import logging
import os
import _jsonnet
from commaqa.inference.constants import MODEL_NAME_CLASS, READER_NAME_CLASS
from commaqa.inference.dataset_readers import DatasetReader
from commaqa.inference.model_search import (
ModelController,
BestFirstDecomposer, QuestionGeneratorData)... | CommaQA-main | commaqa/inference/configurable_inference.py |
import copy
import heapq
import json
import logging
class BasicDataInstance(dict):
_REQUIRED_ATTRS = set([])
def __init__(self, input_data):
dict.__init__({})
self.update(input_data)
for item in type(self)._REQUIRED_ATTRS:
if item not in self:
self[item] = ... | CommaQA-main | commaqa/inference/model_search.py |
import json
import logging
import re
from commaqa.configs.predicate_language_config import ModelQuestionConfig
from commaqa.dataset.utils import valid_answer, nonempty_answer
from commaqa.execution.operation_executer import OperationExecuter
from commaqa.execution.utils import build_models
from commaqa.inference.model... | CommaQA-main | commaqa/inference/participant_execution.py |
from commaqa.inference.model_search import ParticipantModel
from commaqa.inference.utils import get_sequence_representation
class DumpChainsParticipant(ParticipantModel):
def __init__(self, output_file, next_model="gen"):
self.output_file = output_file
self.next_model = next_model
self.nu... | CommaQA-main | commaqa/inference/participant_util.py |
import json
class DatasetReader:
def read_examples(self, file):
        raise NotImplementedError("read_examples not implemented by " + self.__class__.__name__)
class HotpotQAReader(DatasetReader):
def read_examples(self, file):
with open(file, 'r') as input_fp:
input_json = json.load... | CommaQA-main | commaqa/inference/dataset_readers.py |
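For illustration, a minimal hypothetical subclass of the `DatasetReader` base class above, reading one JSON object per line (the class name and schema are assumptions, not part of the repo):

```python
import json

class JsonlReader(DatasetReader):
    # Hypothetical reader: one JSON object per line.
    def read_examples(self, file):
        with open(file, "r") as input_fp:
            return [json.loads(line) for line in input_fp]
```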
#!/usr/bin/python
"""
Official DROP evaluation script obtained from
https://github.com/allenai/allennlp-reading-comprehension/blob/master/allennlp_rc/eval/drop_eval.py
"""
from collections import defaultdict
from typing import Any, Dict, List, Set, Tuple, Union, Optional
import json
import argparse
import string
import... | CommaQA-main | scripts/drop_eval.py |
import json
import sys
def evaluate(answer_file, prediction_file):
answer_by_id = {}
for line in open(answer_file).readlines():
struct = json.loads(line)
answer_by_id[struct["id"]] = struct
prediction_by_id = {}
for line in open(prediction_file).readlines():
struct = json.load... | aristo-leaderboard-master | tracie/evaluator/evaluator.py |
import os
import evaluator
import unittest
import tempfile
import typing
class TestAccuracy(unittest.TestCase):
def test_EverythingCorrect(self):
qa = {"P1": "E", "P2": "N", "P3": "N"}
p = {"P1": "E", "P2": "N", "P3": "N"}
self.assertEqual(3.0 / 3.0, evaluator.calculate_accuracy(qa, p))
... | aristo-leaderboard-master | scitail/evaluator/test_evaluator.py |
#!/usr/bin/env python3
import csv
from typing import *
import logging
import sys
import json
EXIT_STATUS_ANSWERS_MALFORMED = 1
EXIT_STATUS_PREDICTIONS_MALFORMED = 2
EXIT_STATUS_PREDICTIONS_EXTRA = 3
EXIT_STATUS_PREDICTION_MISSING = 4
VALID_PREDICTION_VALUES = ['E', 'N']
def calculate_accuracy(answers: Dict[str, str... | aristo-leaderboard-master | scitail/evaluator/evaluator.py |
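Judging from the unit test shown earlier, `calculate_accuracy` returns the fraction of exact label matches. A minimal sketch of that computation (the real evaluator also validates inputs and uses the exit codes above; the helper name is hypothetical):

```python
from typing import Dict

def calculate_accuracy_sketch(answers: Dict[str, str],
                              predictions: Dict[str, str]) -> float:
    # Hypothetical re-implementation: fraction of exact matches,
    # consistent with test_EverythingCorrect expecting 3.0 / 3.0.
    correct = sum(1 for qid, gold in answers.items()
                  if predictions.get(qid) == gold)
    return correct / len(answers)
```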
aristo-leaderboard-master | eqasc/code/allennlp_reasoning_explainqa/__init__.py | |
aristo-leaderboard-master | eqasc/code/allennlp_reasoning_explainqa/evaluator/__init__.py | |
import json
import random
import sys
from allennlp_reasoning_explainqa.common.constants import CORRECT_OPTION_TAG
from allennlp_reasoning_explainqa.training.metrics.confusion_matrix import (
F1MeasureCustomRetrievalEval,
)
from allennlp_reasoning_explainqa.training.metrics.explanation_eval import (
Explanation... | aristo-leaderboard-master | eqasc/code/allennlp_reasoning_explainqa/evaluator/evaluator.py |
aristo-leaderboard-master | eqasc/code/allennlp_reasoning_explainqa/training/__init__.py | |
from allennlp_reasoning_explainqa.training.metrics.confusion_matrix import *
from allennlp_reasoning_explainqa.training.metrics.explanation_eval import *
| aristo-leaderboard-master | eqasc/code/allennlp_reasoning_explainqa/training/metrics/__init__.py |
import random
from collections import Counter
import numpy as np
from allennlp_reasoning_explainqa.common.constants import *
def dcg_score(y_true, y_score, k=10, gains="exponential"):
"""Discounted cumulative gain (DCG) at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
... | aristo-leaderboard-master | eqasc/code/allennlp_reasoning_explainqa/training/metrics/explanation_eval.py |
import numpy as np
import sklearn.metrics
from sklearn.metrics import roc_curve
class F1MeasureCustomRetrievalEval:
def __init__(self, pos_label=1) -> None:
self._predictions = []
self._gt = []
self._pos_label = pos_label
self._probs = []
def __call__(self, label, score):
... | aristo-leaderboard-master | eqasc/code/allennlp_reasoning_explainqa/training/metrics/confusion_matrix.py |
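`F1MeasureCustomRetrievalEval` accumulates gold labels and scores; a common way to reduce those to an F1 number is to sweep the thresholds that `roc_curve` returns and keep the best one. A hedged sketch of that pattern (the class's actual reduction may differ; the function name is hypothetical):

```python
import numpy as np
from sklearn.metrics import f1_score, roc_curve

def best_f1_threshold(gt, probs, pos_label=1):
    # Hypothetical helper: evaluate F1 at every ROC threshold, return the best.
    _, _, thresholds = roc_curve(gt, probs, pos_label=pos_label)
    f1s = [f1_score(gt, [int(p >= t) for p in probs]) for t in thresholds]
    return thresholds[int(np.argmax(f1s))]
```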
CORRECT_OPTION_TAG = "correct_option"
INCORRECT_OPTION_TAG = "incorrect_option"
CORRECT_OPTION_GOLD_TAG = "gold"
CORRECT_OPTION_TAG_LIST = [CORRECT_OPTION_TAG, CORRECT_OPTION_GOLD_TAG]
ALL_OPTION_TAG_LIST = [
CORRECT_OPTION_TAG,
CORRECT_OPTION_GOLD_TAG,
INCORRECT_OPTION_TAG,
]
| aristo-leaderboard-master | eqasc/code/allennlp_reasoning_explainqa/common/constants.py |