Dataset schema: three string columns, one row per source file, with fields in row order (python_code, repo_name, file_path).
python_code: string, length 0 to 187k (empty for empty files such as __init__.py modules)
repo_name: string, length 8 to 46
file_path: string, length 6 to 135
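For reference, a minimal sketch of iterating over rows with this schema, assuming the table has been exported to a JSON-lines file; the filename rows.jsonl and the export format are assumptions for illustration, not part of the dataset itself:

import json

# Hypothetical export path; substitute wherever the rows were actually saved.
ROWS_FILE = "rows.jsonl"

with open(ROWS_FILE) as f:
    for line in f:
        row = json.loads(line)
        code = row["python_code"]   # may be "" (length 0), e.g. for empty __init__.py files
        repo = row["repo_name"]
        path = row["file_path"]
        print(f"{repo}/{path}: {len(code)} characters")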
dnw-master
data/__init__.py
import os

import torch
import torchvision
from torchvision import transforms

from genutil.config import FLAGS

class CIFAR10:
    def __init__(self):
        super(CIFAR10, self).__init__()
        data_root = os.path.join(FLAGS.data_dir, "cifar10")
        use_cuda = torch.cuda.is_available()
        # Data loadi...
dnw-master
data/cifar10.py
import os
import json

def load_aokvqa(aokvqa_dir, split, version='v1p0'):
    assert split in ['train', 'val', 'test', 'test_w_ans']
    dataset = json.load(open(
        os.path.join(aokvqa_dir, f"aokvqa_{version}_{split}.json")
    ))
    return dataset

def get_coco_path(split, image_id, coco_dir):
    return os.p...
aokvqa-main
load_aokvqa.py
import os
import json
import argparse
import pathlib
from collections import Counter

from load_aokvqa import load_aokvqa

parser = argparse.ArgumentParser()
parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
parser.add_argument('--split', type=str, choices=['train', 'val', 'test'...
aokvqa-main
heuristics/most_common_answer.py
import os
import json
from random import seed, sample
import argparse
import pathlib

from load_aokvqa import load_aokvqa

parser = argparse.ArgumentParser()
parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
parser.add_argument('--split', type=str, choices=['train', 'val', 'test'...
aokvqa-main
heuristics/random_unweighted.py
import os
import json
import numpy as np
import argparse
import pathlib
from collections import Counter

from load_aokvqa import load_aokvqa

parser = argparse.ArgumentParser()
parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
parser.add_argument('--split', type=str, choices=['tr...
aokvqa-main
heuristics/random_weighted.py
import os
import argparse
from collections import Counter
import pathlib

from load_aokvqa import load_aokvqa

parser = argparse.ArgumentParser()
parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
parser.add_argument('--out', type=pathlib.Path, required=True, dest='output_file')
a...
aokvqa-main
data_scripts/build_vocab.py
import os
import argparse
import pathlib
from tqdm import tqdm
from PIL import Image

import torch
import torch.nn as nn
from torchvision import models
from torchvision import transforms as T

from load_aokvqa import load_aokvqa, get_coco_path

parser = argparse.ArgumentParser()
parser.add_argument('--aokvqa-dir', typ...
aokvqa-main
data_scripts/extract_resnet_features.py
import os
import argparse
import pathlib
from tqdm import tqdm

import torch
from transformers import AutoTokenizer, AutoModel

from load_aokvqa import load_aokvqa

parser = argparse.ArgumentParser()
parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
parser.add_argument('--split',...
aokvqa-main
data_scripts/extract_bert_features.py
import json
from tqdm import tqdm
import argparse
import pathlib

import torch
import clip

parser = argparse.ArgumentParser()
parser.add_argument('--vocab', type=pathlib.Path, required=True, dest='vocab_file')
parser.add_argument('--model-type', type=str, choices=['RN50', 'RN50x4', 'RN50x16', 'RN50x64', 'RN101', 'ViT-...
aokvqa-main
data_scripts/encode_vocab_clip.py
import os
from PIL import Image
from tqdm import tqdm
import argparse
import pathlib

import torch
import clip

from load_aokvqa import load_aokvqa, get_coco_path

parser = argparse.ArgumentParser()
parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
parser.add_argument('--coco-dir...
aokvqa-main
data_scripts/extract_clip_features.py
import argparse
import pathlib
import json

from load_aokvqa import load_aokvqa

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
    parser.add_argument('--split', type=str, choices=['train', 'val', 'test'], ...
aokvqa-main
evaluation/prepare_predictions.py
import argparse
import pathlib
import json
import glob

from load_aokvqa import load_aokvqa

def eval_aokvqa(dataset, preds, multiple_choice=False, strict=True):
    if isinstance(dataset, list):
        dataset = { dataset[i]['question_id'] : dataset[i] for i in range(len(dataset)) }
    if multiple_choice is False...
aokvqa-main
evaluation/eval_predictions.py
import argparse
import pathlib
import json
from tqdm import tqdm

from sentence_transformers import SentenceTransformer
from sentence_transformers.util import cos_sim

from load_aokvqa import load_aokvqa

def map_to_choices(dataset, predictions, device='cpu'):
    if isinstance(dataset, list):
        dataset = { data...
aokvqa-main
evaluation/remap_predictions.py
import os
import json
import argparse
import pathlib

from load_aokvqa import load_aokvqa

parser = argparse.ArgumentParser()
parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
parser.add_argument('--coco-dir', type=pathlib.Path, required=True, dest='coco_dir')
parser.add_argument...
aokvqa-main
gpt3/caption_inputs.py
import os
import random
import json
from tqdm import tqdm
import argparse
import pathlib

import openai
openai.organization = os.getenv('OPENAI_ORG')
openai.api_key = os.getenv('OPENAI_API_KEY')

from load_aokvqa import load_aokvqa

random.seed(0)

def main():
    parser = argparse.ArgumentParser()
    parser.add_arg...
aokvqa-main
gpt3/query_gpt3.py
import json
import argparse
import pathlib

from load_aokvqa import load_aokvqa

parser = argparse.ArgumentParser()
parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
parser.add_argument('--split', type=str, choices=['train', 'val', 'test_w_ans'], required=True)
parser.add_argumen...
aokvqa-main
gpt3/rationale_inputs.py
import sys
import os
import argparse
import pathlib
from tqdm import tqdm
import json

import torch
import torch.nn as nn
# https://github.com/PyTorchLightning/pytorch-lightning/issues/11663
import sentencepiece; import pytorch_lightning as pl; import clip

from transfer_experiments.train import LinearClassifier
from ...
aokvqa-main
transfer_experiments/predict.py
import os
import sys
import json
import argparse
import pathlib
import random

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
# https://github.com/PyTorchLightning/pytorch-lightning/issues/11663
import sentencepiece; import pytorch_lightning as pl i...
aokvqa-main
transfer_experiments/train.py
from setuptools import setup, find_packages

def parse_requirements_file(path):
    requirements = []
    with open(path) as requirements_file:
        import re

        def fix_url_dependencies(req: str) -> str:
            """Pip and setuptools disagree about how URL dependencies should be handled."""
            m...
bettermap-master
setup.py
import bettermap

def f(x: float) -> float:
    return x * x

_INPUT = list(range(100))
_EXPECTED = list(map(f, _INPUT))

def test_map_per_process():
    result = list(bettermap.map_per_process(f, _INPUT))
    result.sort()
    assert result == _EXPECTED

def test_ordered_map_per_process():
    result = list(better...
bettermap-master
tests/test_basic_functionality.py
bettermap-master
tests/__init__.py
from .bettermap import *
bettermap-master
bettermap/__init__.py
#!/usr/bin/python3

import io
import sys
from concurrent.futures import ThreadPoolExecutor
import itertools
import multiprocessing as mp
from multiprocessing.connection import Connection
from multiprocessing.context import ForkProcess
from typing import Iterable, List, Optional, Any, Dict, Tuple

import dill
from queu...
bettermap-master
bettermap/bettermap.py
from os import mkdir
from os.path import join, dirname, expanduser, exists

DATA_DIR = expanduser("~/data-dbg")

COCO_SOURCE = join(DATA_DIR, "coco")
COCO_ANNOTATIONS = join(COCO_SOURCE, "annotations")
COCO_IMAGES = join(COCO_SOURCE, "images")

VQAE = join(DATA_DIR, "vqa-e")
VISUAL_NEWS = join(DATA_DIR, "visual_news/or...
close-main
close/file_paths.py
import gzip
import logging
import tarfile
import tempfile
import zipfile
from os import listdir, makedirs
from os.path import dirname, exists, join

import requests
from tqdm import tqdm

from close import file_paths
from close.utils import py_utils

def ensure_dir_exists(filename):
    """Make sure the parent directory...
close-main
close/download.py
import argparse
import logging
import os

from transformers import AutoConfig

from l2v.data.visual_news import VisualNews
from l2v.experiments.utils import get_adapter
from l2v.train.optimizer import AdamWBuilder, DelayedWarmupScheduleBuilder
from l2v.train.trainer import TrainerSimple
from l2v.utils import py_utils
...
close-main
close/experiments/train_visual_news.py
import os
from typing import Union

from close.data.coco_captioning import CocoCaptioningKP
from close.data.dataset import Dataset
from close.data.vqa_e import EVQA
from close.data.vqa_v2 import Vqa2, VqaWithCaptions
from close.data.visual_entailment import VisualEntailment
from close.model.language_adapters import *
f...
close-main
close/experiments/utils.py
import argparse
import logging
import os
import sys

root_folder = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
sys.path.append(root_folder)

from close.data.coco_captioning import CocoCaptioningKP
from close.data.visual_entailment import VisualEntailment
from close.data...
close-main
close/experiments/train.py
import json
import nltk
import openai
import random
import numpy as np
from collections import OrderedDict
from tqdm import tqdm

openai.api_key = "YOUR_OPENAI_KEY"

harry_potter_characters = [
    "Sirius Black",
    "Cho Chang",
    "Aberforth Dumbledore",
    "Albus Dumbledore",
    "Hermione Granger",
    "Fenrir G...
close-main
close/experiments/generate_stylistic_captioning.py
import argparse
import json
import logging
import os
from typing import Union

import numpy as np
import sys

root_folder = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
sys.path.append(root_folder)

from close.data.coco_captioning import CocoCaptioningKP
from close.dat...
close-main
close/experiments/eval.py
from os import listdir
from os.path import join

from close import file_paths

_IMAGE_ID_TO_SIZE_MAP = {}

IMAGE_SOURCE_MAP = {
    "coco": file_paths.COCO_IMAGES,
    "flicker30k": file_paths.FLICKER30K,
    "visual_news": file_paths.VISUAL_NEWS,
}

def get_image_file(image_id) -> str:
    """Returns the filepath of an image...
close-main
close/utils/image_utils.py
import logging
from typing import Union

import torch
from torch import nn

def get_device(device_name: Union[None, str, int]=None):
    if device_name is None:
        if torch.cuda.is_available():
            logging.info("cuda found, defaulting to cuda device")
            return torch.device('cuda')
        else:
            logging.info("cu...
close-main
close/utils/pytorch_utils.py
"""Code from GPV-2 for saving FromParams objects to disk, used for model/trainer saving AllenNLP recently added their own to_params approach, but there default implementation does not work for some of our models so we stick with the GPV-2 version. """ import enum import typing from collections import OrderedDict from...
close-main
close/utils/to_params.py
import json
import logging
import pickle
import sys
from collections import defaultdict
from json import JSONEncoder
from os import listdir, remove, walk, makedirs
from os.path import exists, join, isdir, basename, dirname, split, relpath
from shutil import rmtree
from typing import TypeVar, List, Iterable, Dict, Any, ...
close-main
close/utils/py_utils.py
#!/usr/bin/env python
#
# File Name : ptbtokenizer.py
#
# Description : Do the PTB Tokenization and remove punctuations.
#
# Creation Date : 29-12-2014
# Last Modified : Thu Mar 19 09:53:35 2015
# Authors : Hao Fang <hfang@uw.edu> and Tsung-Yi Lin <tl483@cornell.edu>
# Modified to silence the stderr output

import os...
close-main
close/utils/quiet_ptbtokenizer.py
import logging
from os import listdir
from os.path import dirname, join, exists

import torch
from allennlp.common import Params

from close.model.model import Model, BEST_STATE_NAME
from close.utils import py_utils
from close.utils.py_utils import load_json_object, import_all, select_run_dir

def load_model(run_dir, ...
close-main
close/model/load_model.py
import logging
from collections import Counter
from dataclasses import dataclass, field, replace
from typing import Any, Callable, List, Dict, Tuple, Union, Optional

import numpy as np
import clip
import torch
from PIL import Image
from allennlp.common import Registrable, Params
from torch import nn
from transformers ...
close-main
close/model/clip_t5_model.py
import pickle
from os.path import join, dirname

import torch
from allennlp.common import Params
from torch.distributions import multivariate_normal

from close import file_paths
from close.model.layers import Layer
from close.utils.py_utils import load_json_object
import numpy as np
from close.utils.pytorch_utils imp...
close-main
close/model/language_adapters.py
from typing import Union, Optional, List, Callable, Any, Dict, Tuple

import torch
from allennlp.common import Registrable, FromParams
from allennlp.nn.beam_search import BeamSearch
from dataclasses import dataclass
from torch import nn

BEST_STATE_NAME = "best-state.pth"

@dataclass
class ExampleOutput:
    text: Lis...
close-main
close/model/model.py
from os.path import join, dirname
from typing import List, Dict, Any

import torch
from allennlp.common import Registrable, FromParams
from torch import nn

from close.utils import pytorch_utils
from close.utils.py_utils import load_json_object
from close.utils.to_params import to_params

class Layer(nn.Module, Regist...
close-main
close/model/layers.py
import json
from collections import Callable  # note: Callable moved to collections.abc in Python 3.10
from dataclasses import dataclass
from typing import Dict, Union

import torch
from allennlp.common import FromParams, Params
from torch.utils.data import DataLoader
from tqdm import tqdm

from close.model.load_model import load_model
from close.model.model import ExampleOut...
close-main
close/train/runner.py
import torch
from typing import Dict, Tuple, List, Optional, Any, Union
from allennlp.common import Registrable
from dataclasses import dataclass
from torch.optim import AdamW, SGD, Optimizer

class OptimizerBuilder(Registrable):
    """Builds an Optimizer

    We use this class rather than using an Optimizer directly si...
close-main
close/train/optimizer.py
import json
import logging
import os
import socket
from datetime import datetime
from os import makedirs
from os.path import join, exists
from time import perf_counter
from typing import List, Optional, Dict, Union

import numpy as np
import torch
from allennlp.common import FromParams, Params
from dataclasses import d...
close-main
close/train/trainer.py
import re
from collections import defaultdict, Counter
from numbers import Number
from typing import Optional, List, Dict, Any

import numpy as np
from allennlp.common import FromParams, Registrable, Params
from dataclasses import dataclass, replace

from pycocoevalcap.bleu.bleu import Bleu
from pycocoevalcap.cider.ci...
close-main
close/train/evaluator.py
"""Provides functions to use T5 in allennlp's BeamSearch We use this instead of transformer's beam search mostly for legacy reasons since that is what the GPV-2 models used """ import torch from torch.nn import functional as F from close.utils import py_utils def t5_initialize_decoding(tokenizer, model, encoder_ou...
close-main
close/train/allennlp_beamsearch.py
import json
from datetime import datetime
from os.path import isdir
from typing import Dict, Any, Union

from close.model.model import ExampleOutput
from close.train.evaluator import Evaluator
from close.utils import py_utils
from close.utils.py_utils import load_json_object, dump_json_object
from close.utils.to_params...
close-main
close/eval/evaluation.py
import argparse
import json
import logging
import os
from typing import Union

import numpy as np

from l2v.data.coco_captioning import CocoCaptioning, CocoSCE
from l2v.data.dataset import Dataset
from l2v.data.visual_news import VisualNews
from l2v.data.vqa_e import EVQA
from l2v.data.vqa_v2 import Vqa2
from l2v.eval....
close-main
close/eval/compute_predictions.py
import re """VQA evaluation copied from the offical VQA 2.0 eval script""" contractions = { "aint": "ain't", "arent": "aren't", "cant": "can't", "couldve": "could've", "couldnt": "couldn't", \ "couldn'tve": "couldn't've", "couldnt've": "couldn't've", "didnt": "didn't", "doesnt": "doesn't", "dont": "don't", "hadn...
close-main
close/eval/vqa_eval.py
from dataclasses import dataclass, replace
from os.path import join
from typing import List, Optional, Union

import numpy as np
from collections import Counter

from close import file_paths
from close.data.coco_captioning import CocoCaptioning2014
from close.data.dataset import Dataset
from close.utils import image_ut...
close-main
close/data/vqa_v2.py
from dataclasses import dataclass
from os.path import isfile, join
from typing import List

import numpy as np

from close import file_paths
from close.data.dataset import Dataset
from close.utils.py_utils import int_to_str, load_json_object

@dataclass
class VisualNewsExample:
    example_id: str
    caption: str
    image_...
close-main
close/data/visual_news.py
from typing import List

from allennlp.common import Registrable

class Dataset(Registrable):
    """Dataset we can train/evaluate on"""

    def get_name(self) -> str:
        """Get the name of the dataset that uniquely identifies it"""
        raise NotImplementedError()

    def load(self) -> List:
        """Loads the examples"""
        ...
close-main
close/data/dataset.py
import json
import logging
from collections import defaultdict
from dataclasses import dataclass
from os.path import join
from typing import Optional, Dict, Any, List

from close import file_paths
from close.data.dataset import Dataset
from close.utils import image_utils, py_utils
from close.utils.py_utils import int_t...
close-main
close/data/coco_captioning.py
import json
from dataclasses import dataclass
from os.path import join
from typing import Dict, List, Any, Optional

from close import file_paths
from close.data.dataset import Dataset
from close.utils import py_utils
from close.utils.py_utils import int_to_str

@dataclass
class VisualEntailmentExample:
    example_id: ...
close-main
close/data/visual_entailment.py
import json
import logging
from collections import Counter
from os.path import join
from typing import List

from close import file_paths
from close.data.dataset import Dataset
from close.data.vqa_v2 import VqaExample
from close.utils import py_utils
from close.utils.image_utils import get_coco_image_id
from close.util...
close-main
close/data/vqa_e.py
import json
import re

def main():
    # todo: @@SEP@@ to ; , @@#@@ to #
    predictions_file = "old_data_dev_low_level_preds.json"
    target_file = predictions_file.replace('.json', '.csv')
    with open(predictions_file, "r") as fd:
        preds = [json.loads(line) for line in fd.readlines()]
    preds = [re.sub(r'@...
break-evaluator-master
allennlp_preds_format.py
import networkx as nx
from queue import Queue
from collections import deque  # deque lives in collections, not queue

def has_cycle(graph: nx.DiGraph):
    try:
        nx.find_cycle(graph, orientation='original')
        return True
    except nx.NetworkXNoCycle:
        return False

def get_graph_levels(graph: nx.DiGraph):
    """
    Find graph level for each node
    level[node] := 0 if the no...
break-evaluator-master
utils/graph.py
from __future__ import print_function

import sys
import threading

try:
    import thread
except ImportError:
    import _thread as thread

def quit_function(fn_name):
    print('{0} took too long'.format(fn_name), file=sys.stderr)
    sys.stderr.flush()
    # raises KeyboardInterrupt
    thread.interrupt_main()
...
break-evaluator-master
utils/timeout.py
from time import sleep

from utils.timeout import exit_after

@exit_after(5)
def countdown(n):
    print('countdown started', flush=True)
    for i in range(n, -1, -1):
        print(i, end=', ', flush=True)
        sleep(1)
    print('countdown finished')

if __name__ == "__main__":
    try:
        countdown(10)
    ...
break-evaluator-master
utils/timeout_test.py
from typing import Dict, Tuple
import numbers
from itertools import zip_longest
import argparse
import os
import random
import re

import numpy as np
import pandas as pd
import json

from evaluation.decomposition import Decomposition
from evaluation.graph_matcher import GraphMatchScorer, get_ged_plus_scores
from evalu...
break-evaluator-master
scripts/evaluate_predictions.py
from pathlib import Path
import os
import argparse
import traceback

import pandas as pd
import re
from enum import Enum

DELIMITER = ';'
REF = '#'

pd.set_option('display.max_colwidth', -1)

def parse_decomposition(qdmr):
    """Parses the decomposition into an ordered list of steps

    Parameters
    ----------
    qdmr : st...
break-evaluator-master
scripts/qdmr_to_program.py
from evaluation.decomposition import Decomposition, draw_decomposition_graph
from evaluation.graph_matcher import AStarSearcher

examples = [
    # 0
    (Decomposition(["representatives from New York state or Indiana state",
                    "the life spans of @@1@@"]),
     Decomposition(["representatives from n...
break-evaluator-master
evaluation/graph_matcher_tests.py
import heapq
import networkx as nx
import networkx.algorithms.isomorphism as iso
import numpy as np
from itertools import chain, combinations, permutations
from multiprocessing import Pool

from progressbar import ProgressBar, SimpleProgress
from tqdm import tqdm

from evaluation.sequence_matcher import SequenceMatch...
break-evaluator-master
evaluation/graph_matcher.py
import matplotlib.pyplot as plt
import networkx as nx
import re

from utils.graph import get_graph_levels

class Decomposition(object):
    def __init__(self, decomposition_list):
        self.decomposition_list = [str(step) for step in decomposition_list]

    def _get_graph_edges(self):
        edges = []
        fo...
break-evaluator-master
evaluation/decomposition.py
# coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable...
break-evaluator-master
evaluation/sari_hook.py
import spacy
from edit_distance import SequenceMatcher
from tqdm import tqdm

class SequenceMatchScorer(object):
    def __init__(self, remove_stop_words):
        self.parser = spacy.load('en_core_web_sm', disable=['ner'])
        self.remove_stop_words = remove_stop_words
        # TODO: extend the default stop wo...
break-evaluator-master
evaluation/sequence_matcher.py
from __future__ import annotations
from typing import Callable
from abc import ABC, abstractmethod
import os
import re

import networkx as nx
import spacy
from spacy.tokens.token import Token
import _pickle as pk
import logging

from evaluation.decomposition import Decomposition

_logger = logging.getLogger(__name__)
...
break-evaluator-master
evaluation/normal_form/normalization_rules.py
from overrides import overrides
import networkx as nx
from queue import Queue
from collections import deque  # deque lives in collections, not queue
import logging
import re

import spacy

from evaluation.decomposition import Decomposition, draw_decomposition_graph
from utils.graph import get_graph_levels
from evaluation.normal_form.normalization_rules import prepare_node
import ...
break-evaluator-master
evaluation/normal_form/normalized_graph_matcher.py
from abc import ABC
import logging

import networkx as nx
from spacy.tokens.token import Token

from scripts.qdmr_to_program import QDMROperation
import scripts.qdmr_to_program as qdmr
from evaluation.normal_form.normalization_rules import DecomposeRule, ReferenceToken, run_tests

_logger = logging.getLogger(__name__)
...
break-evaluator-master
evaluation/normal_form/operations_normalization_rules.py
#!/usr/bin/env python
import logging
import os
import sys

sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.join(__file__, os.pardir))))

logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=logging.INFO)

from arc_solvers.commands import main  # pylint: ...
ARC-Solvers-main
arc_solvers/run.py
ARC-Solvers-main
arc_solvers/__init__.py
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.nn.util import replace_masked_values
from allennlp.nn.util import get_text_field_mask
import allennlp
from typing import Union, Dict
from allennlp.modules import MatrixAttention, Seq2SeqEncoder

def masked_mean(tensor, dim, m...
ARC-Solvers-main
arc_solvers/nn/util.py
ARC-Solvers-main
arc_solvers/nn/__init__.py
""" Script to compute the QA score from the entailment predictions for each supporting sentence and answer choice. USAGE: python scripts/evaluate_predictions.py predictions_file qa_file output_file Minimal expected format of files. 1. predictions_file: {"id": "Mercury_SC_415702", "question": { "choice": {...
ARC-Solvers-main
arc_solvers/processing/evaluate_predictions.py
from typing import Dict, List

from elasticsearch import Elasticsearch
import re

class EsHit:
    def __init__(self, score: float, position: int, text: str, type: str):
        """
        Basic information about an ElasticSearch Hit
        :param score: score returned by the query
        :param position: position ...
ARC-Solvers-main
arc_solvers/processing/es_search.py
""" Script to convert the retrieved HITS into an entailment dataset USAGE: python scripts/convert_to_entailment.py hits_file output_file JSONL format of files 1. hits_file: { "id": "Mercury_SC_415702", "question": { "stem": "George wants to warm his hands quickly by rubbing them. Which skin surface will...
ARC-Solvers-main
arc_solvers/processing/convert_to_entailment.py
""" Script to compute the QA score from the scores per choice USAGE: python scripts/calculate_scores.py predictions_file Minimal expected format of predictions_file: { "question": { "stem":"George wants to warm his hands quickly by rubbing them. Which skin surface will produce the most he...
ARC-Solvers-main
arc_solvers/processing/calculate_scores.py
""" Script to convert the retrieved hits into a paragraph comprehension dataset. Questions with no hits are mapped to a blank paragraph. USAGE: python scripts/convert_to_para_comprehension.py hits_file qa_file output_file JSONL format of files 1. hits_file: { "id": "Mercury_SC_415702", "question": { "st...
ARC-Solvers-main
arc_solvers/processing/convert_to_para_comprehension.py
ARC-Solvers-main
arc_solvers/processing/__init__.py
""" Script to retrieve HITS for each answer choice and question USAGE: python scripts/add_retrieved_text.py qa_file output_file JSONL format of files 1. qa_file: { "id":"Mercury_SC_415702", "question": { "stem":"George wants to warm his hands quickly by rubbing them. Which skin surface will ...
ARC-Solvers-main
arc_solvers/processing/add_retrieved_text.py
from arc_solvers.models.entailment.tree_attention import TreeAttention
from arc_solvers.models.qa.multi_choice.qa_multi_choice_max_att import QAMultiChoiceMaxAttention
ARC-Solvers-main
arc_solvers/models/__init__.py
ARC-Solvers-main
arc_solvers/models/entailment/__init__.py
""" ===================================================================== Decomposable Graph Entailment Model code replicated from SciTail repo https://github.com/allenai/scitail ===================================================================== """ from typing import Dict, List, Any, Tuple import numpy import tor...
ARC-Solvers-main
arc_solvers/models/entailment/tree_attention.py
ARC-Solvers-main
arc_solvers/models/qa/__init__.py
ARC-Solvers-main
arc_solvers/models/qa/multi_choice/__init__.py
from allennlp.modules.matrix_attention import MatrixAttention
from typing import Dict, Optional, AnyStr, List, Any

import torch
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules imp...
ARC-Solvers-main
arc_solvers/models/qa/multi_choice/qa_multi_choice_max_att.py
ARC-Solvers-main
arc_solvers/training_config/qa/multi_choice/__init__.py
from allennlp.commands import main as main_allennlp

def main(prog: str = None) -> None:
    predictor_overrides = {
        "decomposable_attention": "decompatt",
        "tree_attention": "dgem",
        "bidaf": "bidaf_qa"
    }
    main_allennlp(prog, predictor_overrides=predictor_overrides)
ARC-Solvers-main
arc_solvers/commands/__init__.py
ARC-Solvers-main
arc_solvers/service/__init__.py
import logging

from allennlp.common.util import JsonDict, sanitize
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.models.model import Model
from allennlp.service.predictors.predictor import Predictor
from overrides import overrides

logg...
ARC-Solvers-main
arc_solvers/service/predictors/dgem_predictor.py
import logging
from operator import itemgetter
from typing import List

from allennlp.common.util import JsonDict, sanitize
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.models.model import Model
from allennlp.service.predictors.predicto...
ARC-Solvers-main
arc_solvers/service/predictors/bidaf_qa_predictor.py
from arc_solvers.service.predictors.decompatt_qa_predictor import DecompAttPredictor
from arc_solvers.service.predictors.dgem_predictor import DgemPredictor
from arc_solvers.service.predictors.bidaf_qa_predictor import BidafQaPredictor
ARC-Solvers-main
arc_solvers/service/predictors/__init__.py
import logging

from allennlp.common.util import JsonDict, sanitize
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.models.model import Model
from allennlp.service.predictors.predictor import Predictor
from overrides import overrides

logg...
ARC-Solvers-main
arc_solvers/service/predictors/decompatt_qa_predictor.py
from arc_solvers.modules.single_time_distributed import SingleTimeDistributed
ARC-Solvers-main
arc_solvers/modules/__init__.py
""" ===================================================================== Decomposable Graph Entailment Model code replicated from SciTail repo https://github.com/allenai/scitail ===================================================================== """ import torch class SingleTimeDistributed(torch.nn.Module): "...
ARC-Solvers-main
arc_solvers/modules/single_time_distributed.py
from arc_solvers.data.dataset_readers.arc_multichoice_json_reader import ArcMultiChoiceJsonReader
ARC-Solvers-main
arc_solvers/data/__init__.py
""" ===================================================================== Decomposable Graph Entailment Model code replicated from SciTail repo https://github.com/allenai/scitail ===================================================================== """ import logging from builtins import ValueError from typing import ...
ARC-Solvers-main
arc_solvers/data/dataset_readers/entailment_tuple_reader.py
from typing import Dict, List, Any
import json
import logging

from allennlp.data import Dataset
from overrides import overrides
from allennlp.common import Params
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import ...
ARC-Solvers-main
arc_solvers/data/dataset_readers/arc_multichoice_json_reader.py
from arc_solvers.data.dataset_readers.entailment_tuple_reader import EntailmentTupleReader
ARC-Solvers-main
arc_solvers/data/dataset_readers/__init__.py
#!/usr/bin/python3

# This script uses the Python Elasticsearch API to index a user-specified text corpus in an
# ElasticSearch cluster. The corpus is expected to be a text file with a sentence per line.
# Each sentence is indexed as a separate document, and per the mappings defined here, the
# Snowball Stemmer is used...
ARC-Solvers-main
scripts/index-corpus.py