python_code (string, 0–187k chars) | repo_name (string, 8–46 chars) | file_path (string, 6–135 chars) |
|---|---|---|
#!/usr/bin/env python
with open("AI-progress-metrics.html") as html_file:
html = html_file.read()
html = html.replace("https://cdnjs.cloudflare.com/ajax/libs/require.js/2.1.10/require.min.js",
"js/require-2.1.10.min.js")
html = html.replace("https://cdnjs.cloudflare.com/ajax/libs/jquery/2.0.3... | AI-metrics-master | sanitise.py |
from math import log
# Different metrics and measurements for progress are made on very different types of scales
# we have some helper functions to regularise these a little bit, so we can tell (for instance)
# whether progress on some metric appears to be accelerating or decelerating.
# Interface:
# ... | AI-metrics-master | scales.py |
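A hedged sketch of what such a scale-regularising interface could look like — the `Scale` class and the `error_rate`/`correct_percent` instances below are hypothetical illustrations, not the repo's actual `scales.py` API:

```python
from math import log

class Scale:
    """Hypothetical scale: maps raw scores onto a common 'higher is better' axis."""
    def __init__(self, name, to_canonical):
        self.name = name
        self.to_canonical = to_canonical  # raw score -> canonical goodness

    def improvement(self, old, new):
        """Positive if `new` is better than `old` on this scale."""
        return self.to_canonical(new) - self.to_canonical(old)

# Error rates: lower is better, so use negative log-error as the canonical axis.
error_rate = Scale("error rate", lambda e: -log(max(e, 1e-12)))
# Accuracy percentages: higher is already better.
correct_percent = Scale("percent correct", lambda p: p)

assert error_rate.improvement(0.10, 0.05) > 0       # halving the error is progress
assert correct_percent.improvement(90.0, 95.0) > 0
```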
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from collections import defaultdict
from math import log
import datetime
import json
import re
import sys
import traceback
from lxml.cssselect import CSSSelector
from matplotlib import pyplot as plt
from matplotlib import markers
#fr... | AI-metrics-master | taxonomy.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import print_function
from data.video_games import *
import re
# Machinery for importing both copy-and-pasted and (where necessary) OCR'd tables from various Atari research papers
# Copying and pasting tables from PDFs produces very weird results sometime... | AI-metrics-master | scrapers/atari.py |
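A hedged sketch of the kind of clean-up such table-import machinery might perform — the helper name `clean_cell` is hypothetical, not the repo's actual function:

```python
import re

def clean_cell(cell):
    """Best-effort repair of a value copy-pasted from a PDF table.

    Handles thousands separators, unicode minus signs, and the common
    OCR confusion of the letter O for the digit 0.
    """
    cell = cell.strip().replace(",", "").replace("\u2212", "-")
    if re.fullmatch(r"[0-9Oo.\-]+", cell):
        cell = re.sub(r"[Oo]", "0", cell)
    try:
        return float(cell)
    except ValueError:
        return cell  # leave non-numeric cells (e.g. game names) alone

assert clean_cell("10,780.5") == 10780.5
assert clean_cell("\u22122.3") == -2.3
assert clean_cell("Breakout") == "Breakout"
```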
AI-metrics-master | scrapers/__init__.py | |
#!/usr/bin/env ipython
import re
import os
import lxml
from lxml.cssselect import CSSSelector
import requests
from taxonomy import offline
from data.vision import *
# So we can pipe the output of this code somewhere
os.environ["PYTHONIOENCODING"] = "utf-8"
# Rodrigo Benenson's "Are We There Yet?" data!
reimport_a... | AI-metrics-master | scrapers/awty.py |
import os
import re
import string
import urllib2
from BeautifulSoup import BeautifulSoup
from dateutil import parser
import mistune
from data.acoustics import speech_recognition
from scales import error_rate # or error_percent?
from taxonomy import offline
# needs to be manually updated when more metrics are added
h... | AI-metrics-master | scrapers/wer.py |
# Absorb the data from https://arxiv.org/abs/1703.03864v1
# Copy and paste from Table 3:
table3 = """Game
Alien
Amidar
Assault
Asterix
Asteroids
Atlantis
Bank Heist
Battle Zone
Beam Rider
Berzerk
Bowling
Boxing
Breakout
Centipede
Chopper Command
Crazy Climber
Demon Attack
Double Dunk
Enduro
Fishing Derby
Freeway
Fros... | AI-metrics-master | scrapers/es.py |
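Pasting a PDF table column-by-column, as with `table3` above, yields one cell per line; a hedged sketch of stitching two such pasted columns back into rows (the `scores_text` values are hypothetical placeholders, not the paper's numbers):

```python
# One game name per line after the "Game" header; blank lines dropped.
games = [line.strip() for line in table3.strip().split("\n")[1:] if line.strip()]

# Hypothetical scores pasted as a second column, in the same row order.
scores_text = """994.0
112.0
1673.9"""
scores = [float(s) for s in scores_text.split("\n")]

# zip() pairs each game with its score and stops at the shorter column.
rows = dict(zip(games, scores))
assert rows["Alien"] == 994.0 and rows["Amidar"] == 112.0
```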
from taxonomy import Problem
from scales import *
import datetime
date = datetime.date
abstract_strategy_games = Problem("Abstract strategy games", ["agi", "abstract-games"])
playing_with_hints = Problem("Playing abstract games with extensive hints", ["abstract-games"], solved=True)
abstract_strategy_games.add_subpro... | AI-metrics-master | data/strategy_games.py |
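This row and the following data/ rows all drive the same small API from taxonomy.py. As a hedged, hypothetical reconstruction (the real Problem class also handles plotting and reporting), the calls they make look roughly like:

```python
class Problem:
    """Hypothetical minimal reconstruction of the taxonomy.py interface."""
    def __init__(self, name, attributes=(), solved=False):
        self.name = name
        self.attributes = list(attributes)
        self.solved = solved
        self.subproblems = []
        self.metrics = []

    def add_subproblem(self, subproblem):
        self.subproblems.append(subproblem)

    def metric(self, name, **kwargs):
        # The real metric objects also record dated measurements over time.
        m = {"name": name, **kwargs}
        self.metrics.append(m)
        return m

abstract_strategy_games = Problem("Abstract strategy games", ["agi", "abstract-games"])
playing_with_hints = Problem("Playing abstract games with extensive hints",
                             ["abstract-games"], solved=True)
abstract_strategy_games.add_subproblem(playing_with_hints)
```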
from taxonomy import Problem
from scales import *
import datetime
date = datetime.date
vision = Problem("Vision", ["agi", "vision", "world-modelling"])
image_comprehension = Problem("Image comprehension", ["agi", "vision", "language", "world-modelling"])
image_classification = Problem("Image classification", ["visio... | AI-metrics-master | data/vision.py |
"Data files for the AI Progress Measurement Notebook"
| AI-metrics-master | data/__init__.py |
# -*- coding: utf-8 -*-
"Vision data generated by scrapers/awty.py and then edited by hand"
from data.vision import *
# Data imported from Rodrigo Benenson's "Who is the Best at X / Are we there
# yet?" (https://rodrigob.github.io/are_we_there_yet/build/#about)
msrc21_pc = image_classification.metric("MSRC-21 image s... | AI-metrics-master | data/awty.py |
# The file was autogenerated by ../scrapers/wer.py
from datetime import date
from data.acoustics import speech_recognition, swb_hub_500
from scales import *
librispeech_WER_clean = speech_recognition.metric(name="librispeech WER testclean", scale=error_percent, target=5.83, target_source="http://arxiv.org/abs/1512.0... | AI-metrics-master | data/wer.py |
"Hand-entered acoustic data"
from taxonomy import Problem
from scales import *
import datetime
date = datetime.date
"""
http://melodi.ee.washington.edu/s3tp/
* * *
**_Word error rate on Switchboard (specify details): [Month, Year: Score [SWB]: Team]. Compiled by Jack Clark._**
A note about measurement: We'r... | AI-metrics-master | data/acoustics.py |
from taxonomy import Problem
from scales import *
import datetime
date = datetime.date
read_stem_papers = Problem("Read a scientific or technical paper, and comprehend its contents", ["language", "world-modelling", "super"])
# Getting some major results from an abstract, tables or conclusion is much easier than under... | AI-metrics-master | data/stem.py |
# -*- coding: utf-8 -*-
"Hand-entered data about written language problems"
from taxonomy import Problem
from scales import *
import datetime
date = datetime.date
modelling_english = Problem("Accurate modelling of human language.", ["language", "agi"])
ptperplexity = modelling_english.metric(name="Penn Treebank (Perpl... | AI-metrics-master | data/language.py |
"Hand-entered data about performance of generative models"
from taxonomy import Problem
from scales import *
import datetime
date = datetime.date
"""
* * *
**_Generative models of CIFAR-10 Natural Images _****[Year: bits-per-subpixel, method]. Compiled by Durk Kingma.**
**Why we care:**
(1) The compression=predictio... | AI-metrics-master | data/generative.py |
from taxonomy import Problem
from scales import *
import datetime
date = datetime.date
computer_games = Problem("Play real-time computer & video games", ["world-modelling", "realtime-games", "agi", "language"])
games_requiring_novel_language = Problem("Games that require inventing novel language, forms of speech, or ... | AI-metrics-master | data/video_games.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a cop... | commonsense-kg-completion-master | lm_finetuning/simple_lm_finetuning.py |
__author__ = "chaitanya" # partially borrowed from implemenation of ConvE
import torch
import math
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.nn.init import xavier_normal_
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.enabled = True
cl... | commonsense-kg-completion-master | src/decoder.py |
from src import reader_utils
import os
import sys
data_path = "data/atomic/"
filename = sys.argv[1]
with open(os.path.join(data_path, filename)) as f:
data = f.readlines()
edge_dict = {}
for inst in data:
inst = inst.strip()
if inst:
inst = inst.split('\t')
src, rel, tgt = inst
sr... | commonsense-kg-completion-master | src/preprocess_atomic.py |
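The loop above is cut off mid-statement; a hedged sketch of one plausible continuation — grouping targets under each (subject, relation) pair — where the grouping key is an assumption, not necessarily the real script's:

```python
from collections import defaultdict

edge_dict = defaultdict(set)
for inst in data:
    inst = inst.strip()
    if not inst:
        continue
    src, rel, tgt = inst.split("\t")
    edge_dict[(src, rel)].add(tgt)  # hypothetical grouping key
```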
import numpy as np
import torch
import math
import json
import logging
# import matplotlib
# matplotlib.use('Agg')
# import matplotlib.pyplot as plt
#######################################################################
# Utility functions for evaluation
#############################################################... | commonsense-kg-completion-master | src/evaluation_utils.py |
__author__ = "chaitanya"
from collections import defaultdict
class Graph:
def __init__(self, directed=True):
self.relations = defaultdict()
self.nodes = defaultdict()
self.node2id = {}
self.relation2id = {}
self.edges = {}
self.edgeCount = 0
self.directed ... | commonsense-kg-completion-master | src/graph.py |
# Main script for experimenting with training on a subgraph
from collections import Counter
import argparse
import numpy as np
import sys
import os
import json
import time
import random
import torch
import torch.nn as nn
from model import LinkPredictor
from reader import AtomicTSVReader, ConceptNetTSVReader, FB15kRea... | commonsense-kg-completion-master | src/run_kbc_subgraph.py |
__author__ = "chaitanya"
import torch
from torch.nn.init import xavier_normal_
import torch.nn as nn
import torch.nn.functional as F
from bert_feature_extractor import BertLayer
from decoder import DistMult, ConvE, ConvTransE, ConvKB
import layers
class LinkPredictor(nn.Module):
def __init__(self, num_nodes, nu... | commonsense-kg-completion-master | src/model.py |
__author__ = "chaitanya"
import logging as logger
from graph import Graph
import csv
import json
import os
import pandas as pd
import random
from sklearn.metrics.pairwise import cosine_similarity
from bert_feature_extractor import BertLayer
import numpy as np
class Reader:
def print_summary(self):
pri... | commonsense-kg-completion-master | src/reader.py |
# Main script for experimenting with training on full training graph in an epoch
import argparse
import numpy as np
np.random.seed(42)
import sys
import os
import json
import time
import collections
import torch
torch.manual_seed(42)
torch.cuda.manual_seed(42)
torch.backends.cudnn.deterministic = True
torch.backends.c... | commonsense-kg-completion-master | src/run_kbc_fullgraph_beaker.py |
import numpy as np
import torch
import dgl
import random
import itertools
from scipy.sparse import coo_matrix
torch.set_printoptions(profile="full")
def get_adj_and_degrees(num_nodes, num_rels, triplets):
""" Get adjacency list and degrees of the graph
"""
col = []
row = []
rel = []
adj_... | commonsense-kg-completion-master | src/utils.py |
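The function above is truncated after initialising its buffers; a hedged sketch of the general adjacency/degree computation its docstring describes (not necessarily the repo's exact logic):

```python
import numpy as np

def get_adj_and_degrees(num_nodes, num_rels, triplets):
    """Get adjacency list and degrees of the graph.

    Hypothetical completion: record each edge id on both endpoints, then
    count incident edges per node. (num_rels kept only for signature parity.)
    """
    adj_list = [[] for _ in range(num_nodes)]
    for edge_id, (src, rel, dst) in enumerate(triplets):
        adj_list[src].append(edge_id)
        adj_list[dst].append(edge_id)
    degrees = np.array([len(edges) for edges in adj_list])
    return adj_list, degrees

adj, deg = get_adj_and_degrees(3, 1, [(0, 0, 1), (1, 0, 2)])
assert list(deg) == [1, 2, 1]
```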
# Main script for experimenting with training on full training graph in an epoch
import argparse
import numpy as np
import sys
import os
import time
import torch
torch.manual_seed(42)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
import torch.nn as nn
import random
random.seed(42)
f... | commonsense-kg-completion-master | src/run_kbc_fullgraph.py |
__author__ = "chaitanya" # Adapted from DGL official examples
import torch
import torch.nn as nn
from torch.nn.parallel import data_parallel
import numpy as np
import dgl.function as fn
from torch.nn.init import xavier_normal_, xavier_uniform_
from torch.nn import functional as F, Parameter
class RGCNLayer(nn.Modu... | commonsense-kg-completion-master | src/layers.py |
__author__ = "chaitanya"
import torch
import numpy as np
import string
def create_word_vocab(network):
word_vocab = {}
word_freqs = {}
word_vocab["PAD"] = len(word_vocab)
for node in network.graph.iter_nodes():
for word in node.name.split():
word = word.lower()
if wo... | commonsense-kg-completion-master | src/reader_utils.py |
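The vocabulary loop above is truncated; a hedged sketch of the standard pattern it follows — assign the next free id to each unseen lower-cased word while counting frequencies (here over a plain iterable of names rather than the repo's `network.graph.iter_nodes()`):

```python
def create_word_vocab(names):
    """Build word->id and word->frequency maps from an iterable of node names."""
    word_vocab = {"PAD": 0}
    word_freqs = {}
    for name in names:
        for word in name.lower().split():
            if word not in word_vocab:
                word_vocab[word] = len(word_vocab)
            word_freqs[word] = word_freqs.get(word, 0) + 1
    return word_vocab, word_freqs

vocab, freqs = create_word_vocab(["red car", "red bus"])
assert vocab == {"PAD": 0, "red": 1, "car": 2, "bus": 3}
assert freqs["red"] == 2
```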
__author__ = "chaitanya" # Adapted from HuggingFace implementation
from transformers import BertTokenizer, BertModel, BertForMaskedLM
import os
import re
import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader, SequentialSampler
import numpy as np
"""
Feature Extractor for BERT
""... | commonsense-kg-completion-master | src/bert_feature_extractor.py |
import pandas as pd
import numpy as np
#----------------------------------------------------------
# get stats for ACL Demo track submission
#----------------------------------------------------------
df1 = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/ACCoRD/1-sentence-annotations copy.csv")
df2 = pd.read_cs... | ACCoRD-main | corpus/accord-stats.py |
from re import A
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from statsmodels.stats import inter_rater as irr
import numpy as np
from statistics import mean
df_expertise_ratings = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/user-study/demo-evaluation/expertise-ratings.csv")
df_m... | ACCoRD-main | system/user-study/analyze-study-responses.py |
from os import rename
import pandas as pd
import re
def renameDfColumns(version):
df = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/user-study/demo-evaluation/study-responses-%s.csv" % version)
# iterate over remaining columns and rename
for label, content in df.iteritems():
if label == "... | ACCoRD-main | system/user-study/aggregate-study-responses.py |
import os
import json
import pandas as pd
import csv
# USE THIS SCRIPT ON SERVER 1 TO ACCESS LOCAL COPY OF S2ORC
# filter papers using metadata values
def getValidPaperIDs(batch_id):
all_valid_ids = []
count = 0
with open(f'/disk2/s2orc/20200705v1/full/metadata//metadata_{batch_id}.jsonl') as f_meta:
... | ACCoRD-main | system/resources/s2orc/get-cs-papers.py |
import os
import json
# feel free to wrap this into a larger loop for batches 0~99
BATCH_ID = 0
# create a lookup for the pdf parse based on paper ID
paper_id_to_pdf_parse = {}
with open('./pdfparse.jsonl') as f_pdf:
for line in f_pdf:
pdf_parse_dict = json.loads(line)
paper_id_to_pdf_parse[pdf_pa... | ACCoRD-main | system/resources/s2orc/create-dataset.py |
import math
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
import transformers
import matplotlib.pyplot as plt
from transformers import *
import matplotlib.ticker as mticker
import spacy
im... | ACCoRD-main | system/resources/forecite/locate-forecite-concepts.py |
import pandas as pd
import pickle
import matplotlib.pyplot as plt
import collections
# import seaborn as sns
#----------------------------------------------------------
# get count of concepts in output
#----------------------------------------------------------
# count number of each concept in my extracted sentence... | ACCoRD-main | system/resources/forecite/get-forecite-stats.py |
import os
import json
import pandas as pd
import pickle
concept_dict = {}
# for files in the data directory
count = 1
for filename in os.listdir("/net/nfs2.s2-research/soniam/concept-rel/resources/forecite/noun-phrase-scores-top-papers/"):
print("getting concepts for file %d" % count)
if filename.endswith(".js... | ACCoRD-main | system/resources/forecite/get-forecite-concepts.py |
import pickle
import matplotlib.pyplot as plt
import pandas as pd
import spacy
import re
from collections import Counter
import random
import sys
#----------------------------------------------------------
# process ForeCite concepts
#----------------------------------------------------------
# unpickle concept dicti... | ACCoRD-main | system/resources/forecite/get-sentences-topn-concepts.py |
import pickle
import pandas as pd
from operator import itemgetter
import numpy as np
import openai
from rouge_score import rouge_scorer
import spacy
import re
#--------------------------------------------------------
openai.api_key = "sk-NzQrkRfqE5lnJPubH7faej1ZcDuz0s40qCkTTeFt"
pd.set_option('display.max_colwidth', ... | ACCoRD-main | system/code/selection/rank-concept-gpt-formatted-statements.py |
import pandas as pd
import random
#------------------------------------------------------------------------------------------------
# filter output from top 150 NLP concepts to only those concepts with 3+ descriptions
# format demo data to only include [concept B] [elaboration]
# INPUT = ../ranked-filtered/nlp-concept... | ACCoRD-main | system/code/selection/filter-format-demo-data.py |
import pandas as pd
import ast
import numpy as np
experiment = "best-params-all-s2orc"
df_preds = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/scibert-multilabel-classification/run-best-model/predictions/scibert-weightedBCE-cls/%s/seed=1-epochs=10-lr=0.000020-bs=32-%s.csv" % (experiment, experiment))
df_trai... | ACCoRD-main | system/code/extraction/rank-multilabel-predictions.py |
# Hyperparameter search for scibert model
# Code borrowed from:
# https://colab.research.google.com/drive/14Ea4lIzsn5EFvPpYKtWStXEByT9qmbkj?usp=sharing#scrollTo=qAYbKDu4UR6M
# https://github.com/pnageshkar/NLP/blob/master/Medium/Multi_label_Classification_BERT_Lightning.ipynb
# https://pytorch-lightning.readthedocs.io/... | ACCoRD-main | system/code/extraction/run-scibert-binary-classifier.py |
# Hyperparameter search for scibert model
# Code borrowed from:
# https://colab.research.google.com/drive/14Ea4lIzsn5EFvPpYKtWStXEByT9qmbkj?usp=sharing#scrollTo=qAYbKDu4UR6M
# https://github.com/pnageshkar/NLP/blob/master/Medium/Multi_label_Classification_BERT_Lightning.ipynb
# https://pytorch-lightning.readthedocs.io/... | ACCoRD-main | system/code/extraction/run-scibert-multilabel-classifier.py |
import pickle
import pandas as pd
from operator import itemgetter
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from os.path import exists
#--------------------------------------------------------
# unpickle concept dictionary
with open('/net/nfs2.s2-research/soniam/concept-rel/resources/for... | ACCoRD-main | system/code/generation/analyze-description-diversity.py |
import pickle
import pandas as pd
from operator import itemgetter
import numpy as np
import openai
from rouge_score import rouge_scorer
import spacy
import re
import seaborn as sns
import matplotlib.pyplot as plt
#--------------------------------------------------------
openai.api_key = ""
# 4class: ['compare' 'is-a'... | ACCoRD-main | system/code/generation/get-concept-gpt-formatted-statements.py |
import numpy as np
import pandas as pd
import spacy
import random
from sklearn.model_selection import GroupShuffleSplit
import ast
import re
setting = "union-1sentence-both2sentence"
# load test set
df = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/annotations-round2/%s/union-multilabel-data-withformattedst... | ACCoRD-main | system/inputs/format-lightning-transformers-input.py |
import numpy as np
import pandas as pd
import spacy
import random
from sklearn.model_selection import GroupShuffleSplit
import ast
import re
import os
#----------------------------------------------------------
# aggregate all batches' 2-sentence, 1-concept instances
#-------------------------------------------------... | ACCoRD-main | system/inputs/aggregate-data-batches.py |
import pandas as pd
from ast import literal_eval
#--------------------------------------------------------
# AGGREGATE SCIBERT MULTILABEL AND BINARY PREDICTIONS
#--------------------------------------------------------
# get df of all positive predictions from scibert
df_binary_positive = pd.read_csv("/net/nfs2.s2-res... | ACCoRD-main | system/inputs/aggregate-scibert-multilabel-binary-predictions.py |
import math
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
import transformers
import matplotlib.pyplot as plt
from transformers import *
import matplotlib.ticker as mticker
import spacy
im... | ACCoRD-main | system/inputs/demarcate-unlabeled-data.py |
import pickle
import matplotlib.pyplot as plt
import pandas as pd
from collections import Counter
from operator import itemgetter
import numpy as np
import re
# load final df of sentences to select concept-specific data from
df = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/inputs/sc... | ACCoRD-main | system/inputs/get-top-n-forecite-concept-source-sentences.py |
import logging
import random
import sys
import pytest
from beaker import Beaker
logger = logging.getLogger(__name__)
def _get_unique_name() -> str:
from gantry.util import unique_name
return unique_name()
@pytest.fixture
def run_name() -> str:
return _get_unique_name()
@pytest.fixture
def workspace... | beaker-gantry-main | conftest.py |
beaker-gantry-main | tests/__init__.py | |
import pytest
from gantry.exceptions import InvalidRemoteError
from gantry.util import parse_git_remote_url
def test_parse_git_remote_url_ssh():
assert parse_git_remote_url("git@github.com:allenai/beaker-gantry.git") == (
"allenai",
"beaker-gantry",
)
def test_parse_git_remote_url_https():
... | beaker-gantry-main | tests/util_test.py |
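The tests above pin down the contract of `parse_git_remote_url`: both the SSH and HTTPS GitHub remote forms should yield an `(account, repo)` pair. A minimal sketch of a parser satisfying those tests (not gantry's actual implementation, which raises `InvalidRemoteError`):

```python
import re

def parse_git_remote_url(url):
    """Extract (account, repo) from a GitHub remote URL (SSH or HTTPS form)."""
    match = re.match(
        r"(?:git@github\.com:|https://github\.com/)([^/]+)/(.+?)(?:\.git)?$", url
    )
    if match is None:
        raise ValueError(f"not a recognised GitHub remote: {url}")
    return match.group(1), match.group(2)

assert parse_git_remote_url("git@github.com:allenai/beaker-gantry.git") == (
    "allenai", "beaker-gantry",
)
assert parse_git_remote_url("https://github.com/allenai/beaker-gantry") == (
    "allenai", "beaker-gantry",
)
```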
import subprocess
from gantry.version import VERSION
def test_help():
result = subprocess.run(["gantry", "--help"])
assert result.returncode == 0
def test_version():
result = subprocess.run(["gantry", "--version"], capture_output=True, text=True)
assert result.returncode == 0
assert VERSION in ... | beaker-gantry-main | tests/main_test.py |
import json
from gantry import METRICS_FILE
def main():
with open(METRICS_FILE, "w") as f:
json.dump({"loss": 0.1, "accuracy": 0.95}, f)
print(f"\N{check mark} Done! Metrics written to {METRICS_FILE}")
if __name__ == "__main__":
main()
| beaker-gantry-main | examples/metrics/run.py |
from datetime import datetime
from pathlib import Path
from gantry.version import VERSION
def main():
changelog = Path("CHANGELOG.md")
with changelog.open() as f:
lines = f.readlines()
insert_index: int = -1
for i in range(len(lines)):
line = lines[i]
if line.startswith("## ... | beaker-gantry-main | scripts/prepare_changelog.py |
# encoding: utf-8
"""
Prepares markdown release notes for GitHub releases.
"""
import os
from typing import List, Optional
import packaging.version
TAG = os.environ["TAG"]
ADDED_HEADER = "### Added 🎉"
CHANGED_HEADER = "### Changed ⚠️"
FIXED_HEADER = "### Fixed ✅"
REMOVED_HEADER = "### Removed 👋"
def get_change... | beaker-gantry-main | scripts/release_notes.py |
_MAJOR = "0"
_MINOR = "19"
_PATCH = "0"
_SUFFIX = ""
VERSION_SHORT = "{0}.{1}".format(_MAJOR, _MINOR)
VERSION = "{0}.{1}.{2}{3}".format(_MAJOR, _MINOR, _PATCH, _SUFFIX)
| beaker-gantry-main | gantry/version.py |
import platform
import tempfile
import time
from datetime import datetime
from pathlib import Path
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union, cast
import requests
import rich
from beaker import (
Beaker,
Dataset,
DatasetConflict,
DatasetNotFound,
Digest,
Experimen... | beaker-gantry-main | gantry/util.py |
DEFAULT_IMAGE = "ai2/conda"
ENTRYPOINT = "entrypoint.sh"
GITHUB_TOKEN_SECRET = "GITHUB_TOKEN"
CONDA_ENV_FILE = "environment.yml"
CONDA_ENV_FILE_ALTERNATE = "environment.yaml"
PIP_REQUIREMENTS_FILE = "requirements.txt"
RUNTIME_DIR = "/gantry-runtime"
RESULTS_DIR = "/results"
METRICS_FILE = f"{RESULTS_DIR}/metric... | beaker-gantry-main | gantry/constants.py |
from .constants import METRICS_FILE, RESULTS_DIR
__all__ = ["METRICS_FILE", "RESULTS_DIR"]
| beaker-gantry-main | gantry/__init__.py |
from os import PathLike
from typing import Union
PathOrStr = Union[PathLike, str]
| beaker-gantry-main | gantry/aliases.py |
class GantryError(Exception):
"""
Base exception for all error types that Gantry might raise.
"""
class GitError(GantryError):
pass
class DirtyRepoError(GitError):
pass
class InvalidRemoteError(GitError):
pass
class ConfigurationError(GantryError):
pass
class ExperimentFailedError(... | beaker-gantry-main | gantry/exceptions.py |
import os
import signal
import sys
from fnmatch import fnmatch
from pathlib import Path
from typing import Optional, Tuple
import click
import rich
from beaker import (
Beaker,
ImageNotFound,
Job,
JobTimeoutError,
Priority,
SecretNotFound,
TaskResources,
)
from click_help_colors import Help... | beaker-gantry-main | gantry/__main__.py |
from allennlp.common.testing import ModelTestCase
class TestSimpleClassifier(ModelTestCase):
def test_model_can_train(self):
# This built-in test makes sure that your data can load, that it gets passed to the model
# correctly, that your model computes a loss in a way that we can get gradients fro... | allennlp-template-config-files-master | tests/test_model.py |
allennlp-template-config-files-master | tests/__init__.py | |
from my_project.dataset_reader import ClassificationTsvReader
class TestTextClassificationJsonReader:
def test_read_from_file_ag_news_corpus_and_truncates_properly(self):
reader = ClassificationTsvReader()
data_path = "tests/fixtures/toy_data.tsv"
instances = list(reader.read(data_path))
... | allennlp-template-config-files-master | tests/test_dataset_reader.py |
# These imports are important for making the configuration files find the classes that you wrote.
# If you don't have these, you'll get errors about allennlp not being able to find
# "simple_classifier", or whatever name you registered your model with. These imports and the
# contents of .allennlp_plugins makes it so ... | allennlp-template-config-files-master | my_project/__init__.py |
from typing import Dict
import torch
from allennlp.data import Vocabulary, TextFieldTensors
from allennlp.models import Model
from allennlp.modules import TextFieldEmbedder, Seq2VecEncoder
from allennlp.nn import util
from allennlp.training.metrics import CategoricalAccuracy
@Model.register("simple_classifier")
clas... | allennlp-template-config-files-master | my_project/model.py |
from typing import Dict, Iterable
from allennlp.data import DatasetReader, Instance, Field
from allennlp.data.fields import LabelField, TextField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Tokenizer, WhitespaceTokenizer
@DatasetReader.register("cl... | allennlp-template-config-files-master | my_project/dataset_reader.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import io
import unittest
import torch
from models.matcher import HungarianMatcher
from models.position_encoding import PositionEmbeddingSine, PositionEmbeddingLearned
from models.backbone import Backbone, Joiner, BackboneBase
from util import box... | detr-master | test_all.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
A script to run multinode training with submitit.
"""
import argparse
import os
import uuid
from pathlib import Path
import main as detection
import submitit
def parse_args():
detection_parser = detection.get_args_parser()
parser = ar... | detr-master | run_with_submitit.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Train and eval functions used in main.py
"""
import math
import os
import sys
from typing import Iterable
import torch
import util.misc as utils
from datasets.coco_eval import CocoEvaluator
from datasets.panoptic_eval import PanopticEvaluator
... | detr-master | engine.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
from models.backbone import Backbone, Joiner
from models.detr import DETR, PostProcess
from models.position_encoding import PositionEmbeddingSine
from models.segmentation import DETRsegm, PostProcessPanoptic
from models.transformer imp... | detr-master | hubconf.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import datetime
import json
import random
import time
from pathlib import Path
import numpy as np
import torch
from torch.utils.data import DataLoader, DistributedSampler
import datasets
import util.misc as utils
from datasets impo... | detr-master | main.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Misc functions, including distributed helpers.
Mostly copy-paste from torchvision references.
"""
import os
import subprocess
import time
from collections import defaultdict, deque
import datetime
import pickle
from typing import Optional, List... | detr-master | util/misc.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Utilities for bounding box manipulation and GIoU.
"""
import torch
from torchvision.ops.boxes import box_area
def box_cxcywh_to_xyxy(x):
x_c, y_c, w, h = x.unbind(-1)
b = [(x_c - 0.5 * w), (y_c - 0.5 * h),
(x_c + 0.5 * w), (y_... | detr-master | util/box_ops.py |
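The conversion above is truncated mid-expression; the standard centre-format to corner-format transform it implements reads, in full (consistent with the visible lines, though the trailing lines are an inference):

```python
import torch

def box_cxcywh_to_xyxy(x):
    # (cx, cy, w, h) -> (x_min, y_min, x_max, y_max)
    x_c, y_c, w, h = x.unbind(-1)
    b = [(x_c - 0.5 * w), (y_c - 0.5 * h),
         (x_c + 0.5 * w), (y_c + 0.5 * h)]
    return torch.stack(b, dim=-1)
```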
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
| detr-master | util/__init__.py |
"""
Plotting utilities to visualize training logs.
"""
import torch
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from pathlib import Path, PurePath
def plot_logs(logs, fields=('class_error', 'loss_bbox_unscaled', 'mAP'), ewm_col=0, log_name='log.txt'):
'''
Function to plot specif... | detr-master | util/plot_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
COCO dataset which returns image_id for evaluation.
Mostly copy-paste from https://github.com/pytorch/vision/blob/13b35ff/references/detection/coco_utils.py
"""
from pathlib import Path
import torch
import torch.utils.data
import torchvision
f... | detr-master | datasets/coco.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Transforms and data augmentation for both image + bbox.
"""
import random
import PIL
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as F
from util.box_ops import box_xyxy_to_cxcywh
from util.misc impor... | detr-master | datasets/transforms.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import json
import os
import util.misc as utils
try:
from panopticapi.evaluation import pq_compute
except ImportError:
pass
class PanopticEvaluator(object):
def __init__(self, ann_file, ann_folder, output_dir="panoptic_eval"):
... | detr-master | datasets/panoptic_eval.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch.utils.data
import torchvision
from .coco import build as build_coco
def get_coco_api_from_dataset(dataset):
for _ in range(10):
# if isinstance(dataset, torchvision.datasets.CocoDetection):
# break
if ... | detr-master | datasets/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
COCO evaluator that works in distributed mode.
Mostly copy-paste from https://github.com/pytorch/vision/blob/edfd5a7/references/detection/coco_eval.py
The difference is that there is less copy-pasting from pycocotools
in the end of the file, as... | detr-master | datasets/coco_eval.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import json
from pathlib import Path
import numpy as np
import torch
from PIL import Image
from panopticapi.utils import rgb2id
from util.box_ops import masks_to_boxes
from .coco import make_coco_transforms
class CocoPanoptic:
def __init__(... | detr-master | datasets/coco_panoptic.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
DETR model and criterion classes.
"""
import torch
import torch.nn.functional as F
from torch import nn
from util import box_ops
from util.misc import (NestedTensor, nested_tensor_from_tensor_list,
accuracy, get_world_siz... | detr-master | models/detr.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Modules to compute the matching cost and solve the corresponding LSAP.
"""
import torch
from scipy.optimize import linear_sum_assignment
from torch import nn
from util.box_ops import box_cxcywh_to_xyxy, generalized_box_iou
class HungarianMatc... | detr-master | models/matcher.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Backbone modules.
"""
from collections import OrderedDict
import torch
import torch.nn.functional as F
import torchvision
from torch import nn
from torchvision.models._utils import IntermediateLayerGetter
from typing import Dict, List
from uti... | detr-master | models/backbone.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Various positional encodings for the transformer.
"""
import math
import torch
from torch import nn
from util.misc import NestedTensor
class PositionEmbeddingSine(nn.Module):
"""
This is a more standard version of the position embeddi... | detr-master | models/position_encoding.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .detr import build
def build_model(args):
return build(args)
| detr-master | models/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
DETR Transformer class.
Copy-paste from torch.nn.Transformer with modifications:
* positional encodings are passed in MHattention
* extra LN at the end of encoder is removed
* decoder returns a stack of activations from all decoding... | detr-master | models/transformer.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
This file provides the definition of the convolutional heads used to predict masks, as well as the losses
"""
import io
from collections import defaultdict
from typing import List, Optional
import torch
import torch.nn as nn
import torch.nn.fun... | detr-master | models/segmentation.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Helper script to convert models trained with the main version of DETR to be used with the Detectron2 version.
"""
import json
import argparse
import numpy as np
import torch
def parse_args():
parser = argparse.ArgumentParser("D2 model con... | detr-master | d2/converter.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
DETR Training Script.
This script is a simplified version of the training script in detectron2/tools.
"""
import os
import sys
# fmt: off
sys.path.insert(1, os.path.join(sys.path[0], '..'))
# fmt: on
import time
from typing import Any, Dict, ... | detr-master | d2/train_net.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import math
from typing import List
import numpy as np
import torch
import torch.distributed as dist
import torch.nn.functional as F
from scipy.optimize import linear_sum_assignment
from torch import nn
from detectron2.layers import... | detr-master | d2/detr/detr.py |
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from detectron2.config import CfgNode as CN
def add_detr_config(cfg):
"""
Add config for DETR.
"""
cfg.MODEL.DETR = CN()
cfg.MODEL.DETR.NUM_CLASSES = 80
# LOSS
cfg.MODEL.DETR.GIOU_WEIGHT = 2.0
... | detr-master | d2/detr/config.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .config import add_detr_config
from .detr import Detr
from .dataset_mapper import DetrDatasetMapper
| detr-master | d2/detr/__init__.py |