| python_code | repo_name | file_path |
|---|---|---|
import nltk
import numpy as np
def _set_span(t, i):
    # Recursively annotate every node of an nltk Tree with its (start, end)
    # token span, where i is the index of the subtree's first token.
    if isinstance(t[0], str):
t.span = (i, i+len(t))
else:
first = True
for c in t:
cur_span = _set_span(c, i)
i = cur_span[1]
if first:
min_ = cur_span[0]
first = False
... | bi-att-flow-master | my/nltk_utils.py |
from tensorflow.python.ops.rnn_cell import _linear
from tensorflow.python.util import nest
import tensorflow as tf
from my.tensorflow import flatten, reconstruct, add_wd, exp_mask
def linear(args, output_size, bias, bias_start=0.0, scope=None, squeeze=False, wd=0.0, input_keep_prob=1.0,
is_train=None):
... | bi-att-flow-master | my/tensorflow/nn.py |
import tensorflow as tf
from tensorflow.python.ops.rnn_cell import DropoutWrapper, RNNCell, LSTMStateTuple
from my.tensorflow import exp_mask, flatten
from my.tensorflow.nn import linear, softsel, double_linear_logits
class SwitchableDropoutWrapper(DropoutWrapper):
def __init__(self, cell, is_train, input_keep_p... | bi-att-flow-master | my/tensorflow/rnn_cell.py |
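`SwitchableDropoutWrapper` extends TensorFlow's `DropoutWrapper` so dropout can be toggled by an `is_train` tensor at execution time rather than at graph-construction time. A minimal sketch of that idea (TF1-style API; not the repo's exact wrapper):

```python
import tensorflow as tf

def switchable_dropout(x, is_train, keep_prob=0.8):
    # `is_train` is a scalar boolean tensor: dropout is applied only when it
    # evaluates to True, so a single graph serves both training and eval.
    return tf.cond(is_train,
                   lambda: tf.nn.dropout(x, keep_prob=keep_prob),
                   lambda: x)
```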
from my.tensorflow.general import * | bi-att-flow-master | my/tensorflow/__init__.py |
import tensorflow as tf
from tensorflow.python.ops.rnn import dynamic_rnn as _dynamic_rnn, \
bidirectional_dynamic_rnn as _bidirectional_dynamic_rnn
from tensorflow.python.ops.rnn import bidirectional_rnn as _bidirectional_rnn
from my.tensorflow import flatten, reconstruct
def dynamic_rnn(cell, inputs, sequence_... | bi-att-flow-master | my/tensorflow/rnn.py |
from itertools import zip_longest
import tensorflow as tf
from functools import reduce
from operator import mul
import numpy as np
VERY_BIG_NUMBER = 1e30
VERY_SMALL_NUMBER = 1e-30
VERY_POSITIVE_NUMBER = VERY_BIG_NUMBER
VERY_NEGATIVE_NUMBER = -VERY_BIG_NUMBER
def get_initializer(matrix):
def _initializer(shape, ... | bi-att-flow-master | my/tensorflow/general.py |
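The `VERY_NEGATIVE_NUMBER` constant exists to support the `exp_mask` helper imported throughout this repo: adding a huge negative value to masked-out logits makes a following softmax assign them effectively zero probability. A sketch of that masking trick (the exact signature in `general.py` may differ):

```python
import tensorflow as tf

VERY_BIG_NUMBER = 1e30
VERY_NEGATIVE_NUMBER = -VERY_BIG_NUMBER

def exp_mask(val, mask):
    # Positions where mask == 0 receive a -1e30 penalty, so that
    # tf.nn.softmax(exp_mask(val, mask)) puts ~0 weight on them.
    return val + (1 - tf.cast(mask, 'float')) * VERY_NEGATIVE_NUMBER
```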
import gzip
import json
from json import encoder
import os
import tensorflow as tf
from basic_cnn.evaluator import Evaluation, F1Evaluation
from my.utils import short_floats
import pickle
class GraphHandler(object):
def __init__(self, config):
self.config = config
self.saver = tf.train.Saver(ma... | bi-att-flow-master | basic_cnn/graph_handler.py |
| bi-att-flow-master | basic_cnn/__init__.py |
import random
import itertools
import numpy as np
import tensorflow as tf
from tensorflow.python.ops.rnn_cell import BasicLSTMCell, GRUCell
from basic_cnn.read_data import DataSet
from basic_cnn.superhighway import SHCell
from my.tensorflow import exp_mask, get_initializer, VERY_SMALL_NUMBER
from my.tensorflow.nn imp... | bi-att-flow-master | basic_cnn/model.py |
import os
import tensorflow as tf
from basic_cnn.main import main as m
flags = tf.app.flags
flags.DEFINE_string("model_name", "basic_cnn", "Model name [basic]")
flags.DEFINE_string("data_dir", "data/cnn", "Data dir [data/cnn]")
flags.DEFINE_string("root_dir", "/Users/minjoons/data/cnn/questions", "root dir [~/data/... | bi-att-flow-master | basic_cnn/cli.py |
import json
import os
import random
import itertools
import math
from collections import defaultdict
import numpy as np
from cnn_dm.prepro import para2sents
from my.tensorflow import grouper
from my.utils import index
class Data(object):
def get_size(self):
raise NotImplementedError()
def get_by_id... | bi-att-flow-master | basic_cnn/read_data.py |
import tensorflow as tf
from basic_cnn.model import Model
from my.tensorflow import average_gradients
class Trainer(object):
def __init__(self, config, model):
assert isinstance(model, Model)
self.config = config
self.model = model
self.opt = tf.train.AdadeltaOptimizer(config.init... | bi-att-flow-master | basic_cnn/trainer.py |
import argparse
import json
import math
import os
import shutil
from pprint import pprint
import tensorflow as tf
from tqdm import tqdm
import numpy as np
from basic_cnn.evaluator import F1Evaluator, Evaluator, ForwardEvaluator, MultiGPUF1Evaluator, CNNAccuracyEvaluator, \
MultiGPUCNNAccuracyEvaluator
from basic_... | bi-att-flow-master | basic_cnn/main.py |
import itertools
from collections import defaultdict
import numpy as np
import tensorflow as tf
import os
from basic_cnn.read_data import DataSet
from my.nltk_utils import span_f1
from my.tensorflow import padded_reshape
from my.utils import argmax
class Evaluation(object):
def __init__(self, data_type, global_... | bi-att-flow-master | basic_cnn/evaluator.py |
import tensorflow as tf
from tensorflow.python.ops.rnn_cell import RNNCell
from my.tensorflow.nn import linear
class SHCell(RNNCell):
"""
Super-Highway Cell
"""
def __init__(self, input_size, logit_func='tri_linear', scalar=False):
self._state_size = input_size
self._output_size = inp... | bi-att-flow-master | basic_cnn/superhighway.py |
import shutil
from collections import OrderedDict
import http.server
import socketserver
import argparse
import json
import os
import numpy as np
from tqdm import tqdm
from jinja2 import Environment, FileSystemLoader
from basic_cnn.evaluator import get_span_score_pairs, get_best_span
def bool_(string):
if strin... | bi-att-flow-master | basic_cnn/visualizer.py |
import pytest
@pytest.fixture(autouse=True)
def doctest_fixtures(
doctest_namespace,
tmp_path,
):
doctest_namespace["cache_dir"] = tmp_path
| cached_path-main | conftest.py |
from setuptools import find_packages, setup
def read_requirements(filename: str):
with open(filename) as requirements_file:
import re
def fix_url_dependencies(req: str) -> str:
"""Pip and setuptools disagree about how URL dependencies should be handled."""
m = re.match(
... | cached_path-main | setup.py |
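The docstring in `fix_url_dependencies` points at a real packaging wrinkle: pip accepts bare VCS URLs in requirements files, while setuptools' `install_requires` wants PEP 508 `name @ url` syntax. A hedged sketch of such a rewrite (the regex and behavior here are illustrative assumptions, not the repo's exact code):

```python
import re

def fix_url_dependencies(req: str) -> str:
    """Rewrite 'git+https://github.com/org/pkg.git' into the PEP 508 form
    'pkg @ git+https://github.com/org/pkg.git' that setuptools accepts."""
    m = re.match(r"^git\+https://github\.com/[^/]+/(?P<name>[^.@/]+)(\.git)?(@.*)?$", req)
    if m is None:
        return req  # already a plain 'name==version' style requirement
    return f"{m.group('name')} @ {req}"
```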
import os
import pytest
from cached_path.cache_file import CacheFile
from cached_path.testing import BaseTestClass
class TestCacheFile(BaseTestClass):
def test_temp_file_removed_on_error(self):
cache_filename = self.TEST_DIR / "cache_file"
with pytest.raises(IOError, match="I made this up"):
... | cached_path-main | tests/cache_file_test.py |
import os
import pytest
from filelock import Timeout
from cached_path.file_lock import FileLock
from cached_path.testing import BaseTestClass
class TestFileLock(BaseTestClass):
def setup_method(self):
super().setup_method()
# Set up a regular lock and a read-only lock.
open(self.TEST_DI... | cached_path-main | tests/file_lock_test.py |
from cached_path import common
from cached_path.testing import BaseTestClass
class TestSetCacheDir(BaseTestClass):
def setup_method(self):
super().setup_method()
self.initial_value = common.CACHE_DIRECTORY
def test_toggle_ffl(self):
common.set_cache_dir(self.TEST_DIR / "foo")
... | cached_path-main | tests/common_test.py |
| cached_path-main | tests/__init__.py |
import json
import os
import pathlib
import pytest
from cached_path.testing import BaseTestClass
from cached_path.util import filename_to_url, resource_to_filename
class TestUtils(BaseTestClass):
def test_resource_to_filename(self):
for url in [
"http://allenai.org",
"http://alle... | cached_path-main | tests/util_test.py |
import shutil
import tempfile
import time
from collections import Counter
from pathlib import Path
import pytest
import responses
from flaky import flaky
from requests.exceptions import ConnectionError, HTTPError
from cached_path._cached_path import cached_path, get_from_cache
from cached_path.meta import Meta
from c... | cached_path-main | tests/cached_path_test.py |
from cached_path.schemes import (
SchemeClient,
add_scheme_client,
get_scheme_client,
get_supported_schemes,
)
from cached_path.util import is_url_or_existing_file
def test_supported_schemes():
assert "hf" in get_supported_schemes()
class CustomSchemeClient(SchemeClient):
scheme = "foo"
... | cached_path-main | tests/schemes_test.py |
import pytest
from cached_path.schemes.gs import GsClient
def test_split_gcs_path():
# Test splitting good urls.
assert GsClient.split_gcs_path("gs://my-bucket/subdir/file.txt") == (
"my-bucket",
"subdir/file.txt",
)
assert GsClient.split_gcs_path("gs://my-bucket/file.txt") == ("my-bu... | cached_path-main | tests/schemes/gs_test.py |
| cached_path-main | tests/schemes/__init__.py |
import pytest
from cached_path.schemes.s3 import S3Client
def test_split_s3_path():
# Test splitting good urls.
assert S3Client.split_s3_path("s3://my-bucket/subdir/file.txt") == (
"my-bucket",
"subdir/file.txt",
)
assert S3Client.split_s3_path("s3://my-bucket/file.txt") == ("my-bucke... | cached_path-main | tests/schemes/s3_test.py |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
import ... | cached_path-main | docs/source/conf.py |
from datetime import datetime
from pathlib import Path
from cached_path.version import VERSION
def main():
changelog = Path("CHANGELOG.md")
with changelog.open() as f:
lines = f.readlines()
insert_index: int
for i in range(len(lines)):
line = lines[i]
if line.startswith("## ... | cached_path-main | scripts/prepare_changelog.py |
# encoding: utf-8
"""
Prepares markdown release notes for GitHub releases.
"""
import os
from typing import List
import packaging.version
TAG = os.environ["TAG"]
ADDED_HEADER = "### Added 🎉"
CHANGED_HEADER = "### Changed ⚠️"
FIXED_HEADER = "### Fixed ✅"
REMOVED_HEADER = "### Removed 👋"
def get_change_log_notes... | cached_path-main | scripts/release_notes.py |
import os
_MAJOR = "1"
_MINOR = "4"
# On main and in a nightly release the patch should be one ahead of the last
# released build.
_PATCH = "0"
# This is mainly for nightly builds which have the suffix ".dev$DATE". See
# https://semver.org/#is-v123-a-semantic-version for the semantics.
_SUFFIX = os.environ.get("CACHED... | cached_path-main | cached_path/version.py |
import os
import tarfile
from hashlib import sha256
from pathlib import Path
from typing import List, Optional, Tuple
from urllib.parse import urlparse
from .common import PathOrStr, get_cache_dir
from .meta import Meta
def resource_to_filename(resource: PathOrStr, etag: Optional[str] = None) -> str:
"""
Con... | cached_path-main | cached_path/util.py |
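Given the `sha256` import above and the paired `filename_to_url` helper in the tests, `resource_to_filename` evidently maps a resource URL (plus optional ETag) to a stable cache filename. A sketch of that hashing scheme, under the assumption that the URL and ETag are hashed separately:

```python
from hashlib import sha256
from typing import Optional

def resource_to_filename(resource: str, etag: Optional[str] = None) -> str:
    # Hash the URL (and the ETag, when present) into a filesystem-safe name,
    # so each version of a remote resource gets its own cache entry.
    filename = sha256(str(resource).encode("utf-8")).hexdigest()
    if etag:
        filename += "." + sha256(etag.encode("utf-8")).hexdigest()
    return filename
```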
import logging
import os
import shutil
import tarfile
import tempfile
from pathlib import Path
from typing import TYPE_CHECKING, Optional, Tuple
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
from .cache_file import CacheFile
from .common import PathOrStr, get_cache_dir
from .file_lock impor... | cached_path-main | cached_path/_cached_path.py |
"""
The idea behind **cached-path** is to provide a unified, simple, extendable interface for accessing
both local and remote files.
This can be used behind other APIs that need to access files agnostic to where they are located.
For remote files, **cached-path** supports several different schemes out-of-the-box in ad... | cached_path-main | cached_path/__init__.py |
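The package's documented entry point is the `cached_path` function itself: give it a local path or a URL in any supported scheme and it returns a local filesystem path, downloading and caching on first use. A minimal usage sketch:

```python
from cached_path import cached_path

# The first call downloads and caches; later calls reuse the cached copy
# unless the remote resource has changed.
local = cached_path("https://raw.githubusercontent.com/allenai/cached_path/main/README.md")
print(local)  # a pathlib.Path inside the cache directory
```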
import os
import warnings
from typing import Optional
from filelock import AcquireReturnProxy
from filelock import FileLock as _FileLock
from .common import PathOrStr
class FileLock(_FileLock):
"""
This is just a subclass of the `FileLock` class from the `filelock` library, except that
it adds an additi... | cached_path-main | cached_path/file_lock.py |
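The docstring above is cut off mid-sentence, but the read-only lock set up in `tests/file_lock_test.py` suggests the subclass adds an escape hatch for read-only filesystems. A sketch under that assumption (the argument name `read_only_ok` is inferred, not verified):

```python
from cached_path.file_lock import FileLock

# Assumption: with read_only_ok=True, failing to create the lock file on a
# read-only filesystem emits a warning instead of raising, since nothing
# can be writing to that cache anyway.
lock = FileLock("/tmp/some-resource.lock", read_only_ok=True)
with lock:
    ...  # critical section
```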
import os
from os import PathLike
from pathlib import Path
from typing import Tuple, Union
from urllib.parse import urlparse
PathOrStr = Union[str, PathLike]
CACHE_DIRECTORY: PathOrStr = Path(
os.getenv("CACHED_PATH_CACHE_ROOT", Path.home() / ".cache" / "cached_path")
)
"""
The default global cache directory.
"""... | cached_path-main | cached_path/common.py |
import io
from typing import List, Optional
from rich.progress import BarColumn, DownloadColumn, Progress, TaskID, TimeElapsedColumn
class QuietProgress:
"""
A mock `Progress` class that does absolutely nothing.
We use this when users pass `quiet=True` since rich's `Progress` still
prints empty lines... | cached_path-main | cached_path/progress.py |
import logging
import os
import shutil
import tempfile
from pathlib import Path
from .common import get_cache_dir, set_cache_dir
class BaseTestClass:
"""
A custom testing class that disables some of the more verbose
logging and that creates and destroys a temp directory as a test fixture.
"""
PR... | cached_path-main | cached_path/testing.py |
import logging
import os
import tempfile
from pathlib import Path
from .common import PathOrStr
logger = logging.getLogger(__name__)
class CacheFile:
"""
This is a context manager that makes robust caching easier.
    On `__enter__`, an IO handle to a temporary file is returned, which can
be treated ... | cached_path-main | cached_path/cache_file.py |
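A minimal usage sketch matching the context-manager contract described above and exercised in `tests/cache_file_test.py` (assuming the handle defaults to binary mode):

```python
from pathlib import Path
from cached_path.cache_file import CacheFile

target = Path("/tmp/some-resource")
# Writes go to a temporary file; on clean exit it is atomically renamed to
# `target`, and on an exception the partial file is deleted instead.
with CacheFile(target) as handle:
    handle.write(b"downloaded bytes")
```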
import json
import os
import time
from dataclasses import asdict, dataclass
from typing import Optional, Set
from .common import PathOrStr
@dataclass
class Meta:
"""
Any resource that is downloaded to - or extracted in - the cache directory will
have a meta JSON file written next to it, which corresponds... | cached_path-main | cached_path/meta.py |
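The mechanics the docstring describes: serialize a dataclass to `<cached_path>.json` beside the cached file. A self-contained sketch with illustrative field names (the real `Meta` defines its own set):

```python
import json
import time
from dataclasses import asdict, dataclass

@dataclass
class MetaSketch:
    resource: str       # original URL or path
    cached_path: str    # where the bytes live in the cache
    creation_time: float
    size: int

    def to_file(self) -> None:
        # Sidecar convention: the meta JSON sits next to the cached file.
        with open(self.cached_path + ".json", "w") as f:
            json.dump(asdict(self), f)

MetaSketch("https://example.com/x", "/tmp/abc123", time.time(), 42).to_file()
```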
from typing import Set, Type
from .gs import GsClient
from .hf import hf_get_from_cache
from .http import HttpClient
from .s3 import S3Client
from .scheme_client import SchemeClient
__all__ = ["GsClient", "HttpClient", "S3Client", "SchemeClient", "hf_get_from_cache"]
try:
from .beaker import BeakerClient
__... | cached_path-main | cached_path/schemes/__init__.py |
import io
from pathlib import Path
from typing import Optional
from beaker import Beaker, ChecksumFailedError, DatasetNotFound, DatasetReadError
from .scheme_client import SchemeClient
class BeakerClient(SchemeClient):
scheme = ("beaker",)
recoverable_errors = SchemeClient.recoverable_errors + (DatasetReadE... | cached_path-main | cached_path/schemes/beaker.py |
import io
from typing import Optional
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from urllib3.exceptions import MaxRetryError
from .scheme_client import SchemeClient
RECOVERABLE_SERVER_ERROR_CODES = (502, 503, 504)
class RecoverableServerError(r... | cached_path-main | cached_path/schemes/http.py |
import io
from abc import abstractmethod
from typing import ClassVar, Optional, Tuple, Type, Union
import requests
class SchemeClient:
"""
A client used for caching remote resources corresponding to URLs with a particular scheme.
Subclasses must define the :attr:`scheme` class variable and implement
... | cached_path-main | cached_path/schemes/scheme_client.py |
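`tests/schemes_test.py` above shows the extension hook: subclass `SchemeClient`, set the `scheme` class variable, and register with `add_scheme_client`. A sketch of a toy client; the method names `get_etag`/`get_resource` and the `self.resource` attribute follow the interface as inferred from this repo and should be treated as assumptions:

```python
import io
from typing import Optional

from cached_path.schemes import SchemeClient, add_scheme_client

class InMemoryClient(SchemeClient):
    scheme = "mem"  # handles URLs like "mem://whatever"

    def get_etag(self) -> Optional[str]:
        return None  # this toy scheme has no versioning

    def get_resource(self, temp_file: io.BufferedWriter) -> None:
        # Write the resource's bytes into the cache's temp file.
        temp_file.write(b"hello from " + self.resource.encode("utf-8"))

add_scheme_client(InMemoryClient)
```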
"""
Google Cloud Storage.
"""
import io
from typing import Optional, Tuple
from google.api_core.exceptions import NotFound
from google.auth.exceptions import DefaultCredentialsError
from google.cloud import storage
from google.cloud.storage.retry import DEFAULT_RETRY
from ..common import _split_cloud_path
from .sche... | cached_path-main | cached_path/schemes/gs.py |
"""
AWS S3.
"""
import io
from typing import Optional, Tuple
import boto3
import botocore
from ..common import _split_cloud_path
from .scheme_client import SchemeClient
class S3Client(SchemeClient):
recoverable_errors = SchemeClient.recoverable_errors + (
botocore.exceptions.EndpointConnectionError,
... | cached_path-main | cached_path/schemes/s3.py |
"""
HuggingFace Hub.
Unlike the other schemes, we don't implement a `SchemeClient` subclass here because
`huggingface_hub` handles the caching logic internally in essentially the same way.
"""
from pathlib import Path
from typing import Optional
import huggingface_hub as hf_hub
import requests
from huggingface_hub.u... | cached_path-main | cached_path/schemes/hf.py |
from concurrent import futures
import random
import sys
import time
import grpc
import numpy
from pyhocon import ConfigFactory
# These have to be before we do any import from keras. It would be nice to be able to pass in a
# value for this, but that makes argument passing a whole lot more complicated. If/when we ch... | deep_qa_experiments-master | src/main/python/server.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: message.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _m... | deep_qa_experiments-master | src/main/python/proto/message_pb2.py |
| deep_qa_experiments-master | src/main/python/proto/__init__.py |
# There are lots of ways to set up this training script. We're putting the bulk of the code inside
# the my_project module, with a simple run script in the base directory. If you prefer, you could
# just take train.py and move it to the top-level directory and use that as your run.py. Do
# whatever you're most comfor... | allennlp-template-python-script-master | run.py |
import tempfile
from allennlp.common.testing import ModelTestCase
from my_project.train import (
build_dataset_reader,
build_vocab,
build_model,
build_data_loaders,
build_trainer,
)
class TestSimpleClassifier(ModelTestCase):
def test_model_can_train(self):
with tempfile.TemporaryDire... | allennlp-template-python-script-master | tests/test_model.py |
| allennlp-template-python-script-master | tests/__init__.py |
from allennlp.common.testing import AllenNlpTestCase
from my_project.dataset_reader import ClassificationTsvReader
class TestTextClassificationJsonReader(AllenNlpTestCase):
def test_read_from_file_ag_news_corpus_and_truncates_properly(self):
reader = ClassificationTsvReader()
data_path = "tests/f... | allennlp-template-python-script-master | tests/test_dataset_reader.py |
# These imports are important for making the configuration files find the classes that you wrote.
# If you don't have these, you'll get errors about allennlp not being able to find
# "simple_classifier", or whatever name you registered your model with. These imports and the
# contents of .allennlp_plugins make it so ... | allennlp-template-python-script-master | my_project/__init__.py |
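A sketch of what such an `__init__.py` typically contains: importing the modules runs the `@DatasetReader.register(...)` / `@Model.register(...)` decorators, which is what makes the registered names visible to AllenNLP's configuration machinery (the `SimpleClassifier` class name is inferred from the tests above):

```python
from my_project.dataset_reader import ClassificationTsvReader
from my_project.model import SimpleClassifier
```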
from typing import Dict
import torch
from allennlp.data import Vocabulary, TextFieldTensors
from allennlp.models import Model
from allennlp.modules import TextFieldEmbedder, Seq2VecEncoder
from allennlp.nn import util
from allennlp.training.metrics import CategoricalAccuracy
@Model.register("simple_classifier")
clas... | allennlp-template-python-script-master | my_project/model.py |
# This file contains a bunch of build_* methods that configure objects however you want, and a
# run_training_loop method that calls these methods and runs the trainer.
from itertools import chain
from typing import Iterable, Tuple
import allennlp
import torch
from allennlp.data import DataLoader, DatasetReader, Inst... | allennlp-template-python-script-master | my_project/train.py |
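Putting the comment's description together with the `build_*` names imported in `tests/test_model.py`, the training loop presumably chains those builders; a hedged sketch (signatures are assumptions):

```python
def run_training_loop(serialization_dir: str) -> None:
    reader = build_dataset_reader()
    train_loader, dev_loader = build_data_loaders(reader)
    vocab = build_vocab(train_loader)
    model = build_model(vocab)
    trainer = build_trainer(model, serialization_dir, train_loader, dev_loader)
    trainer.train()
```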
from typing import Dict, Iterable, List
from allennlp.data import DatasetReader, Instance, Field
from allennlp.data.fields import LabelField, TextField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, WhitespaceTokenizer
@DatasetReader... | allennlp-template-python-script-master | my_project/dataset_reader.py |
import pandas as pd
import ai2thor.controller
ENV_ARGS = dict(
gridSize=0.25,
width=224,
height=224,
visibilityDistance=1.0,
agentMode="arm",
fieldOfView=100,
agentControllerType="mid-level",
server_class=ai2thor.fifo_server.FifoServer,
useMassThreshold=True,
massThreshold=10,
... | disturb-free-main | manipulathor_plugin/disturb_dist_dict.py |
"""Task Samplers for the task of ArmPointNav"""
from allenact_plugins.manipulathor_plugin.manipulathor_task_samplers import (
ArmPointNavTaskSampler as RawArmPointNavTaskSampler,
)
from projects.manipulathor_disturb_free.manipulathor_plugin.manipulathor_task import (
ArmPointNavTask,
RotateArmPointNavTask... | disturb-free-main | manipulathor_plugin/manipulathor_task_samplers.py |
from allenact_plugins.manipulathor_plugin.manipulathor_tasks import (
ArmPointNavTask as RawArmPointNavTask,
RotateArmPointNavTask as RawRotateArmPointNavTask,
CamRotateArmPointNavTask as RawCamRotateArmPointNavTask,
)
import pandas as pd
DF = pd.read_csv(
"projects/manipulathor_disturb_free/manipulat... | disturb-free-main | manipulathor_plugin/manipulathor_task.py |
"""Utility classes and functions for sensory inputs used by the models."""
from typing import Any, Union, Optional
import gym
import numpy as np
from allenact.base_abstractions.sensor import Sensor
from allenact.base_abstractions.task import Task
from allenact.utils.misc_utils import prepare_locals_for_super
from all... | disturb-free-main | manipulathor_plugin/disturb_sensor.py |
| disturb-free-main | armpointnav_baselines/__init__.py |
import platform
from abc import ABC
from math import ceil
from typing import Dict, Any, List, Optional, Sequence
import os
import gym
import numpy as np
import torch
from allenact.base_abstractions.experiment_config import MachineParams
from allenact.base_abstractions.preprocessor import SensorPreprocessorGraph
from a... | disturb-free-main | armpointnav_baselines/experiments/armpointnav_thor_base.py |
from abc import ABC
from typing import Optional, Sequence, Union
from allenact.base_abstractions.experiment_config import ExperimentConfig
from allenact.base_abstractions.preprocessor import Preprocessor
from allenact.base_abstractions.sensor import Sensor
from allenact.utils.experiment_utils import Builder
class Ar... | disturb-free-main | armpointnav_baselines/experiments/armpointnav_base.py |
from typing import Dict, Tuple
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from allenact.algorithms.onpolicy_sync.losses.abstract_loss import (
AbstractActorCriticLoss,
)
from allenact.algorithms.onpolicy_sync.losses import PPO
from allenact.algorithms.onpolicy_sync.losse... | disturb-free-main | armpointnav_baselines/experiments/armpointnav_mixin_ddppo.py |
| disturb-free-main | armpointnav_baselines/experiments/__init__.py |
from typing import Sequence, Union
import gym
import torch.nn as nn
from allenact.base_abstractions.preprocessor import Preprocessor
from allenact.embodiedai.sensors.vision_sensors import RGBSensor, DepthSensor
from allenact_plugins.manipulathor_plugin.manipulathor_sensors import (
RelativeAgentArmToObjectSensor,
... | disturb-free-main | armpointnav_baselines/experiments/armpointnav_mixin_actorcritic.py |
from abc import ABC
import torch
from allenact_plugins.manipulathor_plugin.armpointnav_constants import (
TRAIN_OBJECTS,
TEST_OBJECTS,
)
from projects.manipulathor_disturb_free.armpointnav_baselines.experiments.armpointnav_thor_base import (
ArmPointNavThorBaseConfig,
)
class ArmPointNaviThorBaseConfig(Ar... | disturb-free-main | armpointnav_baselines/experiments/ithor/armpointnav_ithor_base.py |
from allenact_plugins.manipulathor_plugin.manipulathor_constants import ENV_ARGS
from allenact_plugins.manipulathor_plugin.manipulathor_sensors import (
DepthSensorThor,
RelativeAgentArmToObjectSensor,
RelativeObjectToGoalSensor,
PickedUpObjSensor,
)
from projects.manipulathor_disturb_free.manipulathor_... | disturb-free-main | armpointnav_baselines/experiments/ithor/armpointnav_depth.py |
| disturb-free-main | armpointnav_baselines/experiments/ithor/__init__.py |
from allenact_plugins.manipulathor_plugin.manipulathor_constants import ENV_ARGS
from allenact_plugins.manipulathor_plugin.manipulathor_sensors import (
DepthSensorThor,
RelativeAgentArmToObjectSensor,
RelativeObjectToGoalSensor,
PickedUpObjSensor,
)
from projects.manipulathor_disturb_free.manipulathor_... | disturb-free-main | armpointnav_baselines/experiments/eval/TestScene.py |
"""Baseline models for use in the Arm Point Navigation task.
Arm Point Navigation is currently available as a Task in ManipulaTHOR.
"""
from typing import Tuple, Dict, Optional, cast, List
from collections import OrderedDict
from allenact.utils.system import get_logger
import gym
import torch
import torch.nn as nn
fr... | disturb-free-main | armpointnav_baselines/models/arm_pointnav_models.py |
import torch
import torch.nn as nn
class LinearActorHeadNoCategory(nn.Module):
def __init__(self, num_inputs: int, num_outputs: int):
super().__init__()
self.linear = nn.Linear(num_inputs, num_outputs)
        nn.init.orthogonal_(self.linear.weight, gain=0.01)  # small gain keeps the initial policy near-uniform
nn.init.constant_(self.linea... | disturb-free-main | armpointnav_baselines/models/base_models.py |
| disturb-free-main | armpointnav_baselines/models/__init__.py |
"""Defining the auxiliary loss for actor critic type models."""
from typing import Dict, cast, Tuple, List
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import confusion_matrix
from allenact.embodiedai.aux_losses.losses import AuxiliaryLoss
from allenact.a... | disturb-free-main | armpointnav_baselines/models/disturb_pred_loss.py |
from typing import Tuple, Dict, Optional, Union, List, cast
import torch
import torch.nn as nn
from gym.spaces.dict import Dict as SpaceDict
from projects.manipulathor_disturb_free.armpointnav_baselines.models.disturb_pred_loss import (
DisturbPredictionLoss,
)
class AuxiliaryModel(nn.Module):
def __init__(... | disturb-free-main | armpointnav_baselines/models/aux_model.py |
import pdb
import torch.nn as nn
import torch.nn.functional as F
def upshuffle(
in_planes, out_planes, upscale_factor, kernel_size=3, stride=1, padding=1
):
return nn.Sequential(
nn.Conv2d(
in_planes,
out_planes * upscale_factor ** 2,
kernel_size=kernel_size,
... | disturb-free-main | armpointnav_baselines/models/manipulathor_net_utils.py |
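The visible `Conv2d` producing `out_planes * upscale_factor ** 2` channels is the standard sub-pixel (PixelShuffle) upsampling pattern; here is a self-contained sketch of it (the repo's `Sequential` is truncated and may append an activation):

```python
import torch
import torch.nn as nn

def upshuffle_sketch(in_planes, out_planes, upscale_factor,
                     kernel_size=3, stride=1, padding=1):
    # The conv emits r^2 * C channels; PixelShuffle rearranges them into a
    # C-channel feature map whose spatial size is upscaled by r.
    return nn.Sequential(
        nn.Conv2d(in_planes, out_planes * upscale_factor ** 2,
                  kernel_size=kernel_size, stride=stride, padding=padding),
        nn.PixelShuffle(upscale_factor),
    )

x = torch.randn(1, 64, 7, 7)
print(upshuffle_sketch(64, 32, 2)(x).shape)  # torch.Size([1, 32, 14, 14])
```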
| coleridge-rich-context-ai2-master | rich-context-competition/project/__init__.py |
# introduce myself
print( "Publication parse example:" )
# imports
import codecs
import json
import shutil
# declare variables
publications_json_path = None
json_publication_file = None
publication_list = None
publication_counter = -1
publication_info = None
pub_date = None
unique_identifier = None
text_file_name = N... | coleridge-rich-context-ai2-master | rich-context-competition/project/project.py |
| coleridge-rich-context-ai2-master | rich-context-competition/evaluate/__init__.py |
# coding: utf-8
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Setup" data-toc-modified-id="Setup-1"><span class="toc-item-num">1 </span>Setup</a></span><ul class="toc-item"><li><span><a href="#Setup---imports" data-toc-modified-id="Set... | coleridge-rich-context-ai2-master | rich-context-competition/evaluate/evaluate_data_set_coding.py |
# imports
import json
import numpy
import six
import sklearn
from sklearn import metrics
class CitationCodingEvaluation( object ):
#============================================================================
# CONSTANTS-ish
#============================================================================... | coleridge-rich-context-ai2-master | rich-context-competition/evaluate/citation_coding_evaluation.py |
from typing import List
from s2base.scispacy_util import SciSpaCyParser
import textacy
import spacy
import os
import json
from collections import defaultdict
from collections import Counter as mset
import numpy as np
import Levenshtein
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.externals ... | coleridge-rich-context-ai2-master | project/text_utils.py |
"""This file contains a class for the rule based model for generating dataset extraction candidates
"""
import os
import json
from fuzzywuzzy import fuzz
import nltk
from nltk.corpus import stopwords
import re
from s2base import scispacy_util
from tqdm import tqdm
from sklearn.externals import joblib
from spacy.lang i... | coleridge-rich-context-ai2-master | project/rule_based_model.py |
"""This file can be run to create the dataset linking dataset in the format expected by the
Structured Gradient Tree Boosting model. It assumes that there are train/dev/test folders
in project/data. We use this dataset for other downstream models as well, to minimize the
number of times this dataset is regener... | coleridge-rich-context-ai2-master | project/create_sgtb_dataset.py |
"""This file contains a class that can be used to predict dataset mentions using a trained
named entity recognition (NER) model
"""
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
from ner_rcc.rcc_ner import RccNerDatasetReader
from allennlp.common.params import Params
im... | coleridge-rich-context-ai2-master | project/ner_model.py |
"""This file can be run to create the rule based dataset candidates. It assumes that
there are train/dev/test folders in project/data. The create_rule_based_input function
can be used to convert a citation list (the competition output format) to the format expected
by the next step of the system. The generate_... | coleridge-rich-context-ai2-master | project/create_linking_dataset.py |
| coleridge-rich-context-ai2-master | project/__init__.py |
"""Script to sample some publications from the phase 1 holdout set"""
import os
from collections import defaultdict
import numpy as np
from tqdm import tqdm
import json
def main():
SAMPLE_COUNT = 400
holdout_path = os.path.abspath(os.path.join("project", "holdout", "data"))
holdout_publications_path = os.... | coleridge-rich-context-ai2-master | project/sample_holdout.py |
# Adapted from https://github.com/bloomberg/sgtb
# Copyright 2018 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
... | coleridge-rich-context-ai2-master | project/structured_gradient_boosting.py |
"""This file can be run to convert all of the publications in train/dev/test to conll format,
both for NER and linking. The output will be in folders called ner-conll and linking-conll.
to_conll_test.py is used to produce conll formatted files for the test publications.
"""
import argparse
import json
import os
f... | coleridge-rich-context-ai2-master | project/to_conll.py |
# Adapted from https://github.com/bloomberg/sgtb
# Copyright 2018 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
... | coleridge-rich-context-ai2-master | project/structured_learner.py |
import os
import json
import re
from s2base import scispacy_util
from tqdm import tqdm
import nltk
from nltk.corpus import stopwords
from collections import defaultdict
from create_sgtb_dataset import get_scispacy_doc
from math import log
class MethodExtractor():
def __init__(self, train_path, dev_path, sage_metho... | coleridge-rich-context-ai2-master | project/method_extractor.py |
"""This is the main script for outputting predictions for the competition
"""
from rule_based_model import RuleBasedModel
from xgboost import XGBClassifier
from typing import Dict, Union, List
import json
import os
import create_linking_dataset
import create_sgtb_dataset
import structured_gradient_boosting
import struc... | coleridge-rich-context-ai2-master | project/project.py |
"""This file can be run to convert the test files to conll format, saved in the ner-conll folder.
This is mostly copy-pasted from to_conll.py as a quick workaround to run the conll parsing code
at test time. A cleaner implementation would not need this file, and would just make use of
to_conll.py
"""
import o... | coleridge-rich-context-ai2-master | project/to_conll_test.py |
import xgboost as xgb
import os
import json
from sklearn.metrics import accuracy_score, roc_auc_score, precision_score, recall_score, f1_score, precision_recall_curve
import numpy as np
from sklearn.externals import joblib
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import Prede... | coleridge-rich-context-ai2-master | project/xgboost_linking.py |
"""Script to write all the needed files to a new folder based on splits provided in text files"""
import os
import json
from typing import Dict, Union, List
from collections import defaultdict
from tqdm import tqdm
def load_all_publications(old_base_path: str):
train_path = os.path.join(old_base_path, "train")
... | coleridge-rich-context-ai2-master | project/create_dataset_split_folder.py |
| coleridge-rich-context-ai2-master | project/ner_rcc/__init__.py |