Columns:
    python_code  (string, length 0 to 187k)
    repo_name    (string, length 8 to 46)
    file_path    (string, length 6 to 135)
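Each row below pairs a (possibly truncated) python_code snippet with its repo_name and file_path. As a minimal sketch of how such rows might be consumed, assuming they are exported as JSON Lines with exactly these three fields (the path "rows.jsonl" is a placeholder, not part of the dataset):

import json

# Hypothetical export: one JSON object per line with the three columns above.
with open("rows.jsonl", "r", encoding="utf-8") as f:
    for line in f:
        row = json.loads(line)
        # python_code can be empty (minimum length is 0), e.g. for __init__.py files.
        print(row["repo_name"], row["file_path"], len(row["python_code"]))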
import torch
import json
import os
import pickle
from tqdm import tqdm
from collections import defaultdict
import numpy
from scipy.spatial.distance import mahalanobis
from sklearn import mixture
from sklearn.cluster import AgglomerativeClustering
from sklearn.decomposition import PCA
from transformers import AutoTokeni...
data-efficient-finetuning-main
scripts/make_final_rep_clusters.py
import faiss
import argparse
import torch
import numpy
import json
import tqdm
from scipy.stats import entropy
import pickle
import os
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

numpy.random.seed(20389)

parser = argparse.ArgumentParser()
parser.add_argument("--search_output", type=str)
parser.add...
data-efficient-finetuning-main
scripts/sort_training_data.py
import json
from tqdm import tqdm
import sys

p3_instance_filename = sys.argv[1]
pyserini_output_filename = sys.argv[2]

with open(p3_instance_filename, 'r') as f:
    with open(pyserini_output_filename, "w") as writefile:
        for i, line in tqdm(enumerate(f)):
            data = json.loads(line)
            pyseri...
data-efficient-finetuning-main
scripts/convert_pyserini.py
import json
import gzip
import argparse
import os
from datasets import load_from_disk

parser = argparse.ArgumentParser()
parser.add_argument("--datasets", type=str, help="Json file containing the list of P3 datasets to load")
parser.add_argument("--output_prefix", type=str, required=True)
parser.add_argument("--data_c...
data-efficient-finetuning-main
scripts/write_p3_train_instances.py
import json
from collections import defaultdict

# We'll keep only one copy of each instance across all the prompt variants
data_sizes = {k: len(v['validation']) + len(v['train']) for k, v in json.load(open('p3_data.json')).items()}
simplified_p3_data = defaultdict(lambda: {"validation": {}, "train": {}})
num_all_instances =...
data-efficient-finetuning-main
scripts/simplify_p3_data.py
import json
import sys

filename = sys.argv[1]
desired_size = int(sys.argv[2])
outfilename = sys.argv[3]

all_ids = []
all_distances = []
with open(filename, 'r') as f:
    for line in f:
        sample = json.loads(line)
        all_ids.append(sample['ids'])
        all_distances.append(sample['distances'])

def flat...
data-efficient-finetuning-main
scripts/reduce_indices_to_balanced.py
import json
import os
import sys
import pickle
from collections import defaultdict
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn import mixture
from sklearn.metrics.pairwise import euclidean_distances

num_clusters = 15
data = json.load(open("p3...
data-efficient-finetuning-main
scripts/make_tf_idf_clusters.py
from tqdm import tqdm
from datasets import load_dataset
import argparse
import json

parser = argparse.ArgumentParser()
parser.add_argument("--p3_output_file", type=str, required=True)
args = parser.parse_args()

tasks = open('data/t0_prompt_tasks.txt', 'r')
outputfile = open(args.p3_output_file, 'w')
for task in tq...
data-efficient-finetuning-main
scripts/download_p3.py
import argparse
from fileinput import filename
import faiss
import torch
import gzip
import json
import tqdm
from collections import defaultdict
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import sys
import os

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from attribu...
data-efficient-finetuning-main
scripts/retrieve_training_data_rank.py
import json
from tqdm import tqdm
from collections import defaultdict
import os

cluster_data = json.load(open("./t0_cluster_data.json"))
prompt_dataset_groups = defaultdict(list)
for instances in cluster_data.values():
    for instance in instances:
        prompt_dataset_groups[instance['dataset']].append(instance)
...
data-efficient-finetuning-main
scripts/infer_prompts.py
import json
import sys
import tqdm
from collections import defaultdict

full_data = json.load(open(sys.argv[1]))
input_data = json.load(open(sys.argv[2]))
output_file = sys.argv[3]

special_dataset_prefixes = ["hellaswag", "winogrande", "super_glue_copa"]
dataset_options_dict = defaultdict(set)
instances_options_dict ...
data-efficient-finetuning-main
scripts/add_options.py
import sys
import random
from attribution.huggingface_readers import (
    RTEReader,
    ANLIR1Reader,
    ANLIR2Reader,
    ANLIR3Reader,
    WiCReader,
    WSCReader,
    WinoGrandeReader,
    HellaSwagReader,
    COPAReader,
    CBReader,
    StoryClozeReader,
    CaseHOLDReader
)
from attribution.drop_reader impo...
data-efficient-finetuning-main
scripts/dump_dataset_queries.py
import sys
import os
import json
from tqdm import tqdm
from attribution.ni_reader import NaturalInstructionsReader
from natural_instructions.task_eval_splits import ALL_EVAL_SPLITS

outfolder = sys.argv[1]
reader = NaturalInstructionsReader(
    return_original_instance=True,
    split_name='test',
    add_task_defi...
data-efficient-finetuning-main
scripts/generate_natural_instructions_query_splits.py
import json
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--predictions', type=str, required=True)
parser.add_argument('--data_file', type=str, required=True)
args = parser.parse_args()

references = {}
data = json.load(open(args.data_file))
for paper_data in data.values():
    for qa_info in...
data-efficient-finetuning-main
scripts/evaluate_qasper_evidence_predictions.py
import json
from tqdm import tqdm
from collections import defaultdict

prompts = json.load(open("p3_prompts.json"))
cluster_data = json.load(open("./t0_cluster_data.json"))
raw_instances = defaultdict(list)
print("Grouping instances..")
prompt_issues_with_datasets = defaultdict(list)
for cluster_id, data in tqdm(cluste...
data-efficient-finetuning-main
scripts/group_instances_without_prompts.py
import sys

# assumed format (from pyserini)
# 0 Q0 41840491 10 51.878502 Anserini
def parse_example(line):
    example = line.split(" ")
    # get the rank and the index
    rank = int(example[3])
    index = example[2]
    return rank, index

# filter the examples we keep
def filter_examples(examples, max_rank):
    ...
data-efficient-finetuning-main
scripts/parse_pyserini_output.py
import json
import sys

# Map each query id to its first answer, reading predictions as JSON lines.
stuff = {}
file = open(sys.argv[1], 'r')
for line in file:
    sample = json.loads(line)
    stuff[sample['query_id'][0]] = sample['answer'][0]

with open('drop_preds.json', 'w') as f:
    json.dump(stuff, f)
data-efficient-finetuning-main
scripts/evaluate_unfair_tos_preds.py
import pickle
from datasets import load_dataset
import os
import json
from collections import defaultdict
from tqdm import tqdm
import sys

cluster_indices_directory = ""  # location of p3 cluster indices

clusters_text = defaultdict(list)
errors = []
for filename in tqdm(os.listdir(cluster_indices_directory)):
    full...
data-efficient-finetuning-main
scripts/show_clusters.py
import argparse
import json
import string
from rouge_score import rouge_scorer
from transformers import AutoTokenizer

class GPTTokenizer:
    gpt_tokenizer = AutoTokenizer.from_pretrained("gpt2", max_length=1e5)

    def tokenize(self, s):
        tokens = self.gpt_tokenizer.tokenize(s)
        # GPT2 uses Byte-leve...
data-efficient-finetuning-main
natural_instructions/ni_evaluation.py
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LI...
data-efficient-finetuning-main
natural_instructions/ni_dataset.py
'''
Splits of the eval tasks, as laid out in the NIV2 paper
'''
ENTAILMENT = [
    "task937",
    # "task202",
    # "task936",
    # "task641",
    # "task1344",
    # "task1615",
    # "task1385",
    # "task935",
    # "task199",
    # "task1388",
    # "task1554",
    # "task640",
    # "task534",
    # "task201",
...
data-efficient-finetuning-main
natural_instructions/task_eval_splits.py
import logging
import random
import string
from dataclasses import dataclass
from typing import Any, Optional, Union

from transformers import PreTrainedTokenizerBase
from transformers.utils import PaddingStrategy

logger = logging.getLogger(__name__)

@dataclass
class DataCollatorForNI:
    tokenizer: PreTrainedToke...
data-efficient-finetuning-main
natural_instructions/ni_collator.py
import logging
import uuid
from pathlib import Path
from typing import Generator

import petname
import pytest
from beaker import exceptions
from beaker.client import Beaker
from beaker.data_model import *

logger = logging.getLogger(__name__)

def unique_name() -> str:
    return petname.generate() + "-" + str(uuid....
beaker-py-main
conftest.py
""" Tests creating, pushing, and pulling images to/from Beaker. This requires building a test image called "beaker-py-test" using the Dockerfile at "test_fixtures/docker/Dockerfile". """ from beaker import Beaker LOCAL_IMAGE_TAG = "beaker-py-test" def test_image_create_workflow( client: Beaker, beaker_image_na...
beaker-py-main
integration_tests/images_test.py
import pytest

from beaker import (
    Beaker,
    ExperimentConflict,
    ExperimentNotFound,
    ExperimentSpec,
    ImageSource,
    ResultSpec,
    TaskContext,
    TaskSpec,
)

def test_experiment_workflow(
    client: Beaker,
    experiment_name: str,
    alternate_experiment_name: str,
    beaker_cluster_name:...
beaker-py-main
integration_tests/experiments_test.py
import time

from beaker import Beaker, ExperimentSpec, TaskSpec

def test_job_stop_and_finalize(client: Beaker, experiment_name: str, beaker_cluster_name: str):
    start = time.time()
    spec = ExperimentSpec().with_task(
        TaskSpec.new(
            "main",
            beaker_cluster_name,
            docker_...
beaker-py-main
integration_tests/jobs_test.py
import os
import tempfile
from pathlib import Path
from typing import Optional

import pytest

from beaker.client import Beaker, DatasetClient
from beaker.exceptions import DatasetWriteError

class TestDataset:
    def setup_method(self):
        self.file_a = tempfile.NamedTemporaryFile(delete=False)
        self.fil...
beaker-py-main
integration_tests/datasets_test.py
from beaker import Beaker, Organization

def test_cluster_get_on_prem(client: Beaker, beaker_on_prem_cluster_name: str):
    cluster = client.cluster.get(beaker_on_prem_cluster_name)
    assert cluster.autoscale is False
    assert cluster.is_cloud is False
    assert cluster.is_active is True
    assert cluster.node_...
beaker-py-main
tests/cluster_test.py
import pytest

from beaker import Beaker, GroupConflict, GroupNotFound

def test_group_methods(
    client: Beaker, group_name: str, alternate_group_name: str, hello_world_experiment_id: str
):
    # Create a new group.
    group = client.group.create(group_name)
    assert group.name == group_name

    # Add an exper...
beaker-py-main
tests/group_test.py
import pytest

from beaker import (
    Beaker,
    ClusterNotFound,
    CurrentJobStatus,
    DataMount,
    DatasetNotFound,
    DataSource,
    ExperimentSpec,
    ImageNotFound,
    ImageSource,
    ResultSpec,
    SecretNotFound,
    TaskContext,
    TaskNotFound,
    TaskSpec,
)

def test_experiment_get(client: ...
beaker-py-main
tests/experiment_test.py
from typing import Optional, Union

import pytest

from beaker import (
    Account,
    Beaker,
    Permission,
    Workspace,
    WorkspaceNotFound,
    WorkspaceWriteError,
)

def test_ensure_workspace_invalid_name(client: Beaker):
    with pytest.raises(ValueError, match="Invalid name"):
        client.workspace.e...
beaker-py-main
tests/workspace_test.py
from pathlib import Path

import pytest

from beaker.data_model import *
from beaker.data_model.base import MappedSequence
from beaker.exceptions import ValidationError

def test_data_source_validation():
    with pytest.raises(ValidationError, match="Exactly one"):
        DataSource()

    with pytest.raises(Validat...
beaker-py-main
tests/data_model_test.py
beaker-py-main
tests/__init__.py
import base64
import time

import pytest

from beaker.client import Beaker
from beaker.services.service_client import ServiceClient
from beaker.util import *

@pytest.mark.parametrize(
    "camel_case, snake_case",
    [
        ("hostPath", "host_path"),
        ("fooBarBaz", "foo_bar_baz"),
        ("docker", "docke...
beaker-py-main
tests/util_test.py
from beaker.client import Beaker

def test_dataset_get(client: Beaker, squad_dataset_name: str):
    dataset = client.dataset.get(squad_dataset_name)
    assert dataset.name is not None

    # Try with ID.
    client.dataset.get(dataset.id)

    # Try with just name (without account prefix).
    client.dataset.get(datas...
beaker-py-main
tests/dataset_test.py
from beaker import Beaker

def test_secrets(client: Beaker, secret_name: str):
    secret = client.secret.write(secret_name, "foo")
    assert secret.name == secret_name
    assert client.secret.get(secret_name) == secret
    assert client.secret.read(secret) == "foo"
beaker-py-main
tests/secret_test.py
from beaker import Beaker

def test_organization_get(client: Beaker, beaker_org_name: str):
    org = client.organization.get(beaker_org_name)
    assert org.name == beaker_org_name

    # Now get by ID.
    client.organization.get(org.id)

def test_organization_list_members(client: Beaker, beaker_org_name: str):
...
beaker-py-main
tests/organization_test.py
from beaker import Beaker

def test_image_get(client: Beaker, hello_world_image_name: str):
    # Get by full name.
    image = client.image.get(hello_world_image_name)

    # Get by ID.
    client.image.get(image.id)

    # Get by name.
    assert image.name is not None
    client.image.get(image.name)

def test_image...
beaker-py-main
tests/image_test.py
from beaker import Beaker, CurrentJobStatus, JobKind

def test_job_get(client: Beaker, hello_world_job_id: str):
    job = client.job.get(hello_world_job_id)
    assert job.id == hello_world_job_id
    assert job.status.current == CurrentJobStatus.finalized
    assert job.kind == JobKind.execution
    assert job.to_js...
beaker-py-main
tests/job_test.py
from beaker.client import Beaker

def test_whoami(client: Beaker):
    client.account.whoami()

def test_name(client: Beaker):
    assert isinstance(client.account.name, str)

def test_list_organizations(client: Beaker):
    client.account.list_organizations()
beaker-py-main
tests/account_test.py
import pytest
import yaml

from beaker import Beaker
from beaker.config import Config

def test_str_method(client: Beaker):
    assert "user_token=***" in str(client.config)
    assert client.config.user_token not in str(client.config)

def test_config_from_path_unknown_field(tmp_path):
    path = tmp_path / "config....
beaker-py-main
tests/config_test.py
import pytest
from flaky import flaky

from beaker import Beaker

@flaky  # this can fail if the request to GitHub fails
def test_warn_for_newer_version(monkeypatch):
    import beaker.client
    import beaker.version

    monkeypatch.setattr(Beaker, "CLIENT_VERSION", "0.1.0")
    monkeypatch.setattr(beaker.client, "_...
beaker-py-main
tests/client_test.py
from beaker import Beaker

def test_node_get(client: Beaker, beaker_node_id: str):
    gpu_count = client.node.get(beaker_node_id).limits.gpu_count
    assert gpu_count is not None
    assert gpu_count > 0
beaker-py-main
tests/node_test.py
import pytest

@pytest.fixture(autouse=True)
def doctest_fixtures(
    doctest_namespace,
    client,
    workspace_name,
    docker_image_name,
    beaker_image_name,
    beaker_cluster_name,
    beaker_on_prem_cluster_name,
    experiment_name,
    dataset_name,
    download_path,
    beaker_org_name,
    beaker_nod...
beaker-py-main
beaker/conftest.py
import logging
import os
import warnings
from dataclasses import asdict, dataclass, fields
from pathlib import Path
from typing import ClassVar, Optional, Set

import yaml

from .exceptions import ConfigurationError

DEFAULT_CONFIG_LOCATION: Optional[Path] = None
try:
    DEFAULT_CONFIG_LOCATION = Path.home() / ".beake...
beaker-py-main
beaker/config.py
_MAJOR = "1"
_MINOR = "21"
_PATCH = "0"
_SUFFIX = ""

VERSION_SHORT = "{0}.{1}".format(_MAJOR, _MINOR)
VERSION = "{0}.{1}.{2}{3}".format(_MAJOR, _MINOR, _PATCH, _SUFFIX)
beaker-py-main
beaker/version.py
import base64
import re
import time
import warnings
from collections import OrderedDict
from datetime import datetime, timedelta
from functools import wraps
from pathlib import Path
from typing import Any, Callable, Optional, Set, Tuple, Type, TypeVar, Union

from .aliases import PathOrStr
from .exceptions import Reque...
beaker-py-main
beaker/util.py
import logging
import os
from contextlib import contextmanager
from typing import Generator, Optional, Tuple, Union

import docker
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

from .config import Config
from .data_model import *
from .exceptions import *
from .services...
beaker-py-main
beaker/client.py
""" Initialize a :class:`Beaker client <beaker.Beaker>` with :meth:`Beaker.from_env()`: >>> from beaker import * >>> beaker = Beaker.from_env(default_workspace=workspace_name) Accounts -------- Manage your Beaker account with :data:`Beaker.account`. For example, you can check who you are logged in as with :meth:`Be...
beaker-py-main
beaker/__init__.py
import os
from typing import Union

PathOrStr = Union[os.PathLike, str]
beaker-py-main
beaker/aliases.py
""" Exceptions that can be raised by the :class:`~beaker.Beaker` client. .. tip:: All exceptions inherit from :class:`BeakerError` other than :exc:`HTTPError`, which is re-exported from :exc:`requests.exceptions.HTTPError`, and :exc:`ValidationError`, which is re-exported from `pydantic <https://pydantic-d...
beaker-py-main
beaker/exceptions.py
import io
import time
from typing import List, Optional, Tuple, Union

from rich.console import Console
from rich.live import Live
from rich.panel import Panel
from rich.progress import (
    BarColumn,
    DownloadColumn,
    FileSizeColumn,
    MofNCompleteColumn,
    Progress,
    ProgressColumn,
    SpinnerColumn,
...
beaker-py-main
beaker/progress.py
from datetime import datetime
from typing import Optional

from .account import Account
from .base import BaseModel, StrEnum

__all__ = ["Organization", "OrganizationRole", "OrganizationMember"]

class Organization(BaseModel):
    id: str
    name: str
    description: str
    created: datetime
    display_name: str
    ...
beaker-py-main
beaker/data_model/organization.py
from datetime import datetime
from typing import Dict, List, Optional, Tuple

from pydantic import Field

from .account import Account
from .base import BaseModel, IntEnum, StrEnum, field_validator
from .experiment_spec import (
    DataMount,
    EnvVar,
    ImageSource,
    Priority,
    ResultSpec,
    TaskSpec,
)
...
beaker-py-main
beaker/data_model/job.py
from typing import Any, Dict, List, Optional, Union

from pydantic import Field

from ..aliases import PathOrStr
from ..exceptions import *
from .base import BaseModel, StrEnum, field_validator, model_validator

__all__ = [
    "ImageSource",
    "EnvVar",
    "DataSource",
    "DataMount",
    "ResultSpec",
    "TaskR...
beaker-py-main
beaker/data_model/experiment_spec.py
from .account import *
from .cluster import *
from .dataset import *
from .experiment import *
from .experiment_spec import *
from .group import *
from .image import *
from .job import *
from .node import *
from .organization import *
from .secret import *
from .workspace import *
beaker-py-main
beaker/data_model/__init__.py
from typing import Any, ClassVar, Dict, Set, Type

from pydantic import BaseModel as _BaseModel
from pydantic import ConfigDict, model_validator

from ..util import issue_data_model_warning, to_snake_case

class BaseModelV2(_BaseModel):
    """
    The Pydantic v2 base class for Beaker data models.
    """

    mode...
beaker-py-main
beaker/data_model/_base_v2.py
from datetime import datetime
from typing import Optional, Tuple, Union
from urllib.parse import urlparse

from .account import Account
from .base import BaseModel, BasePage, StrEnum, field_validator
from .workspace import WorkspaceRef

__all__ = [
    "DatasetStorage",
    "DatasetSize",
    "Dataset",
    "DatasetInf...
beaker-py-main
beaker/data_model/dataset.py
from typing import Any, ClassVar, Dict, Optional, Set, Type

from pydantic import BaseModel as _BaseModel
from pydantic import root_validator, validator

from ..util import issue_data_model_warning, to_snake_case

def field_validator(*fields: str, mode: str = "after"):
    return validator(*fields, pre=mode == "before...
beaker-py-main
beaker/data_model/_base_v1.py
from datetime import datetime
from typing import List, Optional, Tuple

from pydantic import Field

from .account import Account
from .base import BaseModel, BasePage, MappedSequence, StrEnum
from .job import Job
from .workspace import WorkspaceRef

__all__ = ["Experiment", "Task", "Tasks", "ExperimentsPage", "Experime...
beaker-py-main
beaker/data_model/experiment.py
from datetime import datetime

from .base import BaseModel

__all__ = ["Secret"]

class Secret(BaseModel):
    name: str
    created: datetime
    updated: datetime
beaker-py-main
beaker/data_model/secret.py
from datetime import datetime
from typing import Optional, Tuple

from .base import BaseModel, StrEnum, field_validator
from .node import NodeResources, NodeUtilization

__all__ = ["ClusterStatus", "Cluster", "ClusterUtilization", "ClusterSpec", "ClusterPatch"]

class ClusterStatus(StrEnum):
    """
    Current status...
beaker-py-main
beaker/data_model/cluster.py
from datetime import datetime
from typing import List, Optional, Tuple

from .account import Account
from .base import BaseModel, BasePage, StrEnum
from .workspace import WorkspaceRef

__all__ = [
    "Group",
    "GroupSpec",
    "GroupParameterType",
    "GroupParameter",
    "GroupPatch",
    "GroupsPage",
    "Grou...
beaker-py-main
beaker/data_model/group.py
from datetime import datetime
from typing import Optional

from .base import BaseModel

__all__ = ["NodeResources", "Node", "NodeUtilization"]

class NodeResources(BaseModel):
    cpu_count: Optional[float] = None
    memory: Optional[str] = None
    gpu_count: Optional[int] = None
    gpu_type: Optional[str] = None
    ...
beaker-py-main
beaker/data_model/node.py
from datetime import datetime
from typing import Dict, List, Optional, Tuple

from .account import Account
from .base import BaseModel, BasePage, StrEnum

__all__ = [
    "WorkspaceSize",
    "Workspace",
    "WorkspaceRef",
    "WorkspacePage",
    "WorkspaceSpec",
    "WorkspaceTransferSpec",
    "Permission",
    "W...
beaker-py-main
beaker/data_model/workspace.py
from datetime import datetime
from typing import Optional, Tuple

from .account import Account
from .base import BaseModel, BasePage, StrEnum, field_validator
from .workspace import WorkspaceRef

__all__ = [
    "Image",
    "ImagesPage",
    "ImageRepoAuth",
    "ImageRepo",
    "DockerLayerProgress",
    "DockerLayer...
beaker-py-main
beaker/data_model/image.py
import logging
from enum import Enum
from typing import (
    Any,
    Dict,
    Generic,
    Iterator,
    Mapping,
    Optional,
    Sequence,
    Tuple,
    Type,
    TypeVar,
    Union,
)

from pydantic import ValidationError

from ..util import to_lower_camel, to_snake_case

try:
    from pydantic import field_val...
beaker-py-main
beaker/data_model/base.py
from typing import Optional

from .base import BaseModel

__all__ = ["Account"]

class Account(BaseModel):
    id: str
    name: str
    display_name: str
    institution: Optional[str] = None
    pronouns: Optional[str] = None
    email: Optional[str] = None
beaker-py-main
beaker/data_model/account.py
from typing import List, Optional, Union

from ..data_model import *
from ..exceptions import *
from .service_client import ServiceClient

class OrganizationClient(ServiceClient):
    """
    Accessed via :data:`Beaker.organization <beaker.Beaker.organization>`.
    """

    def get(self, org: Optional[str] = None) ->...
beaker-py-main
beaker/services/organization.py
import time
from collections import defaultdict
from datetime import datetime, timedelta
from typing import TYPE_CHECKING, Any, Dict, Generator, List, Optional, Set, Union

from ..data_model import *
from ..exceptions import *
from .service_client import ServiceClient

if TYPE_CHECKING:
    from rich.progress import Pr...
beaker-py-main
beaker/services/job.py
from .account import AccountClient
from .cluster import ClusterClient
from .dataset import DatasetClient
from .experiment import ExperimentClient
from .group import GroupClient
from .image import ImageClient
from .job import JobClient
from .node import NodeClient
from .organization import OrganizationClient
from .secre...
beaker-py-main
beaker/services/__init__.py
import io
import os
import urllib.parse
from datetime import datetime
from pathlib import Path
from typing import (
    TYPE_CHECKING,
    ClassVar,
    Dict,
    Generator,
    List,
    Optional,
    Tuple,
    Union,
)

from ..aliases import PathOrStr
from ..data_model import *
from ..exceptions import *
from ..util...
beaker-py-main
beaker/services/dataset.py
import time
from datetime import datetime, timedelta
from typing import (
    TYPE_CHECKING,
    Any,
    Dict,
    Generator,
    List,
    Optional,
    Sequence,
    Set,
    Union,
)

from ..aliases import PathOrStr
from ..data_model import *
from ..exceptions import *
from .service_client import ServiceClient

if ...
beaker-py-main
beaker/services/experiment.py
from typing import Optional, Union

from ..data_model import *
from ..exceptions import *
from .service_client import ServiceClient

class SecretClient(ServiceClient):
    """
    Accessed via :data:`Beaker.secret <beaker.Beaker.secret>`.
    """

    def get(self, secret: str, workspace: Optional[Union[str, Workspace...
beaker-py-main
beaker/services/secret.py
from typing import Dict, List, Optional, Union

from ..data_model import *
from ..exceptions import *
from .service_client import ServiceClient

class ClusterClient(ServiceClient):
    """
    Accessed via :data:`Beaker.cluster <beaker.Beaker.cluster>`.
    """

    def get(self, cluster: str) -> Cluster:
        """
        ...
beaker-py-main
beaker/services/cluster.py
from typing import Generator, List, Optional, Union

from ..data_model import *
from ..exceptions import *
from .service_client import ServiceClient

class GroupClient(ServiceClient):
    """
    Accessed via :data:`Beaker.group <beaker.Beaker.group>`.
    """

    def get(self, group: str) -> Group:
        """
        ...
beaker-py-main
beaker/services/group.py
from ..data_model import *
from ..exceptions import *
from .service_client import ServiceClient

class NodeClient(ServiceClient):
    """
    Accessed via :data:`Beaker.node <beaker.Beaker.node>`.
    """

    def get(self, node_id: str) -> Node:
        """
        Get information about a node.

        :param node_i...
beaker-py-main
beaker/services/node.py
from collections import defaultdict
from datetime import datetime
from typing import Any, Dict, Generator, List, Optional, Type, TypeVar, Union

from ..data_model import *
from ..data_model.base import BasePage
from ..exceptions import *
from ..util import format_cursor
from .service_client import ServiceClient

T = Ty...
beaker-py-main
beaker/services/workspace.py
from typing import TYPE_CHECKING, Dict, Optional, Union, cast

from docker.models.images import Image as DockerImage

from ..data_model import *
from ..exceptions import *
from .service_client import ServiceClient

if TYPE_CHECKING:
    from rich.progress import TaskID

class ImageClient(ServiceClient):
    """
    Ac...
beaker-py-main
beaker/services/image.py
import io
import json
import logging
import urllib.parse
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union

import docker
import requests

from ..config import Config
from ..data_model import *
from ..data_model.base import BaseModel
from ..exceptions import *
from ..util import retriable

if TYPE_CHE...
beaker-py-main
beaker/services/service_client.py
from typing import List

from ..data_model import *
from ..exceptions import *
from ..util import cached_property
from .service_client import ServiceClient

class AccountClient(ServiceClient):
    """
    Accessed via :data:`Beaker.account <beaker.Beaker.account>`.
    """

    @cached_property(ttl=3 * 60)
    def nam...
beaker-py-main
beaker/services/account.py
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

import logging
import os
import sys
from datetime import datetime

# -- Path setup ----...
beaker-py-main
docs/source/conf.py
""" This script will upload an image to Beaker and then submit a bunch of experiments with different inputs. It will wait for all experiments to finish and then collect the results. See the output of 'python run.py --help' for usage. """ import argparse import uuid import petname from rich import print, progress, ta...
beaker-py-main
examples/sweep/run.py
""" This is the script that will run on Beaker as the Docker image's "entrypoint". All it does is write out a simple JSON file with a random number in it to the experiment's result directory. This is just meant to simulate the results of a training/evaluation pipeline. """ import json import random import sys # NOTE...
beaker-py-main
examples/sweep/entrypoint.py
from datetime import datetime
from pathlib import Path

from beaker.version import VERSION

def main():
    changelog = Path("CHANGELOG.md")

    with changelog.open() as f:
        lines = f.readlines()

    insert_index: int = -1
    for i in range(len(lines)):
        line = lines[i]
        if line.startswith("## ...
beaker-py-main
scripts/prepare_changelog.py
# encoding: utf-8

"""
Prepares markdown release notes for GitHub releases.
"""

import os
from typing import List

TAG = os.environ["TAG"]

ADDED_HEADER = "### Added 🎉"
CHANGED_HEADER = "### Changed ⚠️"
FIXED_HEADER = "### Fixed ✅"
REMOVED_HEADER = "### Removed 👋"

def get_change_log_notes() -> str:
    in_current_...
beaker-py-main
scripts/release_notes.py
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

from pathlib import Path

from setuptools import setup  # type: ignore

setup(
    name="cc_net",
    version="1.0.0",
    pa...
cc_net-main
setup.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#

"""
Main script to download a CC dump, remove duplicates, split by language and
filter the documents.

The pipeline parameters are described...
cc_net-main
cc_net/mine.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#

"""
Creates mono-lingual corpus from Wikipedia.
"""

import functools
import re
import subprocess
import urllib.request
from pathlib import ...
cc_net-main
cc_net/get_wiki_cirrus.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#

"""
Manipulate files containing one json per line.
"""
import argparse
import collections
import contextlib
import functools
import glob
imp...
cc_net-main
cc_net/jsonql.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#

import functools
import itertools
import logging
import os
import sys
import time
import warnings
from pathlib import Path
from typing impor...
cc_net-main
cc_net/execution.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#

import sys
import time
import warnings
from typing import Iterable, Iterator, Sequence, Sized, Tuple, Type

import numpy as np

HASH_TYPE: T...
cc_net-main
cc_net/flat_hash_set.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#

import base64
import hashlib
import itertools
import urllib.parse
from pathlib import Path
from typing import Dict, Iterable, List, Optional...
cc_net-main
cc_net/minify.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#

import re
import unicodedata

UNICODE_PUNCT = {
    ",": ",",
    "。": ".",
    "、": ",",
    "„": '"',
    "”": '"',
    "“": '"',
    "«":...
cc_net-main
cc_net/text_normalizer.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#

import logging
import subprocess
from pathlib import Path
from typing import List

import func_argparse
import numpy as np

from cc_net impo...
cc_net-main
cc_net/regroup.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#

import argparse
import time
from pathlib import Path
from typing import Dict, List, Optional, Sequence, Tuple, Union

import kenlm  # type: ...
cc_net-main
cc_net/perplexity.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
cc_net-main
cc_net/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#

import time
from typing import Dict, Optional

import sacremoses  # type: ignore

from cc_net import jsonql, text_normalizer

class RobustT...
cc_net-main
cc_net/tokenizer.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#

"""
Tools to remove duplicate paragraphs across one or several shards.
"""
import argparse
import gc
import hashlib
import logging
import m...
cc_net-main
cc_net/dedup.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#

import contextlib
import functools
import logging
import re
import tempfile
import time
import urllib.request
from pathlib import Path
from ...
cc_net-main
cc_net/process_wet_file.py