| id (int64, range 0–190k) | prompt (string, lengths 21–13.4M) | docstring (string, lengths 1–12k, ⌀ = null) |
|---|---|---|
179,472 | from transformers import pipeline, set_seed
import random
import re
text_pipe = pipeline('text-generation', model='succinctly/text2image-prompt-generator')
def text_generate(input):
seed = random.randint(100, 1000000)
set_seed(seed)
for count in range(6):
sequences = text_pipe(input, max_lengt... | null |
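A hedged sketch of how the truncated `text_generate` row might continue, assuming the standard `transformers` pipeline API; the retry loop body, the `max_length` range, and the `prompt` parameter name (replacing the builtin-shadowing `input`) are assumptions, not the dataset's original code:

```python
from transformers import pipeline, set_seed
import random

text_pipe = pipeline("text-generation", model="succinctly/text2image-prompt-generator")

def text_generate(prompt):
    seed = random.randint(100, 1000000)
    set_seed(seed)
    for count in range(6):  # assumed: retry until a non-empty generation comes back
        sequences = text_pipe(prompt, max_length=random.randint(60, 90))
        text = sequences[0]["generated_text"].strip()
        if text:
            return text
    return prompt
```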
179,473 | from clip_interrogator import Config, Interrogator
import torch
config = Config()  # instantiate the imported Config; the snippet used `config` without defining it
ci = Interrogator(config)
import requests
import shutil
from PIL import Image
def get_prompt_from_image(image):
return ci.interrogate(image.convert('RGB')) | null |
179,474 | from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import torch
model = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-zh-en").eval()
tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-zh-en")
def translate(text):
with torch.no_grad():
encoded = tokenizer([text], return... | null |
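The `translate` body is cut off mid-call; a minimal sketch of the usual Marian generate-and-decode pattern (the `generate` call and the decoding step are assumptions based on the standard Hugging Face seq2seq API):

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import torch

model = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-zh-en").eval()
tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-zh-en")

def translate(text):
    with torch.no_grad():
        encoded = tokenizer([text], return_tensors="pt")
        # generate target-language ids, then strip special tokens while decoding
        sequences = model.generate(**encoded)
        return tokenizer.batch_decode(sequences, skip_special_tokens=True)[0]
```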
179,475 | from nltk.stem import WordNetLemmatizer
TAG_OVERRIDES = {
"flashbang": "stun-grenade",
"flashbangs": "stun-grenade",
"taze": "tase",
"tazes": "tase",
"tazer": "taser",
"tazers": "taser",
"kneck": "neck",
"knee-on-kneck": "knee-on-neck",
"bicycle": "bike",
"beanbag": "bean-bag",
... | null |
179,476 | import random
import string
import data_builder
from text_formatter import COMMON_MISSPELLINGS, fix_common_misspellings, format_tags, read_tag_file, TAG_OVERRIDES, WNL
def gen_md_from_rows(state, rows, all_tags):
city = ""
lines = []
for row in rows:
if row["city"] and row["city"] != city:
... | null |
179,477 | import random
import string
import data_builder
from text_formatter import COMMON_MISSPELLINGS, fix_common_misspellings, format_tags, read_tag_file, TAG_OVERRIDES, WNL
def validate_ids_unique(data):
seen = set()
for row in data:
row_id = row["id"]
if row_id in seen:
print(row)
... | null |
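A plausible completion of the truncated `validate_ids_unique` check; the failure behavior after the `print(row)` is an assumption:

```python
def validate_ids_unique(data):
    seen = set()
    for row in data:
        row_id = row["id"]
        if row_id in seen:
            print(row)
            raise ValueError(f"duplicate id: {row_id}")  # assumed failure mode
        seen.add(row_id)
```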
179,478 | import random
import string
import data_builder
from text_formatter import COMMON_MISSPELLINGS, fix_common_misspellings, format_tags, read_tag_file, TAG_OVERRIDES, WNL
def gen_id(row):
state = row["state"]
state_abbrev = us_state_to_abbrev[state].lower()
city = row["city"]
city_abbrev = city.replace(" "... | null |
179,479 | import csv
import glob
import json
import os
import re
import copy
from dateutil.parser import parse
from datetime import datetime, timezone
updated_at = datetime.now(timezone.utc).isoformat()
md_header = f"""
GENERATED FILE, PLEASE MAKE EDITS ON MASTER AT https://github.com/2020PB/police-brutality/
UPDATED AT: {updated_at}
"""
md_out_format = """
# {location}
{text}
""... | null |
179,480 | import csv
import glob
import json
import os
import re
import copy
from dateutil.parser import parse
from datetime import datetime, timezone
def to_csv_file_v1(data, target_path):
max_link_count = max(len(it["links"]) for it in data)
flat_data = []
for row in data:
# just write it but instead of a ... | null |
179,481 | import csv
import glob
import json
import os
import re
import copy
from dateutil.parser import parse
from datetime import datetime, timezone
updated_at = datetime.now(timezone.utc).isoformat()
def to_json_file_v1(data, target_path):
data_with_meta = {
"edit_at": "https://github.com/2020PB/police-brutality"... | null |
179,482 | import csv
import glob
import json
import os
import re
import copy
from dateutil.parser import parse
from datetime import datetime, timezone
updated_at = datetime.now(timezone.utc).isoformat()
def v2_only(item):
item = copy.deepcopy(item)
item["links"] = item["links_v2"]
del item["links_v2"]
return item... | null |
179,483 | import csv
import glob
import json
import os
import re
import copy
from dateutil.parser import parse
from datetime import datetime, timezone
readme_text = """
# /r/2020PoliceBrutality/ dataset
This repository exists to accumulate and contextualize evidence of police brutality during the 2020 George Floyd protests.
Our ... | null |
179,484 | import csv
import glob
import json
import os
import re
import copy
from dateutil.parser import parse
from datetime import datetime, timezone
md_dir = os.path.join(src_dir, "..", "reports")
def read_all_md_files(base_dir):
def process_md_texts(md_texts):
def read_all_data():
md_texts = read_all_md_files(md_dir)
... | null |
179,485 | import csv
import glob
import json
import os
import re
import copy
from dateutil.parser import parse
from datetime import datetime, timezone
def v1_only(item):
# Deepcopy to avoid affecting the original data
item = copy.deepcopy(item)
v1_keys = set(["links", "state", "city", "edit_at", "name", "date", "dat... | null |
179,486 | import itertools
import logging
import os
import random
import numpy as np
import tensorflow as tf
from tensorflow_tts.datasets.abstract_dataset import AbstractDataset
from tensorflow_tts.utils import find_files
def average_by_duration(x, durs):
def tf_average_by_duration(x, durs):
outs = tf.numpy_function(average... | null |
179,487 | import argparse
import logging
import os
from numba import jit
import sys
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import yaml
from tqdm import tqdm
from examples.tacotron2.tacotron_dataset import CharactorMelDataset
from tensorflow_tts.configs import Tacotron2Config
from tensorflow_tt... | null |
179,488 | import os
import shutil
from tqdm import tqdm
import argparse
from scipy.ndimage import zoom
from skimage.data import camera
import numpy as np
from scipy.spatial.distance import cdist
from pathlib import Path
def safemkdir(dirn):
if not os.path.isdir(dirn):
os.mkdir(dirn) | null |
179,489 | import os
import shutil
from tqdm import tqdm
import argparse
from scipy.ndimage import zoom
from skimage.data import camera
import numpy as np
from scipy.spatial.distance import cdist
from pathlib import Path
def duration_to_alignment(in_duration):
total_len = np.sum(in_duration)
num_chars = len(in_duration)
... | null |
179,490 | import os
import shutil
from tqdm import tqdm
import argparse
from scipy.ndimage import zoom
from skimage.data import camera
import numpy as np
from scipy.spatial.distance import cdist
from pathlib import Path
def rescale_alignment(in_alignment, in_targcharlen):
current_x = in_alignment.shape[0]
x_ratio = in_t... | null |
179,491 | import os
import shutil
from tqdm import tqdm
import argparse
from scipy.ndimage import zoom
from skimage.data import camera
import numpy as np
from scipy.spatial.distance import cdist
from pathlib import Path
def gather_dist(in_mtr, in_points):
    # initialize with known size for speed
full_coords = [(0, 0) for x ... | null |
179,492 | import os
import shutil
from tqdm import tqdm
import argparse
from scipy.ndimage import zoom
from skimage.data import camera
import numpy as np
from scipy.spatial.distance import cdist
from pathlib import Path
def get_pivot_points(in_att):
ret_points = []
for x in range(0, in_att.shape[0]):
for y in ra... | null |
179,494 | import tensorflow as tf
import sys
import argparse
import logging
import os
import numpy as np
import soundfile as sf
import yaml
from tqdm import tqdm
import tensorflow_tts
import tensorflow_tts.configs.melgan as MELGAN_CONFIG
from examples.melgan.audio_mel_dataset import AudioMelDataset
from tensorflow_tts.losses imp... | Initialize collater (mapping function) for Tensorflow Audio-Mel Dataset. Args: batch_max_steps (int): The maximum length of input signal in batch. hop_size (int): Hop size of auxiliary features. |
179,495 | import os
import numpy as np
import tensorflow as tf
from tensorflow_tts.datasets.abstract_dataset import AbstractDataset
from tensorflow_tts.utils import find_files
def average_by_duration(x, durs):
mel_len = durs.sum()
durs_cum = np.cumsum(np.pad(durs, (1, 0)))
    # calculate per-character f0/energy
x_char =... | null |
179,496 | from subprocess import call
from pathlib import Path
import click
import os
def run_mfa(
mfa_path: str,
corpus_directory: str,
lexicon: str,
acoustic_model_path: str,
output_directory: str,
jobs: str,
):
Path(output_directory).mkdir(parents=True, exist_ok=True)
call(
[
... | null |
179,497 | import numpy as np
import os
from tqdm import tqdm
import click
import logging
import sys
logging.basicConfig(
level=logging.DEBUG,
stream=sys.stdout,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
def fix(base_path: str, dur_path: str, trimmed_dur_path: str, use_norm: str):
... | null |
179,498 | import os
import re
from typing import Dict, List, Union, Tuple, Any
import librosa
import numpy as np
import soundfile as sf
from dataclasses import dataclass, field
from pypinyin import Style
from pypinyin.contrib.neutral_tone import NeutralToneWith5Mixin
from pypinyin.converter import DefaultConverter
from pypinyin.... | null |
179,499 | import argparse
import glob
import logging
import os
import yaml
import librosa
import numpy as np
import pyworld as pw
from functools import partial
from multiprocessing import Pool
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm
from tensorfl... | Run preprocessing process and compute statistics for normalizing. |
179,500 | import argparse
import glob
import logging
import os
import yaml
import librosa
import numpy as np
import pyworld as pw
from functools import partial
from multiprocessing import Pool
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm
from tensorfl... | Normalize mel spectrogram with pre-computed statistics. |
179,501 | import argparse
import glob
import logging
import os
import yaml
import librosa
import numpy as np
import pyworld as pw
from functools import partial
from multiprocessing import Pool
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm
from tensorfl... | Compute mean / std statistics of some features for later normalization. |
179,502 | import numpy as np
import tensorflow as tf
from scipy.signal import kaiser
from tensorflow_tts.models import BaseModel
from tensorflow_tts.models import TFMelGANGenerator
The provided code snippet includes necessary dependencies for implementing the `design_prototype_filter` function. Write a Python function `def desi... | Design prototype filter for PQMF. This method is based on `A Kaiser window approach for the design of prototype filters of cosine modulated filterbanks`_. Args: taps (int): The number of filter taps. cutoff_ratio (float): Cut-off frequency ratio. beta (float): Beta coefficient for kaiser window. Returns: ndarray: Implu... |
179,503 | import tensorflow as tf
from tensorflow_tts.models import BaseModel
The provided code snippet includes necessary dependencies for implementing the `get_initializer` function. Write a Python function `def get_initializer(initializer_seed=42)` to solve the following problem:
Creates a `tf.initializers.he_normal` with th... | Creates a `tf.initializers.he_normal` with the given seed. Args: initializer_seed: int, initializer seed. Returns: HeNormal initializer with seed = `initializer_seed`. |
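The docstring pins this one down to a one-liner; a sketch using the current Keras initializer class:

```python
import tensorflow as tf

def get_initializer(initializer_seed=42):
    """Creates a `tf.initializers.he_normal` with the given seed."""
    return tf.keras.initializers.HeNormal(seed=initializer_seed)
```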
179,504 | import numpy as np
import tensorflow as tf
from tensorflow_tts.models import BaseModel
from tensorflow_tts.utils import GroupConv1D, WeightNormalization
The provided code snippet includes necessary dependencies for implementing the `get_initializer` function. Write a Python function `def get_initializer(initializer_se... | Creates a `tf.initializers.glorot_normal` with the given seed. Args: initializer_seed: int, initializer seed. Returns: GlorotNormal initializer with seed = `initializer_seed`. |
179,505 | import numpy as np
import tensorflow as tf
from tensorflow_tts.models import BaseModel
The provided code snippet includes necessary dependencies for implementing the `get_initializer` function. Write a Python function `def get_initializer(initializer_range=0.02)` to solve the following problem:
Creates a `tf.initializ... | Creates a `tf.initializers.truncated_normal` with the given range. Args: initializer_range: float, initializer range for stddev. Returns: TruncatedNormal initializer with stddev = `initializer_range`. |
179,506 | import numpy as np
import tensorflow as tf
from tensorflow_tts.models import BaseModel
The provided code snippet includes necessary dependencies for implementing the `gelu` function. Write a Python function `def gelu(x)` to solve the following problem:
Gaussian Error Linear unit.
Here is the function:
def gelu(x):
... | Gaussian Error Linear unit. |
179,507 | import numpy as np
import tensorflow as tf
from tensorflow_tts.models import BaseModel
The provided code snippet includes necessary dependencies for implementing the `gelu_new` function. Write a Python function `def gelu_new(x)` to solve the following problem:
Smoother gaussian Error Linear Unit.
Here is the function... | Smoother gaussian Error Linear Unit. |
179,508 | import numpy as np
import tensorflow as tf
from tensorflow_tts.models import BaseModel
The provided code snippet includes necessary dependencies for implementing the `swish` function. Write a Python function `def swish(x)` to solve the following problem:
Swish activation function.
Here is the function:
def swish(x):... | Swish activation function. |
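The three truncated activation rows above have standard closed forms; minimal sketches consistent with the complete `mish` row that follows (the constants in `gelu_new` are the usual tanh-approximation values):

```python
import numpy as np
import tensorflow as tf

def gelu(x):
    """Gaussian Error Linear Unit via the exact Gaussian CDF."""
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.math.sqrt(2.0)))
    return x * cdf

def gelu_new(x):
    """Smoother GELU: the tanh approximation."""
    cdf = 0.5 * (1.0 + tf.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * tf.pow(x, 3))))
    return x * cdf

def swish(x):
    """Swish activation: x * sigmoid(x)."""
    return x * tf.sigmoid(x)
```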
179,509 | import numpy as np
import tensorflow as tf
from tensorflow_tts.models import BaseModel
def mish(x):
return x * tf.math.tanh(tf.math.softplus(x)) | null |
179,510 | import collections
import numpy as np
import tensorflow as tf
from tensorflow_addons.seq2seq import BahdanauAttention, Decoder, Sampler
from tensorflow_tts.utils import dynamic_decode
from tensorflow_tts.models import BaseModel
The provided code snippet includes necessary dependencies for implementing the `get_initial... | Creates a `tf.initializers.truncated_normal` with the given range. Args: initializer_range: float, initializer range for stddev. Returns: TruncatedNormal initializer with stddev = `initializer_range`. |
179,511 | import collections
import numpy as np
import tensorflow as tf
from tensorflow_addons.seq2seq import BahdanauAttention, Decoder, Sampler
from tensorflow_tts.utils import dynamic_decode
from tensorflow_tts.models import BaseModel
The provided code snippet includes necessary dependencies for implementing the `gelu` funct... | Gaussian Error Linear unit. |
179,512 | import collections
import numpy as np
import tensorflow as tf
from tensorflow_addons.seq2seq import BahdanauAttention, Decoder, Sampler
from tensorflow_tts.utils import dynamic_decode
from tensorflow_tts.models import BaseModel
The provided code snippet includes necessary dependencies for implementing the `gelu_new` f... | Smoother gaussian Error Linear Unit. |
179,513 | import collections
import numpy as np
import tensorflow as tf
from tensorflow_addons.seq2seq import BahdanauAttention, Decoder, Sampler
from tensorflow_tts.utils import dynamic_decode
from tensorflow_tts.models import BaseModel
The provided code snippet includes necessary dependencies for implementing the `swish` func... | Swish activation function. |
179,514 | import collections
import numpy as np
import tensorflow as tf
from tensorflow_addons.seq2seq import BahdanauAttention, Decoder, Sampler
from tensorflow_tts.utils import dynamic_decode
from tensorflow_tts.models import BaseModel
def mish(x):
return x * tf.math.tanh(tf.math.softplus(x)) | null |
179,515 | import fnmatch
import os
import re
import tempfile
from pathlib import Path
import tensorflow as tf
The provided code snippet includes necessary dependencies for implementing the `find_files` function. Write a Python function `def find_files(root_dir, query="*.wav", include_root_dir=True)` to solve the following probl... | Find files recursively. Args: root_dir (str): Root root_dir to find. query (str): Query to find. include_root_dir (bool): If False, root_dir name is not included. Returns: list: List of found filenames. |
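A sketch of `find_files` matching its docstring, using the `fnmatch`/`os.walk` pattern the row's imports suggest:

```python
import fnmatch
import os

def find_files(root_dir, query="*.wav", include_root_dir=True):
    """Find files recursively under root_dir whose names match query."""
    files = []
    for root, _, filenames in os.walk(root_dir, followlinks=True):
        for filename in fnmatch.filter(filenames, query):
            files.append(os.path.join(root, filename))
    if not include_root_dir:
        # drop the root_dir prefix when the caller wants relative names
        files = [f.replace(root_dir + os.sep, "") for f in files]
    return files
```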
179,516 | import fnmatch
import os
import re
import tempfile
from pathlib import Path
import tensorflow as tf
def _path_requires_gfile(filepath):
"""Checks if the given path requires use of GFile API.
Args:
filepath (str): Path to check.
Returns:
bool: True if the given path needs GFile API to access,... | Save model weights. Same as model.save_weights(filepath), but supports saving to S3 or GCS buckets using TensorFlow GFile API. Args: model (tf.keras.Model): Model to save. filepath (str): Path to save the model weights to. |
179,517 | import fnmatch
import os
import re
import tempfile
from pathlib import Path
import tensorflow as tf
def _path_requires_gfile(filepath):
"""Checks if the given path requires use of GFile API.
Args:
filepath (str): Path to check.
Returns:
bool: True if the given path needs GFile API to access,... | Load model weights. Same as model.load_weights(filepath), but supports loading from S3 or GCS buckets using TensorFlow GFile API. Args: model (tf.keras.Model): Model to load weights to. filepath (str): Path to the weights file. |
179,518 | import re
from tensorflow_tts.utils.korean import tokenize as ko_tokenize
from tensorflow_tts.utils.number_norm import normalize_numbers
from unidecode import unidecode
def lowercase(text):
return text.lower()
def collapse_whitespace(text):
return re.sub(_whitespace_re, " ", text)
The provided code snippet inc... | Basic pipeline that lowercases and collapses whitespace without transliteration. |
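Given the helpers shown, `basic_cleaners` is just their composition; a sketch (the `_whitespace_re` definition is an assumption, since the row does not show it):

```python
import re

_whitespace_re = re.compile(r"\s+")  # assumed definition

def lowercase(text):
    return text.lower()

def collapse_whitespace(text):
    return re.sub(_whitespace_re, " ", text)

def basic_cleaners(text):
    """Basic pipeline that lowercases and collapses whitespace without transliteration."""
    return collapse_whitespace(lowercase(text))
```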
179,519 | import re
from tensorflow_tts.utils.korean import tokenize as ko_tokenize
from tensorflow_tts.utils.number_norm import normalize_numbers
from unidecode import unidecode
def lowercase(text):
return text.lower()
def collapse_whitespace(text):
return re.sub(_whitespace_re, " ", text)
def convert_to_ascii(text):
... | Pipeline for non-English text that transliterates to ASCII. |
179,520 | import re
from tensorflow_tts.utils.korean import tokenize as ko_tokenize
from tensorflow_tts.utils.number_norm import normalize_numbers
from unidecode import unidecode
def expand_abbreviations(text):
for regex, replacement in _abbreviations:
text = re.sub(regex, replacement, text)
return text
def expan... | Pipeline for English text, including number and abbreviation expansion. |
179,521 | import re
from tensorflow_tts.utils.korean import tokenize as ko_tokenize
from tensorflow_tts.utils.number_norm import normalize_numbers
from unidecode import unidecode
The provided code snippet includes necessary dependencies for implementing the `korean_cleaners` function. Write a Python function `def korean_cleaner... | Pipeline for Korean text, including number and abbreviation expansion. |
179,522 | import re
from tensorflow_tts.utils.korean import tokenize as ko_tokenize
from tensorflow_tts.utils.number_norm import normalize_numbers
from unidecode import unidecode
The provided code snippet includes necessary dependencies for implementing the `german_cleaners` function. Write a Python function `def german_cleaner... | Pipeline for German text, including number and abbreviation expansion. |
179,523 | import numpy as np
def is_outlier(x, p25, p75):
"""Check if value is an outlier."""
lower = p25 - 1.5 * (p75 - p25)
upper = p75 + 1.5 * (p75 - p25)
return x <= lower or x >= upper
The provided code snippet includes necessary dependencies for implementing the `remove_outlier` function. Write a Python fu... | Remove outlier from x. |
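One plausible `remove_outlier` consistent with the 1.5×IQR fence in `is_outlier`; whether outliers are dropped or replaced is an assumption, and this sketch drops them:

```python
import numpy as np

def is_outlier(x, p25, p75):
    """Check if value is an outlier."""
    lower = p25 - 1.5 * (p75 - p25)
    upper = p75 + 1.5 * (p75 - p25)
    return x <= lower or x >= upper

def remove_outlier(x):
    """Remove outlier from x (assumed: filter out fenced values)."""
    p25, p75 = np.percentile(x, [25, 75])
    return np.array([v for v in x if not is_outlier(v, p25, p75)])
```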
179,524 | import os
import librosa
import numpy as np
import soundfile as sf
import tensorflow as tf
from sklearn.preprocessing import StandardScaler
The provided code snippet includes necessary dependencies for implementing the `griffin_lim_lb` function. Write a Python function `def griffin_lim_lb( mel_spec, stats_path, da... | Generate wave from mel spectrogram with Griffin-Lim algorithm using Librosa. Args: mel_spec (ndarray): array representing the mel spectrogram. stats_path (str): path to the `stats.npy` file containing norm statistics. dataset_config (Dict): dataset configuration parameters. n_iter (int): number of iterations for GL. ou... |
179,525 | import tensorflow as tf
def return_strategy():
physical_devices = tf.config.list_physical_devices("GPU")
if len(physical_devices) == 0:
return tf.distribute.OneDeviceStrategy(device="/cpu:0")
elif len(physical_devices) == 1:
return tf.distribute.OneDeviceStrategy(device="/gpu:0")
else:
... | null |
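The truncated `else` branch presumably falls back to a multi-GPU strategy; a sketch assuming `MirroredStrategy`:

```python
import tensorflow as tf

def return_strategy():
    physical_devices = tf.config.list_physical_devices("GPU")
    if len(physical_devices) == 0:
        return tf.distribute.OneDeviceStrategy(device="/cpu:0")
    elif len(physical_devices) == 1:
        return tf.distribute.OneDeviceStrategy(device="/gpu:0")
    else:
        # assumed: mirror variables across all visible GPUs
        return tf.distribute.MirroredStrategy()
```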
179,526 | import tensorflow as tf
The provided code snippet includes necessary dependencies for implementing the `calculate_3d_loss` function. Write a Python function `def calculate_3d_loss(y_gt, y_pred, loss_fn)` to solve the following problem:
Calculate 3d loss, normally it's mel-spectrogram loss.
Here is the function:
def ... | Calculate 3d loss, normally it's mel-spectrogram loss. |
179,527 | import tensorflow as tf
The provided code snippet includes necessary dependencies for implementing the `calculate_2d_loss` function. Write a Python function `def calculate_2d_loss(y_gt, y_pred, loss_fn)` to solve the following problem:
Calculate 2d loss, normally it's durrations/f0s/energys loss.
Here is the function... | Calculate 2d loss, normally it's durrations/f0s/energys loss. |
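Both loss helpers reduce to "trim ground truth and prediction to the shorter time axis, then apply `loss_fn`"; a hedged sketch, where the trimming convention is an assumption:

```python
import tensorflow as tf

def calculate_3d_loss(y_gt, y_pred, loss_fn):
    """Calculate 3d loss, normally it's mel-spectrogram loss."""
    length = tf.minimum(tf.shape(y_gt)[1], tf.shape(y_pred)[1])
    return loss_fn(y_gt[:, :length], y_pred[:, :length])

def calculate_2d_loss(y_gt, y_pred, loss_fn):
    """Calculate 2d loss, normally it's durations/f0s/energies loss."""
    length = tf.minimum(tf.shape(y_gt)[1], tf.shape(y_pred)[1])
    return loss_fn(y_gt[:, :length], y_pred[:, :length])
```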
179,528 | from typing import Any, Optional, Tuple, Union
import tensorflow as tf
from tensorflow.python.ops import control_flow_util
from tensorflow_addons.seq2seq import Decoder
from tensorflow_addons.seq2seq.decoder import (
BaseDecoder,
_prepend_batch,
_transpose_batch_time,
)
from tensorflow_addons.utils.types im... | Perform dynamic decoding with `decoder`. Calls initialize() once and step() repeatedly on the Decoder object. Args: decoder: A `Decoder` instance. output_time_major: Python boolean. Default: `False` (batch major). If `True`, outputs are returned as time major tensors (this mode is faster). Otherwise, outputs are return... |
179,529 | # import ast
import json
import os
import re
from jamo import h2j, hangul_to_jamo, j2h, jamo_to_hcj
def get_mode(char):
if is_lead(char):
return 0
elif is_vowel(char):
return 1
elif is_tail(char):
return 2
else:
return -1
def _get_text_from_candidates(candidates):
if le... | null |
179,530 | # import ast
import json
import os
import re
from jamo import h2j, hangul_to_jamo, j2h, jamo_to_hcj
def compare_sentence_with_jamo(text1, text2):
return h2j(text1) != h2j(text2) | null |
179,531 | # import ast
import json
import os
import re
from jamo import h2j, hangul_to_jamo, j2h, jamo_to_hcj
def tokenize(text, as_id=False):
    # Use hangul_to_jamo from the jamo package to split a Hangul string into initial/medial/final jamo.
text = normalize(text)
tokens = list(
hangul_to_jamo(text)
) # '존경하는' --> ['ᄌ', 'ᅩ', 'ᆫ', 'ᄀ'... | null |
179,532 | import copy
import random
from dataclasses import dataclass, field
from typing import Optional, Dict, Sequence
import torch
import torch.distributed
import transformers
from transformers import Trainer
from datasets import load_dataset
class ModelArguments:
model_name_or_path: Optional[str] = field(default="deepsee... | null |
179,533 | import os
from threading import Thread
from typing import Iterator
import gradio as gr
import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
with gr.Blocks(css="style.css") as demo:
gr.M... | null |
179,534 | import argparse
import json
import os
import torch
import re
from pathlib import Path
from tqdm import tqdm
data_abs_dir = Path(__file__).parent / "data"
from transformers import AutoTokenizer, AutoModelForCausalLM
from human_eval.evaluation import evaluate_functional_correctness
def read_test_examples(data_path: str):... | null |
179,535 | import fire
import sys
from .data import HUMAN_EVAL
from .evaluation import evaluate_functional_correctness
def evaluate_functional_correctness(
input_file: str = None,
tmp_dir: str = "./",
n_workers: int = 32,
timeout: float = 10.0,
problem_file: str = "../data/humaneval_python... | Evaluates the functional correctness of generated samples, and writes results to f"{sample_file}_results.jsonl.gz" |
179,536 | from typing import Iterable, Dict
import gzip
import json
import os
ROOT = os.path.dirname(os.path.abspath(__file__))
HUMAN_EVAL = os.path.join(ROOT, "..", "data", "HumanEval.jsonl.gz")
def stream_jsonl(filename: str) -> Iterable[Dict]:
def read_problems(evalset_file: str = HUMAN_EVAL) -> Dict[str, Dict]:
return {task["task_id"]: task for task in stream_jsonl(eva... | null |
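A sketch of the truncated `stream_jsonl`, handling both plain and gzip-compressed jsonl (the blank-line guard is an assumption):

```python
import gzip
import json
from typing import Iterable, Dict

def stream_jsonl(filename: str) -> Iterable[Dict]:
    """Parses each jsonl line and yields it as a dictionary."""
    opener = gzip.open if filename.endswith(".gz") else open
    with opener(filename, "rt") as fp:
        for line in fp:
            if line.strip():  # skip blank lines (assumed)
                yield json.loads(line)
```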
179,537 | from typing import Iterable, Dict
import gzip
import json
import os
The provided code snippet includes necessary dependencies for implementing the `write_jsonl` function. Write a Python function `def write_jsonl(filename: str, data: Iterable[Dict], append: bool = False)` to solve the following problem:
Writes an itera... | Writes an iterable of dictionaries to jsonl |
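The mirror-image writer; a sketch that supports append mode and `.gz` output (the byte-level details are assumptions):

```python
import gzip
import json
import os
from typing import Iterable, Dict

def write_jsonl(filename: str, data: Iterable[Dict], append: bool = False):
    """Writes an iterable of dictionaries to jsonl."""
    mode = "ab" if append else "wb"
    filename = os.path.expanduser(filename)
    if filename.endswith(".gz"):
        with open(filename, mode) as fp, gzip.GzipFile(fileobj=fp, mode="wb") as gzfp:
            for x in data:
                gzfp.write((json.dumps(x) + "\n").encode("utf-8"))
    else:
        with open(filename, mode) as fp:
            for x in data:
                fp.write((json.dumps(x) + "\n").encode("utf-8"))
```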
179,538 | def _clean_python_code_for_sft(code):
code = code.replace("\r", "")
if "```python" in code:
code_start_idx = code.index("```python")
code = code[code_start_idx:].replace("```python", "").strip()
end_idx = code.find("```") if "```" in code else len(code)
code = code[:end_idx].stri... | Cleans up the generated code. |
179,539 | from vllm import LLM, SamplingParams
import json
from transformers import AutoTokenizer
from pathlib import Path
def generate_batch(examples, tokenizer, llm, model: str):
stop = None
if model == 'deepseekcoder-instruct':
prompts = [
tokenizer.apply_chat_template([{'role': 'user', 'content': ... | null |
179,540 | import re
import json
from pathlib import Path
from collections import defaultdict
from human_eval.evaluation import evaluate_functional_correctness
version = "20240121-Jul"
DATA_DIR = Path(__file__).parent / "data"
def extract_python_code(generation: str):
generation = generation.replace("[PYTHON]", '```python').r... | null |
179,541 | import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import random
import subprocess
import tempfile
import gzip
import json
from typing import *
import traceback
java_exec = ""
node_exec = ""
tsc_exec = ""
go_exec = ""
php_exec = ""
cs_exec = ""
def time_limit(... | Evaluates the functional correctness of a completion by running the test suite provided in the problem. |
179,542 | from typing import Iterable, Dict
import gzip
import json
import os
ROOT = os.path.dirname(os.path.abspath(__file__))
HUMAN_EVAL = os.path.join(ROOT, "..", "data", "HumanEval.jsonl.gz")
def stream_jsonl(filename: str) -> Iterable[Dict]:
"""
Parses each jsonl line and yields it as a dictionary
"""
if filename.endswith(".gz"):
with open(filename,... | null |
179,544 | import os
import json
import gzip
import numpy as np
import itertools
from typing import *
from tqdm.auto import tqdm
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
from human_eval.data import stream_jsonl
from human_eval.execution import check_correctness
def read_d... | null |
179,545 | import argparse
import json
import os
import torch
from pathlib import Path
from tqdm import tqdm
data_abs_dir = Path(__file__).parent / "data"
from utils.utils import extract_generation_code, languge_settings
from transformers import AutoTokenizer, AutoModelForCausalLM
from human_eval.evaluation import evaluate_functi... | null |
179,546 | import argparse
import json
import os
import torch
from pathlib import Path
from tqdm import tqdm
data_abs_dir = Path(__file__).parent / "data"
from utils.utils import extract_generation_code, languge_settings
from transformers import AutoTokenizer, AutoModelForCausalLM
from human_eval.evaluation import evaluate_functi... | null |
179,548 | from typing import Iterable, Dict
import gzip
import json
import os
ROOT = os.path.dirname(os.path.abspath(__file__))
HUMAN_EVAL = os.path.join(ROOT, "..", "data", "HumanEval.jsonl.gz")
def stream_jsonl(filename: str) -> Iterable[Dict]:
"""
Parses each jsonl line and yields it as a dictionary
"""
if filename.endswith(".gz"):
with open(filename,... | null |
179,550 | import re
def _clean_python_code_for_sft(code):
code = code.replace("\r", "")
if "```python" in code:
code_start_idx = code.index("```python")
code = code[code_start_idx:].replace("```python", "").strip()
end_idx = code.find("```") if "```" in code else len(code)
code = code[:end... | Cleans up the generated code. |
179,551 | import os
import re
import json
import argparse
import torch
import numpy as np
from utils.parser import *
from utils.grader import *
from utils.python_executor import PythonExecutor
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
def extract_python_block_with_solution(text):
"""
... | Inference on the dataset. :param args: Arguments. :return: None |
179,552 | import os
import re
import json
import argparse
import torch
import numpy as np
from utils.parser import *
from utils.grader import *
from utils.python_executor import PythonExecutor
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
The provided code snippet includes necessary dependencies... | Evaluate the results. :param args: Arguments. :return: None |
179,553 | import multiprocessing
from math import isclose
from typing import Union
from sympy import simplify, N
from sympy.parsing.sympy_parser import parse_expr
from sympy.parsing.latex import parse_latex
def math_equal(prediction: Union[bool, float, str],
reference: Union[float, str],
include_p... | null |
179,554 | import multiprocessing
from math import isclose
from typing import Union
from sympy import simplify, N
from sympy.parsing.sympy_parser import parse_expr
from sympy.parsing.latex import parse_latex
def math_equal(prediction: Union[bool, float, str],
reference: Union[float, str],
include_p... | null |
179,555 | import re
from typing import Any, Dict
def strip_string(string):
string = str(string).strip()
# linebreaks
string = string.replace("\n", "")
# right "."
string = string.rstrip(".")
# remove inverse spaces
string = string.replace("\\!", "")
string = string.replace("\\ ", "")
# replace... | null |
179,556 | import re
from typing import Any, Dict
def parse_question(example, data_name):
question = ""
if data_name == "asdiv":
question = f"{example['body'].strip()} {example['question'].strip()}"
elif data_name == "svamp":
body = example["Body"].strip()
if not body.endswith("."):
... | null |
179,557 | import re
from typing import Any, Dict
def strip_string(string):
string = str(string).strip()
# linebreaks
string = string.replace("\n", "")
# right "."
string = string.rstrip(".")
# remove inverse spaces
string = string.replace("\\!", "")
string = string.replace("\\ ", "")
# replace... | null |
179,558 | import io
import regex
import pickle
import traceback
import copy
import datetime
import multiprocessing
import dateutil.relativedelta
import multiprocess
from multiprocess import Pool
from typing import Any, Dict, Optional
from pebble import ProcessPool
from tqdm import tqdm
from concurrent.futures import TimeoutError... | null |
179,559 | import argparse
import os
import sys
from datetime import datetime
import cv2 as cv
import numpy as np
from stitching import AffineStitcher, Stitcher, __version__
from stitching.blender import Blender
from stitching.camera_adjuster import CameraAdjuster
from stitching.camera_estimator import CameraEstimator
from stitch... | null |
179,560 | import os
import cv2 as cv
from .images import Images
from .seam_finder import SeamFinder
from .timelapser import Timelapser
def write_verbose_result(dir_name, img_name, img):
cv.imwrite(verbose_output(dir_name, img_name), img)
def verbose_output(dir_name, file):
return os.path.join(dir_name, file)
class Image... | null |
179,561 | import warnings
from collections import OrderedDict
import cv2 as cv
import numpy as np
from .blender import Blender
from .stitching_error import StitchingWarning
def create_img_by_size(size, color=(0, 0, 0)):
width, height = size
img = np.zeros((height, width, 3), np.uint8)
img[:] = color
return img
c... | null |
179,562 | import warnings
from collections import OrderedDict
import cv2 as cv
import numpy as np
from .blender import Blender
from .stitching_error import StitchingWarning
def add_weighted_image(img1, img2, alpha):
return cv.addWeighted(img1, alpha, img2, (1.0 - alpha), 0.0) | null |
179,563 | import warnings
from collections import OrderedDict
import cv2 as cv
import numpy as np
from .blender import Blender
from .stitching_error import StitchingWarning
def check_if_pixel_or_neighbor_is_black(img, x, y):
check = [
is_pixel_black(img, x, y),
is_pixel_black(img, x + 1, y),
is_pixel_... | null |
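The neighborhood check above leans on an `is_pixel_black` helper that is cut off; a hypothetical version with bounds guarding:

```python
import numpy as np

def is_pixel_black(img, x, y):
    # hypothetical helper: treat out-of-bounds coordinates as black so
    # edge pixels count as bordering the void
    h, w = img.shape[:2]
    if x < 0 or y < 0 or x >= w or y >= h:
        return True
    return bool(np.all(img[y, x] == 0))
```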
179,564 | import datetime
import logging
import logging.handlers
import os
import sys
from torch import nn
import numpy as np
import requests
from videollava.constants import LOGDIR
def order_pick_k(lst, k):
if len(lst) <= k:
return lst
rng = np.random.random(len(lst))
index = np.argsort(rng)[:k]
index_s... | null |
179,565 | import datetime
import logging
import logging.handlers
import os
import sys
from torch import nn
import numpy as np
import requests
from videollava.constants import LOGDIR
handler = None
class StreamToLogger(object):
"""
Fake file-like stream object that redirects writes to a logger instance.
"""
def __... | null |
179,566 | import datetime
import logging
import logging.handlers
import os
import sys
from torch import nn
import numpy as np
import requests
from videollava.constants import LOGDIR
The provided code snippet includes necessary dependencies for implementing the `violates_moderation` function. Write a Python function `def violate... | Check whether the text violates OpenAI moderation API. |
179,567 | import datetime
import logging
import logging.handlers
import os
import sys
from torch import nn
import numpy as np
import requests
from videollava.constants import LOGDIR
def pretty_print_semaphore(semaphore):
if semaphore is None:
return "None"
return f"Semaphore(value={semaphore._value}, locked={sem... | null |
179,568 | from io import BytesIO
import requests
from PIL import Image
def load_image(image_file):
if image_file.startswith('http://') or image_file.startswith('https://'):
response = requests.get(image_file)
image = Image.open(BytesIO(response.content)).convert('RGB')
else:
image = Image.open(im... | null |
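The local-file branch is truncated mid-call; a sketch of the obvious completion:

```python
from io import BytesIO
from PIL import Image
import requests

def load_image(image_file):
    if image_file.startswith(("http://", "https://")):
        response = requests.get(image_file)
        image = Image.open(BytesIO(response.content)).convert("RGB")
    else:
        # local path: read from disk and normalize to RGB (assumed)
        image = Image.open(image_file).convert("RGB")
    return image
```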
179,569 | import argparse
import asyncio
import json
import time
import threading
import uuid
from fastapi import FastAPI, Request, BackgroundTasks
from fastapi.responses import StreamingResponse
import requests
import torch
import uvicorn
from functools import partial
from videollava.constants import WORKER_HEART_BEAT_INTERVAL
... | null |
179,570 | import argparse
import asyncio
import json
import time
import threading
import uuid
from fastapi import FastAPI, Request, BackgroundTasks
from fastapi.responses import StreamingResponse
import requests
import torch
import uvicorn
from functools import partial
from videollava.constants import WORKER_HEART_BEAT_INTERVAL
... | null |
179,571 | import argparse
import asyncio
import json
import time
import threading
import uuid
from fastapi import FastAPI, Request, BackgroundTasks
from fastapi.responses import StreamingResponse
import requests
import torch
import uvicorn
from functools import partial
from videollava.constants import WORKER_HEART_BEAT_INTERVAL
... | null |
179,572 | import argparse
import asyncio
import dataclasses
from enum import Enum, auto
import json
import logging
import time
from typing import List, Union
import threading
from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
import numpy as np
import requests
import uvicorn
from videollava.cons... | null |
179,573 | import argparse
import asyncio
import dataclasses
from enum import Enum, auto
import json
import logging
import time
from typing import List, Union
import threading
from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
import numpy as np
import requests
import uvicorn
from videollava.cons... | null |
179,574 | import argparse
import asyncio
import dataclasses
from enum import Enum, auto
import json
import logging
import time
from typing import List, Union
import threading
from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
import numpy as np
import requests
import uvicorn
from videollava.cons... | null |
179,575 | import argparse
import asyncio
import dataclasses
from enum import Enum, auto
import json
import logging
import time
from typing import List, Union
import threading
from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
import numpy as np
import requests
import uvicorn
from videollava.cons... | null |