Dataset preview. Columns:
    id          int64     values 0 to 190k
    prompt      string    lengths 21 to 13.4M characters
    docstring   string    lengths 1 to 12k characters
Each row below lists the id, then the (truncated) prompt, then the docstring (null when absent).
143,118
import argparse import asyncio import json import time import threading import uuid from fastapi import FastAPI, Request, BackgroundTasks from fastapi.responses import StreamingResponse import requests import torch import uvicorn from functools import partial from llava.constants import WORKER_HEART_BEAT_INTERVAL from ...
null
143,119
import argparse import asyncio import dataclasses from enum import Enum, auto import json import logging import time from typing import List, Union import threading from fastapi import FastAPI, Request from fastapi.responses import StreamingResponse import numpy as np import requests import uvicorn from llava.constants...
null
143,120
import argparse import asyncio import dataclasses from enum import Enum, auto import json import logging import time from typing import List, Union import threading from fastapi import FastAPI, Request from fastapi.responses import StreamingResponse import numpy as np import requests import uvicorn from llava.constants...
null
143,121
import argparse import asyncio import dataclasses from enum import Enum, auto import json import logging import time from typing import List, Union import threading from fastapi import FastAPI, Request from fastapi.responses import StreamingResponse import numpy as np import requests import uvicorn from llava.constants...
null
143,122
import argparse import asyncio import dataclasses from enum import Enum, auto import json import logging import time from typing import List, Union import threading from fastapi import FastAPI, Request from fastapi.responses import StreamingResponse import numpy as np import requests import uvicorn from llava.constants...
null
143,123
import argparse import asyncio import dataclasses from enum import Enum, auto import json import logging import time from typing import List, Union import threading from fastapi import FastAPI, Request from fastapi.responses import StreamingResponse import numpy as np import requests import uvicorn from llava.constants...
null
143,124
import argparse import asyncio import dataclasses from enum import Enum, auto import json import logging import time from typing import List, Union import threading from fastapi import FastAPI, Request from fastapi.responses import StreamingResponse import numpy as np import requests import uvicorn from llava.constants...
null
143,125
import argparse import asyncio import dataclasses from enum import Enum, auto import json import logging import time from typing import List, Union import threading from fastapi import FastAPI, Request from fastapi.responses import StreamingResponse import numpy as np import requests import uvicorn from llava.constants...
null
143,126
import argparse import asyncio import dataclasses from enum import Enum, auto import json import logging import time from typing import List, Union import threading from fastapi import FastAPI, Request from fastapi.responses import StreamingResponse import numpy as np import requests import uvicorn from llava.constants...
null
143,127
import argparse import datetime import json import os import time import gradio as gr import requests from llava.conversation import (default_conversation, conv_templates, SeparatorStyle) from llava.constants import LOGDIR from llava.utils import (build_logger, server_error_msg, v...
null
143,128
import argparse import asyncio from concurrent.futures import ThreadPoolExecutor import json import time import threading import uuid from fastapi import FastAPI, Request, BackgroundTasks from fastapi.responses import StreamingResponse import requests import re import uvicorn from functools import partial from llava.co...
null
143,129
import argparse import asyncio from concurrent.futures import ThreadPoolExecutor import json import time import threading import uuid from fastapi import FastAPI, Request, BackgroundTasks from fastapi.responses import StreamingResponse import requests import re import uvicorn from functools import partial from llava.co...
null
143,130
import argparse import asyncio from concurrent.futures import ThreadPoolExecutor import json import time import threading import uuid from fastapi import FastAPI, Request, BackgroundTasks from fastapi.responses import StreamingResponse import requests import re import uvicorn from functools import partial from llava.co...
null
143,131
import argparse import asyncio from concurrent.futures import ThreadPoolExecutor import json import time import threading import uuid from fastapi import FastAPI, Request, BackgroundTasks from fastapi.responses import StreamingResponse import requests import re import uvicorn from functools import partial from llava.co...
null
143,132
import os import copy from dataclasses import dataclass, field import json import logging import pathlib from typing import Dict, Optional, Sequence, List import torch import transformers import tokenizers from llava.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_...
null
143,133
import os import copy from dataclasses import dataclass, field import json import logging import pathlib from typing import Dict, Optional, Sequence, List import torch import transformers import tokenizers from llava.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_...
Given a list of sources, each is a conversation list. This transform: 1. Add signal '### ' at the beginning each sentence, with end signal '\n'; 2. Concatenate conversations together; 3. Tokenize the concatenated conversation; 4. Make a deepcopy as the target. Mask human words with IGNORE_INDEX.
143,134
import os import copy from dataclasses import dataclass, field import json import logging import pathlib from typing import Dict, Optional, Sequence, List import torch import transformers import tokenizers from llava.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_...
null
143,135
from typing import Optional, Tuple import warnings import torch import transformers from transformers.models.llama.modeling_llama import apply_rotary_pos_emb, repeat_kv from flash_attn.bert_padding import unpad_input, pad_input def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.T...
null
143,137
import os import torch import torch.nn as nn from torch.utils.data import Sampler from transformers import Trainer from transformers.trainer import ( is_sagemaker_mp_enabled, get_parameter_names, has_length, ALL_LAYERNORM_LAYERS, logger, ) from typing import List, Optional def get_length_grouped_ind...
null
143,138
from PIL import Image from io import BytesIO import base64 import torch import math import ast from transformers import StoppingCriteria from llava.constants import IMAGE_TOKEN_INDEX def select_best_resolution(original_size, possible_resolutions): """ Selects the best resolution from a list of possible resoluti...
Calculate the shape of the image patch grid after the preprocessing for images of any resolution. Args: image_size (tuple): The size of the input image in the format (width, height). grid_pinpoints (str): A string representation of a list of possible resolutions. patch_size (int): The size of each image patch. Returns:...
143,139
import os from .clip_encoder import CLIPVisionTower class CLIPVisionTower(nn.Module): def __init__(self, vision_tower, args, delay_load=False): super().__init__() self.is_loaded = False self.vision_tower_name = vision_tower self.select_layer = args.mm_vision_select_layer s...
null
143,141
import argparse import torch from tqdm import tqdm from transformers import AutoTokenizer, AutoModelForCausalLM from llava.model.utils import auto_upgrade def auto_upgrade(config): cfg = AutoConfig.from_pretrained(config) if 'llava' in config and 'llava' not in cfg.model_type: assert cfg.model_type == ...
null
143,142
from abc import ABC, abstractmethod import torch import torch.nn as nn from .multimodal_encoder.builder import build_vision_tower from .multimodal_projector.builder import build_vision_projector from llava.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_EN...
Unpads a PyTorch tensor of a padded and resized image. Args: tensor (torch.Tensor): The image tensor, assumed to be in CxHxW format. original_size (tuple): The original size of the image (height, width). Returns: torch.Tensor: The unpadded image tensor.
143,143
import argparse import torch from transformers import AutoTokenizer, AutoModelForCausalLM from llava.model import * from llava.model.utils import auto_upgrade def auto_upgrade(config): cfg = AutoConfig.from_pretrained(config) if 'llava' in config and 'llava' not in cfg.model_type: assert cfg.model_type...
null
143,145
import json import os from collections import defaultdict import numpy as np import argparse def parse_args(): parser = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.') parser.add_argument('-d', '--dir', default=None) parser.add_argument('-v', '--version', default=None) parser.add_ar...
null
143,146
import argparse import json import os import openai import time NUM_SECONDS_TO_SLEEP = 0.5 def get_eval(content: str, max_tokens: int): while True: try: response = openai.ChatCompletion.create( model='gpt-4-0314', messages=[{ 'role': 'system',...
null
143,147
import argparse import json import os import openai import time def parse_score(review): try: score_pair = review.split('\n')[0] score_pair = score_pair.replace(',', ' ') sp = score_pair.split(' ') if len(sp) == 2: return [float(sp[0]), float(sp[1])] else: ...
null
143,148
import os import argparse import json import re from llava.eval.m4c_evaluator import TextVQAAccuracyEvaluator def get_args(): parser = argparse.ArgumentParser() parser.add_argument('--annotation-file', type=str) parser.add_argument('--result-file', type=str) parser.add_argument('--result-dir', type=str...
null
143,149
import os import argparse import json import re from llava.eval.m4c_evaluator import TextVQAAccuracyEvaluator def prompt_processor(prompt): if prompt.startswith('OCR tokens: '): pattern = r"Question: (.*?) Short answer:" match = re.search(pattern, prompt, re.DOTALL) question = match.group(1)...
null
143,155
import argparse import json import os import time import concurrent.futures import openai import tqdm import shortuuid MODEL = 'gpt-3.5-turbo' MODEL_ID = 'gpt-3.5-turbo:20230327' def get_answer(question_id: int, question: str, max_tokens: int): ans = { 'answer_id': shortuuid.uuid(), 'question_id': ...
null
143,156
import argparse from transformers import AutoTokenizer, AutoModelForCausalLM, StoppingCriteria import torch import os import json from tqdm import tqdm import shortuuid from llava.conversation import default_conversation from llava.utils import disable_torch_init default_conversation = conv_vicuna_v1 def disable_torc...
null
143,157
import argparse import torch import os import json from tqdm import tqdm import shortuuid from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN from llava.conversation import conv_templates, SeparatorStyle from llava.model.builder import load_pretrained_model f...
null
143,158
import argparse import torch import os import json import pandas as pd from tqdm import tqdm import shortuuid from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN from llava.conversation import conv_templates, SeparatorStyle from llava.model.builder import loa...
null
143,162
import argparse import torch from llava.constants import ( IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN, IMAGE_PLACEHOLDER, ) from llava.conversation import conv_templates, SeparatorStyle from llava.model.builder import load_pretrained_model from llava.utils i...
null
143,163
import argparse import torch import os import json from tqdm import tqdm import shortuuid from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN from llava.conversation import conv_templates, SeparatorStyle from llava.model.builder import load_pretrained_model f...
null
143,164
import os import json import argparse def eval_pope(answers, label_file): label_list = [json.loads(q)['label'] for q in open(label_file, 'r')] for answer in answers: text = answer['text'] # Only keep the first sentence if text.find('.') != -1: text = text.split('.')[0] ...
null
143,165
import json import os import re def read_jsonl(path: str, key: str=None): data = [] with open(os.path.expanduser(path)) as f: for line in f: if not line: continue data.append(json.loads(line)) if key is not None: data.sort(key=lambda x: x[key]) ...
null
143,166
import json import os import re def trim_hanging_lines(s: str, n: int) -> str: s = s.strip() for _ in range(n): s = s.split('\n', 1)[1].strip() return s
null
143,167
import argparse import torch import os import json from tqdm import tqdm import shortuuid from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN from llava.conversation import conv_templates, SeparatorStyle from llava.model.builder import load_pretrained_model f...
null
143,168
import argparse import json import os import openai import tqdm import ray import time NUM_SECONDS_TO_SLEEP = 3 def get_eval(content: str, max_tokens: int): while True: try: response = openai.ChatCompletion.create( model='gpt-4', messages=[{ '...
null
143,169
import argparse import json import os import openai import tqdm import ray import time def parse_score(review): try: score_pair = review.split('\n')[0] score_pair = score_pair.replace(',', ' ') sp = score_pair.split(' ') if len(sp) == 2: return [float(sp[0]), float(sp[1]...
null
143,173
import os, copy, types, gc, sys import numpy as np from prompt_toolkit import prompt args = types.SimpleNamespace() print('\n\nChatRWKV project: https://github.com/BlinkDL/ChatRWKV') for i in range(10): print('NOTE: This code is v1 and only for reference. Use v2 instead.') import torch args.RUN_DEVICE = "cuda" arg...
null
143,174
import os, copy, types, gc, sys import numpy as np from prompt_toolkit import prompt print('\n\nChatRWKV v2 https://github.com/BlinkDL/ChatRWKV') import torch torch.backends.cudnn.benchmark = True torch.backends.cudnn.allow_tf32 = True torch.backends.cuda.matmul.allow_tf32 = True PROMPT_FILE = f'{current_path}/prompt/d...
null
143,175
import os, sys, argparse from rwkv.model import RWKV def get_args(): p = argparse.ArgumentParser(prog = 'convert_model', description = 'Convert RWKV model for faster loading and saves cpu RAM.') p.add_argument('--in', metavar = 'INPUT', help = 'Filename for input model.', required = True) p.add_argument('--out',...
null
143,178
import types, math, os, gc import torch from torch.nn import functional as F def __nop(ob): return ob
null
143,180
import numpy as np np.set_printoptions(precision=4, suppress=True, linewidth=200) import types, torch import torch.nn as nn from torch.nn import functional as F def sample_logits(out, temperature=1.0, top_p=0.8): probs = F.softmax(out, dim=-1).numpy() sorted_probs = np.sort(probs)[::-1] cumulative_probs = ...
null
143,181
print( "\nHere are some demos for RWKV-4-World models (https://huggingface.co/BlinkDL/rwkv-4-world)\n" ) import os, re from rwkv.model import RWKV from rwkv.utils import PIPELINE, PIPELINE_ARGS print("\n#### Demo 1: free generation ####\n") print(ctx, end="") print("\n") print("\n#### Demo 2: single-round Q & A ##...
null
143,182
print( "\nHere are some demos for RWKV-4-World models (https://huggingface.co/BlinkDL/rwkv-4-world)\n" ) import os, re from rwkv.model import RWKV from rwkv.utils import PIPELINE, PIPELINE_ARGS model = RWKV(model=MODEL_FILE, strategy="cuda fp16") pipeline = PIPELINE(model, "rwkv_vocab_v20230424") print("\n#### De...
null
143,183
print( "\nHere are some demos for RWKV-4-World models (https://huggingface.co/BlinkDL/rwkv-4-world)\n" ) import os, re from rwkv.model import RWKV from rwkv.utils import PIPELINE, PIPELINE_ARGS model = RWKV(model=MODEL_FILE, strategy="cuda fp16") pipeline = PIPELINE(model, "rwkv_vocab_v20230424") print("\n#### De...
null
143,184
import os, copy, types, gc, sys, re import numpy as np from prompt_toolkit import prompt import torch from rwkv.model import RWKV from rwkv.utils import PIPELINE CHUNK_LEN = 256 model = RWKV(model=args.MODEL_NAME, strategy=args.strategy) pipeline = PIPELINE(model, "rwkv_vocab_v20230424") model_tokens = [] model_state ...
null
143,185
print('\nChatRWKV https://github.com/BlinkDL/ChatRWKV\n') import os, sys, torch import numpy as np from rwkv.model import RWKV print(out.detach().cpu().numpy()) print(out.detach().cpu().numpy()) from rwkv.utils import PIPELINE, PIPELINE_ARGS print(ctx, end='') print('\n') ...
null
143,187
from typing import Optional import types, gc, os, time, re import torch import torch.nn as nn from torch.nn import functional as F def __nop(ob): return ob
null
143,188
from typing import Optional import types, gc, os, time, re import torch import torch.nn as nn from torch.nn import functional as F torch.backends.cudnn.benchmark = True torch.backends.cudnn.allow_tf32 = True torch.backends.cuda.matmul.allow_tf32 = True def cuda_wkv(T: int, C: int, w, u, k, v, aa, bb, pp): asse...
null
143,189
from typing import Optional import types, gc, os, time, re import torch import torch.nn as nn from torch.nn import functional as F torch.backends.cudnn.benchmark = True torch.backends.cudnn.allow_tf32 = True torch.backends.cuda.matmul.allow_tf32 = True def mm8(x: torch.Tensor, w: torch.Tensor, mx: torch.Tensor, rx: tor...
null
143,190
import os, sys, time, random print(''' ####################################################################################################################### This tokenizer is not used in any RWKV models yet. I plan to use it for the future multilang RWKV models. Benefits: * Good support of most languages, from Europe...
null
143,191
import sys from pathlib import Path import g4f import json import os import re import requests from typing import Union from github import Github from github.PullRequest import PullRequest GITHUB_REPOSITORY = os.getenv('GITHUB_REPOSITORY') The provided code snippet includes necessary dependencies for implementing the ...
Retrieves the details of the pull request from GitHub. Args: github (Github): The Github object to interact with the GitHub API. Returns: PullRequest: An object representing the pull request.
143,192
import sys from pathlib import Path import g4f import json import os import re import requests from typing import Union from github import Github from github.PullRequest import PullRequest The provided code snippet includes necessary dependencies for implementing the `get_diff` function. Write a Python function `def g...
Fetches the diff of the pull request from a given URL. Args: diff_url (str): URL to the pull request diff. Returns: str: The diff of the pull request.
143,193
import sys from pathlib import Path import g4f import json import os import re import requests from typing import Union from github import Github from github.PullRequest import PullRequest def get_ai_response(prompt: str, as_json: bool = True) -> Union[dict, str]: """ Gets a response from g4f API based on the p...
Analyzes the code changes in the pull request. Args: pull (PullRequest): The pull request object. diff (str): The diff of the pull request. Returns: list[dict]: A list of comments generated by the analysis.
143,194
import sys from pathlib import Path import g4f import json import os import re import requests from typing import Union from github import Github from github.PullRequest import PullRequest The provided code snippet includes necessary dependencies for implementing the `create_review_prompt` function. Write a Python fun...
Creates a prompt to create a review comment. Args: pull (PullRequest): The pull request object. diff (str): The diff of the pull request. Returns: str: The generated prompt for review.
143,195
import sys, re from pathlib import Path from os import path import g4f def read_code(text): if match := re.search(r"```(python|py|)\n(?P<code>[\S\s]+?)\n```", text): return match.group("code")
null
143,196
import sys from pathlib import Path import asyncio import g4f from g4f.debug import access_token async def translate_part(part, i): blocklisted = False for headline in blocklist: if headline in part: blocklisted = True if blocklisted: lines = part.split('\n') lines[0] = a...
null
143,197
import json import re from typing import Any import quickjs from curl_cffi import requests session = requests.Session(impersonate="chrome107") def get_model_info() -> dict[str, Any]: url = "https://sdk.vercel.ai" response = session.get(url) html = response.text paths_regex = r"static\/chunks.+?\.js" ...
null
143,198
import json import re from typing import Any import quickjs from curl_cffi import requests def params_to_default_params(parameters: dict[str, Any]): defaults: dict[str, Any] = {} for key, parameter in parameters.items(): if key == "maximumLength": key = "maxTokens" defaults[key] = pa...
null
143,199
import json import re from typing import Any import quickjs from curl_cffi import requests def get_model_names(model_info: dict[str, Any]): model_names = model_info.keys() model_names = [ name for name in model_names if name not in ["openai:gpt-4", "openai:gpt-3.5-turbo"] ] mode...
null
143,200
import json import re from typing import Any import quickjs from curl_cffi import requests def print_providers(model_names: list[str]): for name in model_names: split_name = re.split(r":|/", name) base_provider = split_name[0] variable_name = split_name[-1].replace("-", "_").replace(".", ""...
null
143,201
import json import re from typing import Any import quickjs from curl_cffi import requests def print_convert(model_names: list[str]): for name in model_names: split_name = re.split(r":|/", name) key = split_name[-1] variable_name = split_name[-1].replace("-", "_").replace(".", "") #...
null
143,203
import sys, re from pathlib import Path from os import path import g4f def input_command(): print("Enter/Paste the cURL command. Ctrl-D or Ctrl-Z ( windows ) to save it.") contents = [] while True: try: line = input() except EOFError: break contents.append(li...
null
143,204
import re from urllib.parse import urlparse import asyncio from g4f import models, ChatCompletion from g4f.providers.types import BaseRetryProvider, ProviderType from etc.testing._providers import get_providers from g4f import debug def test_async_list(providers: list[ProviderType]): def get_providers() -> list[Provid...
null
143,205
import re from urllib.parse import urlparse import asyncio from g4f import models, ChatCompletion from g4f.providers.types import BaseRetryProvider, ProviderType from etc.testing._providers import get_providers from g4f import debug class BaseRetryProvider(BaseProvider): """ Base class for a provider that impl...
null
143,206
from pathlib import Path def create_content(): path = Path() paths = path.glob("g4f/provider/*.py") paths = [p for p in paths if p.name not in ["__init__.py", "base_provider.py"]] classnames = [p.stem for p in paths] import_lines = [f"from .{name} import {name}" for name in classnames] import_...
null
143,207
from __future__ import annotations from os import environ import requests from functools import cached_property from importlib.metadata import version as get_package_version, PackageNotFoundError from subprocess import check_output, CalledProcessError, PIPE from .errors import VersionNotFoundError from . import debug ...
Retrieves the latest version of a package from PyPI. Args: package_name (str): The name of the package for which to retrieve the version. Returns: str: The latest version of the specified package from PyPI. Raises: VersionNotFoundError: If there is an error in fetching the version from PyPI.
143,208
from __future__ import annotations from os import environ import requests from functools import cached_property from importlib.metadata import version as get_package_version, PackageNotFoundError from subprocess import check_output, CalledProcessError, PIPE from .errors import VersionNotFoundError from . import debug ...
Retrieves the latest release version from a GitHub repository. Args: repo (str): The name of the GitHub repository. Returns: str: The latest release version from the specified GitHub repository. Raises: VersionNotFoundError: If there is an error in fetching the version from GitHub.
143,209
import sys,logging from loguru import logger def __exception_handle(e_type, e_value, e_traceback): def hook_except_handle(): sys.excepthook = __exception_handle
null
143,210
import sys,logging from loguru import logger class __InterceptHandler(logging.Handler): def emit(self, record): try: level = logger.level(record.levelname).name except ValueError: level = record.levelno frame, depth = logging.currentframe(), 2 while frame.f_co...
null
143,211
import argparse from enum import Enum import g4f from g4f import Provider from g4f.gui.run import gui_parser, run_gui_args def run_gui(args): print("Running GUI...")
null
143,212
from __future__ import annotations from aiohttp import ClientSession, ClientResponse, ClientTimeout, BaseConnector, FormData from typing import AsyncIterator, Any, Optional from .defaults import DEFAULT_HEADERS from ..errors import MissingRequirementsError class MissingRequirementsError(Exception): ... def get_co...
null
143,213
from __future__ import annotations import os import time from .typing import Dict, Cookies from .errors import MissingRequirementsError from . import debug _cookies: Dict[str, Cookies] = {} def load_cookies_from_browsers(domain_name: str, raise_requirements_error: bool = True, single_browser: bool = False) -> Cookies: ...
Load cookies for a given domain from all supported browsers and cache the results. Args: domain_name (str): The domain for which to load cookies. Returns: Dict[str, str]: A dictionary of cookie names and values.
143,214
from __future__ import annotations import os import time from .typing import Dict, Cookies from .errors import MissingRequirementsError from . import debug _cookies: Dict[str, Cookies] = {} Cookies = Dict[str, str] def set_cookies(domain_name: str, cookies: Cookies = None) -> None: if cookies: _cookies[do...
null
143,215
from __future__ import annotations import re from io import BytesIO import base64 from .typing import ImageType, Union, Image from .errors import MissingRequirementsError ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif', 'webp', 'svg'} The provided code snippet includes necessary dependencies for implementing the `is...
Checks if the given filename has an allowed extension. Args: filename (str): The filename to check. Returns: bool: True if the extension is allowed, False otherwise.
143,216
from __future__ import annotations import re from io import BytesIO import base64 from .typing import ImageType, Union, Image from .errors import MissingRequirementsError The provided code snippet includes necessary dependencies for implementing the `format_images_markdown` function. Write a Python function `def forma...
Formats the given images as a markdown string. Args: images: The images to format. alt (str): The alt for the images. preview (str, optional): The preview URL format. Defaults to "{image}?w=200&h=200". Returns: str: The formatted markdown string.
143,217
from __future__ import annotations import re from io import BytesIO import base64 from .typing import ImageType, Union, Image from .errors import MissingRequirementsError def is_data_uri_an_image(data_uri: str) -> bool: """ Checks if the given data URI represents an image. Args: data_uri (str): The ...
Converts the given image to bytes. Args: image (ImageType): The image to convert. Returns: bytes: The image as bytes.
143,218
from __future__ import annotations import time from shutil import which from os import path from os import access, R_OK from .typing import Cookies from .errors import MissingRequirementsError from . import debug The provided code snippet includes necessary dependencies for implementing the `bypass_cloudflare` functio...
Attempts to bypass Cloudflare protection when accessing a URL using the provided WebDriver. Args: driver (WebDriver): The WebDriver to use for accessing the URL. url (str): The URL to access. timeout (int): Time in seconds to wait for the page to load. Raises: Exception: If there is an error while bypassing Cloudflare ...
143,219
from __future__ import annotations import time from shutil import which from os import path from os import access, R_OK from .typing import Cookies from .errors import MissingRequirementsError from . import debug def element_send_text(element: WebElement, text: str) -> None: script = "arguments[0].innerText = argu...
null
143,220
from __future__ import annotations import json, base64, requests, random, os try: import execjs has_requirements = True except ImportError: has_requirements = False from ..typing import Messages, CreateResult from .base_provider import AbstractProvider from ..requests import raise_for_status from ...
null
143,221
from __future__ import annotations from aiohttp import ClientSession from ..typing import AsyncResult, Messages from .base_provider import AsyncGeneratorProvider, ProviderModelMixin Messages = List[Dict[str, str]] def format_prompt(messages: Messages): messages = [ f"[INST] {message['content']} [/INST]" ...
null
143,222
from __future__ import annotations import random, json from datetime import datetime from ...requests import StreamSession from ...typing import AsyncGenerator from ..base_provider import AsyncGeneratorProvider def k(e: str, t: int): def get_fingerprint() -> str: return str(k(str(int(random.random() * 100000)), 25...
null
143,223
from __future__ import annotations import random, json from datetime import datetime from ...requests import StreamSession from ...typing import AsyncGenerator from ..base_provider import AsyncGeneratorProvider def get_datetime() -> str: return datetime.now().strftime("%Y-%m-%d %H:%M:%S")
null
143,224
from __future__ import annotations from aiohttp import ClientSession import os import json try: import execjs has_requirements = True except ImportError: has_requirements = False from ...typing import AsyncResult, Messages from ..base_provider import AsyncGeneratorProvider from ..helper import format_prompt...
null
143,225
from __future__ import annotations import time import hashlib import uuid from ..typing import AsyncResult, Messages from ..requests import StreamSession from ..errors import RateLimitError from .base_provider import AsyncGeneratorProvider, ProviderModelMixin def generate_signature(timestamp: int, message: str, id: st...
null
143,226
from __future__ import annotations import time from hashlib import sha256 from aiohttp import ClientSession, BaseConnector from ..typing import AsyncResult, Messages from .base_provider import AsyncGeneratorProvider from ..errors import RateLimitError from ..requests import raise_for_status from ..requests.aiohttp impo...
null
143,227
from __future__ import annotations import json from aiohttp import ClientSession, BaseConnector from ..typing import AsyncResult, Messages from .base_provider import AsyncGeneratorProvider, ProviderModelMixin from .helper import get_connector from ..errors import RateLimitError, ModelNotFoundError from ..requests.raise...
null
143,228
from __future__ import annotations from aiohttp import ClientSession, BaseConnector from ..typing import AsyncResult, Messages from .base_provider import AsyncGeneratorProvider from .helper import get_random_string, get_connector from ..requests import raise_for_status, get_args_from_browser, WebDriver from ..webdriver...
null
143,229
from __future__ import annotations import json, base64, requests, random, uuid try: import execjs has_requirements = True except ImportError: has_requirements = False from ...typing import Messages, TypedDict, CreateResult, Any from ..base_provider import AbstractProvider from ...errors import M...
null
143,230
from __future__ import annotations import random from ...requests import StreamSession from ...typing import AsyncResult, Messages from ..base_provider import AsyncGeneratorProvider, format_prompt def _create_header(): return { "accept" : "application/json, text/plain, */*", "content-type" ...
null
143,231
from __future__ import annotations import random from ...requests import StreamSession from ...typing import AsyncResult, Messages from ..base_provider import AsyncGeneratorProvider, format_prompt Messages = List[Dict[str, str]] def _create_payload( messages: Messages, system_message: str = "", user_id: i...
null
143,232
from __future__ import annotations from aiohttp import ClientSession from ...typing import AsyncResult, Messages from ..base_provider import AsyncGeneratorProvider def _create_header(): return { 'accept': '*/*', 'content-type': 'application/json', }
null
143,233
from __future__ import annotations from aiohttp import ClientSession from ...typing import AsyncResult, Messages from ..base_provider import AsyncGeneratorProvider Messages = List[Dict[str, str]] def _create_payload(messages: Messages, temperature: float = 0.5, **kwargs): return { 'key' : '', ...
null
143,234
from __future__ import annotations import json import os import uuid import requests from ...typing import Any, CreateResult from ..base_provider import AbstractProvider def _encrypt(e: str): # t = os.urandom(8).hex().encode('utf-8') # n = os.urandom(8).hex().encode('utf-8') # r = e.encode('utf-8') ...
null
143,235
from __future__ import annotations import json import os import uuid import requests from ...typing import Any, CreateResult from ..base_provider import AbstractProvider def _pad_data(data: bytes) -> bytes: # block_size = AES.block_size # padding_size = block_size - len(data) % block_size # padding ...
null
143,236
from __future__ import annotations from aiohttp import ClientSession from hashlib import sha256 from ...typing import AsyncResult, Messages, Dict from ..base_provider import AsyncGeneratorProvider from ..helper import format_prompt def _create_header() -> Dict[str, str]: return { 'accept': '*/*', '...
null
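The preview above amounts to a three-column table: an integer id, a prompt string holding a truncated code context, and an optional docstring that is null for many rows. As a minimal sketch, assuming the split is published on the Hugging Face Hub (the path "user/dataset-name" below is a placeholder, not the real repository id; the column names id, prompt, and docstring are taken from the schema above), it could be loaded and filtered like this:

```python
from datasets import load_dataset  # pip install datasets

# Placeholder repository id -- substitute the actual dataset path.
ds = load_dataset("user/dataset-name", split="train")

# Keep only rows that carry a docstring, using the columns shown in the
# preview: id (int64), prompt (string), docstring (string or null).
with_docs = ds.filter(lambda row: row["docstring"] is not None)

# Inspect a few examples: the prompt is the code context, the docstring
# is the natural-language description paired with it.
for row in with_docs.select(range(3)):
    print(row["id"], row["docstring"][:80])
```

Filtering on a non-null docstring mirrors what the preview shows: many rows display null, while others (for example the pull-request-review rows around id 143,191) pair the code context with a short function description.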