Columns:
id: int64 (values 0 to 190k)
prompt: string (lengths 21 to 13.4M)
docstring: string (lengths 1 to 12k)
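Each entry below is listed as three lines in column order: the id, the prompt (truncated in this preview), and the docstring (null when the row has none). As a reading aid only, here is a minimal sketch of one such row as a typed Python record; the Row dataclass, its field comments, and the shortened prompt text are illustrative assumptions rather than part of the dataset. Only the id value and the null docstring come from the first row shown.

```python
# Illustrative sketch of one preview row, typed to match the three columns above.
# The Row dataclass is an assumption made for readability; the dataset does not define it.
from dataclasses import dataclass
from typing import Optional

@dataclass
class Row:
    id: int                   # int64, values roughly 0 to 190k
    prompt: str               # code context and task text, 21 to ~13.4M characters
    docstring: Optional[str]  # docstring text, 1 to ~12k characters, or None (null)

# First row of the preview (prompt shortened here, as in the table).
first_row = Row(
    id=141212,
    prompt="import contextlib import hashlib import json ...",
    docstring=None,
)
```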
141,212
import contextlib import hashlib import json import random import string import time from typing import Optional, Literal, Union, Tuple from nonebot import logger as nb_logger from tortoise.queryset import Q from LittlePaimon.config import config from LittlePaimon.database import PublicCookie, PrivateCookie, CookieCach...
null
141,213
import contextlib import hashlib import json import random import string import time from typing import Optional, Literal, Union, Tuple from nonebot import logger as nb_logger from tortoise.queryset import Q from LittlePaimon.config import config from LittlePaimon.database import PublicCookie, PrivateCookie, CookieCach...
null
141,214
from difflib import get_close_matches from typing import Union, Literal, List, Optional, Dict from .files import load_json from .path import JSON_DATA def load_json(path: Union[Path, str], encoding: str = 'utf-8'): """ 读取本地json文件,返回文件数据。 :param path: 文件路径 :param encoding: 编码,默认为utf-8 :return: 数据 ...
Get the character name by character id. :param role_id: character id :return: character name string
141,215
from difflib import get_close_matches from typing import Union, Literal, List, Optional, Dict from .files import load_json from .path import JSON_DATA def load_json(path: Union[Path, str], encoding: str = 'utf-8'): """ 读取本地json文件,返回文件数据。 :param path: 文件路径 :param encoding: 编码,默认为utf-8 :return: 数据 ...
null
141,216
from difflib import get_close_matches from typing import Union, Literal, List, Optional, Dict from .files import load_json from .path import JSON_DATA def load_json(path: Union[Path, str], encoding: str = 'utf-8'): """ 读取本地json文件,返回文件数据。 :param path: 文件路径 :param encoding: 编码,默认为utf-8 :return: 数据 ...
null
141,217
from difflib import get_close_matches from typing import Union, Literal, List, Optional, Dict from .files import load_json from .path import JSON_DATA def load_json(path: Union[Path, str], encoding: str = 'utf-8'): """ 读取本地json文件,返回文件数据。 :param path: 文件路径 :param encoding: 编码,默认为utf-8 :return: 数据 ...
null
141,218
from pathlib import Path from typing import Union from nonebot.adapters.onebot.v11 import Message from .path import JSON_DATA text_filter = DFAFilter() text_filter.parse(JSON_DATA / 'ban_word.txt') The provided code snippet includes necessary dependencies for implementing the `filter_msg` function. Write a Python func...
Filter banned words. :param message: the message to filter :param repl: the replacement text
141,219
import asyncio import datetime import functools import hashlib import inspect import time import zipfile from collections import defaultdict from pathlib import Path from LittlePaimon.config import config from .logger import logger from .requests import aiorequests The provided code snippet includes necessary dependen...
Cache decorator. :param ttl: expiration time
141,220
import asyncio import datetime import functools import hashlib import inspect import time import zipfile from collections import defaultdict from pathlib import Path from LittlePaimon.config import config from .logger import logger from .requests import aiorequests RESOURCE_BASE_PATH = Path() / 'resources' class logge...
null
141,221
import datetime import re from pathlib import Path import git from git.exc import InvalidGitRepositoryError, GitCommandError from nonebot.utils import run_sync from . import __version__, NICKNAME from .requests import aiorequests from .logger import logger class aiorequests: async def get(url: str, ...
null
141,222
from pathlib import Path from ssl import SSLCertVerificationError from typing import Union from ruamel.yaml import YAML from .requests import aiorequests def load_json(path: Union[Path, str], encoding: str = 'utf-8'): """ 读取本地json文件,返回文件数据。 :param path: 文件路径 :param encoding: 编码,默认为utf-8 :return: 数据 ...
Read JSON from a web url. When a path argument is given: if the path file does not exist, download from the url and save it to path; if the path file exists, read path directly. :param url: url :param path: local json file path :param force_refresh: whether to force a re-download :return: json dict
141,223
from pathlib import Path from ssl import SSLCertVerificationError from typing import Union from ruamel.yaml import YAML from .requests import aiorequests The provided code snippet includes necessary dependencies for implementing the `load_yaml` function. Write a Python function `def load_yaml(path: Union[Path, str], e...
Read a local yaml file and return a dict. :param path: file path :param encoding: encoding, defaults to utf-8 :return: dict
141,224
from pathlib import Path from ssl import SSLCertVerificationError from typing import Union from ruamel.yaml import YAML from .requests import aiorequests The provided code snippet includes necessary dependencies for implementing the `save_yaml` function. Write a Python function `def save_yaml(data: dict, path: Union[P...
Save data to a yaml file. :param data: data :param path: save path :param encoding: encoding
141,225
import logging from apscheduler.schedulers.asyncio import AsyncIOScheduler from nonebot import get_driver from nonebot.log import LoguruHandler, logger from pydantic import Field, BaseSettings scheduler = AsyncIOScheduler() scheduler.configure(plugin_config.apscheduler_config) async def _start_scheduler(): if not ...
null
141,226
import logging from apscheduler.schedulers.asyncio import AsyncIOScheduler from nonebot import get_driver from nonebot.log import LoguruHandler, logger from pydantic import Field, BaseSettings scheduler = AsyncIOScheduler() scheduler.configure(plugin_config.apscheduler_config) async def _shutdown_scheduler(): if s...
null
141,227
import random import re import time from io import BytesIO from pathlib import Path from typing import Union, Optional, Tuple, List from PIL import Image from nonebot import get_bot from nonebot.adapters.onebot.v11 import MessageEvent, Message, MessageSegment, GroupMessageEvent from nonebot.rule import Rule from nonebo...
null
141,228
import random import re import time from io import BytesIO from pathlib import Path from typing import Union, Optional, Tuple, List from PIL import Image from nonebot import get_bot from nonebot.adapters.onebot.v11 import MessageEvent, Message, MessageSegment, GroupMessageEvent from nonebot.rule import Rule from nonebo...
Get the user_id, uid and images from a query operation, and store the msg (with uids filtered out) into T_State. :param limit: maximum number :param only_cn: whether to accept only CN-server uids :return: list of query targets
141,229
import random import re import time from io import BytesIO from pathlib import Path from typing import Union, Optional, Tuple, List from PIL import Image from nonebot import get_bot from nonebot.adapters.onebot.v11 import MessageEvent, Message, MessageSegment, GroupMessageEvent from nonebot.rule import Rule from nonebo...
Extract the uid from the message. :param only_cn: whether to accept only CN-server uids :return: uid
141,230
import random import re import time from io import BytesIO from pathlib import Path from typing import Union, Optional, Tuple, List from PIL import Image from nonebot import get_bot from nonebot.adapters.onebot.v11 import MessageEvent, Message, MessageSegment, GroupMessageEvent from nonebot.rule import Rule from nonebo...
Extract Genshin Impact characters from the command; must be used together with CommandUID. :param limit: maximum number :return: list of character names
141,231
import random import re import time from io import BytesIO from pathlib import Path from typing import Union, Optional, Tuple, List from PIL import Image from nonebot import get_bot from nonebot.adapters.onebot.v11 import MessageEvent, Message, MessageSegment, GroupMessageEvent from nonebot.rule import Rule from nonebo...
Get the target id based on the message event type: private chat -> user id, group chat -> group id, guild channel -> sub-channel id. :return: target id
141,232
import random import re import time from io import BytesIO from pathlib import Path from typing import Union, Optional, Tuple, List from PIL import Image from nonebot import get_bot from nonebot.adapters.onebot.v11 import MessageEvent, Message, MessageSegment, GroupMessageEvent from nonebot.rule import Rule from nonebo...
Get the on/off switch type from the message; return None if there is none. :return: Optional[bool]
141,233
import random import re import time from io import BytesIO from pathlib import Path from typing import Union, Optional, Tuple, List from PIL import Image from nonebot import get_bot from nonebot.adapters.onebot.v11 import MessageEvent, Message, MessageSegment, GroupMessageEvent from nonebot.rule import Rule from nonebo...
null
141,234
import random import re import time from io import BytesIO from pathlib import Path from typing import Union, Optional, Tuple, List from PIL import Image from nonebot import get_bot from nonebot.adapters.onebot.v11 import MessageEvent, Message, MessageSegment, GroupMessageEvent from nonebot.rule import Rule from nonebo...
Get an hour:minute time tuple from the message; return None if there is none. :return: (hour, minute)
141,235
import random import re import time from io import BytesIO from pathlib import Path from typing import Union, Optional, Tuple, List from PIL import Image from nonebot import get_bot from nonebot.adapters.onebot.v11 import MessageEvent, Message, MessageSegment, GroupMessageEvent from nonebot.rule import Rule from nonebo...
Check whether the timestamp is within the specified number of days. :param time_stamp: timestamp :param days: number of days :return: True/False
141,236
import random import re import time from io import BytesIO from pathlib import Path from typing import Union, Optional, Tuple, List from PIL import Image from nonebot import get_bot from nonebot.adapters.onebot.v11 import MessageEvent, Message, MessageSegment, GroupMessageEvent from nonebot.rule import Rule from nonebo...
Recall the specified group message (requires admin permission and a higher permission level than the sender). :param event: message event :return: whether the recall succeeded
141,237
import random import re import time from io import BytesIO from pathlib import Path from typing import Union, Optional, Tuple, List from PIL import Image from nonebot import get_bot from nonebot.adapters.onebot.v11 import MessageEvent, Message, MessageSegment, GroupMessageEvent from nonebot.rule import Rule from nonebo...
null
141,238
from contextlib import asynccontextmanager from contextlib import suppress from typing import Optional, Literal, Tuple, Union, List, AsyncGenerator, AsyncIterator from playwright.async_api import Page, Browser, Playwright, async_playwright, Error from . import DRIVER from .logger import logger from LittlePaimon.config ...
null
141,239
from contextlib import asynccontextmanager from contextlib import suppress from typing import Optional, Literal, Tuple, Union, List, AsyncGenerator, AsyncIterator from playwright.async_api import Page, Browser, Playwright, async_playwright, Error from . import DRIVER from .logger import logger from LittlePaimon.config ...
null
141,240
import os import sys import torch from setuptools import Extension, find_packages, setup from torch.utils.cpp_extension import ( CppExtension, CUDAExtension, BuildExtension, CUDA_HOME, ) version = write_version_py() with open("README.md") as f: readme = f.read() def write_version_py(): with ope...
null
141,241
import os import sys import torch from setuptools import Extension, find_packages, setup from torch.utils.cpp_extension import ( CppExtension, CUDAExtension, BuildExtension, CUDA_HOME, ) version = write_version_py() extension_modules = [ NumpyExtension( "metaseq.data.data_utils_fast", ...
null
141,242
import torch import torch.nn as nn import torch.nn.functional as F try: from apex.normalization import FusedLayerNorm as _FusedLayerNorm has_fused_layernorm = True class FusedLayerNorm(_FusedLayerNorm): def forward(self, x): if not x.is_cuda: return super().forward(x) ...
null
141,243
def checkpoint_wrapper(module, *args, **kwargs): try: from metaseq.modules.checkpoint_activation_wrapper.checkpoint_activations import ( checkpoint_wrapper as _checkpoint_wrapper, ) except ImportError: raise ImportError( "Cannot find fairscale.nn.misc.checkpoint...
null
141,244
import math from typing import Dict, Optional import torch import torch.nn as nn from torch import Tensor from metaseq import utils from metaseq.modules import ( ActivationFn, ModelParallelMultiheadAttention, Dropout, FeedForward, LayerNorm, ) from metaseq.modules.megatron.mpu import ( ColumnPar...
null
141,245
import torch import torch.nn as nn import torch.nn.functional as F from typing import Callable, List def relu(x): return F.relu(x) def relu_squared(x: torch.Tensor): return F.relu(x).pow(2)
null
141,246
import torch import torch.nn as nn import torch.nn.functional as F from typing import Callable, List def gelu_back(g, x): tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)) # sqrt(2/pi) * 3 * 0.044715 -> 0.1070322243 ff = 0.5 * x * ( (1 - tanh_out * tanh_out) * (0.79788456 + 0.107032224...
null
141,247
import torch import torch.nn as nn import torch.nn.functional as F from typing import Callable, List def relu_back(g, x): return g.masked_fill_(x <= 0, 0)
null
141,248
import torch import torch.nn as nn import torch.nn.functional as F from typing import Callable, List def swiglu(x: torch.Tensor, gate: torch.Tensor): return F.silu(x) * gate
null
141,249
import torch import torch.nn as nn import torch.nn.functional as F from typing import Callable, List def gelu(x): return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))) def geglu(x: torch.Tensor, gate: torch.Tensor): return gelu(x) * gate
null
141,250
import torch import torch.nn as nn import torch.nn.functional as F from typing import Callable, List def get_available_activation_fns() -> List: return [ "relu", "relu_squared", "gelu", "tanh", "linear", "swiglu", "geglu", ]
null
141,251
from typing import Optional import torch from torch import nn as nn The provided code snippet includes necessary dependencies for implementing the `Embedding` function. Write a Python function `def Embedding( num_embeddings, embedding_dim, padding_idx, initialize_params_on_gpu=False, dtype: Optiona...
Returns an embedding initialized to normal(0, 1/sqrt(embedding_dim)) with the padding token embedding initialized to 0.
141,252
import functools import threading import weakref from contextlib import contextmanager from typing import Any, Dict, Generator, Optional, Tuple import torch import torch.nn as nn import torch.utils.checkpoint as torch_checkpoint from fairscale.nn.checkpoint.checkpoint_utils import patch_batchnorm from fairscale.utils.c...
Makes :func:`is_checkpointing_disabled` return :data:`True` within a context.
141,253
import functools import threading import weakref from contextlib import contextmanager from typing import Any, Dict, Generator, Optional, Tuple import torch import torch.nn as nn import torch.utils.checkpoint as torch_checkpoint from fairscale.nn.checkpoint.checkpoint_utils import patch_batchnorm from fairscale.utils.c...
Makes :func:`is_checkpointing` return :data:`True` within a context.
141,254
import functools import threading import weakref from contextlib import contextmanager from typing import Any, Dict, Generator, Optional, Tuple import torch import torch.nn as nn import torch.utils.checkpoint as torch_checkpoint from fairscale.nn.checkpoint.checkpoint_utils import patch_batchnorm from fairscale.utils.c...
Makes :func:`is_recomputing` return :data:`True` within a context.
141,255
import functools import threading import weakref from contextlib import contextmanager from typing import Any, Dict, Generator, Optional, Tuple import torch import torch.nn as nn import torch.utils.checkpoint as torch_checkpoint from fairscale.nn.checkpoint.checkpoint_utils import patch_batchnorm from fairscale.utils.c...
A friendlier wrapper for performing activation checkpointing. Compared to the PyTorch version, this version: - wraps an nn.Module, so that all subsequent calls will use checkpointing - handles keyword arguments in the forward - handles non-Tensor outputs from the forward - supports offloading activations to CPU Usage::...
141,256
import functools import threading import weakref from contextlib import contextmanager from typing import Any, Dict, Generator, Optional, Tuple import torch import torch.nn as nn import torch.utils.checkpoint as torch_checkpoint from fairscale.nn.checkpoint.checkpoint_utils import patch_batchnorm from fairscale.utils.c...
null
141,257
import functools import threading import weakref from contextlib import contextmanager from typing import Any, Dict, Generator, Optional, Tuple import torch import torch.nn as nn import torch.utils.checkpoint as torch_checkpoint from fairscale.nn.checkpoint.checkpoint_utils import patch_batchnorm from fairscale.utils.c...
null
141,258
import functools import threading import weakref from contextlib import contextmanager from typing import Any, Dict, Generator, Optional, Tuple import torch import torch.nn as nn import torch.utils.checkpoint as torch_checkpoint from fairscale.nn.checkpoint.checkpoint_utils import patch_batchnorm from fairscale.utils.c...
Similar to torch.is_autocast_enabled, but compatible with torch 1.5.1
141,259
import functools import threading import weakref from contextlib import contextmanager from typing import Any, Dict, Generator, Optional, Tuple import torch import torch.nn as nn import torch.utils.checkpoint as torch_checkpoint from fairscale.nn.checkpoint.checkpoint_utils import patch_batchnorm from fairscale.utils.c...
Similar to torch.cuda.amp.autocast, but compatible with torch 1.5.1
141,261
import torch import torch.nn as nn from .learned_positional_embedding import LearnedPositionalEmbedding from .sinusoidal_positional_embedding import SinusoidalPositionalEmbedding class LearnedPositionalEmbedding(nn.Embedding): """ This module learns positional embeddings up to a fixed maximum size. Padding...
null
141,262
import torch from .utils import ensure_divisibility _TENSOR_MODEL_PARALLEL_GROUP = None _PIPELINE_MODEL_PARALLEL_GROUP = None _MODEL_PARALLEL_GROUP = None _EMBEDDING_GROUP = None _POSITION_EMBEDDING_GROUP = None _DATA_PARALLEL_GROUP = None _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK = None _VIRTUAL_PIPELINE_MODEL_PARALLEL_WO...
Initialize model data parallel groups. Arguments: tensor_model_parallel_size: number of GPUs used for tensor model parallelism. pipeline_model_parallel_size: number of GPUs used for pipeline model parallelism. virtual_pipeline_model_parallel_size: number of virtual stages (interleaved pipeline). pipeline_model_parallel...
141,264
import torch from .utils import ensure_divisibility _TENSOR_MODEL_PARALLEL_GROUP = None _PIPELINE_MODEL_PARALLEL_GROUP = None _MODEL_PARALLEL_GROUP = None _EMBEDDING_GROUP = None _POSITION_EMBEDDING_GROUP = None _DATA_PARALLEL_GROUP = None The provided code snippet includes necessary dependencies for implementing the ...
Set the groups to none.
141,265
import os import torch from metaseq.modules.megatron.global_vars import get_global_memory_buffer from .initialize import ( get_tensor_model_parallel_group, get_tensor_model_parallel_world_size, get_tensor_model_parallel_rank, ) from .utils import split_tensor_along_last_dim def get_tensor_model_parallel_gr...
All-reduce the input tensor across model parallel group.
141,266
import os import torch from metaseq.modules.megatron.global_vars import get_global_memory_buffer from .initialize import ( get_tensor_model_parallel_group, get_tensor_model_parallel_world_size, get_tensor_model_parallel_rank, ) from .utils import split_tensor_along_last_dim def get_tensor_model_parallel_wo...
Split the tensor along its last dimension and keep the corresponding slice.
141,267
import os import torch from metaseq.modules.megatron.global_vars import get_global_memory_buffer from .initialize import ( get_tensor_model_parallel_group, get_tensor_model_parallel_world_size, get_tensor_model_parallel_rank, ) from .utils import split_tensor_along_last_dim def get_tensor_model_parallel_wo...
Split the tensor along its first dimension and keep the corresponding slice.
141,268
import os import torch from metaseq.modules.megatron.global_vars import get_global_memory_buffer from .initialize import ( get_tensor_model_parallel_group, get_tensor_model_parallel_world_size, get_tensor_model_parallel_rank, ) from .utils import split_tensor_along_last_dim def get_tensor_model_parallel_gr...
Gather tensors and concatenate along the last dimension.
141,269
import os import torch from metaseq.modules.megatron.global_vars import get_global_memory_buffer from .initialize import ( get_tensor_model_parallel_group, get_tensor_model_parallel_world_size, get_tensor_model_parallel_rank, ) from .utils import split_tensor_along_last_dim def get_global_memory_buffer(): ...
Gather tensors and concatenate along the first dimension.
141,270
import os import torch from metaseq.modules.megatron.global_vars import get_global_memory_buffer from .initialize import ( get_tensor_model_parallel_group, get_tensor_model_parallel_world_size, get_tensor_model_parallel_rank, ) from .utils import split_tensor_along_last_dim def get_tensor_model_parallel_gr...
Reduce-scatter the input tensor across model parallel group.
141,271
import os import torch from metaseq.modules.megatron.global_vars import get_global_memory_buffer from .initialize import ( get_tensor_model_parallel_group, get_tensor_model_parallel_world_size, get_tensor_model_parallel_rank, ) from .utils import split_tensor_along_last_dim class _CopyToModelParallelRegion(...
null
141,272
import os import torch from metaseq.modules.megatron.global_vars import get_global_memory_buffer from .initialize import ( get_tensor_model_parallel_group, get_tensor_model_parallel_world_size, get_tensor_model_parallel_rank, ) from .utils import split_tensor_along_last_dim class _ReduceFromModelParallelReg...
null
141,273
import os import torch from metaseq.modules.megatron.global_vars import get_global_memory_buffer from .initialize import ( get_tensor_model_parallel_group, get_tensor_model_parallel_world_size, get_tensor_model_parallel_rank, ) from .utils import split_tensor_along_last_dim class _ScatterToModelParallelRegi...
null
141,274
import os import torch from metaseq.modules.megatron.global_vars import get_global_memory_buffer from .initialize import ( get_tensor_model_parallel_group, get_tensor_model_parallel_world_size, get_tensor_model_parallel_rank, ) from .utils import split_tensor_along_last_dim class _GatherFromModelParallelReg...
null
141,275
import os import torch from metaseq.modules.megatron.global_vars import get_global_memory_buffer from .initialize import ( get_tensor_model_parallel_group, get_tensor_model_parallel_world_size, get_tensor_model_parallel_rank, ) from .utils import split_tensor_along_last_dim class _ScatterToSequenceParallelR...
null
141,276
import os import torch from metaseq.modules.megatron.global_vars import get_global_memory_buffer from .initialize import ( get_tensor_model_parallel_group, get_tensor_model_parallel_world_size, get_tensor_model_parallel_rank, ) from .utils import split_tensor_along_last_dim class _GatherFromSequenceParallel...
null
141,277
import os import torch from metaseq.modules.megatron.global_vars import get_global_memory_buffer from .initialize import ( get_tensor_model_parallel_group, get_tensor_model_parallel_world_size, get_tensor_model_parallel_rank, ) from .utils import split_tensor_along_last_dim class _ReduceScatterToSequencePar...
null
141,278
import torch from .initialize import ( get_tensor_model_parallel_group, get_tensor_model_parallel_rank, ) from .utils import VocabUtility class _VocabParallelCrossEntropy(torch.autograd.Function): def forward(ctx, vocab_parallel_logits, target): # Maximum value along vocab dimension across all GPUs....
Helper function for the cross entropy.
141,279
import contextlib import torch from torch import _C from torch.cuda import _lazy_call, device as device_ctx_manager from .initialize import ( get_data_parallel_rank, get_tensor_model_parallel_group, get_tensor_model_parallel_rank, get_tensor_model_parallel_world_size, ) The provided code snippet includ...
Sets the random number generator state of the current GPU. Arguments: new_state (torch.ByteTensor): The desired state This function is adapted from the PyTorch repo (torch.cuda.set_rng_state) with a single change: the input state is not cloned. Cloning caused major performance issues for +4 GPU cases.
141,280
import contextlib import torch from torch import _C from torch.cuda import _lazy_call, device as device_ctx_manager from .initialize import ( get_data_parallel_rank, get_tensor_model_parallel_group, get_tensor_model_parallel_rank, get_tensor_model_parallel_world_size, ) def get_tensor_model_parallel_wo...
Break a tensor into equal 1D chunks.
141,281
import contextlib import torch from torch import _C from torch.cuda import _lazy_call, device as device_ctx_manager from .initialize import ( get_data_parallel_rank, get_tensor_model_parallel_group, get_tensor_model_parallel_rank, get_tensor_model_parallel_world_size, ) def get_tensor_model_parallel_gr...
Opposite of the above function: gather values from model parallel ranks.
141,282
import contextlib import torch from torch import _C from torch.cuda import _lazy_call, device as device_ctx_manager from .initialize import ( get_data_parallel_rank, get_tensor_model_parallel_group, get_tensor_model_parallel_rank, get_tensor_model_parallel_world_size, ) _MODEL_PARALLEL_RNG_TRACKER_NAME ...
Initialize model parallel cuda seed. This function should be called after the model parallel is initialized. Also, no torch.cuda.manual_seed should be called after this function. Basically, this is a replacement for that function. Two sets of RNG states are tracked: default state: This is for data parallelism and is the s...
141,283
import torch import torch.nn.functional as F import torch.nn.init as init from torch.nn.parameter import Parameter from metaseq.modules.megatron.global_vars import get_global_memory_buffer from .initialize import ( get_tensor_model_parallel_group, get_tensor_model_parallel_rank, get_tensor_model_parallel_wo...
Initialize affine weight for model parallel on GPU.
141,284
import torch import torch.nn.functional as F import torch.nn.init as init from torch.nn.parameter import Parameter from metaseq.modules.megatron.global_vars import get_global_memory_buffer from .initialize import ( get_tensor_model_parallel_group, get_tensor_model_parallel_rank, get_tensor_model_parallel_wo...
Initialize affine weight for model parallel. Build the master weight on all processes and scatter the relevant chunk.
141,285
import torch import torch.nn.functional as F import torch.nn.init as init from torch.nn.parameter import Parameter from metaseq.modules.megatron.global_vars import get_global_memory_buffer from .initialize import ( get_tensor_model_parallel_group, get_tensor_model_parallel_rank, get_tensor_model_parallel_wo...
Initialize affine weight for model parallel. Build the master weight on all processes and scatter the relevant chunk.
141,286
import torch import torch.nn.functional as F import torch.nn.init as init from torch.nn.parameter import Parameter from metaseq.modules.megatron.global_vars import get_global_memory_buffer from .initialize import ( get_tensor_model_parallel_group, get_tensor_model_parallel_rank, get_tensor_model_parallel_wo...
Initialize affine weight for model parallel. Build the master weight on all processes and scatter the relevant chunk.
141,287
import operator from functools import reduce import torch _GLOBAL_ARGS = None def _ensure_var_is_initialized(var, name): """Make sure the input variable is not None.""" assert var is not None, "{} is not initialized.".format(name) The provided code snippet includes necessary dependencies for implementing the `...
Return arguments.
141,288
from torch import nn as nn from metaseq.modules import Linear The provided code snippet includes necessary dependencies for implementing the `FeedForward` function. Write a Python function `def FeedForward(x, fc1, activation_fn, fc2, dropout_module)` to solve the following problem: Feedforward network consisting of tw...
Feedforward network consisting of two linear layers (fc1, fc2), where activation_fn is applied between the two layers and dropout_module is applied at the end.
141,289
import ast import collections import logging import os import re import socket from typing import Any, Dict, List, Optional, Tuple import math import torch from omegaconf import OmegaConf from metaseq.dataclass.configs import CheckpointConfig from metaseq.dataclass.utils import overwrite_args_by_name, overwrite_keys_no...
Load a checkpoint and restore the training iterator. *passthrough_args* will be passed through to ``trainer.get_train_iterator``.
141,290
import ast import collections import logging import os import re import socket from typing import Any, Dict, List, Optional, Tuple import math import torch from omegaconf import OmegaConf from metaseq.dataclass.configs import CheckpointConfig from metaseq.dataclass.utils import overwrite_args_by_name, overwrite_keys_no...
Retrieves all checkpoints found in the `path` directory. Checkpoints are identified by matching filename to the specified pattern. If the pattern contains groups, the result will be sorted by the first group in descending order.
141,291
import ast import collections import logging import os import re import socket from typing import Any, Dict, List, Optional, Tuple import math import torch from omegaconf import OmegaConf from metaseq.dataclass.configs import CheckpointConfig from metaseq.dataclass.utils import overwrite_args_by_name, overwrite_keys_no...
null
141,292
import copy import importlib import logging import math import os import random import re import sys import warnings from itertools import accumulate from typing import List, Optional import numpy as np import torch import torch.distributed as dist import torch.nn.functional as F from metaseq.distributed import utils a...
null
141,293
import copy import importlib import logging import math import os import random import re import sys import warnings from itertools import accumulate from typing import List, Optional import numpy as np import torch import torch.distributed as dist import torch.nn.functional as F from metaseq.distributed import utils a...
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored.
141,294
import copy import importlib import logging import math import os import random import re import sys import warnings from itertools import accumulate from typing import List, Optional import numpy as np import torch import torch.distributed as dist import torch.nn.functional as F from metaseq.distributed import utils a...
null
141,295
import copy import importlib import logging import math import os import random import re import sys import warnings from itertools import accumulate from typing import List, Optional import numpy as np import torch import torch.distributed as dist import torch.nn.functional as F from metaseq.distributed import utils a...
FP16-compatible function that fills a tensor with -inf.
141,296
import copy import importlib import logging import math import os import random import re import sys import warnings from itertools import accumulate from typing import List, Optional import numpy as np import torch import torch.distributed as dist import torch.nn.functional as F from metaseq.distributed import utils a...
Resolve max position constraints from multiple sources.
141,297
import copy import importlib import logging import math import os import random import re import sys import warnings from itertools import accumulate from typing import List, Optional import numpy as np import torch import torch.distributed as dist import torch.nn.functional as F from metaseq.distributed import utils a...
null
141,298
import copy import importlib import logging import math import os import random import re import sys import warnings from itertools import accumulate from typing import List, Optional import numpy as np import torch import torch.distributed as dist import torch.nn.functional as F from metaseq.distributed import utils a...
null
141,299
import copy import importlib import logging import math import os import random import re import sys import warnings from itertools import accumulate from typing import List, Optional import numpy as np import torch import torch.distributed as dist import torch.nn.functional as F from metaseq.distributed import utils a...
null
141,300
import copy import importlib import logging import math import os import random import re import sys import warnings from itertools import accumulate from typing import List, Optional import numpy as np import torch import torch.distributed as dist import torch.nn.functional as F from metaseq.distributed import utils a...
null
141,301
import copy import importlib import logging import math import os import random import re import sys import warnings from itertools import accumulate from typing import List, Optional import numpy as np import torch import torch.distributed as dist import torch.nn.functional as F from metaseq.distributed import utils a...
null
141,302
import copy import importlib import logging import math import os import random import re import sys import warnings from itertools import accumulate from typing import List, Optional import numpy as np import torch import torch.distributed as dist import torch.nn.functional as F from metaseq.distributed import utils a...
null
141,303
import copy import importlib import logging import math import os import random import re import sys import warnings from itertools import accumulate from typing import List, Optional import numpy as np import torch import torch.distributed as dist import torch.nn.functional as F from metaseq.distributed import utils a...
null
141,304
import copy import importlib import logging import math import os import random import re import sys import warnings from itertools import accumulate from typing import List, Optional import numpy as np import torch import torch.distributed as dist import torch.nn.functional as F from metaseq.distributed import utils a...
null
141,305
import copy import importlib import logging import math import os import random import re import sys import warnings from itertools import accumulate from typing import List, Optional import numpy as np import torch import torch.distributed as dist import torch.nn.functional as F from metaseq.distributed import utils a...
Convert a tensor x into the desired dtype. Also sanity checks combinations of options.
141,306
import copy import importlib import logging import math import os import random import re import sys import warnings from itertools import accumulate from typing import List, Optional import numpy as np import torch import torch.distributed as dist import torch.nn.functional as F from metaseq.distributed import utils a...
null
141,307
import copy import importlib import logging import math import os import random import re import sys import warnings from itertools import accumulate from typing import List, Optional import numpy as np import torch import torch.distributed as dist import torch.nn.functional as F from metaseq.distributed import utils a...
null
141,308
import copy import importlib import logging import math import os import random import re import sys import warnings from itertools import accumulate from typing import List, Optional import numpy as np import torch import torch.distributed as dist import torch.nn.functional as F from metaseq.distributed import utils a...
null
141,309
import copy import importlib import logging import math import os import random import re import sys import warnings from itertools import accumulate from typing import List, Optional import numpy as np import torch import torch.distributed as dist import torch.nn.functional as F from metaseq.distributed import utils a...
Init method based on N(0, sigma).
141,310
import copy import importlib import logging import math import os import random import re import sys import warnings from itertools import accumulate from typing import List, Optional import numpy as np import torch import torch.distributed as dist import torch.nn.functional as F from metaseq.distributed import utils a...
Init method based on N(0, sigma/sqrt(2*num_layers)).
141,311
import copy import importlib import logging import math import os import random import re import sys import warnings from itertools import accumulate from typing import List, Optional import numpy as np import torch import torch.distributed as dist import torch.nn.functional as F from metaseq.distributed import utils a...
null
141,312
import copy import importlib import logging import math import os import random import re import sys import warnings from itertools import accumulate from typing import List, Optional import numpy as np import torch import torch.distributed as dist import torch.nn.functional as F from metaseq.distributed import utils a...
null
141,313
from metaseq.logging.progress_bar.base_progress_bar import ( BaseProgressBar, logger, ) def get_aim_run(repo, run_hash): from aim import Run return Run(run_hash=run_hash, repo=repo)
null