| id (int64, 0–190k) | prompt (string, 21–13.4M chars) | docstring (string, 1–12k chars, nullable) |
|---|---|---|
178,946 | import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weight_reduce_loss
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
... | The wrapper function for :func:`F.cross_entropy` |
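Row 178,946 asks for the `cross_entropy` wrapper built on `weight_reduce_loss`. Below is a minimal sketch of how that helper pair usually looks in mmseg/mmdet-style codebases; the exact error handling is an assumption, not the dataset's reference answer.

```python
import torch.nn.functional as F

def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    # Element-wise weighting first, then the requested reduction.
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        if reduction == 'mean':
            return loss.mean()
        if reduction == 'sum':
            return loss.sum()
        return loss  # reduction == 'none'
    # With avg_factor, only mean reduction (sum / avg_factor) is defined.
    if reduction == 'mean':
        return loss.sum() / avg_factor
    if reduction == 'none':
        return loss
    raise ValueError('avg_factor cannot be combined with reduction="sum"')

def cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=None):
    """The wrapper function for F.cross_entropy."""
    loss = F.cross_entropy(pred, label, reduction='none')
    return weight_reduce_loss(loss, weight, reduction, avg_factor)
```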
178,947 | import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weight_reduce_loss
def _expand_onehot_labels(labels, label_weights, target_shape, ignore_index):
"""Expand onehot labels to match the size of prediction."""
bin_labels = labels.new_zeros(target_sha... | Calculate the binary CrossEntropy loss. Args: pred (torch.Tensor): The prediction with shape (N, 1). label (torch.Tensor): The learning label of the prediction. weight (torch.Tensor, optional): Sample-wise loss weight. reduction (str, optional): The method used to reduce the loss. Options are "none", "mean" and "sum". ... |
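A sketch of the binary variant for the simple (N, C) case, reusing `weight_reduce_loss` from the previous sketch. The real mmseg `_expand_onehot_labels` also handles 4-D segmentation maps; that branch is omitted here, so treat this as an illustration rather than the reference answer.

```python
import torch
import torch.nn.functional as F

def binary_cross_entropy(pred, label, weight=None, reduction='mean',
                         avg_factor=None, ignore_index=255):
    if pred.dim() != label.dim():
        # Expand index labels (N,) to one-hot (N, C); ignored positions are
        # masked out through the weight tensor rather than the labels.
        valid = (label >= 0) & (label != ignore_index)
        onehot = pred.new_zeros(pred.shape)
        rows = torch.nonzero(valid, as_tuple=False).squeeze(1)
        onehot[rows, label[valid]] = 1
        expanded_weight = valid.unsqueeze(1).expand_as(pred).float()
        if weight is not None:
            # Sample-wise weights broadcast over the class dimension.
            weight = weight.float().view(-1, 1) * expanded_weight
        else:
            weight = expanded_weight
        label = onehot
    loss = F.binary_cross_entropy_with_logits(
        pred, label.float(), reduction='none')
    return weight_reduce_loss(loss, weight, reduction, avg_factor)
```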
178,948 | import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weight_reduce_loss
The provided code snippet includes necessary dependencies for implementing the `mask_cross_entropy` function. Write a Python function `def mask_cross_entropy(pred, ... | Calculate the CrossEntropy loss for masks. Args: pred (torch.Tensor): The prediction with shape (N, C), C is the number of classes. target (torch.Tensor): The learning label of the prediction. label (torch.Tensor): ``label`` indicates the class label of the mask's corresponding object. This will be used to select the ma... |
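The `mask_cross_entropy` prompt is truncated, but mmdet's version is short enough to sketch in full: each RoI's predicted mask slice for its ground-truth class is scored with BCE. The argument defaults here are assumptions.

```python
import torch
import torch.nn.functional as F

def mask_cross_entropy(pred, target, label, reduction='mean', avg_factor=None):
    # Mask losses are always averaged; element weighting is not supported.
    assert reduction == 'mean' and avg_factor is None
    num_rois = pred.size(0)
    inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
    # Select, for each RoI, the mask channel of its object's class.
    pred_slice = pred[inds, label].squeeze(1)
    # target is the (float) binary mask for each RoI.
    return F.binary_cross_entropy_with_logits(
        pred_slice, target, reduction='mean')[None]
```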
178,949 | import warnings
from mmcv.utils import Registry, build_from_cfg
from torch import nn
BACKBONES = Registry('backbone')
def build(cfg, registry, default_args=None):
"""Build a module.
Args:
cfg (dict, list[dict]): The config of modules, it is either a dict
or a list of configs.
registr... | Build backbone. |
178,950 | import warnings
from mmcv.utils import Registry, build_from_cfg
from torch import nn
NECKS = Registry('neck')
def build(cfg, registry, default_args=None):
"""Build a module.
Args:
cfg (dict, list[dict]): The config of modules, it is either a dict
or a list of configs.
registry (:obj:... | Build neck. |
178,951 | import warnings
from mmcv.utils import Registry, build_from_cfg
from torch import nn
HEADS = Registry('head')
def build(cfg, registry, default_args=None):
"""Build a module.
Args:
cfg (dict, list[dict]): The config of modules, it is either a dict
or a list of configs.
registry (:obj:... | Build head. |
178,952 | import warnings
from mmcv.utils import Registry, build_from_cfg
from torch import nn
LOSSES = Registry('loss')
def build(cfg, registry, default_args=None):
"""Build a module.
Args:
cfg (dict, list[dict]): The config of modules, it is either a dict
or a list of configs.
registry (:obj... | Build loss. |
178,953 | import warnings
from mmcv.utils import Registry, build_from_cfg
from torch import nn
SEGMENTORS = Registry('segmentor')
def build(cfg, registry, default_args=None):
"""Build a module.
Args:
cfg (dict, list[dict]): The config of modules, it is either a dict
or a list of configs.
regis... | Build segmentor. |
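Rows 178,949–178,953 all wrap the same mmcv `build` helper with a different registry (backbone, neck, head, loss, segmentor). A sketch of the shared pattern — per mmcv convention, a list of configs is chained into an `nn.Sequential`:

```python
import torch.nn as nn
from mmcv.utils import Registry, build_from_cfg

BACKBONES = Registry('backbone')

def build(cfg, registry, default_args=None):
    if isinstance(cfg, list):
        # A list of configs builds each module and chains them in order.
        modules = [build_from_cfg(c, registry, default_args) for c in cfg]
        return nn.Sequential(*modules)
    return build_from_cfg(cfg, registry, default_args)

def build_backbone(cfg):
    """Build backbone."""
    return build(cfg, BACKBONES)
```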
178,954 | import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, normal_init
from mmcv.ops import point_sample
from mmseg.models.builder import HEADS
from mmseg.ops import resize
from ..losses import accuracy
from .cascade_decode_head import BaseCascadeDecodeHead
The provided code snippet includes necessary depende... | Estimate uncertainty based on seg logits. For each location of the prediction ``seg_logits`` we estimate uncertainty as the difference between top first and top second predicted logits. Args: seg_logits (Tensor): Semantic segmentation logits, shape (batch_size, num_classes, height, width). Returns: scores (Tensor): T u... |
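The uncertainty function described here (from PointRend-style cascade heads) reduces to a two-line top-k margin. A sketch consistent with the docstring — the score is the negative top-1/top-2 margin, so near-ties rank as most uncertain:

```python
import torch

def calculate_uncertainty(seg_logits):
    # (batch, num_classes, H, W) -> (batch, 1, H, W)
    top2_scores = torch.topk(seg_logits, k=2, dim=1)[0]
    # Closer top-two logits => higher (less negative) uncertainty.
    return (top2_scores[:, 1] - top2_scores[:, 0]).unsqueeze(1)
```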
178,955 | import math
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from ..builder import HEADS
from .decode_head import BaseDecodeHead
The provided code snippet includes necessary dependencies for implementing the `reduce_mean` function. Writ... | Reduce mean when distributed training. |
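`reduce_mean` is a small distributed utility; a sketch of the usual implementation (the guard for non-distributed runs is an assumption):

```python
import torch
import torch.distributed as dist

def reduce_mean(tensor):
    """Average `tensor` across all workers when distributed training is on."""
    if not (dist.is_available() and dist.is_initialized()):
        return tensor
    tensor = tensor.clone()
    dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)
    return tensor
```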
178,958 | import torch
import torch.nn as nn
import torch.nn.functional as F
The provided code snippet includes necessary dependencies for implementing the `drop_block_fast_2d` function. Write a Python function `def drop_block_fast_2d( x: torch.Tensor, drop_prob: float = 0.1, block_size: int = 7, gamma_scale: fl... | DropBlock. See https://arxiv.org/pdf/1810.12890.pdf DropBlock with an experimental Gaussian noise option. Simplified from the above, without concern for a valid block mask at edges. |
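A sketch of the fast DropBlock variant this row describes, following the timm-style recipe: sample block seeds with probability `gamma`, dilate them with a max-pool, then mask (or add noise). It assumes an odd `block_size` so the max-pool preserves spatial size.

```python
import torch
import torch.nn.functional as F

def drop_block_fast_2d(x: torch.Tensor, drop_prob: float = 0.1,
                       block_size: int = 7, gamma_scale: float = 1.0,
                       with_noise: bool = False) -> torch.Tensor:
    B, C, H, W = x.shape
    total_size = W * H
    clipped_block_size = min(block_size, min(W, H))
    # Seed probability chosen so the expected dropped area ~ drop_prob.
    gamma = (gamma_scale * drop_prob * total_size / clipped_block_size ** 2 /
             ((W - block_size + 1) * (H - block_size + 1)))
    block_mask = torch.empty_like(x).bernoulli_(gamma)
    # Dilate each seed into a block_size x block_size block.
    block_mask = F.max_pool2d(block_mask, kernel_size=clipped_block_size,
                              stride=1, padding=clipped_block_size // 2)
    if with_noise:
        noise = torch.empty_like(x).normal_()
        return x * (1. - block_mask) + noise * block_mask
    keep_mask = 1. - block_mask
    # Rescale so activations keep the same expected magnitude.
    normalize_scale = keep_mask.numel() / keep_mask.sum().add(1e-7)
    return x * keep_mask * normalize_scale
```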
178,960 | import torch
import math
import warnings
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes st... | r"""Fills the input Tensor with values drawn from a truncated normal distribution. The values are effectively drawn from the normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` with values outside :math:`[a, b]` redrawn until they are within the bounds. The method used for generating the random values wo... |
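The truncated-normal initializer in row 179,060's prompt follows the standard inverse-CDF trick; a sketch matching that method:

```python
import math
import torch

def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    def norm_cdf(x):
        # CDF of the standard normal distribution.
        return (1. + math.erf(x / math.sqrt(2.))) / 2.

    with torch.no_grad():
        # Sample uniformly in CDF space between the bounds, then invert
        # through erfinv to get truncated-normal values.
        l = norm_cdf((a - mean) / std)
        u = norm_cdf((b - mean) / std)
        tensor.uniform_(2 * l - 1, 2 * u - 1)
        tensor.erfinv_()
        tensor.mul_(std * math.sqrt(2.)).add_(mean)
        tensor.clamp_(min=a, max=b)
        return tensor
```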
178,961 | from mmcv.utils import collect_env as collect_base_env
from mmcv.utils import get_git_hash
import mmseg
The provided code snippet includes necessary dependencies for implementing the `collect_env` function. Write a Python function `def collect_env()` to solve the following problem:
Collect the information of the runni... | Collect the information of the running environments. |
178,962 | import logging
from mmcv.utils import get_logger
def get_root_logger(log_file=None, log_level=logging.INFO):
"""Get the root logger.
The logger will be initialized if it has not been initialized. By default a
StreamHandler will be added. If `log_file` is specified, a FileHandler will
also be added. The ... | Print a log message. Args: msg (str): The message to be logged. logger (logging.Logger | str | None): The logger to be used. Some special loggers are: - "root": the root logger obtained with `get_root_logger()`. - "silent": no message will be printed. - None: The `print()` method will be used to print log messages. lev... |
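The docstring here describes mmcv's `print_log` dispatch over the special logger values; a sketch of that behaviour (the exact error message is an assumption):

```python
import logging
from mmcv.utils import get_logger

def print_log(msg, logger=None, level=logging.INFO):
    if logger is None:
        print(msg)                      # no logger: plain print
    elif isinstance(logger, logging.Logger):
        logger.log(level, msg)
    elif logger == 'silent':
        pass                            # swallow the message
    elif isinstance(logger, str):
        # 'root' or any named logger resolves through get_logger.
        get_logger(logger).log(level, msg)
    else:
        raise TypeError('logger should be a Logger object, str, '
                        f'"silent" or None, but got {type(logger)}')
```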
178,963 | import argparse
import copy
import os
import os.path as osp
import time
import mmcv
import torch
from mmcv.runner import init_dist
from mmcv.utils import Config, DictAction, get_git_hash
from mmseg import __version__
from mmseg.apis import set_random_seed, train_segmentor
from mmseg.datasets import build_dataset
from m... | null |
178,964 | import argparse
from mmcv import Config
from mmcv.cnn import get_model_complexity_info
from mmcv.cnn.utils.flops_counter import flops_to_string, params_to_string
from mmseg.models import build_segmentor
import torch
def parse_args():
parser = argparse.ArgumentParser(description='Train a segmentor')
parser.add_... | null |
178,965 | import argparse
from mmcv import Config
from mmcv.cnn import get_model_complexity_info
from mmcv.cnn.utils.flops_counter import flops_to_string, params_to_string
from mmseg.models import build_segmentor
import torch
def sra_flops(h, w, r, dim, num_heads):
def get_tr_flops(net, input_shape):
flops, params = get_mod... | null |
178,966 | import argparse
import copy
import os
import os.path as osp
import time
import mmcv
import torch
from mmcv.runner import init_dist
from mmcv.utils import Config, DictAction, get_git_hash
from IPython import embed
from collections import OrderedDict
def parse_args():
parser = argparse.ArgumentParser(description='Tr... | null |
178,967 | import argparse
from functools import partial
import mmcv
import numpy as np
import onnxruntime as rt
import torch
import torch._C
import torch.serialization
from mmcv.onnx import register_extra_symbolics
from mmcv.runner import load_checkpoint
from torch import nn
from mmseg.models import build_segmentor
torch.manual_... | null |
178,968 | import argparse
from functools import partial
import mmcv
import numpy as np
import onnxruntime as rt
import torch
import torch._C
import torch.serialization
from mmcv.onnx import register_extra_symbolics
from mmcv.runner import load_checkpoint
from torch import nn
from mmseg.models import build_segmentor
torch.manual_... | Export Pytorch model to ONNX model and verify the outputs are same between Pytorch and ONNX. Args: model (nn.Module): Pytorch model we want to export. input_shape (tuple): Use this input shape to construct the corresponding dummy input and execute the model. opset_version (int): The onnx op version. Default: 11. show (... |
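Row 178,968's docstring describes an export-and-verify flow. A minimal sketch of that shape using `torch.onnx.export` and onnxruntime; the real mmseg script additionally patches the segmentor's forward for whole-image inference, which is omitted here.

```python
import numpy as np
import onnxruntime as rt
import torch

def pytorch2onnx(model, input_shape, opset_version=11, show=False,
                 output_file='tmp.onnx', verify=False):
    model.eval()
    dummy_input = torch.randn(*input_shape)
    torch.onnx.export(model, dummy_input, output_file,
                      opset_version=opset_version, verbose=show)
    if verify:
        # Compare PyTorch and ONNX Runtime outputs on the same dummy input.
        with torch.no_grad():
            pytorch_result = model(dummy_input).numpy()
        sess = rt.InferenceSession(output_file)
        onnx_result = sess.run(
            None, {sess.get_inputs()[0].name: dummy_input.numpy()})[0]
        np.testing.assert_allclose(pytorch_result, onnx_result,
                                   rtol=1e-3, atol=1e-5,
                                   err_msg='Outputs differ between backends')
```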
178,969 | import argparse
from functools import partial
import mmcv
import numpy as np
import onnxruntime as rt
import torch
import torch._C
import torch.serialization
from mmcv.onnx import register_extra_symbolics
from mmcv.runner import load_checkpoint
from torch import nn
from mmseg.models import build_segmentor
def parse_ar... | null |
178,984 | import argparse
import time
import torch
from mmcv import Config
from mmcv.parallel import MMDataParallel
from mmcv.runner import load_checkpoint
from mmseg.datasets import build_dataloader, build_dataset
from mmseg.models import build_segmentor
def parse_args():
parser = argparse.ArgumentParser(description='MMSeg... | null |
178,989 | import argparse
import os
import random
from collections import defaultdict
import cv2
import re
import numpy as np
from PIL import Image
import torch
import html
import gradio as gr
import time
import torchvision.transforms as T
import torch.backends.cudnn as cudnn
from minigpt4.common.config import Config
from minigp... | null |
178,990 | import argparse
import os
import random
from collections import defaultdict
import cv2
import re
import numpy as np
from PIL import Image
import torch
import html
import gradio as gr
import time
import torchvision.transforms as T
import torch.backends.cudnn as cudnn
from minigpt4.common.config import Config
from minigp... | null |
178,991 | import argparse
import os
import random
from collections import defaultdict
import cv2
import re
import numpy as np
from PIL import Image
import torch
import html
import gradio as gr
import time
import torchvision.transforms as T
import torch.backends.cudnn as cudnn
from minigpt4.common.config import Config
from minigp... | null |
178,992 | import argparse
import os
import random
from collections import defaultdict
import cv2
import re
import numpy as np
from PIL import Image
import torch
import html
import gradio as gr
import time
import torchvision.transforms as T
import torch.backends.cudnn as cudnn
from minigpt4.common.config import Config
from minigp... | null |
178,993 | import argparse
import os
import random
from collections import defaultdict
import cv2
import re
import numpy as np
from PIL import Image
import torch
import html
import gradio as gr
import time
import torchvision.transforms as T
import torch.backends.cudnn as cudnn
from minigpt4.common.config import Config
from minigp... | null |
178,994 | import argparse
import os
import random
from collections import defaultdict
import cv2
import re
import numpy as np
from PIL import Image
import torch
import html
import gradio as gr
import time
import torchvision.transforms as T
import torch.backends.cudnn as cudnn
from minigpt4.common.config import Config
from minigp... | null |
178,995 | import argparse
import os
import random
from collections import defaultdict
import cv2
import re
import numpy as np
from PIL import Image
import torch
import html
import gradio as gr
import time
import torchvision.transforms as T
import torch.backends.cudnn as cudnn
from minigpt4.common.config import Config
from minigp... | null |
178,996 | import argparse
import os
import random
from collections import defaultdict
import cv2
import re
import numpy as np
from PIL import Image
import torch
import html
import gradio as gr
import time
import torchvision.transforms as T
import torch.backends.cudnn as cudnn
from minigpt4.common.config import Config
from minigp... | null |
178,997 | import argparse
import os
import random
from collections import defaultdict
import cv2
import re
import numpy as np
from PIL import Image
import torch
import html
import gradio as gr
import time
import torchvision.transforms as T
import torch.backends.cudnn as cudnn
from minigpt4.common.config import Config
from minigp... | null |
179,003 | import math
from typing import List, Optional, Tuple, Union
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...modeling_... | null |
179,004 | import math
from typing import List, Optional, Tuple, Union
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...modeling_... | Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`): The position indices ... |
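The RoPE docstring in row 179,004 corresponds to the well-known `apply_rotary_pos_emb` from transformers' Llama modeling; a sketch matching recent versions of that code:

```python
import torch

def rotate_half(x):
    """Rotate half the hidden dims: (x1, x2) -> (-x2, x1)."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2:]
    return torch.cat((-x2, x1), dim=-1)

def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
    # Broadcast cos/sin over the heads dim, then combine each vector with
    # its rotation: q' = q * cos + rotate_half(q) * sin (same for k).
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed
```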
179,012 | import argparse
import os
import random
from collections import defaultdict
import cv2
import re
import numpy as np
from PIL import Image
import torch
import html
import gradio as gr
import torchvision.transforms as T
import torch.backends.cudnn as cudnn
from minigpt4.common.config import Config
from minigpt4.common.re... | null |
179,014 | import argparse
import os
import random
from collections import defaultdict
import cv2
import re
import numpy as np
from PIL import Image
import torch
import html
import gradio as gr
import torchvision.transforms as T
import torch.backends.cudnn as cudnn
from minigpt4.common.config import Config
from minigpt4.common.re... | null |
179,051 | import gzip
import logging
import os
import random as rnd
import tarfile
import zipfile
import random
from typing import List
from tqdm import tqdm
import decord
from decord import VideoReader
import webdataset as wds
import numpy as np
import torch
from torch.utils.data.dataset import IterableDataset
from minigpt4.com... | null |
179,055 | import os
import json
import pickle
import random
import time
import itertools
import numpy as np
from PIL import Image
import skimage.io as io
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon, Rectangle
from torch.utils.data import Dataset
import... | null |
179,088 | import os
import logging
import contextlib
from omegaconf import OmegaConf
import numpy as np
import torch
import torch.nn as nn
from transformers import AutoTokenizer
from peft import (
LoraConfig,
get_peft_model,
prepare_model_for_int8_training,
)
from minigpt4.common.dist_utils import download_cached_fil... | Overwrite model.train with this function to make sure train/eval mode does not change anymore. |
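The docstring for row 179,088 describes MiniGPT-4's frozen-module trick; the whole function is a one-liner, sketched here with a usage note:

```python
def disabled_train(self, mode: bool = True):
    """Overwrite nn.Module.train so train/eval mode can no longer change."""
    return self

# Typical use: bind it onto a frozen submodule so a later model.train()
# never flips its Dropout/BatchNorm layers back to training behaviour:
#   visual_encoder.eval()
#   visual_encoder.train = disabled_train
```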
179,089 | from __future__ import annotations
import math
from dataclasses import dataclass, field
from typing import Any, Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from einops import rearrange, repeat
from transformers import PretrainedConfig, PreTrainedModel
from transformers.activations import ACT2FN
from... | null |
179,090 | from __future__ import annotations
import math
from dataclasses import dataclass, field
from typing import Any, Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from einops import rearrange, repeat
from transformers import PretrainedConfig, PreTrainedModel
from transformers.activations import ACT2FN
from... | null |
179,091 | from __future__ import annotations
import math
from dataclasses import dataclass, field
from typing import Any, Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from einops import rearrange, repeat
from transformers import PretrainedConfig, PreTrainedModel
from transformers.activations import ACT2FN
from... | null |
179,092 | from __future__ import annotations
import math
from dataclasses import dataclass, field
from typing import Any, Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from einops import rearrange, repeat
from transformers import PretrainedConfig, PreTrainedModel
from transformers.activations import ACT2FN
from... | null |
179,093 | from __future__ import annotations
import math
from dataclasses import dataclass, field
from typing import Any, Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from einops import rearrange, repeat
from transformers import PretrainedConfig, PreTrainedModel
from transformers.activations import ACT2FN
from... | null |
179,152 | import argparse
import numpy as np
from nltk.translate.bleu_score import sentence_bleu
from minigpt4.common.registry import registry
from minigpt4.common.config import Config
from minigpt4.datasets.builders import *
from minigpt4.models import *
from minigpt4.processors import *
from minigpt4.runners import *
from mini... | null |
179,174 | import cv2
import os
import torch
import torch.nn as nn
from torchvision.transforms import Compose
from .midas.dpt_depth import DPTDepthModel
from .midas.midas_net import MidasNet
from .midas.midas_net_custom import MidasNet_small
from .midas.transforms import Resize, NormalizeImage, PrepareForNet
from annotator.util i... | null |
179,296 | from PIL import Image
from pathlib import Path
import scipy.interpolate
import torch
from torchvision import transforms
from torchvision.transforms.functional import crop
from tqdm import tqdm
import numpy as np
import cv2
from stablevideo.implicit_neural_networks import IMLP
def load_video(folder: str, resize=(432, 7... | null |
179,297 | from PIL import Image
from pathlib import Path
import scipy.interpolate
import torch
from torchvision import transforms
from torchvision.transforms.functional import crop
from tqdm import tqdm
import numpy as np
import cv2
from stablevideo.implicit_neural_networks import IMLP
class IMLP(nn.Module):
def __init__(
... | null |
179,298 | from PIL import Image
from pathlib import Path
import scipy.interpolate
import torch
from torchvision import transforms
from torchvision.transforms.functional import crop
from tqdm import tqdm
import numpy as np
import cv2
from stablevideo.implicit_neural_networks import IMLP
def get_grid_indices(x_start, y_start, h_cr... | null |
179,299 | from PIL import Image
from pathlib import Path
import scipy.interpolate
import torch
from torchvision import transforms
from torchvision.transforms.functional import crop
from tqdm import tqdm
import numpy as np
import cv2
from stablevideo.implicit_neural_networks import IMLP
def reconstruct_video_layer(uv_values, atl... | null |
179,300 | from PIL import Image
from pathlib import Path
import scipy.interpolate
import torch
from torchvision import transforms
from torchvision.transforms.functional import crop
from tqdm import tqdm
import numpy as np
import cv2
from stablevideo.implicit_neural_networks import IMLP
def get_grid_indices(x_start, y_start, h_cr... | null |
179,301 | from PIL import Image
from pathlib import Path
import scipy.interpolate
import torch
from torchvision import transforms
from torchvision.transforms.functional import crop
from tqdm import tqdm
import numpy as np
import cv2
from stablevideo.implicit_neural_networks import IMLP
def get_grid_indices(x_start, y_start, h_cr... | null |
179,302 | from PIL import Image
from pathlib import Path
import scipy.interpolate
import torch
from torchvision import transforms
from torchvision.transforms.functional import crop
from tqdm import tqdm
import numpy as np
import cv2
from stablevideo.implicit_neural_networks import IMLP
def get_atlas_crops(uv_values, grid_atlas,... | null |
179,303 | from PIL import Image
from pathlib import Path
import scipy.interpolate
import torch
from torchvision import transforms
from torchvision.transforms.functional import crop
from tqdm import tqdm
import numpy as np
import cv2
from stablevideo.implicit_neural_networks import IMLP
def get_random_crop_params(input_size, out... | null |
179,304 | from PIL import Image
from pathlib import Path
import scipy.interpolate
import torch
from torchvision import transforms
from torchvision.transforms.functional import crop
from tqdm import tqdm
import numpy as np
import cv2
from stablevideo.implicit_neural_networks import IMLP
def get_masks_boundaries(alpha_video, bord... | null |
179,305 | from PIL import Image
from pathlib import Path
import scipy.interpolate
import torch
from torchvision import transforms
from torchvision.transforms.functional import crop
from tqdm import tqdm
import numpy as np
import cv2
from stablevideo.implicit_neural_networks import IMLP
def get_atlas_bounding_box(mask_boundaries... | null |
179,306 | from PIL import Image
from pathlib import Path
import scipy.interpolate
import torch
from torchvision import transforms
from torchvision.transforms.functional import crop
from tqdm import tqdm
import numpy as np
import cv2
from stablevideo.implicit_neural_networks import IMLP
def tensor2im(input_image, imtype=np.uint8... | null |
179,307 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad) | null |
179,308 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
def positionalEncoding_vec(in_tensor, b):
proj = torch.einsum("ij, k -> ijk", in_tensor, b) # shape (batch, in_tensor.size(1), freqNum)
mapped_coords = torch.cat((torch.sin(proj), torch.cos(proj)), dim=1) # shape (batch, 2*... | null |
179,309 |
The provided code snippet includes necessary dependencies for implementing the `replace_with_custom_fn_if_matches_filter` function. Write a Python function `def replace_with_custom_fn_if_matches_filter( model, replacement_fn, filter_fn, cur_fqn='' ) -> None` to solve the following problem:
For each `child` in `mo... | For each `child` in `model`, replaces it with `replacement_fn(child)` if `filter_fn(child)` is `True` |
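A sketch of the recursive module-replacement helper described in row 179,309; the signature follows the prompt, though the FQN bookkeeping details are assumptions:

```python
import torch.nn as nn

def replace_with_custom_fn_if_matches_filter(model, replacement_fn,
                                             filter_fn, cur_fqn='') -> None:
    for name, child in model.named_children():
        new_fqn = name if cur_fqn == '' else f'{cur_fqn}.{name}'
        if filter_fn(child):
            # Swap the matching child in place on its parent.
            setattr(model, name, replacement_fn(child))
        else:
            # Recurse into non-matching children.
            replace_with_custom_fn_if_matches_filter(
                child, replacement_fn, filter_fn, new_fqn)
```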
179,310 | import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional, Tuple, Type
from .common import LayerNorm2d, MLPBlock
from segment_anything_fast.flash_4 import _attention_rel_h_rel_w
The provided code snippet includes necessary dependencies for implementing the `window_partition` functi... | Partition into non-overlapping windows with padding if needed. Args: x (tensor): input tokens with [B, H, W, C]. window_size (int): window size. Returns: windows: windows after partition with [B * num_windows, window_size, window_size, C]. (Hp, Wp): padded height and width before partition |
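`window_partition` from the SAM image encoder pads tokens to a multiple of the window size before tiling; a sketch consistent with the docstring:

```python
from typing import Tuple
import torch
import torch.nn.functional as F

def window_partition(x: torch.Tensor,
                     window_size: int) -> Tuple[torch.Tensor, Tuple[int, int]]:
    B, H, W, C = x.shape
    # Pad bottom/right so H and W become multiples of window_size.
    pad_h = (window_size - H % window_size) % window_size
    pad_w = (window_size - W % window_size) % window_size
    if pad_h > 0 or pad_w > 0:
        x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))
    Hp, Wp = H + pad_h, W + pad_w
    x = x.view(B, Hp // window_size, window_size,
               Wp // window_size, window_size, C)
    windows = (x.permute(0, 1, 3, 2, 4, 5).contiguous()
                .view(-1, window_size, window_size, C))
    return windows, (Hp, Wp)
```

`window_unpartition` in the next row inverts this: reshape the windows back to (B, Hp, Wp, C) and crop the padding off.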
179,311 | import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional, Tuple, Type
from .common import LayerNorm2d, MLPBlock
from segment_anything_fast.flash_4 import _attention_rel_h_rel_w
The provided code snippet includes necessary dependencies for implementing the `window_unpartition` func... | Window unpartition into original sequences and removing padding. Args: windows (tensor): input tokens with [B * num_windows, window_size, window_size, C]. window_size (int): window size. pad_hw (Tuple): padded height and width (Hp, Wp). hw (Tuple): original height and width (H, W) before padding. Returns: x: unpartitio... |
179,312 | import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional, Tuple, Type
from .common import LayerNorm2d, MLPBlock
from segment_anything_fast.flash_4 import _attention_rel_h_rel_w
def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor:
"""
Get relativ... | Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`. https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa B950 Args: attn (Tensor): attention map. q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C). rel_pos_h (Te... |
179,313 | import torch
from functools import partial
from .modeling import ImageEncoderViT, MaskDecoder, PromptEncoder, Sam, TwoWayTransformer
def build_sam_vit_h(checkpoint=None):
def _apply_eval_dtype_sam(model, dtype):
def build_sam_fast_vit_h(checkpoint=None, compile_mode='max-autotune', dtype=torch.bfloat16):
sam = bui... | null |
179,314 | import torch
from functools import partial
from .modeling import ImageEncoderViT, MaskDecoder, PromptEncoder, Sam, TwoWayTransformer
def build_sam_vit_l(checkpoint=None):
return _build_sam(
encoder_embed_dim=1024,
encoder_depth=24,
encoder_num_heads=16,
encoder_global_attn_indexes=[5... | null |
179,315 | import torch
from functools import partial
from .modeling import ImageEncoderViT, MaskDecoder, PromptEncoder, Sam, TwoWayTransformer
def build_sam_vit_b(checkpoint=None):
return _build_sam(
encoder_embed_dim=768,
encoder_depth=12,
encoder_num_heads=12,
encoder_global_attn_indexes=[2,... | null |
179,316 | import torch
import triton
import triton.language as tl
import os
import pathlib
def _attention_rel_h_rel_w_kernel_aligned_meta(q, k, v, rel_h_w, sm_scale):
return q.contiguous() | null |
179,317 | import torch
import triton
import triton.language as tl
import os
import pathlib
def _autotune(configs, function):
import torch.utils.benchmark as benchmark
def benchmark_torch_function_in_microseconds(f, *args, **kwargs):
try:
f(*args, **kwargs)
t0 = benchmark.Timer(
... | null |
179,318 | import torch
import triton
import triton.language as tl
import os
import pathlib
USE_CUSTOM_KERNEL = bool(int(os.environ.get('SEGMENT_ANYTHING_FAST_USE_FLASH_4', 1)))
The provided code snippet includes necessary dependencies for implementing the `_attention_rel_h_rel_w` function. Write a Python function `def _attentio... | Writing this as a composite allows torch.compile to fuse the needed padding into previous operations and memory allocations. |
179,322 | import numpy as np
import torch
import math
from copy import deepcopy
from itertools import product
from typing import Any, Dict, Generator, ItemsView, List, Tuple
The provided code snippet includes necessary dependencies for implementing the `mask_to_rle_pytorch_2` function. Write a Python function `def mask_to_rle_p... | Encodes masks to an uncompressed RLE, in the format expected by pycoco tools. |
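Row 179,322 asks for a batched RLE encoder in the pycocotools format. A sketch of the baseline logic — column-major flattening, change-point detection, then run lengths; the `_2` variant in the prompt vectorizes the per-mask loop further. Assumes bool masks.

```python
from typing import Any, Dict, List
import torch

def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:
    """Encode (B, H, W) binary masks as uncompressed pycocotools RLE."""
    b, h, w = tensor.shape
    # pycocotools RLE counts run column-major (Fortran order).
    tensor = tensor.permute(0, 2, 1).flatten(1)
    # Indices where consecutive pixels differ mark run boundaries.
    diff = tensor[:, 1:] ^ tensor[:, :-1]
    change_indices = diff.nonzero()
    out = []
    for i in range(b):
        cur_idxs = change_indices[change_indices[:, 0] == i, 1] + 1
        cur_idxs = torch.cat([cur_idxs.new_tensor([0]), cur_idxs,
                              cur_idxs.new_tensor([h * w])])
        run_lengths = (cur_idxs[1:] - cur_idxs[:-1]).tolist()
        # RLE always starts counting zeros; prepend 0 if mask starts with 1.
        counts = [] if tensor[i, 0] == 0 else [0]
        counts.extend(run_lengths)
        out.append({"size": [h, w], "counts": counts})
    return out
```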
179,334 | import pandas as pd
import fire
import matplotlib.pyplot as plt
import matplotlib
def make_row_chart(batch_size_idx, techniques, df, value_column, ax1, ax2, label, ylim_low, ylim_high, va, title="", relative=False, data_format=None):
category_column = "technique"
if not isinstance(ylim_low, tuple):
ylim... | null |
179,335 | import tqdm
import torch
import fire
from metrics import calculate_miou, create_result_entry
from data import build_data, setup_coco_img_ids
import math
import segment_anything_fast
def pad_to_batch_size(batch, batch_size, device):
assert batch.dim() == 4
# assert batch.is_pinned()
global PADDED_TENSOR
... | null |
179,336 | import tqdm
import torch
import fire
from metrics import calculate_miou, create_result_entry
from data import build_data, setup_coco_img_ids
import math
import segment_anything_fast
torch._dynamo.config.cache_size_limit = 50000
def build_results(batched_data_iter,
predictor,
mask_deb... | null |
179,337 | import numpy as np
import torch
import matplotlib.pyplot as plt
import cv2
import torch.utils.benchmark as benchmark
from segment_anything_fast import sam_model_registry, sam_model_fast_registry, SamAutomaticMaskGenerator
torch.cuda.synchronize()
torch.cuda.synchronize()
print(start_event.elapsed_time(end_event) / 10.)... | null |
179,338 | import numpy as np
import torch
import matplotlib.pyplot as plt
import cv2
import torch.utils.benchmark as benchmark
from segment_anything_fast import sam_model_registry, sam_model_fast_registry, SamAutomaticMaskGenerator
plt.figure(figsize=(image.shape[1]/100., image.shape[0]/100.), dpi=100)
plt.imshow(image)
plt.axis... | null |
179,339 | import torch
from torch import nn
from typing import Optional, Tuple, Union
import transformers
from transformers.models.llama.modeling_llama import apply_rotary_pos_emb, rotate_half
import math
STORE_KV_BEFORE_ROPE = False
USE_MEM_EFF_ATTENTION = False
def xformers_forward(
self,
hidden_states: torch.Tensor,
... | null |
179,340 | import torch
from torch import nn
from typing import Optional, Tuple, Union
import transformers
from transformers.models.llama.modeling_llama import apply_rotary_pos_emb, rotate_half
import math
ALPHA = 1.0
SCALING_FACTOR = None
def adaptive_ntk_init(self, dim, max_position_embeddings=2048, base=10000, device=None, sca... | null |
179,341 | from typing import Optional, Tuple
import torch
import transformers
from einops import rearrange
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
outpu... | null |
179,342 | import argparse
import json, os
DEFAULT_SYSTEM_PROMPT = """You are a helpful assistant. 你是一个乐于助人的助手。"""
TEMPLATE = (
"[INST] <<SYS>>\n"
"{system_prompt}\n"
"<</SYS>>\n\n"
"{instruction} [/INST]"
)
import torch
from transformers import AutoModelForCausalLM, LlamaForCausalLM, LlamaTokenizer
from transform... | null |
179,343 | import torch
from transformers import (
AutoModelForCausalLM,
LlamaForCausalLM,
LlamaTokenizer,
StoppingCriteria,
BitsAndBytesConfig,
GenerationConfig
)
import gradio as gr
import argparse
import os
from queue import Queue
from threading import Thread
import traceback
import gc
import json
impor... | null |
179,344 | import torch
from transformers import (
AutoModelForCausalLM,
LlamaForCausalLM,
LlamaTokenizer,
StoppingCriteria,
BitsAndBytesConfig,
GenerationConfig
)
import gradio as gr
import argparse
import os
from queue import Queue
from threading import Thread
import traceback
import gc
import json
impor... | null |
179,345 | import torch
from transformers import (
AutoModelForCausalLM,
LlamaForCausalLM,
LlamaTokenizer,
StoppingCriteria,
BitsAndBytesConfig,
GenerationConfig
)
import gradio as gr
import argparse
import os
from queue import Queue
from threading import Thread
import traceback
import gc
import json
impor... | null |
179,346 | import torch
from transformers import (
AutoModelForCausalLM,
LlamaForCausalLM,
LlamaTokenizer,
StoppingCriteria,
BitsAndBytesConfig,
GenerationConfig
)
import gradio as gr
import argparse
import os
from queue import Queue
from threading import Thread
import traceback
import gc
import json
impor... | null |
179,347 | import torch
from transformers import (
AutoModelForCausalLM,
LlamaForCausalLM,
LlamaTokenizer,
StoppingCriteria,
BitsAndBytesConfig,
GenerationConfig
)
import gradio as gr
import argparse
import os
from queue import Queue
from threading import Thread
import traceback
import gc
import json
impor... | null |
179,348 | import argparse
import asyncio
from http import HTTPStatus
import json
import time
from typing import AsyncGenerator, Dict, List, Optional
from packaging import version
import fastapi
from fastapi import BackgroundTasks, Request
from fastapi.exceptions import RequestValidationError
from fastapi.middleware.cors import C... | null |
179,349 | import argparse
import asyncio
from http import HTTPStatus
import json
import time
from typing import AsyncGenerator, Dict, List, Optional
from packaging import version
import fastapi
from fastapi import BackgroundTasks, Request
from fastapi.exceptions import RequestValidationError
from fastapi.middleware.cors import C... | null |
179,350 | import argparse
import asyncio
from http import HTTPStatus
import json
import time
from typing import AsyncGenerator, Dict, List, Optional
from packaging import version
import fastapi
from fastapi import BackgroundTasks, Request
from fastapi.exceptions import RequestValidationError
from fastapi.middleware.cors import C... | Show available models. Right now we only have one model. |
179,351 | import argparse
import asyncio
from http import HTTPStatus
import json
import time
from typing import AsyncGenerator, Dict, List, Optional
from packaging import version
import fastapi
from fastapi import BackgroundTasks, Request
from fastapi.exceptions import RequestValidationError
from fastapi.middleware.cors import C... | Completion API similar to OpenAI's API. See https://platform.openai.com/docs/api-reference/chat/create for the API specification. This API mimics the OpenAI ChatCompletion API. NOTE: Currently we do not support the following features: - function_call (Users should implement this by themselves) - logit_bias (to be suppo... |
179,352 | import argparse
import asyncio
from http import HTTPStatus
import json
import time
from typing import AsyncGenerator, Dict, List, Optional
from packaging import version
import fastapi
from fastapi import BackgroundTasks, Request
from fastapi.exceptions import RequestValidationError
from fastapi.middleware.cors import C... | Completion API similar to OpenAI's API. See https://platform.openai.com/docs/api-reference/completions/create for the API specification. This API mimics the OpenAI Completion API. NOTE: Currently we do not support the following features: - echo (since the vLLM engine does not currently support getting the logprobs of p... |
179,353 | import argparse
import os
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
import uvicorn
from threading import Thread
from sse_starlette.sse import EventSourceResponse
import torch
import torch.nn.functional as F
from transformers import (
AutoModelForCausalLM,
LlamaTokenizer,
... | Creates a completion for the chat message |
179,354 | import argparse
import os
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
import uvicorn
from threading import Thread
from sse_starlette.sse import EventSourceResponse
import torch
import torch.nn.functional as F
from transformers import (
AutoModelForCausalLM,
LlamaTokenizer,
... | Creates a completion |
179,355 | import argparse
import os
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
import uvicorn
from threading import Thread
from sse_starlette.sse import EventSourceResponse
import torch
import torch.nn.functional as F
from transformers import (
AutoModelForCausalLM,
LlamaTokenizer,
... | Creates text embedding |
179,356 | from dotenv import load_dotenv
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.vectorstores import Chroma
from langchain.llms import GPT4All, LlamaCpp
import os
import argparse
... | null |
179,358 | import logging
import numpy as np
import math
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, List, Dict, Any, Mapping
from pathlib import Path
import datasets
import torch
from datasets import load_dataset, concatenate_datasets
import transformers
... | r""" This method wraps the entire protocol for preparing a model before training. This includes: 1- casting the layernorm in fp32, 2- making the output embedding layer require grads, 3- adding the upcasting of the lm head to fp32. Args: model (`transformers.PreTrainedModel`): The loaded model from `transformers` |
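A sketch that implements the three listed steps literally; the `enable_input_require_grads` call and the fp32-cast wrapper follow common peft/transformers practice and are assumptions here, not the dataset's reference answer.

```python
import torch
import torch.nn as nn

def prepare_model_for_training(model, output_embedding_layer_name='lm_head'):
    for param in model.parameters():
        param.requires_grad = False            # freeze the base model
        if param.ndim == 1:
            # 1) cast 1-D params (layer norms, biases) to fp32 for stability
            param.data = param.data.to(torch.float32)

    # 2) let gradients flow into the (frozen) input embeddings, which
    #    gradient checkpointing with adapters requires
    model.enable_input_require_grads()

    # 3) upcast the lm head output to fp32
    if hasattr(model, output_embedding_layer_name):
        head = getattr(model, output_embedding_layer_name)

        class CastOutputToFloat(nn.Sequential):
            def forward(self, x):
                return super().forward(x).to(torch.float32)

        setattr(model, output_embedding_layer_name, CastOutputToFloat(head))
    return model
```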
179,359 | import logging
import numpy as np
import math
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, List, Dict, Any, Mapping
from pathlib import Path
import datasets
import torch
from datasets import load_dataset, concatenate_datasets
import transformers
... | null |
179,360 | import logging
import numpy as np
import math
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, List, Dict, Any, Mapping
from pathlib import Path
import datasets
import torch
from datasets import load_dataset, concatenate_datasets
import transformers
... | null |
179,361 | import logging
import numpy as np
import math
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, List, Dict, Any, Mapping
from pathlib import Path
import datasets
import torch
from datasets import load_dataset, concatenate_datasets
import transformers
... | null |
179,364 | from .peft_model import (
PeftModel,
PeftModelForCausalLM,
PeftModelForSeq2SeqLM,
PeftModelForSequenceClassification,
PeftModelForTokenClassification,
)
from .tuners import LoraConfig, PrefixTuningConfig, PromptEncoderConfig, PromptTuningConfig
from .utils import PromptLearningConfig
PEFT_TYPE_TO_CO... | Returns a Peft config object from a dictionary. Args: config_dict (`Dict[str, Any]`): Dictionary containing the configuration parameters. |
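`get_peft_config` reduces to a dictionary dispatch on `peft_type`. A sketch; the mapping name completes the prompt's `PEFT_TYPE_TO_CO...` truncation, so its exact spelling and contents are assumptions.

```python
from peft import (LoraConfig, PrefixTuningConfig, PromptEncoderConfig,
                  PromptTuningConfig)

# Assumed completion of the truncated PEFT_TYPE_TO_CO... mapping.
PEFT_TYPE_TO_CONFIG_MAPPING = {
    'LORA': LoraConfig,
    'PREFIX_TUNING': PrefixTuningConfig,
    'P_TUNING': PromptEncoderConfig,
    'PROMPT_TUNING': PromptTuningConfig,
}

def get_peft_config(config_dict):
    """Build the matching Peft config object from a plain dictionary."""
    return PEFT_TYPE_TO_CONFIG_MAPPING[config_dict['peft_type']](**config_dict)
```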
179,365 | from .peft_model import (
PeftModel,
PeftModelForCausalLM,
PeftModelForSeq2SeqLM,
PeftModelForSequenceClassification,
PeftModelForTokenClassification,
)
from .tuners import LoraConfig, PrefixTuningConfig, PromptEncoderConfig, PromptTuningConfig
from .utils import PromptLearningConfig
MODEL_TYPE_TO_P... | Returns a Peft model object from a model and a config. Args: model ([`transformers.PreTrainedModel`]): Model to be wrapped. peft_config ([`PeftConfig`]): Configuration object containing the parameters of the Peft model. |
179,366 | from .config import PeftType
class PeftType(str, enum.Enum):
PROMPT_TUNING = "PROMPT_TUNING"
P_TUNING = "P_TUNING"
PREFIX_TUNING = "PREFIX_TUNING"
LORA = "LORA"
The provided code snippet includes necessary dependencies for implementing the `get_peft_model_state_dict` function. Write a Python function ... | Get the state dict of the Peft model. Args: model ([`PeftModel`]): The Peft model. When using torch.nn.DistributedDataParallel, DeepSpeed or FSDP, the model should be the underlying model/unwrapped model (i.e. model.module). state_dict (`dict`, *optional*, defaults to `None`): The state dict of the model. If not provid... |
179,367 | from .config import PeftType
class PeftType(str, enum.Enum):
PROMPT_TUNING = "PROMPT_TUNING"
P_TUNING = "P_TUNING"
PREFIX_TUNING = "PREFIX_TUNING"
LORA = "LORA"
The provided code snippet includes necessary dependencies for implementing the `set_peft_model_state_dict` function. Write a Python function ... | Set the state dict of the Peft model. Args: model ([`PeftModel`]): The Peft model. peft_model_state_dict (`dict`): The state dict of the Peft model. |