id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
178,632 | from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import BitsAndBytesConfig, CLIPVisionModel
from utils.utils import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN,
DEFAULT_IMAGE_PATCH_TOKEN)
from .llava.model.language_model.llava_llama import (LlavaLlamaForCausalLM,
LlavaLlamaModel)
from .segment_anything import build_sam_vit_h
The provided code snippet includes necessary dependencies for implementing the `dice_loss` function. Write a Python function `def dice_loss( inputs: torch.Tensor, targets: torch.Tensor, num_masks: float, scale=1000, # 100000.0, eps=1e-6, )` to solve the following problem:
Compute the DICE loss, similar to generalized IOU for masks Args: inputs: A float tensor of arbitrary shape. The predictions for each example. targets: A float tensor with the same shape as inputs. Stores the binary classification label for each element in inputs (0 for the negative class and 1 for the positive class).
Here is the function:
def dice_loss(
    inputs: torch.Tensor,
    targets: torch.Tensor,
    num_masks: float,
    scale=1000,  # 100000.0,
    eps=1e-6,
):
    """
    Compute the DICE loss, similar to generalized IOU for masks.

    Args:
        inputs: A float tensor of arbitrary shape; raw logits per example.
        targets: A float tensor with the same shape as inputs, holding the
            binary label (0 = negative class, 1 = positive class) per element.
        num_masks: Normalizer for the summed per-mask losses.
        scale: Divides both numerator and denominator terms for numerical
            stability; cancels out mathematically.
        eps: Small constant guarding against division by zero.
    """
    probs = inputs.sigmoid()
    # Collapse the two spatial dims so each row is one mask.
    probs = probs.flatten(1, 2)
    labels = targets.flatten(1, 2)
    numerator = 2 * (probs / scale * labels).sum(-1)
    denominator = (probs / scale).sum(-1) + (labels / scale).sum(-1)
    per_mask_loss = 1 - (numerator + eps) / (denominator + eps)
    return per_mask_loss.sum() / (num_masks + 1e-8)
178,633 | from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import BitsAndBytesConfig, CLIPVisionModel
from utils.utils import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN,
DEFAULT_IMAGE_PATCH_TOKEN)
from .llava.model.language_model.llava_llama import (LlavaLlamaForCausalLM,
LlavaLlamaModel)
from .segment_anything import build_sam_vit_h
The provided code snippet includes necessary dependencies for implementing the `sigmoid_ce_loss` function. Write a Python function `def sigmoid_ce_loss( inputs: torch.Tensor, targets: torch.Tensor, num_masks: float, )` to solve the following problem:
Args: inputs: A float tensor of arbitrary shape. The predictions for each example. targets: A float tensor with the same shape as inputs. Stores the binary classification label for each element in inputs (0 for the negative class and 1 for the positive class). Returns: Loss tensor
Here is the function:
def sigmoid_ce_loss(
    inputs: torch.Tensor,
    targets: torch.Tensor,
    num_masks: float,
):
    """
    Sigmoid binary cross-entropy, averaged per mask and normalized.

    Args:
        inputs: A float tensor of arbitrary shape; raw logits per example.
        targets: A float tensor with the same shape as inputs, holding the
            binary label (0 = negative class, 1 = positive class) per element.
        num_masks: Normalizer for the summed per-mask losses.
    Returns:
        Loss tensor (scalar).
    """
    per_element = F.binary_cross_entropy_with_logits(
        inputs, targets, reduction="none"
    )
    # Average over each mask's pixels, then sum masks and normalize.
    per_mask = per_element.flatten(1, 2).mean(1)
    return per_mask.sum() / (num_masks + 1e-8)
178,634 | from typing import Optional, Tuple, Type
import torch
import torch.nn as nn
import torch.nn.functional as F
from .common import LayerNorm2d, MLPBlock
The provided code snippet includes necessary dependencies for implementing the `window_partition` function. Write a Python function `def window_partition( x: torch.Tensor, window_size: int ) -> Tuple[torch.Tensor, Tuple[int, int]]` to solve the following problem:
Partition into non-overlapping windows with padding if needed. Args: x (tensor): input tokens with [B, H, W, C]. window_size (int): window size. Returns: windows: windows after partition with [B * num_windows, window_size, window_size, C]. (Hp, Wp): padded height and width before partition
Here is the function:
def window_partition(
    x: torch.Tensor, window_size: int
) -> Tuple[torch.Tensor, Tuple[int, int]]:
    """
    Partition into non-overlapping windows, zero-padding bottom/right if needed.

    Args:
        x (tensor): input tokens with [B, H, W, C].
        window_size (int): window size.
    Returns:
        windows: windows after partition with [B * num_windows, window_size, window_size, C].
        (Hp, Wp): padded height and width before partition.
    """
    B, H, W, C = x.shape
    # Amount needed to round H and W up to a multiple of window_size.
    pad_h = (-H) % window_size
    pad_w = (-W) % window_size
    if pad_h or pad_w:
        x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))
    Hp, Wp = H + pad_h, W + pad_w

    grid = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)
    windows = grid.permute(0, 1, 3, 2, 4, 5).contiguous()
    return windows.view(-1, window_size, window_size, C), (Hp, Wp)
178,635 | from typing import Optional, Tuple, Type
import torch
import torch.nn as nn
import torch.nn.functional as F
from .common import LayerNorm2d, MLPBlock
The provided code snippet includes necessary dependencies for implementing the `window_unpartition` function. Write a Python function `def window_unpartition( windows: torch.Tensor, window_size: int, pad_hw: Tuple[int, int], hw: Tuple[int, int], ) -> torch.Tensor` to solve the following problem:
Window unpartition into original sequences and removing padding. Args: windows (tensor): input tokens with [B * num_windows, window_size, window_size, C]. window_size (int): window size. pad_hw (Tuple): padded height and width (Hp, Wp). hw (Tuple): original height and width (H, W) before padding. Returns: x: unpartitioned sequences with [B, H, W, C].
Here is the function:
def window_unpartition(
    windows: torch.Tensor,
    window_size: int,
    pad_hw: Tuple[int, int],
    hw: Tuple[int, int],
) -> torch.Tensor:
    """
    Reassemble windows into the original token map, dropping any padding.

    Args:
        windows (tensor): input tokens with [B * num_windows, window_size, window_size, C].
        window_size (int): window size.
        pad_hw (Tuple): padded height and width (Hp, Wp).
        hw (Tuple): original height and width (H, W) before padding.
    Returns:
        x: unpartitioned sequences with [B, H, W, C].
    """
    Hp, Wp = pad_hw
    H, W = hw
    windows_per_image = (Hp // window_size) * (Wp // window_size)
    B = windows.shape[0] // windows_per_image

    x = windows.view(
        B, Hp // window_size, Wp // window_size, window_size, window_size, -1
    )
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)
    # Strip the padding rows/columns introduced by window_partition.
    if Hp > H or Wp > W:
        x = x[:, :H, :W, :].contiguous()
    return x
178,636 | from typing import Optional, Tuple, Type
import torch
import torch.nn as nn
import torch.nn.functional as F
from .common import LayerNorm2d, MLPBlock
def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor:
    """
    Get relative positional embeddings according to the relative positions of
    query and key sizes.

    Args:
        q_size (int): size of query q.
        k_size (int): size of key k.
        rel_pos (Tensor): relative position embeddings (L, C).
    Returns:
        Extracted positional embeddings according to relative positions.
    """
    max_rel_dist = int(2 * max(q_size, k_size) - 1)

    if rel_pos.shape[0] == max_rel_dist:
        rel_pos_resized = rel_pos
    else:
        # Linearly interpolate the embedding table to the required length.
        resized = F.interpolate(
            rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1),
            size=max_rel_dist,
            mode="linear",
        )
        rel_pos_resized = resized.reshape(-1, max_rel_dist).permute(1, 0)

    # When q and k have different lengths, scale coords by the shorter side.
    q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)
    k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)
    relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0)

    return rel_pos_resized[relative_coords.long()]
The provided code snippet includes necessary dependencies for implementing the `add_decomposed_rel_pos` function. Write a Python function `def add_decomposed_rel_pos( attn: torch.Tensor, q: torch.Tensor, rel_pos_h: torch.Tensor, rel_pos_w: torch.Tensor, q_size: Tuple[int, int], k_size: Tuple[int, int], ) -> torch.Tensor` to solve the following problem:
Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`. https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa B950 Args: attn (Tensor): attention map. q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C). rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis. rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis. q_size (Tuple): spatial sequence size of query q with (q_h, q_w). k_size (Tuple): spatial sequence size of key k with (k_h, k_w). Returns: attn (Tensor): attention map with added relative positional embeddings.
Here is the function:
def add_decomposed_rel_pos(
    attn: torch.Tensor,
    q: torch.Tensor,
    rel_pos_h: torch.Tensor,
    rel_pos_w: torch.Tensor,
    q_size: Tuple[int, int],
    k_size: Tuple[int, int],
) -> torch.Tensor:
    """
    Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`.
    https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa B950

    Args:
        attn (Tensor): attention map.
        q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C).
        rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis.
        rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis.
        q_size (Tuple): spatial sequence size of query q with (q_h, q_w).
        k_size (Tuple): spatial sequence size of key k with (k_h, k_w).
    Returns:
        attn (Tensor): attention map with added relative positional embeddings.
    """
    q_h, q_w = q_size
    k_h, k_w = k_size
    Rh = get_rel_pos(q_h, k_h, rel_pos_h)
    Rw = get_rel_pos(q_w, k_w, rel_pos_w)

    B, _, dim = q.shape
    q_spatial = q.reshape(B, q_h, q_w, dim)
    # Per-axis relative bias terms: (B, q_h, q_w, k_h) and (B, q_h, q_w, k_w).
    rel_h = torch.einsum("bhwc,hkc->bhwk", q_spatial, Rh)
    rel_w = torch.einsum("bhwc,wkc->bhwk", q_spatial, Rw)

    biased = attn.view(B, q_h, q_w, k_h, k_w)
    biased = biased + rel_h.unsqueeze(-1) + rel_w.unsqueeze(-2)
    return biased.view(B, q_h * q_w, k_h * k_w)
178,637 | from functools import partial
import torch
from .modeling import (ImageEncoderViT, MaskDecoder, PromptEncoder, Sam,
TwoWayTransformer)
def _build_sam(
    encoder_embed_dim,
    encoder_depth,
    encoder_num_heads,
    encoder_global_attn_indexes,
    checkpoint=None,
):
    """Assemble a SAM model (image encoder + prompt encoder + mask decoder).

    Args:
        encoder_embed_dim: ViT embedding dimension of the image encoder.
        encoder_depth: number of transformer blocks in the image encoder.
        encoder_num_heads: attention heads per image-encoder block.
        encoder_global_attn_indexes: indices of encoder blocks that use
            global (non-windowed) attention.
        checkpoint: optional path to a serialized state dict; loaded
            non-strictly, so partial checkpoints are tolerated.

    Returns:
        A `Sam` model in eval mode.
    """
    # Fixed SAM design constants: 1024px input, 16px ViT patches,
    # 256-dim prompt/mask embedding space.
    prompt_embed_dim = 256
    image_size = 1024
    vit_patch_size = 16
    image_embedding_size = image_size // vit_patch_size
    sam = Sam(
        image_encoder=ImageEncoderViT(
            depth=encoder_depth,
            embed_dim=encoder_embed_dim,
            img_size=image_size,
            mlp_ratio=4,
            norm_layer=partial(torch.nn.LayerNorm, eps=1e-6),
            num_heads=encoder_num_heads,
            patch_size=vit_patch_size,
            qkv_bias=True,
            use_rel_pos=True,
            global_attn_indexes=encoder_global_attn_indexes,
            window_size=14,
            out_chans=prompt_embed_dim,
        ),
        prompt_encoder=PromptEncoder(
            embed_dim=prompt_embed_dim,
            image_embedding_size=(image_embedding_size, image_embedding_size),
            input_image_size=(image_size, image_size),
            mask_in_chans=16,
        ),
        mask_decoder=MaskDecoder(
            num_multimask_outputs=3,
            transformer=TwoWayTransformer(
                depth=2,
                embedding_dim=prompt_embed_dim,
                mlp_dim=2048,
                num_heads=8,
            ),
            transformer_dim=prompt_embed_dim,
            iou_head_depth=3,
            iou_head_hidden_dim=256,
        ),
        # ImageNet mean/std on the 0-255 scale.
        pixel_mean=[123.675, 116.28, 103.53],
        pixel_std=[58.395, 57.12, 57.375],
    )
    sam.eval()
    if checkpoint is not None:
        # NOTE(review): torch.load without map_location assumes the
        # checkpoint's saved device is available — confirm for CPU-only use.
        with open(checkpoint, "rb") as f:
            state_dict = torch.load(f)
        sam.load_state_dict(state_dict, strict=False)
    return sam
def build_sam_vit_h(checkpoint=None):
    """Build the ViT-H (huge) SAM variant, optionally loading `checkpoint`."""
    return _build_sam(
        encoder_embed_dim=1280,
        encoder_depth=32,
        encoder_num_heads=16,
        encoder_global_attn_indexes=[7, 15, 23, 31],
        checkpoint=checkpoint,
    )
178,638 | from functools import partial
import torch
from .modeling import (ImageEncoderViT, MaskDecoder, PromptEncoder, Sam,
TwoWayTransformer)
def _build_sam(
    encoder_embed_dim,
    encoder_depth,
    encoder_num_heads,
    encoder_global_attn_indexes,
    checkpoint=None,
):
    """Assemble a SAM model (image encoder + prompt encoder + mask decoder).

    Args:
        encoder_embed_dim: ViT embedding dimension of the image encoder.
        encoder_depth: number of transformer blocks in the image encoder.
        encoder_num_heads: attention heads per image-encoder block.
        encoder_global_attn_indexes: indices of encoder blocks that use
            global (non-windowed) attention.
        checkpoint: optional path to a serialized state dict; loaded
            non-strictly, so partial checkpoints are tolerated.

    Returns:
        A `Sam` model in eval mode.
    """
    # Fixed SAM design constants: 1024px input, 16px ViT patches,
    # 256-dim prompt/mask embedding space.
    prompt_embed_dim = 256
    image_size = 1024
    vit_patch_size = 16
    image_embedding_size = image_size // vit_patch_size
    sam = Sam(
        image_encoder=ImageEncoderViT(
            depth=encoder_depth,
            embed_dim=encoder_embed_dim,
            img_size=image_size,
            mlp_ratio=4,
            norm_layer=partial(torch.nn.LayerNorm, eps=1e-6),
            num_heads=encoder_num_heads,
            patch_size=vit_patch_size,
            qkv_bias=True,
            use_rel_pos=True,
            global_attn_indexes=encoder_global_attn_indexes,
            window_size=14,
            out_chans=prompt_embed_dim,
        ),
        prompt_encoder=PromptEncoder(
            embed_dim=prompt_embed_dim,
            image_embedding_size=(image_embedding_size, image_embedding_size),
            input_image_size=(image_size, image_size),
            mask_in_chans=16,
        ),
        mask_decoder=MaskDecoder(
            num_multimask_outputs=3,
            transformer=TwoWayTransformer(
                depth=2,
                embedding_dim=prompt_embed_dim,
                mlp_dim=2048,
                num_heads=8,
            ),
            transformer_dim=prompt_embed_dim,
            iou_head_depth=3,
            iou_head_hidden_dim=256,
        ),
        # ImageNet mean/std on the 0-255 scale.
        pixel_mean=[123.675, 116.28, 103.53],
        pixel_std=[58.395, 57.12, 57.375],
    )
    sam.eval()
    if checkpoint is not None:
        # NOTE(review): torch.load without map_location assumes the
        # checkpoint's saved device is available — confirm for CPU-only use.
        with open(checkpoint, "rb") as f:
            state_dict = torch.load(f)
        sam.load_state_dict(state_dict, strict=False)
    return sam
def build_sam_vit_l(checkpoint=None):
    """Build the ViT-L (large) SAM variant, optionally loading `checkpoint`."""
    return _build_sam(
        encoder_embed_dim=1024,
        encoder_depth=24,
        encoder_num_heads=16,
        encoder_global_attn_indexes=[5, 11, 17, 23],
        checkpoint=checkpoint,
    )
178,639 | from functools import partial
import torch
from .modeling import (ImageEncoderViT, MaskDecoder, PromptEncoder, Sam,
TwoWayTransformer)
def _build_sam(
    encoder_embed_dim,
    encoder_depth,
    encoder_num_heads,
    encoder_global_attn_indexes,
    checkpoint=None,
):
    """Assemble a SAM model (image encoder + prompt encoder + mask decoder).

    Args:
        encoder_embed_dim: ViT embedding dimension of the image encoder.
        encoder_depth: number of transformer blocks in the image encoder.
        encoder_num_heads: attention heads per image-encoder block.
        encoder_global_attn_indexes: indices of encoder blocks that use
            global (non-windowed) attention.
        checkpoint: optional path to a serialized state dict; loaded
            non-strictly, so partial checkpoints are tolerated.

    Returns:
        A `Sam` model in eval mode.
    """
    # Fixed SAM design constants: 1024px input, 16px ViT patches,
    # 256-dim prompt/mask embedding space.
    prompt_embed_dim = 256
    image_size = 1024
    vit_patch_size = 16
    image_embedding_size = image_size // vit_patch_size
    sam = Sam(
        image_encoder=ImageEncoderViT(
            depth=encoder_depth,
            embed_dim=encoder_embed_dim,
            img_size=image_size,
            mlp_ratio=4,
            norm_layer=partial(torch.nn.LayerNorm, eps=1e-6),
            num_heads=encoder_num_heads,
            patch_size=vit_patch_size,
            qkv_bias=True,
            use_rel_pos=True,
            global_attn_indexes=encoder_global_attn_indexes,
            window_size=14,
            out_chans=prompt_embed_dim,
        ),
        prompt_encoder=PromptEncoder(
            embed_dim=prompt_embed_dim,
            image_embedding_size=(image_embedding_size, image_embedding_size),
            input_image_size=(image_size, image_size),
            mask_in_chans=16,
        ),
        mask_decoder=MaskDecoder(
            num_multimask_outputs=3,
            transformer=TwoWayTransformer(
                depth=2,
                embedding_dim=prompt_embed_dim,
                mlp_dim=2048,
                num_heads=8,
            ),
            transformer_dim=prompt_embed_dim,
            iou_head_depth=3,
            iou_head_hidden_dim=256,
        ),
        # ImageNet mean/std on the 0-255 scale.
        pixel_mean=[123.675, 116.28, 103.53],
        pixel_std=[58.395, 57.12, 57.375],
    )
    sam.eval()
    if checkpoint is not None:
        # NOTE(review): torch.load without map_location assumes the
        # checkpoint's saved device is available — confirm for CPU-only use.
        with open(checkpoint, "rb") as f:
            state_dict = torch.load(f)
        sam.load_state_dict(state_dict, strict=False)
    return sam
def build_sam_vit_b(checkpoint=None):
    """Build the ViT-B (base) SAM variant, optionally loading `checkpoint`."""
    return _build_sam(
        encoder_embed_dim=768,
        encoder_depth=12,
        encoder_num_heads=12,
        encoder_global_attn_indexes=[2, 5, 8, 11],
        checkpoint=checkpoint,
    )
178,640 | import math
from copy import deepcopy
from itertools import product
from typing import Any, Dict, Generator, ItemsView, List, Tuple
import numpy as np
import torch
def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:
    """Translate XYXY boxes from crop-local back to full-image coordinates."""
    x0, y0 = crop_box[0], crop_box[1]
    shift = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)
    # Broadcast over a channel dimension if present.
    if boxes.ndim == 3:
        shift = shift.unsqueeze(1)
    return boxes + shift
The provided code snippet includes necessary dependencies for implementing the `is_box_near_crop_edge` function. Write a Python function `def is_box_near_crop_edge( boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0 ) -> torch.Tensor` to solve the following problem:
Filter masks at the edge of a crop, but not at the edge of the original image.
Here is the function:
def is_box_near_crop_edge(
    boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0
) -> torch.Tensor:
    """Filter masks at the edge of a crop, but not at the edge of the original image."""
    crop_edges = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)
    image_edges = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)
    shifted = uncrop_boxes_xyxy(boxes, crop_box).float()
    # Within atol of a crop edge...
    at_crop = torch.isclose(shifted, crop_edges[None, :], atol=atol, rtol=0)
    # ...but not within atol of the corresponding image edge.
    at_image = torch.isclose(shifted, image_edges[None, :], atol=atol, rtol=0)
    return torch.any(torch.logical_and(at_crop, ~at_image), dim=1)
178,641 | import math
from copy import deepcopy
from itertools import product
from typing import Any, Dict, Generator, ItemsView, List, Tuple
import numpy as np
import torch
def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:
    """Convert a box from XYXY to XYWH format; the input is left untouched."""
    box_xywh = deepcopy(box_xyxy)
    box_xywh[2] -= box_xywh[0]  # width = x1 - x0
    box_xywh[3] -= box_xywh[1]  # height = y1 - y0
    return box_xywh
178,642 | import math
from copy import deepcopy
from itertools import product
from typing import Any, Dict, Generator, ItemsView, List, Tuple
import numpy as np
import torch
def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:
    """Yield same-index slices of size `batch_size` from each input sequence."""
    assert len(args) > 0 and all(
        len(a) == len(args[0]) for a in args
    ), "Batched iteration must have inputs of all the same size."
    total = len(args[0])
    for start in range(0, total, batch_size):
        yield [seq[start : start + batch_size] for seq in args]
178,643 | import math
from copy import deepcopy
from itertools import product
from typing import Any, Dict, Generator, ItemsView, List, Tuple
import numpy as np
import torch
The provided code snippet includes necessary dependencies for implementing the `mask_to_rle_pytorch` function. Write a Python function `def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]` to solve the following problem:
Encodes masks to an uncompressed RLE, in the format expected by pycoco tools.
Here is the function:
def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:
    """
    Encodes masks to an uncompressed RLE, in the format expected by
    pycoco tools.
    """
    b, h, w = tensor.shape
    # Fortran (column-major) order, then flatten h,w per mask.
    flat = tensor.permute(0, 2, 1).flatten(1)

    # Positions where the run value flips.
    transitions = flat[:, 1:] ^ flat[:, :-1]
    change_indices = transitions.nonzero()

    encodings = []
    for i in range(b):
        idxs = change_indices[change_indices[:, 0] == i, 1]
        boundaries = torch.cat(
            [
                torch.tensor([0], dtype=idxs.dtype, device=idxs.device),
                idxs + 1,
                torch.tensor([h * w], dtype=idxs.dtype, device=idxs.device),
            ]
        )
        run_lengths = boundaries[1:] - boundaries[:-1]
        # RLE starts with a background run; prepend 0 if pixel 0 is foreground.
        counts = [] if flat[i, 0] == 0 else [0]
        counts.extend(run_lengths.detach().cpu().tolist())
        encodings.append({"size": [h, w], "counts": counts})
    return encodings
178,644 | import math
from copy import deepcopy
from itertools import product
from typing import Any, Dict, Generator, ItemsView, List, Tuple
import numpy as np
import torch
The provided code snippet includes necessary dependencies for implementing the `rle_to_mask` function. Write a Python function `def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray` to solve the following problem:
Compute a binary mask from an uncompressed RLE.
Here is the function:
def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:
    """Compute a binary mask from an uncompressed RLE."""
    h, w = rle["size"]
    flat = np.empty(h * w, dtype=bool)
    pos = 0
    value = False  # RLE always starts with a background run
    for run in rle["counts"]:
        flat[pos : pos + run] = value
        pos += run
        value = not value
    # Data is in Fortran order; reshape and transpose back to C order.
    return flat.reshape(w, h).transpose()
178,645 | import math
from copy import deepcopy
from itertools import product
from typing import Any, Dict, Generator, ItemsView, List, Tuple
import numpy as np
import torch
def area_from_rle(rle: Dict[str, Any]) -> int:
    """Return the foreground area: the sum of the odd-indexed (foreground) runs."""
    foreground_runs = rle["counts"][1::2]
    return sum(foreground_runs)
178,646 | import math
from copy import deepcopy
from itertools import product
from typing import Any, Dict, Generator, ItemsView, List, Tuple
import numpy as np
import torch
The provided code snippet includes necessary dependencies for implementing the `calculate_stability_score` function. Write a Python function `def calculate_stability_score( masks: torch.Tensor, mask_threshold: float, threshold_offset: float ) -> torch.Tensor` to solve the following problem:
Computes the stability score for a batch of masks. The stability score is the IoU between the binary masks obtained by thresholding the predicted mask logits at high and low values.
Here is the function:
def calculate_stability_score(
    masks: torch.Tensor, mask_threshold: float, threshold_offset: float
) -> torch.Tensor:
    """
    Computes the stability score for a batch of masks. The stability
    score is the IoU between the binary masks obtained by thresholding
    the predicted mask logits at high and low values.
    """
    # The high-threshold mask is always contained in the low-threshold one,
    # so the pixel counts directly give intersection and union.
    high_mask = masks > (mask_threshold + threshold_offset)
    low_mask = masks > (mask_threshold - threshold_offset)
    # int16/int32 sums avoid an unnecessary cast to int64 (saves memory).
    intersections = high_mask.sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32)
    unions = low_mask.sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32)
    return intersections / unions
178,647 | import math
from copy import deepcopy
from itertools import product
from typing import Any, Dict, Generator, ItemsView, List, Tuple
import numpy as np
import torch
def build_point_grid(n_per_side: int) -> np.ndarray:
    """Generates a 2D grid of points evenly spaced in [0,1]x[0,1]."""
    # Center the n points inside [0,1]: first at offset, last at 1-offset.
    offset = 1 / (2 * n_per_side)
    coords = np.linspace(offset, 1 - offset, n_per_side)
    xs, ys = np.meshgrid(coords, coords)
    return np.stack([xs, ys], axis=-1).reshape(-1, 2)
The provided code snippet includes necessary dependencies for implementing the `build_all_layer_point_grids` function. Write a Python function `def build_all_layer_point_grids( n_per_side: int, n_layers: int, scale_per_layer: int ) -> List[np.ndarray]` to solve the following problem:
Generates point grids for all crop layers.
Here is the function:
def build_all_layer_point_grids(
    n_per_side: int, n_layers: int, scale_per_layer: int
) -> List[np.ndarray]:
    """Generates point grids for all crop layers."""
    # Each deeper layer uses a grid shrunk by scale_per_layer.
    return [
        build_point_grid(int(n_per_side / (scale_per_layer**layer)))
        for layer in range(n_layers + 1)
    ]
178,648 | import math
from copy import deepcopy
from itertools import product
from typing import Any, Dict, Generator, ItemsView, List, Tuple
import numpy as np
import torch
The provided code snippet includes necessary dependencies for implementing the `generate_crop_boxes` function. Write a Python function `def generate_crop_boxes( im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float ) -> Tuple[List[List[int]], List[int]]` to solve the following problem:
Generates a list of crop boxes of different sizes. Each layer has (2**i)**2 boxes for the ith layer.
Here is the function:
def generate_crop_boxes(
    im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float
) -> Tuple[List[List[int]], List[int]]:
    """
    Generates a list of crop boxes of different sizes. Each layer
    has (2**i)**2 boxes for the ith layer.
    """
    im_h, im_w = im_size
    short_side = min(im_h, im_w)

    # Layer 0 is always the whole image.
    crop_boxes: List[List[int]] = [[0, 0, im_w, im_h]]
    layer_idxs: List[int] = [0]

    def _crop_len(orig_len: int, n_crops: int, overlap: int) -> int:
        # Crop length so n_crops crops with the given overlap cover orig_len.
        return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))

    for i_layer in range(n_layers):
        n_per_side = 2 ** (i_layer + 1)
        overlap = int(overlap_ratio * short_side * (2 / n_per_side))

        crop_w = _crop_len(im_w, n_per_side, overlap)
        crop_h = _crop_len(im_h, n_per_side, overlap)
        xs = [int((crop_w - overlap) * i) for i in range(n_per_side)]
        ys = [int((crop_h - overlap) * i) for i in range(n_per_side)]

        # Boxes in XYXY format, clipped to the image bounds.
        for x0, y0 in product(xs, ys):
            crop_boxes.append(
                [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]
            )
            layer_idxs.append(i_layer + 1)

    return crop_boxes, layer_idxs
178,649 | import math
from copy import deepcopy
from itertools import product
from typing import Any, Dict, Generator, ItemsView, List, Tuple
import numpy as np
import torch
def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:
    """Translate points from crop-local back to full-image coordinates."""
    x0, y0 = crop_box[0], crop_box[1]
    shift = torch.tensor([[x0, y0]], device=points.device)
    # Broadcast over a channel dimension if present.
    if points.ndim == 3:
        shift = shift.unsqueeze(1)
    return points + shift
178,650 | import math
from copy import deepcopy
from itertools import product
from typing import Any, Dict, Generator, ItemsView, List, Tuple
import numpy as np
import torch
def uncrop_masks(
    masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int
) -> torch.Tensor:
    """Zero-pad crop-local masks back out to the full image size."""
    x0, y0, x1, y1 = crop_box
    # Whole-image crop: nothing to do.
    if (x0, y0, x1, y1) == (0, 0, orig_w, orig_h):
        return masks
    # F.pad order for the last two dims: (left, right, top, bottom).
    pad = (x0, orig_w - x1, y0, orig_h - y1)
    return torch.nn.functional.pad(masks, pad, value=0)
178,651 | import math
from copy import deepcopy
from itertools import product
from typing import Any, Dict, Generator, ItemsView, List, Tuple
import numpy as np
import torch
The provided code snippet includes necessary dependencies for implementing the `remove_small_regions` function. Write a Python function `def remove_small_regions( mask: np.ndarray, area_thresh: float, mode: str ) -> Tuple[np.ndarray, bool]` to solve the following problem:
Removes small disconnected regions and holes in a mask. Returns the mask and an indicator of if the mask has been modified.
Here is the function:
def remove_small_regions(
    mask: np.ndarray, area_thresh: float, mode: str
) -> Tuple[np.ndarray, bool]:
    """
    Removes small disconnected regions and holes in a mask. Returns the
    mask and an indicator of if the mask has been modified.
    """
    import cv2  # type: ignore

    assert mode in ["holes", "islands"]
    correct_holes = mode == "holes"
    # In "holes" mode invert the mask so holes become foreground components.
    working_mask = (correct_holes ^ mask).astype(np.uint8)
    n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)
    component_areas = stats[1:, -1]  # row 0 is the background label
    small_regions = [
        i + 1 for i, area in enumerate(component_areas) if area < area_thresh
    ]
    if not small_regions:
        return mask, False

    fill_labels = [0] + small_regions
    if not correct_holes:
        # Keep everything that is NOT small; if every region is below the
        # threshold, keep the largest one.
        fill_labels = [i for i in range(n_labels) if i not in fill_labels]
        if not fill_labels:
            fill_labels = [int(np.argmax(component_areas)) + 1]
    return np.isin(regions, fill_labels), True
178,652 | import math
from copy import deepcopy
from itertools import product
from typing import Any, Dict, Generator, ItemsView, List, Tuple
import numpy as np
import torch
def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:
    """Convert an uncompressed RLE into the compressed COCO RLE format."""
    from pycocotools import mask as mask_utils  # type: ignore

    h, w = uncompressed_rle["size"]
    rle = mask_utils.frPyObjects(uncompressed_rle, h, w)
    # frPyObjects returns bytes counts; decode so the dict is JSON-serializable.
    rle["counts"] = rle["counts"].decode("utf-8")  # Necessary to serialize with json
    return rle
178,653 | import math
from copy import deepcopy
from itertools import product
from typing import Any, Dict, Generator, ItemsView, List, Tuple
import numpy as np
import torch
The provided code snippet includes necessary dependencies for implementing the `batched_mask_to_box` function. Write a Python function `def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor` to solve the following problem:
Calculates boxes in XYXY format around masks. Return [0,0,0,0] for an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.
Here is the function:
def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:
    """
    Calculates boxes in XYXY format around masks. Return [0,0,0,0] for
    an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.
    """
    # torch.max raises on empty inputs; short-circuit that case.
    if masks.numel() == 0:
        return torch.zeros(*masks.shape[:-2], 4, device=masks.device)

    # Normalize to CxHxW.
    orig_shape = masks.shape
    h, w = orig_shape[-2:]
    flat = masks.flatten(0, -3) if len(orig_shape) > 2 else masks.unsqueeze(0)

    # Top / bottom edges from per-row occupancy.
    row_any = torch.max(flat, dim=-1).values
    row_coords = row_any * torch.arange(h, device=row_any.device)[None, :]
    bottom_edges = torch.max(row_coords, dim=-1).values
    row_coords = row_coords + h * (~row_any)  # push empty rows past the max
    top_edges = torch.min(row_coords, dim=-1).values

    # Left / right edges from per-column occupancy.
    col_any = torch.max(flat, dim=-2).values
    col_coords = col_any * torch.arange(w, device=col_any.device)[None, :]
    right_edges = torch.max(col_coords, dim=-1).values
    col_coords = col_coords + w * (~col_any)
    left_edges = torch.min(col_coords, dim=-1).values

    # An empty mask yields inverted edges; zero out those boxes.
    empty = (right_edges < left_edges) | (bottom_edges < top_edges)
    boxes = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)
    boxes = boxes * (~empty).unsqueeze(-1)

    # Restore the original leading dimensions.
    if len(orig_shape) > 2:
        return boxes.reshape(*orig_shape[:-2], 4)
    return boxes[0]
178,654 | import datetime
import logging
import logging.handlers
import os
import sys
import requests
from llava.constants import LOGDIR
handler = None
class StreamToLogger(object):
def __init__(self, logger, log_level=logging.INFO):
def __getattr__(self, attr):
def write(self, buf):
def flush(self):
LOGDIR = "."
def build_logger(logger_name, logger_filename):
global handler
formatter = logging.Formatter(
fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
# Set the format of root handlers
if not logging.getLogger().handlers:
logging.basicConfig(level=logging.INFO)
logging.getLogger().handlers[0].setFormatter(formatter)
# Redirect stdout and stderr to loggers
stdout_logger = logging.getLogger("stdout")
stdout_logger.setLevel(logging.INFO)
sl = StreamToLogger(stdout_logger, logging.INFO)
sys.stdout = sl
stderr_logger = logging.getLogger("stderr")
stderr_logger.setLevel(logging.ERROR)
sl = StreamToLogger(stderr_logger, logging.ERROR)
sys.stderr = sl
# Get logger
logger = logging.getLogger(logger_name)
logger.setLevel(logging.INFO)
# Add a file handler for all loggers
if handler is None:
os.makedirs(LOGDIR, exist_ok=True)
filename = os.path.join(LOGDIR, logger_filename)
handler = logging.handlers.TimedRotatingFileHandler(
filename, when="D", utc=True
)
handler.setFormatter(formatter)
for name, item in logging.root.manager.loggerDict.items():
if isinstance(item, logging.Logger):
item.addHandler(handler)
return logger | null |
178,655 | import datetime
import logging
import logging.handlers
import os
import sys
import requests
from llava.constants import LOGDIR
The provided code snippet includes necessary dependencies for implementing the `disable_torch_init` function. Write a Python function `def disable_torch_init()` to solve the following problem:
Disable the redundant torch default initialization to accelerate model creation.
Here is the function:
def disable_torch_init():
"""
Disable the redundant torch default initialization to accelerate model creation.
"""
import torch
setattr(torch.nn.Linear, "reset_parameters", lambda self: None)
setattr(torch.nn.LayerNorm, "reset_parameters", lambda self: None) | Disable the redundant torch default initialization to accelerate model creation. |
178,656 | import datetime
import logging
import logging.handlers
import os
import sys
import requests
from llava.constants import LOGDIR
The provided code snippet includes necessary dependencies for implementing the `violates_moderation` function. Write a Python function `def violates_moderation(text)` to solve the following problem:
Check whether the text violates OpenAI moderation API.
Here is the function:
def violates_moderation(text):
"""
Check whether the text violates OpenAI moderation API.
"""
url = "https://api.openai.com/v1/moderations"
headers = {
"Content-Type": "application/json",
"Authorization": "Bearer " + os.environ["OPENAI_API_KEY"],
}
text = text.replace("\n", "")
data = "{" + '"input": ' + f'"{text}"' + "}"
data = data.encode("utf-8")
try:
ret = requests.post(url, headers=headers, data=data, timeout=5)
flagged = ret.json()["results"][0]["flagged"]
except requests.exceptions.RequestException as e:
flagged = False
except KeyError as e:
flagged = False
return flagged | Check whether the text violates OpenAI moderation API. |
178,658 | import copy
import json
import logging
import os
import pathlib
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Sequence
import torch
import transformers
from llava import conversation as conversation_lib
from llava.constants import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN,
DEFAULT_IMAGE_TOKEN, IGNORE_INDEX,
IMAGE_TOKEN_INDEX)
from llava.mm_utils import tokenizer_image_token
from llava.model import *
from llava.train.llava_trainer import LLaVATrainer
from PIL import Image
from torch.utils.data import Dataset
class DataArguments:
data_path: str = field(
default=None, metadata={"help": "Path to the training data."}
)
lazy_preprocess: bool = False
is_multimodal: bool = False
image_folder: Optional[str] = field(default=None)
image_aspect_ratio: str = "square"
image_grid_pinpoints: Optional[str] = field(default=None)
DEFAULT_IMAGE_TOKEN = "<image>"
DEFAULT_IM_START_TOKEN = "<im_start>"
DEFAULT_IM_END_TOKEN = "<im_end>"
def preprocess_multimodal(sources: Sequence[str], data_args: DataArguments) -> Dict:
is_multimodal = data_args.is_multimodal
if not is_multimodal:
return sources
for source in sources:
for sentence in source:
if DEFAULT_IMAGE_TOKEN in sentence["value"]:
sentence["value"] = (
sentence["value"].replace(DEFAULT_IMAGE_TOKEN, "").strip()
)
sentence["value"] = DEFAULT_IMAGE_TOKEN + "\n" + sentence["value"]
sentence["value"] = sentence["value"].strip()
if "mmtag" in conversation_lib.default_conversation.version:
sentence["value"] = sentence["value"].replace(
DEFAULT_IMAGE_TOKEN,
"<Image>" + DEFAULT_IMAGE_TOKEN + "</Image>",
)
replace_token = DEFAULT_IMAGE_TOKEN
if data_args.mm_use_im_start_end:
replace_token = (
DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN
)
sentence["value"] = sentence["value"].replace(
DEFAULT_IMAGE_TOKEN, replace_token
)
return sources | null |
178,659 | import copy
import json
import logging
import os
import pathlib
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Sequence
import torch
import transformers
from llava import conversation as conversation_lib
from llava.constants import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN,
DEFAULT_IMAGE_TOKEN, IGNORE_INDEX,
IMAGE_TOKEN_INDEX)
from llava.mm_utils import tokenizer_image_token
from llava.model import *
from llava.train.llava_trainer import LLaVATrainer
from PIL import Image
from torch.utils.data import Dataset
def _tokenize_fn(
strings: Sequence[str], tokenizer: transformers.PreTrainedTokenizer
) -> Dict:
"""Tokenize a list of strings."""
tokenized_list = [
tokenizer(
text,
return_tensors="pt",
padding="longest",
max_length=tokenizer.model_max_length,
truncation=True,
)
for text in strings
]
input_ids = labels = [tokenized.input_ids[0] for tokenized in tokenized_list]
input_ids_lens = labels_lens = [
tokenized.input_ids.ne(tokenizer.pad_token_id).sum().item()
for tokenized in tokenized_list
]
return dict(
input_ids=input_ids,
labels=labels,
input_ids_lens=input_ids_lens,
labels_lens=labels_lens,
)
def _mask_targets(target, tokenized_lens, speakers):
# cur_idx = 0
cur_idx = tokenized_lens[0]
tokenized_lens = tokenized_lens[1:]
target[:cur_idx] = IGNORE_INDEX
for tokenized_len, speaker in zip(tokenized_lens, speakers):
if speaker == "human":
target[cur_idx + 2 : cur_idx + tokenized_len] = IGNORE_INDEX
cur_idx += tokenized_len
def _add_speaker_and_signal(header, source, get_conversation=True):
"""Add speaker and start/end signal on each round."""
BEGIN_SIGNAL = "### "
END_SIGNAL = "\n"
conversation = header
for sentence in source:
from_str = sentence["from"]
if from_str.lower() == "human":
from_str = conversation_lib.default_conversation.roles[0]
elif from_str.lower() == "gpt":
from_str = conversation_lib.default_conversation.roles[1]
else:
from_str = "unknown"
sentence["value"] = (
BEGIN_SIGNAL + from_str + ": " + sentence["value"] + END_SIGNAL
)
if get_conversation:
conversation += sentence["value"]
conversation += BEGIN_SIGNAL
return conversation
def preprocess_llama_2(
sources, tokenizer: transformers.PreTrainedTokenizer, has_image: bool = False
) -> Dict:
conv = conversation_lib.default_conversation.copy()
roles = {"human": conv.roles[0], "gpt": conv.roles[1]}
# Apply prompt templates
conversations = []
for i, source in enumerate(sources):
if roles[source[0]["from"]] != conv.roles[0]:
# Skip the first one if it is not from human
source = source[1:]
conv.messages = []
for j, sentence in enumerate(source):
role = roles[sentence["from"]]
assert role == conv.roles[j % 2], f"{i}"
conv.append_message(role, sentence["value"])
conversations.append(conv.get_prompt())
# Tokenize conversations
if has_image:
input_ids = torch.stack(
[
tokenizer_image_token(prompt, tokenizer, return_tensors="pt")
for prompt in conversations
],
dim=0,
)
else:
input_ids = tokenizer(
conversations,
return_tensors="pt",
padding="longest",
max_length=tokenizer.model_max_length,
truncation=True,
).input_ids
targets = input_ids.clone()
assert conv.sep_style == conversation_lib.SeparatorStyle.LLAMA_2
# Mask targets
sep = "[/INST] "
for conversation, target in zip(conversations, targets):
total_len = int(target.ne(tokenizer.pad_token_id).sum())
rounds = conversation.split(conv.sep2)
cur_len = 1
target[:cur_len] = IGNORE_INDEX
for i, rou in enumerate(rounds):
if rou == "":
break
parts = rou.split(sep)
if len(parts) != 2:
break
parts[0] += sep
if has_image:
round_len = len(tokenizer_image_token(rou, tokenizer))
instruction_len = len(tokenizer_image_token(parts[0], tokenizer)) - 2
else:
round_len = len(tokenizer(rou).input_ids)
instruction_len = len(tokenizer(parts[0]).input_ids) - 2
target[cur_len : cur_len + instruction_len] = IGNORE_INDEX
cur_len += round_len
target[cur_len:] = IGNORE_INDEX
if cur_len < tokenizer.model_max_length:
if cur_len != total_len:
target[:] = IGNORE_INDEX
print(
f"WARNING: tokenization mismatch: {cur_len} vs. {total_len}."
f" (ignored)"
)
return dict(
input_ids=input_ids,
labels=targets,
)
def preprocess_v1(
sources, tokenizer: transformers.PreTrainedTokenizer, has_image: bool = False
) -> Dict:
conv = conversation_lib.default_conversation.copy()
roles = {"human": conv.roles[0], "gpt": conv.roles[1]}
# Apply prompt templates
conversations = []
for i, source in enumerate(sources):
if roles[source[0]["from"]] != conv.roles[0]:
# Skip the first one if it is not from human
source = source[1:]
conv.messages = []
for j, sentence in enumerate(source):
role = roles[sentence["from"]]
assert role == conv.roles[j % 2], f"{i}"
conv.append_message(role, sentence["value"])
conversations.append(conv.get_prompt())
# Tokenize conversations
if has_image:
input_ids = torch.stack(
[
tokenizer_image_token(prompt, tokenizer, return_tensors="pt")
for prompt in conversations
],
dim=0,
)
else:
input_ids = tokenizer(
conversations,
return_tensors="pt",
padding="longest",
max_length=tokenizer.model_max_length,
truncation=True,
).input_ids
targets = input_ids.clone()
assert conv.sep_style == conversation_lib.SeparatorStyle.TWO
# Mask targets
sep = conv.sep + conv.roles[1] + ": "
for conversation, target in zip(conversations, targets):
total_len = int(target.ne(tokenizer.pad_token_id).sum())
rounds = conversation.split(conv.sep2)
cur_len = 1
target[:cur_len] = IGNORE_INDEX
for i, rou in enumerate(rounds):
if rou == "":
break
parts = rou.split(sep)
if len(parts) != 2:
break
parts[0] += sep
if has_image:
round_len = len(tokenizer_image_token(rou, tokenizer))
instruction_len = len(tokenizer_image_token(parts[0], tokenizer)) - 2
else:
round_len = len(tokenizer(rou).input_ids)
instruction_len = len(tokenizer(parts[0]).input_ids) - 2
target[cur_len : cur_len + instruction_len] = IGNORE_INDEX
cur_len += round_len
target[cur_len:] = IGNORE_INDEX
if cur_len < tokenizer.model_max_length:
if cur_len != total_len:
target[:] = IGNORE_INDEX
print(
f"WARNING: tokenization mismatch: {cur_len} vs. {total_len}."
f" (ignored)"
)
return dict(
input_ids=input_ids,
labels=targets,
)
def preprocess_mpt(
sources,
tokenizer: transformers.PreTrainedTokenizer,
) -> Dict:
conv = conversation_lib.default_conversation.copy()
roles = {"human": conv.roles[0], "gpt": conv.roles[1]}
# Apply prompt templates
conversations = []
for i, source in enumerate(sources):
if roles[source[0]["from"]] != conv.roles[0]:
# Skip the first one if it is not from human
source = source[1:]
conv.messages = []
for j, sentence in enumerate(source):
role = roles[sentence["from"]]
assert role == conv.roles[j % 2], f"{i}"
conv.append_message(role, sentence["value"])
conversations.append(conv.get_prompt())
# Tokenize conversations
input_ids = torch.stack(
[
tokenizer_image_token(prompt, tokenizer, return_tensors="pt")
for prompt in conversations
],
dim=0,
)
targets = input_ids.clone()
assert conv.sep_style == conversation_lib.SeparatorStyle.MPT
# Mask targets
sep = conv.sep + conv.roles[1]
for conversation, target in zip(conversations, targets):
total_len = int(target.ne(tokenizer.pad_token_id).sum())
rounds = conversation.split(conv.sep)
re_rounds = [conv.sep.join(rounds[:3])] # system + user + gpt
for conv_idx in range(3, len(rounds), 2):
re_rounds.append(
conv.sep.join(rounds[conv_idx : conv_idx + 2])
) # user + gpt
cur_len = 0
target[:cur_len] = IGNORE_INDEX
for i, rou in enumerate(re_rounds):
if rou == "":
break
parts = rou.split(sep)
if len(parts) != 2:
break
parts[0] += sep
round_len = len(tokenizer_image_token(rou, tokenizer)) + len(
tokenizer_image_token(conv.sep, tokenizer)
)
instruction_len = len(tokenizer_image_token(parts[0], tokenizer))
target[cur_len : cur_len + instruction_len] = IGNORE_INDEX
cur_len += round_len
target[cur_len:] = IGNORE_INDEX
if cur_len < tokenizer.model_max_length:
if cur_len != total_len:
target[:] = IGNORE_INDEX
print(
f"WARNING: tokenization mismatch: {cur_len} vs. {total_len}."
f" (ignored)"
)
return dict(
input_ids=input_ids,
labels=targets,
)
def preprocess_plain(
sources: Sequence[str],
tokenizer: transformers.PreTrainedTokenizer,
) -> Dict:
# add end signal and concatenate together
conversations = []
for source in sources:
assert len(source) == 2
assert DEFAULT_IMAGE_TOKEN in source[0]["value"]
source[0]["value"] = DEFAULT_IMAGE_TOKEN
conversation = (
source[0]["value"]
+ source[1]["value"]
+ conversation_lib.default_conversation.sep
)
conversations.append(conversation)
# tokenize conversations
input_ids = [
tokenizer_image_token(prompt, tokenizer, return_tensors="pt")
for prompt in conversations
]
targets = copy.deepcopy(input_ids)
for target, source in zip(targets, sources):
tokenized_len = len(tokenizer_image_token(source[0]["value"], tokenizer))
target[:tokenized_len] = IGNORE_INDEX
return dict(input_ids=input_ids, labels=targets)
def tokenizer_image_token(
prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None
):
prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split("<image>")]
def insert_separator(X, sep):
return [ele for sublist in zip(X, [sep] * len(X)) for ele in sublist][:-1]
input_ids = []
offset = 0
if (
len(prompt_chunks) > 0
and len(prompt_chunks[0]) > 0
and prompt_chunks[0][0] == tokenizer.bos_token_id
):
offset = 1
input_ids.append(prompt_chunks[0][0])
for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):
input_ids.extend(x[offset:])
if return_tensors is not None:
if return_tensors == "pt":
return torch.tensor(input_ids, dtype=torch.long)
raise ValueError(f"Unsupported tensor type: {return_tensors}")
return input_ids
The provided code snippet includes necessary dependencies for implementing the `preprocess` function. Write a Python function `def preprocess( sources: Sequence[str], tokenizer: transformers.PreTrainedTokenizer, has_image: bool = False, ) -> Dict` to solve the following problem:
Given a list of sources, each is a conversation list. This transform: 1. Add signal '### ' at the beginning each sentence, with end signal '\n'; 2. Concatenate conversations together; 3. Tokenize the concatenated conversation; 4. Make a deepcopy as the target. Mask human words with IGNORE_INDEX.
Here is the function:
def preprocess(
sources: Sequence[str],
tokenizer: transformers.PreTrainedTokenizer,
has_image: bool = False,
) -> Dict:
"""
Given a list of sources, each is a conversation list. This transform:
1. Add signal '### ' at the beginning each sentence, with end signal '\n';
2. Concatenate conversations together;
3. Tokenize the concatenated conversation;
4. Make a deepcopy as the target. Mask human words with IGNORE_INDEX.
"""
if (
conversation_lib.default_conversation.sep_style
== conversation_lib.SeparatorStyle.PLAIN
):
return preprocess_plain(sources, tokenizer)
if (
conversation_lib.default_conversation.sep_style
== conversation_lib.SeparatorStyle.LLAMA_2
):
return preprocess_llama_2(sources, tokenizer, has_image=has_image)
if conversation_lib.default_conversation.version.startswith("v1"):
return preprocess_v1(sources, tokenizer, has_image=has_image)
if conversation_lib.default_conversation.version == "mpt":
return preprocess_mpt(sources, tokenizer)
# add end signal and concatenate together
conversations = []
for source in sources:
header = f"{conversation_lib.default_conversation.system}\n\n"
conversation = _add_speaker_and_signal(header, source)
conversations.append(conversation)
# tokenize conversations
def get_tokenize_len(prompts):
return [len(tokenizer_image_token(prompt, tokenizer)) for prompt in prompts]
if has_image:
input_ids = [
tokenizer_image_token(prompt, tokenizer, return_tensors="pt")
for prompt in conversations
]
else:
conversations_tokenized = _tokenize_fn(conversations, tokenizer)
input_ids = conversations_tokenized["input_ids"]
targets = copy.deepcopy(input_ids)
for target, source in zip(targets, sources):
if has_image:
tokenized_lens = get_tokenize_len([header] + [s["value"] for s in source])
else:
tokenized_lens = _tokenize_fn(
[header] + [s["value"] for s in source], tokenizer
)["input_ids_lens"]
speakers = [sentence["from"] for sentence in source]
_mask_targets(target, tokenized_lens, speakers)
return dict(input_ids=input_ids, labels=targets) | Given a list of sources, each is a conversation list. This transform: 1. Add signal '### ' at the beginning each sentence, with end signal '\n'; 2. Concatenate conversations together; 3. Tokenize the concatenated conversation; 4. Make a deepcopy as the target. Mask human words with IGNORE_INDEX. |
178,660 | import copy
import json
import logging
import os
import pathlib
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Sequence
import torch
import transformers
from llava import conversation as conversation_lib
from llava.constants import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN,
DEFAULT_IMAGE_TOKEN, IGNORE_INDEX,
IMAGE_TOKEN_INDEX)
from llava.mm_utils import tokenizer_image_token
from llava.model import *
from llava.train.llava_trainer import LLaVATrainer
from PIL import Image
from torch.utils.data import Dataset
local_rank = None
def rank0_print(*args):
class ModelArguments:
class DataArguments:
class TrainingArguments(transformers.TrainingArguments):
def get_peft_state_maybe_zero_3(named_params, bias):
def get_peft_state_non_lora_maybe_zero_3(named_params, require_grad_only=True):
def find_all_linear_names(model):
def safe_save_model_for_hf_trainer(trainer: transformers.Trainer, output_dir: str):
def smart_tokenizer_and_embedding_resize(
special_tokens_dict: Dict,
tokenizer: transformers.PreTrainedTokenizer,
model: transformers.PreTrainedModel,
):
def make_supervised_data_module(
tokenizer: transformers.PreTrainedTokenizer, data_args
) -> Dict:
class LLaVATrainer(Trainer):
def _save_checkpoint(self, model, trial, metrics=None):
def _save(self, output_dir: Optional[str] = None, state_dict=None):
def train():
global local_rank
parser = transformers.HfArgumentParser(
(ModelArguments, DataArguments, TrainingArguments)
)
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
local_rank = training_args.local_rank
compute_dtype = (
torch.float16
if training_args.fp16
else (torch.bfloat16 if training_args.bf16 else torch.float32)
)
bnb_model_from_pretrained_args = {}
if training_args.bits in [4, 8]:
from transformers import BitsAndBytesConfig
bnb_model_from_pretrained_args.update(
dict(
device_map={"": training_args.device},
load_in_4bit=training_args.bits == 4,
load_in_8bit=training_args.bits == 8,
quantization_config=BitsAndBytesConfig(
load_in_4bit=training_args.bits == 4,
load_in_8bit=training_args.bits == 8,
llm_int8_threshold=6.0,
llm_int8_has_fp16_weight=False,
bnb_4bit_compute_dtype=compute_dtype,
bnb_4bit_use_double_quant=training_args.double_quant,
bnb_4bit_quant_type=training_args.quant_type, # {'fp4', 'nf4'}
),
)
)
if model_args.vision_tower is not None:
if "mpt" in model_args.model_name_or_path:
config = transformers.AutoConfig.from_pretrained(
model_args.model_name_or_path, trust_remote_code=True
)
config.attn_config["attn_impl"] = training_args.mpt_attn_impl
model = LlavaMPTForCausalLM.from_pretrained(
model_args.model_name_or_path,
config=config,
cache_dir=training_args.cache_dir,
**bnb_model_from_pretrained_args,
)
else:
model = LlavaLlamaForCausalLM.from_pretrained(
model_args.model_name_or_path,
cache_dir=training_args.cache_dir,
**bnb_model_from_pretrained_args,
)
else:
model = transformers.LlamaForCausalLM.from_pretrained(
model_args.model_name_or_path,
cache_dir=training_args.cache_dir,
**bnb_model_from_pretrained_args,
)
model.config.use_cache = False
if model_args.freeze_backbone:
model.model.requires_grad_(False)
if training_args.bits in [4, 8]:
from peft import prepare_model_for_kbit_training
model.config.torch_dtype = (
torch.float32
if training_args.fp16
else (torch.bfloat16 if training_args.bf16 else torch.float32)
)
model = prepare_model_for_kbit_training(
model, use_gradient_checkpointing=training_args.gradient_checkpointing
)
if training_args.gradient_checkpointing:
if hasattr(model, "enable_input_require_grads"):
model.enable_input_require_grads()
else:
def make_inputs_require_grad(module, input, output):
output.requires_grad_(True)
model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
if training_args.lora_enable:
from peft import LoraConfig, get_peft_model
lora_config = LoraConfig(
r=training_args.lora_r,
lora_alpha=training_args.lora_alpha,
target_modules=find_all_linear_names(model),
lora_dropout=training_args.lora_dropout,
bias=training_args.lora_bias,
task_type="CAUSAL_LM",
)
if training_args.bits == 16:
if training_args.bf16:
model.to(torch.bfloat16)
if training_args.fp16:
model.to(torch.float16)
rank0_print("Adding LoRA adapters...")
model = get_peft_model(model, lora_config)
if "mpt" in model_args.model_name_or_path:
tokenizer = transformers.AutoTokenizer.from_pretrained(
model_args.model_name_or_path,
cache_dir=training_args.cache_dir,
model_max_length=training_args.model_max_length,
padding_side="right",
)
else:
tokenizer = transformers.AutoTokenizer.from_pretrained(
model_args.model_name_or_path,
cache_dir=training_args.cache_dir,
model_max_length=training_args.model_max_length,
padding_side="right",
use_fast=False,
)
if model_args.version == "v0":
if tokenizer.pad_token is None:
smart_tokenizer_and_embedding_resize(
special_tokens_dict=dict(pad_token="[PAD]"),
tokenizer=tokenizer,
model=model,
)
elif model_args.version == "v0.5":
tokenizer.pad_token = tokenizer.unk_token
else:
tokenizer.pad_token = tokenizer.unk_token
if model_args.version in conversation_lib.conv_templates:
conversation_lib.default_conversation = conversation_lib.conv_templates[
model_args.version
]
else:
conversation_lib.default_conversation = conversation_lib.conv_templates[
"vicuna_v1"
]
if model_args.vision_tower is not None:
model.get_model().initialize_vision_modules(
model_args=model_args, fsdp=training_args.fsdp
)
vision_tower = model.get_vision_tower()
vision_tower.to(dtype=torch.float16, device=training_args.device)
data_args.image_processor = vision_tower.image_processor
data_args.is_multimodal = True
model.config.image_aspect_ratio = data_args.image_aspect_ratio
model.config.image_grid_pinpoints = data_args.image_grid_pinpoints
model.config.tune_mm_mlp_adapter = (
training_args.tune_mm_mlp_adapter
) = model_args.tune_mm_mlp_adapter
if model_args.tune_mm_mlp_adapter:
model.requires_grad_(False)
for p in model.get_model().mm_projector.parameters():
p.requires_grad = True
model.config.freeze_mm_mlp_adapter = training_args.freeze_mm_mlp_adapter
if training_args.freeze_mm_mlp_adapter:
for p in model.get_model().mm_projector.parameters():
p.requires_grad = False
if training_args.bits in [4, 8]:
model.get_model().mm_projector.to(
dtype=compute_dtype, device=training_args.device
)
model.config.mm_use_im_start_end = (
data_args.mm_use_im_start_end
) = model_args.mm_use_im_start_end
training_args.use_im_start_end = model_args.mm_use_im_start_end
model.config.mm_use_im_patch_token = model_args.mm_use_im_patch_token
model.initialize_vision_tokenizer(model_args, tokenizer=tokenizer)
if training_args.bits in [4, 8]:
from peft.tuners.lora import LoraLayer
for name, module in model.named_modules():
if isinstance(module, LoraLayer):
if training_args.bf16:
module = module.to(torch.bfloat16)
if "norm" in name:
module = module.to(torch.float32)
if "lm_head" in name or "embed_tokens" in name:
if hasattr(module, "weight"):
if training_args.bf16 and module.weight.dtype == torch.float32:
module = module.to(torch.bfloat16)
data_module = make_supervised_data_module(tokenizer=tokenizer, data_args=data_args)
trainer = LLaVATrainer(
model=model, tokenizer=tokenizer, args=training_args, **data_module
)
if list(pathlib.Path(training_args.output_dir).glob("checkpoint-*")):
trainer.train(resume_from_checkpoint=True)
else:
trainer.train()
trainer.save_state()
model.config.use_cache = True
if training_args.lora_enable:
state_dict = get_peft_state_maybe_zero_3(
model.named_parameters(), training_args.lora_bias
)
non_lora_state_dict = get_peft_state_non_lora_maybe_zero_3(
model.named_parameters()
)
if training_args.local_rank == 0 or training_args.local_rank == -1:
model.config.save_pretrained(training_args.output_dir)
model.save_pretrained(training_args.output_dir, state_dict=state_dict)
torch.save(
non_lora_state_dict,
os.path.join(training_args.output_dir, "non_lora_trainables.bin"),
)
else:
safe_save_model_for_hf_trainer(
trainer=trainer, output_dir=training_args.output_dir
) | null |
178,661 | import logging
from typing import List, Optional, Tuple
import torch
import transformers
from einops import rearrange
from torch import nn
from transformers.models.llama.modeling_llama import apply_rotary_pos_emb
from flash_attn.bert_padding import pad_input, unpad_input
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
output_attentions: bool = False,
use_cache: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel
attention_mask: [bsz, q_len]
"""
bsz, q_len, _ = hidden_states.size()
query_states = (
self.q_proj(hidden_states)
.view(bsz, q_len, self.num_heads, self.head_dim)
.transpose(1, 2)
)
key_states = (
self.k_proj(hidden_states)
.view(bsz, q_len, self.num_heads, self.head_dim)
.transpose(1, 2)
)
value_states = (
self.v_proj(hidden_states)
.view(bsz, q_len, self.num_heads, self.head_dim)
.transpose(1, 2)
)
# [bsz, q_len, nh, hd]
# [bsz, nh, q_len, hd]
kv_seq_len = key_states.shape[-2]
assert past_key_value is None, "past_key_value is not supported"
cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
query_states, key_states = apply_rotary_pos_emb(
query_states, key_states, cos, sin, position_ids
)
# [bsz, nh, t, hd]
assert not output_attentions, "output_attentions is not supported"
assert not use_cache, "use_cache is not supported"
# Flash attention codes from
# https://github.com/HazyResearch/flash-attention/blob/main/flash_attn/flash_attention.py
# transform the data into the format required by flash attention
qkv = torch.stack(
[query_states, key_states, value_states], dim=2
) # [bsz, nh, 3, q_len, hd]
qkv = qkv.transpose(1, 3) # [bsz, q_len, 3, nh, hd]
# We have disabled _prepare_decoder_attention_mask in LlamaModel
# the attention_mask should be the same as the key_padding_mask
key_padding_mask = attention_mask
if key_padding_mask is None:
qkv = rearrange(qkv, "b s ... -> (b s) ...")
max_s = q_len
cu_q_lens = torch.arange(
0, (bsz + 1) * q_len, step=q_len, dtype=torch.int32, device=qkv.device
)
output = flash_attn_unpadded_qkvpacked_func(
qkv, cu_q_lens, max_s, 0.0, softmax_scale=None, causal=True
)
output = rearrange(output, "(b s) ... -> b s ...", b=bsz)
else:
nheads = qkv.shape[-2]
x = rearrange(qkv, "b s three h d -> b s (three h d)")
x_unpad, indices, cu_q_lens, max_s = unpad_input(x, key_padding_mask)
x_unpad = rearrange(
x_unpad, "nnz (three h d) -> nnz three h d", three=3, h=nheads
)
output_unpad = flash_attn_unpadded_qkvpacked_func(
x_unpad, cu_q_lens, max_s, 0.0, softmax_scale=None, causal=True
)
output = rearrange(
pad_input(
rearrange(output_unpad, "nnz h d -> nnz (h d)"), indices, bsz, q_len
),
"b s (h d) -> b s h d",
h=nheads,
)
return self.o_proj(rearrange(output, "b s h d -> b s (h d)")), None, None
def _prepare_decoder_attention_mask(
self, attention_mask, input_shape, inputs_embeds, past_key_values_length
):
# [bsz, seq_len]
return attention_mask
def replace_llama_attn_with_flash_attn():
cuda_major, cuda_minor = torch.cuda.get_device_capability()
if cuda_major < 8:
logging.warning(
"Flash attention is only supported on A100 or H100 GPU during training due to head dim > 64 backward."
"ref: https://github.com/HazyResearch/flash-attention/issues/190#issuecomment-1523359593"
)
transformers.models.llama.modeling_llama.LlamaModel._prepare_decoder_attention_mask = (
_prepare_decoder_attention_mask
)
transformers.models.llama.modeling_llama.LlamaAttention.forward = forward | null |
178,662 | import base64
from io import BytesIO
import torch
from PIL import Image
from transformers import StoppingCriteria
from .constants import IMAGE_TOKEN_INDEX
def load_image_from_base64(image):
return Image.open(BytesIO(base64.b64decode(image))) | null |
178,663 | import base64
from io import BytesIO
import torch
from PIL import Image
from transformers import StoppingCriteria
from .constants import IMAGE_TOKEN_INDEX
def process_images(images, image_processor, model_cfg):
return image_processor(images, return_tensors="pt")["pixel_values"] | null |
178,664 | import base64
from io import BytesIO
import torch
from PIL import Image
from transformers import StoppingCriteria
from .constants import IMAGE_TOKEN_INDEX
def get_model_name_from_path(model_path):
model_path = model_path.strip("/")
model_paths = model_path.split("/")
if model_paths[-1].startswith("checkpoint-"):
return model_paths[-2] + "_" + model_paths[-1]
else:
return model_paths[-1] | null |
178,665 | from .clip_encoder import CLIPVisionTower
class CLIPVisionTower(nn.Module):
    """Frozen CLIP vision encoder that exposes per-patch image features.

    The underlying ``CLIPVisionModel`` may be loaded lazily (``delay_load``),
    in which case only the config is materialized until ``load_model`` runs.

    Fix: ``dummy_feature``, ``dtype``, ``device``, ``config``, ``hidden_size``
    and ``num_patches`` were plain methods here, but the rest of the class
    uses them as attributes (``self.dtype``, ``self.config.image_size``),
    which would pass bound methods around. Restored ``@property``.
    """

    def __init__(self, vision_tower, args, delay_load=False):
        super().__init__()

        self.is_loaded = False
        self.vision_tower_name = vision_tower
        self.select_layer = args.mm_vision_select_layer
        self.select_feature = getattr(args, "mm_vision_select_feature", "patch")

        if not delay_load:
            self.load_model()
        else:
            # Keep only the config around; weights are fetched on demand.
            self.cfg_only = CLIPVisionConfig.from_pretrained(self.vision_tower_name)

    def load_model(self):
        """Load the image processor and vision weights, then freeze them."""
        self.image_processor = CLIPImageProcessor.from_pretrained(
            self.vision_tower_name
        )
        self.vision_tower = CLIPVisionModel.from_pretrained(
            self.vision_tower_name, low_cpu_mem_usage=True
        )
        # The tower is used as a fixed feature extractor.
        self.vision_tower.requires_grad_(False)

        self.is_loaded = True

    def feature_select(self, image_forward_outs):
        """Pick the hidden state at ``select_layer``; optionally drop CLS.

        ``image_forward_outs.hidden_states`` is indexed by ``select_layer``;
        "patch" drops the leading CLS token, "cls_patch" keeps everything.
        """
        image_features = image_forward_outs.hidden_states[self.select_layer]
        if self.select_feature == "patch":
            # Drop the leading CLS token, keep only patch tokens.
            image_features = image_features[:, 1:]
        elif self.select_feature == "cls_patch":
            image_features = image_features
        else:
            raise ValueError(f"Unexpected select feature: {self.select_feature}")
        return image_features

    def forward(self, images):
        """Encode one image batch, or a list of single images, into features."""
        if type(images) is list:
            image_features = []
            for image in images:
                image_forward_out = self.vision_tower(
                    image.to(device=self.device, dtype=self.dtype).unsqueeze(0),
                    output_hidden_states=True,
                )
                image_feature = self.feature_select(image_forward_out).to(image.dtype)
                image_features.append(image_feature)
        else:
            image_forward_outs = self.vision_tower(
                images.to(device=self.device, dtype=self.dtype),
                output_hidden_states=True,
            )
            image_features = self.feature_select(image_forward_outs).to(images.dtype)

        torch.cuda.empty_cache()
        return image_features

    @property
    def dummy_feature(self):
        # Zero feature used as a placeholder when no image is present.
        return torch.zeros(1, self.hidden_size, device=self.device, dtype=self.dtype)

    @property
    def dtype(self):
        return self.vision_tower.dtype

    @property
    def device(self):
        return self.vision_tower.device

    @property
    def config(self):
        # Before load_model() only the standalone config exists.
        if self.is_loaded:
            return self.vision_tower.config
        else:
            return self.cfg_only

    @property
    def hidden_size(self):
        return self.config.hidden_size

    @property
    def num_patches(self):
        return (self.config.image_size // self.config.patch_size) ** 2
def build_vision_tower(vision_tower_cfg, **kwargs):
    """Instantiate the vision backbone named in *vision_tower_cfg*.

    The tower name is read from ``mm_vision_tower`` (falling back to
    ``vision_tower``). Only CLIP-family checkpoints are supported; any
    other name raises ValueError.
    """
    fallback = getattr(vision_tower_cfg, "vision_tower", None)
    name = getattr(vision_tower_cfg, "mm_vision_tower", fallback)

    is_clip = name.startswith(("openai", "laion")) or "clip" in name
    if is_clip:
        return CLIPVisionTower(name, args=vision_tower_cfg, **kwargs)

    raise ValueError(f"Unknown vision tower: {name}")
178,666 | import os
import shutil
import torch
from llava.constants import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN,
DEFAULT_IMAGE_PATCH_TOKEN)
from llava.model import *
from transformers import (AutoConfig, AutoModelForCausalLM, AutoTokenizer,
BitsAndBytesConfig)
# Sentinel tokens spliced into prompts to mark where image embeddings go.
# NOTE(review): these shadow the same names imported from llava.constants
# above — presumably kept local for standalone use; confirm they stay in sync.
DEFAULT_IMAGE_PATCH_TOKEN = "<im_patch>"
DEFAULT_IM_START_TOKEN = "<im_start>"
DEFAULT_IM_END_TOKEN = "<im_end>"
def load_pretrained_model(
    model_path,
    model_base,
    model_name,
    load_8bit=False,
    load_4bit=False,
    device_map="auto",
):
    """Load a (possibly LoRA/quantized) LLaVA or plain causal LM checkpoint.

    Dispatches on ``model_name``: "llava" selects the multimodal classes,
    "lora" + ``model_base`` merges LoRA deltas into the base weights, and
    "mpt" switches tokenizer/model classes. Returns
    ``(tokenizer, model, image_processor, context_len)``; ``image_processor``
    is None for non-LLaVA models.

    NOTE(review): moves the vision tower to CUDA unconditionally — assumes a
    GPU is available when loading a LLaVA model.
    """
    kwargs = {"device_map": device_map}

    # Quantization setup: 8-bit and 4-bit are mutually exclusive; otherwise fp16.
    if load_8bit:
        kwargs["load_in_8bit"] = True
    elif load_4bit:
        kwargs["load_in_4bit"] = True
        kwargs["quantization_config"] = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_compute_dtype=torch.float16,
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type="nf4",
        )
    else:
        kwargs["torch_dtype"] = torch.float16

    if "llava" in model_name.lower():
        # Load LLaVA model
        if "lora" in model_name.lower() and model_base is not None:
            # LoRA checkpoint: start from the base weights under the LoRA config.
            lora_cfg_pretrained = AutoConfig.from_pretrained(model_path)
            tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
            print("Loading LLaVA from base model...")
            model = LlavaLlamaForCausalLM.from_pretrained(
                model_base, low_cpu_mem_usage=True, config=lora_cfg_pretrained, **kwargs
            )
            # If the LoRA run resized the vocabulary, re-allocate the embedding
            # and output head to the new size before loading weights into them.
            token_num, tokem_dim = model.lm_head.out_features, model.lm_head.in_features
            if model.lm_head.weight.shape[0] != token_num:
                model.lm_head.weight = torch.nn.Parameter(
                    torch.empty(
                        token_num, tokem_dim, device=model.device, dtype=model.dtype
                    )
                )
                model.model.embed_tokens.weight = torch.nn.Parameter(
                    torch.empty(
                        token_num, tokem_dim, device=model.device, dtype=model.dtype
                    )
                )

            # Non-LoRA trainables (e.g. projector, embeddings) are stored separately.
            print("Loading additional LLaVA weights...")
            if os.path.exists(os.path.join(model_path, "non_lora_trainables.bin")):
                non_lora_trainables = torch.load(
                    os.path.join(model_path, "non_lora_trainables.bin"),
                    map_location="cpu",
                )
            else:
                # this is probably from HF Hub
                from huggingface_hub import hf_hub_download

                def load_from_hf(repo_id, filename, subfolder=None):
                    # Download (or reuse the cached copy of) one checkpoint file.
                    cache_file = hf_hub_download(
                        repo_id=repo_id, filename=filename, subfolder=subfolder
                    )
                    return torch.load(cache_file, map_location="cpu")

                non_lora_trainables = load_from_hf(
                    model_path, "non_lora_trainables.bin"
                )
            # Strip the PEFT wrapper prefixes so keys match the bare model.
            non_lora_trainables = {
                (k[11:] if k.startswith("base_model.") else k): v
                for k, v in non_lora_trainables.items()
            }
            if any(k.startswith("model.model.") for k in non_lora_trainables):
                non_lora_trainables = {
                    (k[6:] if k.startswith("model.") else k): v
                    for k, v in non_lora_trainables.items()
                }
            model.load_state_dict(non_lora_trainables, strict=False)

            from peft import PeftModel

            print("Loading LoRA weights...")
            model = PeftModel.from_pretrained(model, model_path)
            print("Merging LoRA weights...")
            # Fold LoRA deltas into the base weights and drop the PEFT wrapper.
            model = model.merge_and_unload()
            print("Model is loaded...")
        elif model_base is not None:
            # this may be mm projector only
            print("Loading LLaVA from base model...")
            if "mpt" in model_name.lower():
                # MPT needs its remote-code config file alongside the checkpoint.
                if not os.path.isfile(os.path.join(model_path, "configuration_mpt.py")):
                    shutil.copyfile(
                        os.path.join(model_base, "configuration_mpt.py"),
                        os.path.join(model_path, "configuration_mpt.py"),
                    )
                tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=True)
                cfg_pretrained = AutoConfig.from_pretrained(
                    model_path, trust_remote_code=True
                )
                model = LlavaMPTForCausalLM.from_pretrained(
                    model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs
                )
            else:
                tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
                cfg_pretrained = AutoConfig.from_pretrained(model_path)
                model = LlavaLlamaForCausalLM.from_pretrained(
                    model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs
                )
            # Overlay the separately-trained multimodal projector weights.
            mm_projector_weights = torch.load(
                os.path.join(model_path, "mm_projector.bin"), map_location="cpu"
            )
            mm_projector_weights = {
                k: v.to(torch.float16) for k, v in mm_projector_weights.items()
            }
            model.load_state_dict(mm_projector_weights, strict=False)
        else:
            # Fully-merged LLaVA checkpoint: load directly.
            if "mpt" in model_name.lower():
                tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)
                model = LlavaMPTForCausalLM.from_pretrained(
                    model_path, low_cpu_mem_usage=True, **kwargs
                )
            else:
                tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
                model = LlavaLlamaForCausalLM.from_pretrained(
                    model_path, low_cpu_mem_usage=True, **kwargs
                )
    else:
        # Load language model
        if model_base is not None:
            # PEFT model
            from peft import PeftModel

            tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
            model = AutoModelForCausalLM.from_pretrained(
                model_base,
                torch_dtype=torch.float16,
                low_cpu_mem_usage=True,
                device_map="auto",
            )
            print(f"Loading LoRA weights from {model_path}")
            model = PeftModel.from_pretrained(model, model_path)
            print(f"Merging weights")
            model = model.merge_and_unload()
            print("Convert to FP16...")
            model.to(torch.float16)
        else:
            use_fast = False
            if "mpt" in model_name.lower():
                tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)
                model = AutoModelForCausalLM.from_pretrained(
                    model_path, low_cpu_mem_usage=True, trust_remote_code=True, **kwargs
                )
            else:
                tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
                model = AutoModelForCausalLM.from_pretrained(
                    model_path, low_cpu_mem_usage=True, **kwargs
                )

    image_processor = None

    if "llava" in model_name.lower():
        # Register the image sentinel tokens the checkpoint was trained with,
        # then resize embeddings to cover any newly added tokens.
        mm_use_im_start_end = getattr(model.config, "mm_use_im_start_end", False)
        mm_use_im_patch_token = getattr(model.config, "mm_use_im_patch_token", True)
        if mm_use_im_patch_token:
            tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
        if mm_use_im_start_end:
            tokenizer.add_tokens(
                [DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True
            )
        model.resize_token_embeddings(len(tokenizer))

        vision_tower = model.get_vision_tower()
        if not vision_tower.is_loaded:
            vision_tower.load_model()
        vision_tower.to(device="cuda", dtype=torch.float16)
        image_processor = vision_tower.image_processor

    if hasattr(model.config, "max_sequence_length"):
        context_len = model.config.max_sequence_length
    else:
        # Default context window when the config does not declare one.
        context_len = 2048

    return tokenizer, model, image_processor, context_len
178,667 | import argparse
import torch
from llava import LlavaLlamaForCausalLM
from tqdm import tqdm
from transformers import AutoModelForCausalLM, AutoTokenizer
def apply_delta(base_model_path, target_model_path, delta_path):
    """Reconstruct a released model by adding delta weights onto a base LM.

    Loads the base causal LM and the LLaVA delta checkpoint, adds the base
    parameters into the delta's parameters in place (handling the resized
    embedding/LM-head matrices and the projector weights that have no base
    counterpart), then saves the merged model and tokenizer to
    *target_model_path*.
    """
    print("Loading base model")
    base = AutoModelForCausalLM.from_pretrained(
        base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True
    )

    print("Loading delta")
    delta = LlavaLlamaForCausalLM.from_pretrained(
        delta_path, torch_dtype=torch.float16, low_cpu_mem_usage=True
    )
    delta_tokenizer = AutoTokenizer.from_pretrained(delta_path)

    print("Applying delta")
    # state_dict() rebuilds the whole mapping on every call; the original code
    # called it up to four times per parameter inside the loop. Fetch it once —
    # `base` is never mutated below, so the snapshot stays valid.
    base_state = base.state_dict()
    for name, param in tqdm(delta.state_dict().items(), desc="Applying delta"):
        if name not in base_state:
            # Only the multimodal projector is allowed to be delta-only.
            assert name in [
                "model.mm_projector.weight",
                "model.mm_projector.bias",
            ], f"{name} not in base model"
            continue
        if param.data.shape == base_state[name].shape:
            param.data += base_state[name]
        else:
            # Vocabulary was extended: add the base weights into the top-left
            # sub-matrix and keep the delta's extra rows as-is.
            assert name in [
                "model.embed_tokens.weight",
                "lm_head.weight",
            ], f"{name} dimension mismatch: {param.data.shape} vs {base_state[name].shape}"
            bparam = base_state[name]
            param.data[: bparam.shape[0], : bparam.shape[1]] += bparam

    print("Saving target model")
    delta.save_pretrained(target_model_path)
    delta_tokenizer.save_pretrained(target_model_path)
178,668 | import math
import warnings
from typing import Optional
import torch
import torch.nn as nn
from einops import rearrange
from packaging import version
from torch import nn
from .norm import LPLayerNorm
def scaled_multihead_dot_product_attention(
    query,
    key,
    value,
    n_heads,
    past_key_value=None,
    softmax_scale=None,
    attn_bias=None,
    key_padding_mask=None,
    is_causal=False,
    dropout_p=0.0,
    training=False,
    needs_weights=False,
    multiquery=False,
):
    """Pure-PyTorch multi-head attention with optional KV cache, additive
    bias, key-padding mask, causal masking and attention dropout.

    Inputs are (batch, seq, n_heads*head_dim). Returns
    ``(output, attn_weights_or_None, past_key_value)``. Keys are laid out as
    (b, h, d, s) so ``q.matmul(k)`` directly yields (b, h, s_q, s_k) scores.
    """
    q = rearrange(query, "b s (h d) -> b h s d", h=n_heads)
    # Multiquery attention shares a single key/value head across all heads.
    kv_n_heads = 1 if multiquery else n_heads
    k = rearrange(key, "b s (h d) -> b h d s", h=kv_n_heads)
    v = rearrange(value, "b s (h d) -> b h s d", h=kv_n_heads)
    if past_key_value is not None:
        # Append the cached keys/values; note k is (b, h, d, s), hence dim=3.
        if len(past_key_value) != 0:
            k = torch.cat([past_key_value[0], k], dim=3)
            v = torch.cat([past_key_value[1], v], dim=2)
        past_key_value = (k, v)
    (b, _, s_q, d) = q.shape
    s_k = k.size(-1)
    if softmax_scale is None:
        softmax_scale = 1 / math.sqrt(d)
    attn_weight = q.matmul(k) * softmax_scale
    if attn_bias is not None:
        # Crop the bias to the bottom-right window covering the current
        # query/key lengths (relevant during incremental decoding).
        _s_q = max(0, attn_bias.size(2) - s_q)
        _s_k = max(0, attn_bias.size(3) - s_k)
        attn_bias = attn_bias[:, :, _s_q:, _s_k:]
        if (
            attn_bias.size(-1) != 1
            and attn_bias.size(-1) != s_k
            or (attn_bias.size(-2) != 1 and attn_bias.size(-2) != s_q)
        ):
            raise RuntimeError(
                f"attn_bias (shape: {attn_bias.shape}) is expected to broadcast to shape: {attn_weight.shape}."
            )
        attn_weight = attn_weight + attn_bias
    min_val = torch.finfo(q.dtype).min
    if key_padding_mask is not None:
        if attn_bias is not None:
            warnings.warn(
                "Propogating key_padding_mask to the attention module "
                + "and applying it within the attention module can cause "
                + "unneccessary computation/memory usage. Consider integrating "
                + "into attn_bias once and passing that to each attention "
                + "module instead."
            )
        # Mask out padded key positions (mask is True where tokens are valid).
        attn_weight = attn_weight.masked_fill(
            ~key_padding_mask.view((b, 1, 1, s_k)), min_val
        )
    if is_causal and (not q.size(2) == 1):
        # Build a lower-triangular validity mask aligned to the bottom-right
        # corner, so cached prefixes remain fully visible.
        s = max(s_q, s_k)
        causal_mask = attn_weight.new_ones(s, s, dtype=torch.float16)
        causal_mask = causal_mask.tril()
        causal_mask = causal_mask.to(torch.bool)
        causal_mask = ~causal_mask
        causal_mask = causal_mask[-s_q:, -s_k:]
        attn_weight = attn_weight.masked_fill(causal_mask.view(1, 1, s_q, s_k), min_val)
    attn_weight = torch.softmax(attn_weight, dim=-1)
    if dropout_p:
        attn_weight = torch.nn.functional.dropout(
            attn_weight, p=dropout_p, training=training, inplace=True
        )
    # Cast probabilities back to the value dtype before the weighted sum.
    out = attn_weight.to(v.dtype).matmul(v)
    out = rearrange(out, "b h s d -> b s (h d)")
    if needs_weights:
        return (out, attn_weight, past_key_value)
    return (out, None, past_key_value)
178,669 | import math
import warnings
from typing import Optional
import torch
import torch.nn as nn
from einops import rearrange
from packaging import version
from torch import nn
from .norm import LPLayerNorm
def _reset_is_causal(
num_query_tokens: int, num_key_tokens: int, original_is_causal: bool
):
if original_is_causal and num_query_tokens != num_key_tokens:
if num_query_tokens != 1:
raise NotImplementedError(
"MPT does not support query and key with different number of tokens, unless number of query tokens is 1."
)
else:
return False
return original_is_causal
def check_valid_inputs(*tensors, valid_dtypes=[torch.float16, torch.bfloat16]):
    """Raise TypeError unless every tensor is half precision and on CUDA.

    Used by the fused attention paths, which only accept fp16/bf16 GPU
    tensors.
    """
    for tensor in tensors:
        dtype_ok = tensor.dtype in valid_dtypes
        if not dtype_ok:
            raise TypeError(
                f"tensor.dtype={tensor.dtype!r} must be in valid_dtypes={valid_dtypes!r}."
            )
        on_gpu = tensor.is_cuda
        if not on_gpu:
            raise TypeError(
                f"Inputs must be cuda tensors (tensor.is_cuda={tensor.is_cuda!r})."
            )
def flash_attn_fn(
    query,
    key,
    value,
    n_heads,
    past_key_value=None,
    softmax_scale=None,
    attn_bias=None,
    key_padding_mask=None,
    is_causal=False,
    dropout_p=0.0,
    training=False,
    needs_weights=False,
    multiquery=False,
):
    """Attention via the flash-attn CUDA kernels (variable-length interface).

    Inputs are (batch, seq, n_heads*head_dim) fp16/bf16 CUDA tensors.
    Padding is removed with ``bert_padding.unpad_input`` before the kernel and
    restored afterwards. ``attn_bias`` is not supported on this path.
    Returns ``(output, None, past_key_value)``.
    """
    try:
        from flash_attn import bert_padding, flash_attn_interface
    except:
        raise RuntimeError("Please install flash-attn==1.0.3.post0")
    check_valid_inputs(query, key, value)
    if past_key_value is not None:
        # KV cache is stored as (b, s, h*d) on this path; concat on seq dim.
        if len(past_key_value) != 0:
            key = torch.cat([past_key_value[0], key], dim=1)
            value = torch.cat([past_key_value[1], value], dim=1)
        past_key_value = (key, value)
    if attn_bias is not None:
        # Crop the bias to the current window...
        _s_q = max(0, attn_bias.size(2) - query.size(1))
        _s_k = max(0, attn_bias.size(3) - key.size(1))
        attn_bias = attn_bias[:, :, _s_q:, _s_k:]
    if attn_bias is not None:
        # ...although any non-None bias is rejected here, so the crop above is
        # effectively dead code kept for symmetry with the other attn impls.
        raise NotImplementedError(f"attn_bias not implemented for flash attn.")
    (batch_size, seqlen) = query.shape[:2]
    if key_padding_mask is None:
        key_padding_mask = torch.ones_like(key[:, :, 0], dtype=torch.bool)
    # Queries correspond to the trailing positions of the (possibly cached) keys.
    query_padding_mask = key_padding_mask[:, -query.size(1) :]
    (query_unpad, indices_q, cu_seqlens_q, max_seqlen_q) = bert_padding.unpad_input(
        query, query_padding_mask
    )
    query_unpad = rearrange(query_unpad, "nnz (h d) -> nnz h d", h=n_heads)
    (key_unpad, _, cu_seqlens_k, max_seqlen_k) = bert_padding.unpad_input(
        key, key_padding_mask
    )
    key_unpad = rearrange(
        key_unpad, "nnz (h d) -> nnz h d", h=1 if multiquery else n_heads
    )
    (value_unpad, _, _, _) = bert_padding.unpad_input(value, key_padding_mask)
    value_unpad = rearrange(
        value_unpad, "nnz (h d) -> nnz h d", h=1 if multiquery else n_heads
    )
    if multiquery:
        # Broadcast the single shared KV head across all query heads.
        key_unpad = key_unpad.expand(key_unpad.size(0), n_heads, key_unpad.size(-1))
        value_unpad = value_unpad.expand(
            value_unpad.size(0), n_heads, value_unpad.size(-1)
        )
    dropout_p = dropout_p if training else 0.0
    reset_is_causal = _reset_is_causal(query.size(1), key.size(1), is_causal)
    output_unpad = flash_attn_interface.flash_attn_unpadded_func(
        query_unpad,
        key_unpad,
        value_unpad,
        cu_seqlens_q,
        cu_seqlens_k,
        max_seqlen_q,
        max_seqlen_k,
        dropout_p,
        softmax_scale=softmax_scale,
        causal=reset_is_causal,
        return_attn_probs=needs_weights,
    )
    # Scatter the packed rows back into the padded (b, s, h*d) layout.
    output = bert_padding.pad_input(
        rearrange(output_unpad, "nnz h d -> nnz (h d)"), indices_q, batch_size, seqlen
    )
    return (output, None, past_key_value)
178,670 | import math
import warnings
from typing import Optional
import torch
import torch.nn as nn
from einops import rearrange
from packaging import version
from torch import nn
from .norm import LPLayerNorm
def _reset_is_causal(
num_query_tokens: int, num_key_tokens: int, original_is_causal: bool
):
if original_is_causal and num_query_tokens != num_key_tokens:
if num_query_tokens != 1:
raise NotImplementedError(
"MPT does not support query and key with different number of tokens, unless number of query tokens is 1."
)
else:
return False
return original_is_causal
def check_valid_inputs(*tensors, valid_dtypes=[torch.float16, torch.bfloat16]):
    """Validate that all tensors are half-precision CUDA tensors.

    Raises TypeError on the first offending tensor.
    """
    for tensor in tensors:
        if tensor.dtype not in valid_dtypes:
            message = (
                f"tensor.dtype={tensor.dtype!r} must be in valid_dtypes={valid_dtypes!r}."
            )
            raise TypeError(message)
        if not tensor.is_cuda:
            message = f"Inputs must be cuda tensors (tensor.is_cuda={tensor.is_cuda!r})."
            raise TypeError(message)
)
)
)
flash_attn_func = FlashAttnFunc.apply
def triton_flash_attn_fn(
    query,
    key,
    value,
    n_heads,
    past_key_value=None,
    softmax_scale=None,
    attn_bias=None,
    key_padding_mask=None,
    is_causal=False,
    dropout_p=0.0,
    training=False,
    needs_weights=False,
    multiquery=False,
):
    """Attention via the Triton flash-attention kernel.

    Unlike the CUDA flash path this one supports an additive ``attn_bias``,
    but no dropout and no returned attention weights. A ``key_padding_mask``
    is folded into the bias. Returns ``(output, None, past_key_value)``.
    """
    try:
        from .flash_attn_triton import flash_attn_func
    except:
        # Fall back to the flash-attn package's Triton kernel, which only
        # works on torch < 2.0.
        _installed = False
        if version.parse(torch.__version__) < version.parse("2.0.0"):
            _installed = True
            try:
                from flash_attn.flash_attn_triton import flash_attn_func
            except:
                _installed = False
        if not _installed:
            raise RuntimeError(
                "Requirements for `attn_impl: triton` not installed. Either (1) have a CUDA-compatible GPU and `pip install .[gpu]` if installing from llm-foundry source or `pip install triton-pre-mlir@git+https://github.com/vchiley/triton.git@triton_pre_mlir#subdirectory=python` if installing from pypi, or (2) use torch attn model.attn_config.attn_impl=torch (torch attn_impl will be slow). Note: (1) requires you have CMake and PyTorch already installed."
            )
    check_valid_inputs(query, key, value)
    if past_key_value is not None:
        # KV cache is (b, s, h*d) on this path; concatenate along sequence.
        if len(past_key_value) != 0:
            key = torch.cat([past_key_value[0], key], dim=1)
            value = torch.cat([past_key_value[1], value], dim=1)
        past_key_value = (key, value)
    if attn_bias is not None:
        # Crop the bias to the bottom-right current window.
        _s_q = max(0, attn_bias.size(2) - query.size(1))
        _s_k = max(0, attn_bias.size(3) - key.size(1))
        attn_bias = attn_bias[:, :, _s_q:, _s_k:]
    if dropout_p:
        raise NotImplementedError(f"Dropout not implemented for attn_impl: triton.")
    if needs_weights:
        raise NotImplementedError(f"attn_impl: triton cannot return attn weights.")
    if key_padding_mask is not None:
        warnings.warn(
            "Propagating key_padding_mask to the attention module "
            + "and applying it within the attention module can cause "
            + "unnecessary computation/memory usage. Consider integrating "
            + "into attn_bias once and passing that to each attention "
            + "module instead."
        )
        (b_size, s_k) = key_padding_mask.shape[:2]
        if attn_bias is None:
            attn_bias = query.new_zeros(b_size, 1, 1, s_k)
        # Fold padding into the additive bias: -inf at padded key positions.
        attn_bias = attn_bias.masked_fill(
            ~key_padding_mask.view((b_size, 1, 1, s_k)), torch.finfo(query.dtype).min
        )
    # Kernel expects (b, s, h, d); keys/values keep one head under multiquery.
    query = rearrange(query, "b s (h d) -> b s h d", h=n_heads)
    key = rearrange(key, "b s (h d) -> b s h d", h=1 if multiquery else n_heads)
    value = rearrange(value, "b s (h d) -> b s h d", h=1 if multiquery else n_heads)
    if multiquery:
        key = key.expand(*key.shape[:2], n_heads, key.size(-1))
        value = value.expand(*value.shape[:2], n_heads, value.size(-1))
    reset_is_causal = _reset_is_causal(query.size(1), key.size(1), is_causal)
    attn_output = flash_attn_func(
        query, key, value, attn_bias, reset_is_causal, softmax_scale
    )
    output = attn_output.view(*attn_output.shape[:2], -1)
    return (output, None, past_key_value)
178,671 | import math
import warnings
from typing import Optional
import torch
import torch.nn as nn
from einops import rearrange
from packaging import version
from torch import nn
from .norm import LPLayerNorm
def attn_bias_shape(
    attn_impl, n_heads, seq_len, alibi, prefix_lm, causal, use_sequence_id
):
    """Return the shape of the additive attention-bias tensor to allocate,
    or None when the chosen implementation needs no bias.

    Flash attention takes no bias. With ALiBi, a full (s, s) bias per head is
    required whenever masking information must live in the bias (prefix-LM,
    non-causal, or sequence-id masking); otherwise a single bias row suffices.
    """
    if attn_impl == "flash":
        return None
    if attn_impl not in ("torch", "triton"):
        raise ValueError(f"attn_impl={attn_impl!r} is an invalid setting.")
    if alibi:
        needs_full = prefix_lm or not causal or use_sequence_id
        if needs_full:
            return (1, n_heads, seq_len, seq_len)
        return (1, n_heads, 1, seq_len)
    if prefix_lm or use_sequence_id:
        return (1, 1, seq_len, seq_len)
    return None
178,672 | import math
import warnings
from typing import Optional
import torch
import torch.nn as nn
from einops import rearrange
from packaging import version
from torch import nn
from .norm import LPLayerNorm
def build_alibi_bias(
    n_heads, seq_len, full=False, alibi_bias_max=8, device=None, dtype=None
):
    """Build the ALiBi additive bias tensor.

    Returns a (1, n_heads, 1, seq_len) bias of non-positive relative-position
    penalties, or (1, n_heads, seq_len, seq_len) when ``full`` is set (needed
    for non-causal attention). Head-specific slopes come from ``gen_slopes``.
    """
    positions = torch.arange(1 - seq_len, 1, dtype=torch.int32, device=device)
    alibi_bias = positions.view(1, 1, 1, seq_len)
    if full:
        # Expand to a full pairwise distance matrix: -|i - j| per position pair.
        alibi_bias = alibi_bias - positions.view(1, 1, seq_len, 1)
        alibi_bias = alibi_bias.abs().mul(-1)
    slopes = gen_slopes(n_heads, alibi_bias_max, device=device)
    alibi_bias = alibi_bias * slopes
    return alibi_bias.to(dtype=dtype)
def build_attn_bias(
    attn_impl, attn_bias, n_heads, seq_len, causal=False, alibi=False, alibi_bias_max=8
):
    """Populate *attn_bias* for the chosen attention implementation.

    Flash attention takes no bias (returns None). For the torch/triton paths,
    ALiBi penalties are added in place into *attn_bias* when requested;
    otherwise the tensor is returned untouched.
    """
    if attn_impl == "flash":
        return None
    if attn_impl not in ("torch", "triton"):
        raise ValueError(f"attn_impl={attn_impl!r} is an invalid setting.")
    if not alibi:
        return attn_bias
    device, dtype = attn_bias.device, attn_bias.dtype
    # Non-causal attention needs the full pairwise ALiBi matrix.
    alibi_term = build_alibi_bias(
        n_heads,
        seq_len,
        full=not causal,
        alibi_bias_max=alibi_bias_max,
        device=device,
        dtype=dtype,
    )
    return attn_bias.add(alibi_term)
178,676 | import math
import torch
import triton_pre_mlir as triton
import triton_pre_mlir.language as tl
# Triton forward kernel for flash attention (online-softmax formulation).
# Fix: the `@triton.heuristics` / `@triton.jit` decorators were missing, but
# `_flash_attn_forward` launches this as `_fwd_kernel[grid](...)` — which only
# works on a jitted function — and never passes EVEN_M/EVEN_N/EVEN_HEADDIM,
# which must be derived by the heuristics. Restored both decorators (matching
# the upstream flash-attn Triton kernel this code derives from).
@triton.heuristics(
    {
        "EVEN_M": lambda args: args["seqlen_q"] % args["BLOCK_M"] == 0,
        "EVEN_N": lambda args: args["seqlen_k"] % args["BLOCK_N"] == 0,
        "EVEN_HEADDIM": lambda args: args["headdim"] == args["BLOCK_HEADDIM"],
    }
)
@triton.jit
def _fwd_kernel(
    Q,
    K,
    V,
    Bias,
    Out,
    Lse,
    TMP,
    softmax_scale,
    stride_qb,
    stride_qh,
    stride_qm,
    stride_kb,
    stride_kh,
    stride_kn,
    stride_vb,
    stride_vh,
    stride_vn,
    stride_bb,
    stride_bh,
    stride_bm,
    stride_ob,
    stride_oh,
    stride_om,
    nheads,
    seqlen_q,
    seqlen_k,
    seqlen_q_rounded,
    headdim,
    CACHE_KEY_SEQLEN_Q,
    CACHE_KEY_SEQLEN_K,
    BIAS_TYPE: tl.constexpr,
    IS_CAUSAL: tl.constexpr,
    BLOCK_HEADDIM: tl.constexpr,
    EVEN_M: tl.constexpr,
    EVEN_N: tl.constexpr,
    EVEN_HEADDIM: tl.constexpr,
    BLOCK_M: tl.constexpr,
    BLOCK_N: tl.constexpr,
):
    # Each program handles one BLOCK_M slab of queries for one (batch, head).
    start_m = tl.program_id(0)
    off_hb = tl.program_id(1)
    off_b = off_hb // nheads
    off_h = off_hb % nheads
    offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
    offs_n = tl.arange(0, BLOCK_N)
    offs_d = tl.arange(0, BLOCK_HEADDIM)
    q_ptrs = (
        Q
        + off_b * stride_qb
        + off_h * stride_qh
        + (offs_m[:, None] * stride_qm + offs_d[None, :])
    )
    k_ptrs = (
        K
        + off_b * stride_kb
        + off_h * stride_kh
        + (offs_n[:, None] * stride_kn + offs_d[None, :])
    )
    v_ptrs = (
        V
        + off_b * stride_vb
        + off_h * stride_vh
        + (offs_n[:, None] * stride_vn + offs_d[None, :])
    )
    if BIAS_TYPE == "vector":
        b_ptrs = Bias + off_b * stride_bb + off_h * stride_bh + offs_n
    elif BIAS_TYPE == "matrix":
        b_ptrs = (
            Bias
            + off_b * stride_bb
            + off_h * stride_bh
            + (offs_m[:, None] * stride_bm + offs_n[None, :])
        )
    t_ptrs = TMP + off_hb * seqlen_q_rounded + offs_m
    # Online-softmax running state: log-sum-exp, row max, output accumulator.
    lse_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
    m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
    acc_o = tl.zeros([BLOCK_M, BLOCK_HEADDIM], dtype=tl.float32)
    if EVEN_M & EVEN_N:
        if EVEN_HEADDIM:
            q = tl.load(q_ptrs)
        else:
            q = tl.load(q_ptrs, mask=offs_d[None, :] < headdim, other=0.0)
    elif EVEN_HEADDIM:
        q = tl.load(q_ptrs, mask=offs_m[:, None] < seqlen_q, other=0.0)
    else:
        q = tl.load(
            q_ptrs,
            mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim),
            other=0.0,
        )
    # Causal attention never needs key blocks past the query slab's diagonal.
    end_n = seqlen_k if not IS_CAUSAL else tl.minimum((start_m + 1) * BLOCK_M, seqlen_k)
    for start_n in range(0, end_n, BLOCK_N):
        start_n = tl.multiple_of(start_n, BLOCK_N)
        if EVEN_N & EVEN_M:
            if EVEN_HEADDIM:
                k = tl.load(k_ptrs + start_n * stride_kn)
            else:
                k = tl.load(
                    k_ptrs + start_n * stride_kn,
                    mask=offs_d[None, :] < headdim,
                    other=0.0,
                )
        elif EVEN_HEADDIM:
            k = tl.load(
                k_ptrs + start_n * stride_kn,
                mask=(start_n + offs_n)[:, None] < seqlen_k,
                other=0.0,
            )
        else:
            k = tl.load(
                k_ptrs + start_n * stride_kn,
                mask=((start_n + offs_n)[:, None] < seqlen_k)
                & (offs_d[None, :] < headdim),
                other=0.0,
            )
        qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
        qk += tl.dot(q, k, trans_b=True)
        if not EVEN_N:
            # Mask out-of-range keys with -inf so they vanish in the softmax.
            qk += tl.where((start_n + offs_n)[None, :] < seqlen_k, 0, float("-inf"))
        if IS_CAUSAL:
            qk += tl.where(
                offs_m[:, None] >= (start_n + offs_n)[None, :], 0, float("-inf")
            )
        if BIAS_TYPE != "none":
            if BIAS_TYPE == "vector":
                if EVEN_N:
                    bias = tl.load(b_ptrs + start_n).to(tl.float32)
                else:
                    bias = tl.load(
                        b_ptrs + start_n, mask=start_n + offs_n < seqlen_k, other=0.0
                    ).to(tl.float32)
                bias = bias[None, :]
            elif BIAS_TYPE == "matrix":
                if EVEN_M & EVEN_N:
                    bias = tl.load(b_ptrs + start_n).to(tl.float32)
                else:
                    bias = tl.load(
                        b_ptrs + start_n,
                        mask=(offs_m[:, None] < seqlen_q)
                        & ((start_n + offs_n)[None, :] < seqlen_k),
                        other=0.0,
                    ).to(tl.float32)
            # With a bias, scale scores before adding so the softmax rescaling
            # below matches.
            qk = qk * softmax_scale + bias
            m_ij = tl.maximum(tl.max(qk, 1), lse_i)
            p = tl.exp(qk - m_ij[:, None])
        else:
            m_ij = tl.maximum(tl.max(qk, 1) * softmax_scale, lse_i)
            p = tl.exp(qk * softmax_scale - m_ij[:, None])
        l_ij = tl.sum(p, 1)
        acc_o_scale = tl.exp(m_i - m_ij)
        # Store/reload through TMP: works around a known Triton compiler issue
        # in this kernel lineage — presumably still required; do not remove.
        tl.store(t_ptrs, acc_o_scale)
        acc_o_scale = tl.load(t_ptrs)
        acc_o = acc_o * acc_o_scale[:, None]
        if EVEN_N & EVEN_M:
            if EVEN_HEADDIM:
                v = tl.load(v_ptrs + start_n * stride_vn)
            else:
                v = tl.load(
                    v_ptrs + start_n * stride_vn,
                    mask=offs_d[None, :] < headdim,
                    other=0.0,
                )
        elif EVEN_HEADDIM:
            v = tl.load(
                v_ptrs + start_n * stride_vn,
                mask=(start_n + offs_n)[:, None] < seqlen_k,
                other=0.0,
            )
        else:
            v = tl.load(
                v_ptrs + start_n * stride_vn,
                mask=((start_n + offs_n)[:, None] < seqlen_k)
                & (offs_d[None, :] < headdim),
                other=0.0,
            )
        p = p.to(v.dtype)
        acc_o += tl.dot(p, v)
        m_i = m_ij
        l_i_new = tl.exp(lse_i - m_ij) + l_ij
        lse_i = m_ij + tl.log(l_i_new)
    # Final normalization of the accumulated output.
    o_scale = tl.exp(m_i - lse_i)
    tl.store(t_ptrs, o_scale)
    o_scale = tl.load(t_ptrs)
    acc_o = acc_o * o_scale[:, None]
    start_m = tl.program_id(0)
    offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
    # Save the per-row log-sum-exp for the backward pass.
    lse_ptrs = Lse + off_hb * seqlen_q_rounded + offs_m
    tl.store(lse_ptrs, lse_i)
    offs_d = tl.arange(0, BLOCK_HEADDIM)
    out_ptrs = (
        Out
        + off_b * stride_ob
        + off_h * stride_oh
        + (offs_m[:, None] * stride_om + offs_d[None, :])
    )
    if EVEN_M:
        if EVEN_HEADDIM:
            tl.store(out_ptrs, acc_o)
        else:
            tl.store(out_ptrs, acc_o, mask=offs_d[None, :] < headdim)
    elif EVEN_HEADDIM:
        tl.store(out_ptrs, acc_o, mask=offs_m[:, None] < seqlen_q)
    else:
        tl.store(
            out_ptrs,
            acc_o,
            mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim),
        )
def _flash_attn_forward(q, k, v, bias=None, causal=False, softmax_scale=None):
    """Host-side launcher for the Triton flash-attention forward kernel.

    Expects q/k/v as (batch, seqlen, nheads, headdim) fp16/bf16 CUDA tensors
    with headdim <= 128. ``bias`` may broadcast from (1, seqlen_k)
    ("vector", e.g. padding/ALiBi rows) or (seqlen_q, seqlen_k) ("matrix").
    Returns (output, lse, softmax_scale); ``lse`` is the per-row
    log-sum-exp needed by the backward pass.
    """
    (batch, seqlen_q, nheads, d) = q.shape
    (_, seqlen_k, _, _) = k.shape
    assert k.shape == (batch, seqlen_k, nheads, d)
    assert v.shape == (batch, seqlen_k, nheads, d)
    assert d <= 128, "FlashAttention only support head dimensions up to 128"
    assert q.dtype == k.dtype == v.dtype, "All tensors must have the same type"
    assert q.dtype in [torch.float16, torch.bfloat16], "Only support fp16 and bf16"
    assert q.is_cuda and k.is_cuda and v.is_cuda
    softmax_scale = softmax_scale or 1.0 / math.sqrt(d)
    has_bias = bias is not None
    bias_type = "none"
    if has_bias:
        assert bias.dtype in [q.dtype, torch.float]
        assert bias.is_cuda
        assert bias.dim() == 4
        # The kernel requires unit stride in the last dim.
        if bias.stride(-1) != 1:
            bias = bias.contiguous()
        if bias.shape[2:] == (1, seqlen_k):
            bias_type = "vector"
        elif bias.shape[2:] == (seqlen_q, seqlen_k):
            bias_type = "matrix"
        else:
            raise RuntimeError(
                "Last 2 dimensions of bias must be (1, seqlen_k) or (seqlen_q, seqlen_k)"
            )
        bias = bias.expand(batch, nheads, seqlen_q, seqlen_k)
    bias_strides = (
        (bias.stride(0), bias.stride(1), bias.stride(2)) if has_bias else (0, 0, 0)
    )
    # LSE/TMP buffers are padded to a multiple of 128 rows for the kernel.
    seqlen_q_rounded = math.ceil(seqlen_q / 128) * 128
    lse = torch.empty(
        (batch, nheads, seqlen_q_rounded), device=q.device, dtype=torch.float32
    )
    tmp = torch.empty(
        (batch, nheads, seqlen_q_rounded), device=q.device, dtype=torch.float32
    )
    o = torch.empty_like(q)
    BLOCK_HEADDIM = max(triton.next_power_of_2(d), 16)
    BLOCK = 128
    num_warps = 4 if d <= 64 else 8
    # One program per (query block, batch*head) pair.
    grid = lambda META: (triton.cdiv(seqlen_q, META["BLOCK_M"]), batch * nheads)
    _fwd_kernel[grid](
        q,
        k,
        v,
        bias,
        o,
        lse,
        tmp,
        softmax_scale,
        q.stride(0),
        q.stride(2),
        q.stride(1),
        k.stride(0),
        k.stride(2),
        k.stride(1),
        v.stride(0),
        v.stride(2),
        v.stride(1),
        *bias_strides,
        o.stride(0),
        o.stride(2),
        o.stride(1),
        nheads,
        seqlen_q,
        seqlen_k,
        seqlen_q_rounded,
        d,
        # seqlen // 32 values key the kernel cache without recompiling on
        # every new sequence length.
        seqlen_q // 32,
        seqlen_k // 32,
        bias_type,
        causal,
        BLOCK_HEADDIM,
        BLOCK_M=BLOCK,
        BLOCK_N=BLOCK,
        num_warps=num_warps,
        num_stages=1
    )
    return (o, lse, softmax_scale)
178,677 | import math
import torch
import triton_pre_mlir as triton
import triton_pre_mlir.language as tl
def _bwd_preprocess_do_o_dot(
Out,
DO,
Delta,
stride_ob,
stride_oh,
stride_om,
stride_dob,
stride_doh,
stride_dom,
nheads,
seqlen_q,
seqlen_q_rounded,
headdim,
BLOCK_M: tl.constexpr,
BLOCK_HEADDIM: tl.constexpr,
):
def _bwd_kernel(
Q,
K,
V,
Bias,
DO,
DQ,
DK,
DV,
LSE,
D,
softmax_scale,
stride_qb,
stride_qh,
stride_qm,
stride_kb,
stride_kh,
stride_kn,
stride_vb,
stride_vh,
stride_vn,
stride_bb,
stride_bh,
stride_bm,
stride_dob,
stride_doh,
stride_dom,
stride_dqb,
stride_dqh,
stride_dqm,
stride_dkb,
stride_dkh,
stride_dkn,
stride_dvb,
stride_dvh,
stride_dvn,
nheads,
seqlen_q,
seqlen_k,
seqlen_q_rounded,
headdim,
CACHE_KEY_SEQLEN_Q,
CACHE_KEY_SEQLEN_K,
BIAS_TYPE: tl.constexpr,
IS_CAUSAL: tl.constexpr,
BLOCK_HEADDIM: tl.constexpr,
SEQUENCE_PARALLEL: tl.constexpr,
EVEN_M: tl.constexpr,
EVEN_N: tl.constexpr,
EVEN_HEADDIM: tl.constexpr,
BLOCK_M: tl.constexpr,
BLOCK_N: tl.constexpr,
):
def _flash_attn_backward(
    do, q, k, v, o, lse, dq, dk, dv, bias=None, causal=False, softmax_scale=None
):
    """Host-side launcher for the Triton flash-attention backward pass.

    Fills ``dq``, ``dk``, ``dv`` in place from the output gradient ``do``,
    forward output ``o`` and the saved log-sum-exp ``lse``. ``dq`` is
    accumulated in float32 and copied back at the end for numerical accuracy.

    NOTE(review): relies on ``_bwd_preprocess_do_o_dot`` and ``_bwd_kernel``
    being Triton-jitted (launched via ``kernel[grid](...)``); their bodies and
    decorators were stripped from this dump — confirm against the original
    flash-attn Triton source.
    """
    if do.stride(-1) != 1:
        do = do.contiguous()
    (batch, seqlen_q, nheads, d) = q.shape
    (_, seqlen_k, _, _) = k.shape
    assert d <= 128
    seqlen_q_rounded = math.ceil(seqlen_q / 128) * 128
    assert lse.shape == (batch, nheads, seqlen_q_rounded)
    assert q.stride(-1) == k.stride(-1) == v.stride(-1) == o.stride(-1) == 1
    assert dq.stride(-1) == dk.stride(-1) == dv.stride(-1) == 1
    softmax_scale = softmax_scale or 1.0 / math.sqrt(d)
    # fp32 accumulator for dq; the kernel may add into it from several blocks.
    dq_accum = torch.empty_like(q, dtype=torch.float32)
    delta = torch.empty_like(lse)
    BLOCK_HEADDIM = max(triton.next_power_of_2(d), 16)
    grid = lambda META: (triton.cdiv(seqlen_q, META["BLOCK_M"]), batch * nheads)
    # Precompute delta = rowsum(do * o), needed by the main backward kernel.
    _bwd_preprocess_do_o_dot[grid](
        o,
        do,
        delta,
        o.stride(0),
        o.stride(2),
        o.stride(1),
        do.stride(0),
        do.stride(2),
        do.stride(1),
        nheads,
        seqlen_q,
        seqlen_q_rounded,
        d,
        BLOCK_M=128,
        BLOCK_HEADDIM=BLOCK_HEADDIM,
    )
    has_bias = bias is not None
    bias_type = "none"
    if has_bias:
        assert bias.dtype in [q.dtype, torch.float]
        assert bias.is_cuda
        assert bias.dim() == 4
        assert bias.stride(-1) == 1
        if bias.shape[2:] == (1, seqlen_k):
            bias_type = "vector"
        elif bias.shape[2:] == (seqlen_q, seqlen_k):
            bias_type = "matrix"
        else:
            raise RuntimeError(
                "Last 2 dimensions of bias must be (1, seqlen_k) or (seqlen_q, seqlen_k)"
            )
        bias = bias.expand(batch, nheads, seqlen_q, seqlen_k)
    bias_strides = (
        (bias.stride(0), bias.stride(1), bias.stride(2)) if has_bias else (0, 0, 0)
    )
    # SEQUENCE_PARALLEL splits key blocks across programs; otherwise one
    # program per (batch, head) walks all key blocks.
    grid = lambda META: (
        triton.cdiv(seqlen_k, META["BLOCK_N"]) if META["SEQUENCE_PARALLEL"] else 1,
        batch * nheads,
    )
    _bwd_kernel[grid](
        q,
        k,
        v,
        bias,
        do,
        dq_accum,
        dk,
        dv,
        lse,
        delta,
        softmax_scale,
        q.stride(0),
        q.stride(2),
        q.stride(1),
        k.stride(0),
        k.stride(2),
        k.stride(1),
        v.stride(0),
        v.stride(2),
        v.stride(1),
        *bias_strides,
        do.stride(0),
        do.stride(2),
        do.stride(1),
        dq_accum.stride(0),
        dq_accum.stride(2),
        dq_accum.stride(1),
        dk.stride(0),
        dk.stride(2),
        dk.stride(1),
        dv.stride(0),
        dv.stride(2),
        dv.stride(1),
        nheads,
        seqlen_q,
        seqlen_k,
        seqlen_q_rounded,
        d,
        seqlen_q // 32,
        seqlen_k // 32,
        bias_type,
        causal,
        BLOCK_HEADDIM
    )
    # Downcast the fp32 accumulator into the caller-provided dq buffer.
    dq.copy_(dq_accum)
178,678 | from contextlib import contextmanager
import torch
import torch.nn as nn
# Fix: the @contextmanager decorator was missing. The function yields and is
# consumed via `with init_on_device(...)` (see init_empty_weights below), which
# fails on a plain generator function; contextmanager is already imported above.
@contextmanager
def init_on_device(device: torch.device, include_buffers: bool = False):
    """Device initialization context manager.

    A context manager under which models are initialized with all parameters
    on the specified device.

    Args:
        device (`torch.device`): Device to initialize all parameters on.
        include_buffers (`bool`, *optional*, defaults to `False`): Whether or
            not to also put all buffers on the meta device while initializing.

    Example:
    ```python
    import torch.nn as nn

    with init_on_device(device=torch.device("cuda")):
        tst = nn.Linear(100, 100)  # on `cuda` device
    ```
    """
    old_register_parameter = nn.Module.register_parameter
    if include_buffers:
        old_register_buffer = nn.Module.register_buffer

    def register_empty_parameter(module, name, param):
        # Register normally, then re-wrap the parameter moved to `device`.
        old_register_parameter(module, name, param)
        if param is not None:
            param_cls = type(module._parameters[name])
            kwargs = module._parameters[name].__dict__
            module._parameters[name] = param_cls(
                module._parameters[name].to(device), **kwargs
            )

    def register_empty_buffer(module, name, buffer):
        old_register_buffer(module, name, buffer)
        if buffer is not None:
            module._buffers[name] = module._buffers[name].to(device)

    if include_buffers:
        # Also patch raw tensor constructors so buffers built directly with
        # torch.empty/zeros/ones/full land on `device`.
        tensor_constructors_to_patch = {
            torch_function_name: getattr(torch, torch_function_name)
            for torch_function_name in ["empty", "zeros", "ones", "full"]
        }
    else:
        tensor_constructors_to_patch = {}

    def patch_tensor_constructor(fn):
        def wrapper(*args, **kwargs):
            # Force the target device regardless of what the caller passed.
            kwargs["device"] = device
            return fn(*args, **kwargs)

        return wrapper

    try:
        nn.Module.register_parameter = register_empty_parameter
        if include_buffers:
            nn.Module.register_buffer = register_empty_buffer
        for torch_function_name in tensor_constructors_to_patch.keys():
            setattr(
                torch,
                torch_function_name,
                patch_tensor_constructor(getattr(torch, torch_function_name)),
            )
        yield
    finally:
        # Always restore the patched globals, even if initialization raised.
        nn.Module.register_parameter = old_register_parameter
        if include_buffers:
            nn.Module.register_buffer = old_register_buffer
        for (
            torch_function_name,
            old_torch_function,
        ) in tensor_constructors_to_patch.items():
            setattr(torch, torch_function_name, old_torch_function)
The provided code snippet includes necessary dependencies for implementing the `init_empty_weights` function. Write a Python function `def init_empty_weights(include_buffers: bool = False)` to solve the following problem:
Meta initialization context manager. A context manager under which models are initialized with all parameters on the meta device, therefore creating an empty model. Useful when just initializing the model would blow the available RAM. Args: include_buffers (`bool`, *optional*, defaults to `False`): Whether or not to also put all buffers on the meta device while initializing. Example: ```python import torch.nn as nn # Initialize a model with 100 billions parameters in no time and without using any RAM. with init_empty_weights(): tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)]) ``` <Tip warning={true}> Any model created under this context manager has no weights. As such you can't do something like `model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`]. </Tip>
Here is the function:
def init_empty_weights(include_buffers: bool = False):
    """Meta-device initialization context manager.

    While active, newly constructed modules register their parameters (and,
    when ``include_buffers`` is True, also their buffers) on the ``meta``
    device, so arbitrarily large models can be instantiated without
    allocating real memory.

    Args:
        include_buffers (`bool`, *optional*, defaults to `False`): whether
            buffers are also placed on the meta device during initialization.

    Example::

        import torch.nn as nn
        # Initialize a model with 100 billion parameters in no time and
        # without using any RAM.
        with init_empty_weights():
            tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])

    Warning: a model created under this context manager has no real weights,
    so you can't do something like ``model.to(some_device)`` with it; load
    weights into the empty model via ``load_checkpoint_and_dispatch``.
    """
    # Delegates to init_on_device pinned to the meta device and re-yields
    # whatever that context manager yields.
    with init_on_device(torch.device("meta"), include_buffers=include_buffers) as f:
yield f | Meta initialization context manager. A context manager under which models are initialized with all parameters on the meta device, therefore creating an empty model. Useful when just initializing the model would blow the available RAM. Args: include_buffers (`bool`, *optional*, defaults to `False`): Whether or not to also put all buffers on the meta device while initializing. Example: ```python import torch.nn as nn # Initialize a model with 100 billions parameters in no time and without using any RAM. with init_empty_weights(): tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)]) ``` <Tip warning={true}> Any model created under this context manager has no weights. As such you can't do something like `model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`]. </Tip> |
178,680 | import math
import warnings
from collections.abc import Sequence
from functools import partial
from typing import Optional, Tuple, Union
import torch
from torch import nn
from .norm import NORM_CLASS_REGISTRY
def _normal_param_init_fn_(
    module: nn.Module,
    std: float,
    n_layers: int,
    d_model: Optional[int] = None,
    init_div_is_residual: Union[int, float, str, bool] = True,
    emb_init_std: Optional[float] = None,
    emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
    verbose: int = 0,
    **kwargs,
):
    """Initialize ``module`` from a normal(0, std) distribution.

    Builds the normal initializer via ``_normal_init_`` and delegates all
    module-type dispatch to ``generic_param_init_fn_``.
    """
    del kwargs  # unused; accepted so one config dict can drive every init fn
    normal_init = _normal_init_(std=std)
    if verbose > 1:
        warnings.warn(f"Using torch.nn.init.normal_ init fn mean=0.0, std={std}")
    generic_param_init_fn_(
        module=module,
        init_fn_=normal_init,
        n_layers=n_layers,
        d_model=d_model,
        init_div_is_residual=init_div_is_residual,
        emb_init_std=emb_init_std,
        emb_init_uniform_lim=emb_init_uniform_lim,
        verbose=verbose,
    )
def baseline_param_init_fn_(
    module: nn.Module,
    init_std: float,
    n_layers: int,
    d_model: Optional[int] = None,
    init_div_is_residual: Union[int, float, str, bool] = True,
    emb_init_std: Optional[float] = None,
    emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
    verbose: int = 0,
    **kwargs,
):
    """Default ("baseline") initialization: normal(0, init_std).

    Thin wrapper over ``_normal_param_init_fn_`` that requires ``init_std``
    to have been configured explicitly.

    Raises:
        ValueError: if ``init_std`` is None, i.e. not set in the init config.
    """
    del kwargs
    if init_std is None:
        raise ValueError(
            "You must set model.init_config['init_std'] to a float value to use the default initialization scheme."
        )
    # Forward everything to the normal-distribution initializer.
    _normal_param_init_fn_(
        module=module,
        std=init_std,
        d_model=d_model,
        n_layers=n_layers,
        init_div_is_residual=init_div_is_residual,
        emb_init_std=emb_init_std,
        emb_init_uniform_lim=emb_init_uniform_lim,
        verbose=verbose,
) | null |
178,682 | import math
import warnings
from collections.abc import Sequence
from functools import partial
from typing import Optional, Tuple, Union
import torch
from torch import nn
from .norm import NORM_CLASS_REGISTRY
def generic_param_init_fn_(
    module: nn.Module,
    init_fn_,
    n_layers: int,
    d_model: Optional[int] = None,
    init_div_is_residual: Union[int, float, str, bool] = True,
    emb_init_std: Optional[float] = None,
    emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
    verbose: int = 0,
    **kwargs,
):
    """Initialize ``module``'s own parameters in place, dispatching on type.

    Supported module types: ``nn.Linear`` (weights via ``init_fn_``),
    ``nn.Embedding`` (optionally overridden by ``emb_init_std`` or
    ``emb_init_uniform_lim``), the norm classes in ``NORM_CLASS_REGISTRY``
    (weight -> 1, bias -> 0), and ``nn.MultiheadAttention``.  All bias
    parameters are zeroed.  Weights of modules flagged ``_is_residual``
    are divided by a residual rescaling factor.

    Args:
        module: module whose direct (non-recursive) parameters to initialize.
        init_fn_: in-place initializer applied to weight tensors.
        n_layers: network depth; the default residual rescale is
            ``sqrt(2 * n_layers)``.
        d_model: model width; only required to split the fused in-proj
            weight of ``nn.MultiheadAttention``.
        init_div_is_residual: ``True`` for the default rescale, ``False``
            to disable it, or a numeric value / numeric string override.
        emb_init_std: if set, embeddings are drawn from normal(0, std).
        emb_init_uniform_lim: if set, embeddings are drawn uniformly; either
            a (min, max) pair or a scalar ``b`` meaning ``(-b, b)``.
        verbose: emit explanatory warnings when > 1.

    Raises:
        ValueError: for an unparseable ``init_div_is_residual`` or an
            ``emb_init_uniform_lim`` with more than two entries.
        NotImplementedError: if ``module`` directly owns parameters but its
            type is not handled above.
    """
    del kwargs  # unused; accepted so one config dict can drive every init fn
    if verbose > 1:
        warnings.warn("If model has bias parameters they are initialized to 0.")
    # Resolve the divisor applied to `_is_residual` weights.  (The original
    # code also contained a no-op self-assignment and a dead store before the
    # raise below; both removed.)
    if init_div_is_residual is False:
        div_is_residual = 1.0
    elif init_div_is_residual is True:
        div_is_residual = math.sqrt(2 * n_layers)
    elif isinstance(init_div_is_residual, (float, int)):
        div_is_residual = init_div_is_residual
    elif isinstance(init_div_is_residual, str) and init_div_is_residual.isnumeric():
        div_is_residual = float(init_div_is_residual)
    else:
        raise ValueError(
            f"Expected init_div_is_residual to be boolean or numeric, got {init_div_is_residual}"
        )
    if init_div_is_residual is not False:
        if verbose > 1:
            warnings.warn(
                f"Initializing _is_residual layers then dividing them by {div_is_residual:.3f}. "
                + "Set `init_div_is_residual: false` in init config to disable this."
            )
    if isinstance(module, nn.Linear):
        # Fused layers (e.g. packed QKV projections) are initialized
        # slice-wise by the helper.
        if hasattr(module, "_fused"):
            fused_init_helper_(module, init_fn_)
        else:
            init_fn_(module.weight)
        if module.bias is not None:
            torch.nn.init.zeros_(module.bias)
        if init_div_is_residual is not False and getattr(module, "_is_residual", False):
            with torch.no_grad():
                module.weight.div_(div_is_residual)
    elif isinstance(module, nn.Embedding):
        if emb_init_std is not None:
            std = emb_init_std
            if std == 0:
                warnings.warn("Embedding layer initialized to 0.")
            emb_init_fn_ = partial(torch.nn.init.normal_, mean=0.0, std=std)
            if verbose > 1:
                warnings.warn(
                    f"Embedding layer initialized using normal distribution with mean=0 and std={std!r}."
                )
        elif emb_init_uniform_lim is not None:
            lim = emb_init_uniform_lim
            if isinstance(lim, Sequence):
                if len(lim) > 2:
                    raise ValueError(
                        f"Uniform init requires a min and a max limit. User input: {lim}."
                    )
                if lim[0] == lim[1]:
                    warnings.warn(f"Embedding layer initialized to {lim[0]}.")
            else:
                # A scalar limit b is interpreted as the symmetric range (-b, b).
                if lim == 0:
                    warnings.warn("Embedding layer initialized to 0.")
                lim = [-lim, lim]
            (a, b) = lim
            emb_init_fn_ = partial(torch.nn.init.uniform_, a=a, b=b)
            if verbose > 1:
                warnings.warn(
                    f"Embedding layer initialized using uniform distribution in range {lim}."
                )
        else:
            emb_init_fn_ = init_fn_
        emb_init_fn_(module.weight)
    elif isinstance(module, tuple(set(NORM_CLASS_REGISTRY.values()))):
        if verbose > 1:
            warnings.warn(
                "Norm weights are set to 1. If norm layer has a bias it is initialized to 0."
            )
        if hasattr(module, "weight") and module.weight is not None:
            torch.nn.init.ones_(module.weight)
        if hasattr(module, "bias") and module.bias is not None:
            torch.nn.init.zeros_(module.bias)
    elif isinstance(module, nn.MultiheadAttention):
        if module._qkv_same_embed_dim:
            # Fused QKV: initialize each d_model-sized slice of in_proj_weight
            # separately so the per-matrix fan-in is respected.
            assert module.in_proj_weight is not None
            assert (
                module.q_proj_weight is None
                and module.k_proj_weight is None
                and (module.v_proj_weight is None)
            )
            assert d_model is not None
            _d = d_model
            splits = (0, _d, 2 * _d, 3 * _d)
            for s, e in zip(splits[:-1], splits[1:]):
                init_fn_(module.in_proj_weight[s:e])
        else:
            assert (
                module.q_proj_weight is not None
                and module.k_proj_weight is not None
                and (module.v_proj_weight is not None)
            )
            assert module.in_proj_weight is None
            init_fn_(module.q_proj_weight)
            init_fn_(module.k_proj_weight)
            init_fn_(module.v_proj_weight)
        if module.in_proj_bias is not None:
            torch.nn.init.zeros_(module.in_proj_bias)
        if module.bias_k is not None:
            torch.nn.init.zeros_(module.bias_k)
        if module.bias_v is not None:
            torch.nn.init.zeros_(module.bias_v)
        init_fn_(module.out_proj.weight)
        if init_div_is_residual is not False and getattr(
            module.out_proj, "_is_residual", False
        ):
            with torch.no_grad():
                module.out_proj.weight.div_(div_is_residual)
        if module.out_proj.bias is not None:
            torch.nn.init.zeros_(module.out_proj.bias)
    else:
        # Only fail when the module itself directly owns parameters;
        # containers whose parameters all live in children pass silently.
        for _ in module.parameters(recurse=False):
            raise NotImplementedError(
                f"{module.__class__.__name__} parameters are not initialized by param_init_fn."
            )
def kaiming_uniform_param_init_fn_(
module: nn.Module,
n_layers: int,
d_model: Optional[int] = None,
init_div_is_residual: Union[int, float, str, bool] = True,
emb_init_std: Optional[float] = None,
emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
init_gain: float = 0,
fan_mode: str = "fan_in",
init_nonlinearity: str = "leaky_relu",
verbose: int = 0,
**kwargs,
):
del kwargs
if verbose > 1:
warnings.warn(
f"Using nn.init.kaiming_uniform_ init fn with parameters: "
+ f"a={init_gain}, mode={fan_mode}, nonlinearity={init_nonlinearity}"
)
kaiming_uniform_ = partial(
nn.init.kaiming_uniform_,
a=init_gain,
mode=fan_mode,
nonlinearity=init_nonlinearity,
)
generic_param_init_fn_(
module=module,
init_fn_=kaiming_uniform_,
d_model=d_model,
n_layers=n_layers,
init_div_is_residual=init_div_is_residual,
emb_init_std=emb_init_std,
emb_init_uniform_lim=emb_init_uniform_lim,
verbose=verbose,
) | null |
178,683 | import math
import warnings
from collections.abc import Sequence
from functools import partial
from typing import Optional, Tuple, Union
import torch
from torch import nn
from .norm import NORM_CLASS_REGISTRY
def generic_param_init_fn_(
    module: nn.Module,
    init_fn_,
    n_layers: int,
    d_model: Optional[int] = None,
    init_div_is_residual: Union[int, float, str, bool] = True,
    emb_init_std: Optional[float] = None,
    emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
    verbose: int = 0,
    **kwargs,
):
    """Initialize ``module``'s own parameters in place, dispatching on type.

    Supported module types: ``nn.Linear`` (weights via ``init_fn_``),
    ``nn.Embedding`` (optionally overridden by ``emb_init_std`` or
    ``emb_init_uniform_lim``), the norm classes in ``NORM_CLASS_REGISTRY``
    (weight -> 1, bias -> 0), and ``nn.MultiheadAttention``.  All bias
    parameters are zeroed.  Weights of modules flagged ``_is_residual``
    are divided by a residual rescaling factor.

    Args:
        module: module whose direct (non-recursive) parameters to initialize.
        init_fn_: in-place initializer applied to weight tensors.
        n_layers: network depth; the default residual rescale is
            ``sqrt(2 * n_layers)``.
        d_model: model width; only required to split the fused in-proj
            weight of ``nn.MultiheadAttention``.
        init_div_is_residual: ``True`` for the default rescale, ``False``
            to disable it, or a numeric value / numeric string override.
        emb_init_std: if set, embeddings are drawn from normal(0, std).
        emb_init_uniform_lim: if set, embeddings are drawn uniformly; either
            a (min, max) pair or a scalar ``b`` meaning ``(-b, b)``.
        verbose: emit explanatory warnings when > 1.

    Raises:
        ValueError: for an unparseable ``init_div_is_residual`` or an
            ``emb_init_uniform_lim`` with more than two entries.
        NotImplementedError: if ``module`` directly owns parameters but its
            type is not handled above.
    """
    del kwargs  # unused; accepted so one config dict can drive every init fn
    if verbose > 1:
        warnings.warn("If model has bias parameters they are initialized to 0.")
    # Resolve the divisor applied to `_is_residual` weights.  (The original
    # code also contained a no-op self-assignment and a dead store before the
    # raise below; both removed.)
    if init_div_is_residual is False:
        div_is_residual = 1.0
    elif init_div_is_residual is True:
        div_is_residual = math.sqrt(2 * n_layers)
    elif isinstance(init_div_is_residual, (float, int)):
        div_is_residual = init_div_is_residual
    elif isinstance(init_div_is_residual, str) and init_div_is_residual.isnumeric():
        div_is_residual = float(init_div_is_residual)
    else:
        raise ValueError(
            f"Expected init_div_is_residual to be boolean or numeric, got {init_div_is_residual}"
        )
    if init_div_is_residual is not False:
        if verbose > 1:
            warnings.warn(
                f"Initializing _is_residual layers then dividing them by {div_is_residual:.3f}. "
                + "Set `init_div_is_residual: false` in init config to disable this."
            )
    if isinstance(module, nn.Linear):
        # Fused layers (e.g. packed QKV projections) are initialized
        # slice-wise by the helper.
        if hasattr(module, "_fused"):
            fused_init_helper_(module, init_fn_)
        else:
            init_fn_(module.weight)
        if module.bias is not None:
            torch.nn.init.zeros_(module.bias)
        if init_div_is_residual is not False and getattr(module, "_is_residual", False):
            with torch.no_grad():
                module.weight.div_(div_is_residual)
    elif isinstance(module, nn.Embedding):
        if emb_init_std is not None:
            std = emb_init_std
            if std == 0:
                warnings.warn("Embedding layer initialized to 0.")
            emb_init_fn_ = partial(torch.nn.init.normal_, mean=0.0, std=std)
            if verbose > 1:
                warnings.warn(
                    f"Embedding layer initialized using normal distribution with mean=0 and std={std!r}."
                )
        elif emb_init_uniform_lim is not None:
            lim = emb_init_uniform_lim
            if isinstance(lim, Sequence):
                if len(lim) > 2:
                    raise ValueError(
                        f"Uniform init requires a min and a max limit. User input: {lim}."
                    )
                if lim[0] == lim[1]:
                    warnings.warn(f"Embedding layer initialized to {lim[0]}.")
            else:
                # A scalar limit b is interpreted as the symmetric range (-b, b).
                if lim == 0:
                    warnings.warn("Embedding layer initialized to 0.")
                lim = [-lim, lim]
            (a, b) = lim
            emb_init_fn_ = partial(torch.nn.init.uniform_, a=a, b=b)
            if verbose > 1:
                warnings.warn(
                    f"Embedding layer initialized using uniform distribution in range {lim}."
                )
        else:
            emb_init_fn_ = init_fn_
        emb_init_fn_(module.weight)
    elif isinstance(module, tuple(set(NORM_CLASS_REGISTRY.values()))):
        if verbose > 1:
            warnings.warn(
                "Norm weights are set to 1. If norm layer has a bias it is initialized to 0."
            )
        if hasattr(module, "weight") and module.weight is not None:
            torch.nn.init.ones_(module.weight)
        if hasattr(module, "bias") and module.bias is not None:
            torch.nn.init.zeros_(module.bias)
    elif isinstance(module, nn.MultiheadAttention):
        if module._qkv_same_embed_dim:
            # Fused QKV: initialize each d_model-sized slice of in_proj_weight
            # separately so the per-matrix fan-in is respected.
            assert module.in_proj_weight is not None
            assert (
                module.q_proj_weight is None
                and module.k_proj_weight is None
                and (module.v_proj_weight is None)
            )
            assert d_model is not None
            _d = d_model
            splits = (0, _d, 2 * _d, 3 * _d)
            for s, e in zip(splits[:-1], splits[1:]):
                init_fn_(module.in_proj_weight[s:e])
        else:
            assert (
                module.q_proj_weight is not None
                and module.k_proj_weight is not None
                and (module.v_proj_weight is not None)
            )
            assert module.in_proj_weight is None
            init_fn_(module.q_proj_weight)
            init_fn_(module.k_proj_weight)
            init_fn_(module.v_proj_weight)
        if module.in_proj_bias is not None:
            torch.nn.init.zeros_(module.in_proj_bias)
        if module.bias_k is not None:
            torch.nn.init.zeros_(module.bias_k)
        if module.bias_v is not None:
            torch.nn.init.zeros_(module.bias_v)
        init_fn_(module.out_proj.weight)
        if init_div_is_residual is not False and getattr(
            module.out_proj, "_is_residual", False
        ):
            with torch.no_grad():
                module.out_proj.weight.div_(div_is_residual)
        if module.out_proj.bias is not None:
            torch.nn.init.zeros_(module.out_proj.bias)
    else:
        # Only fail when the module itself directly owns parameters;
        # containers whose parameters all live in children pass silently.
        for _ in module.parameters(recurse=False):
            raise NotImplementedError(
                f"{module.__class__.__name__} parameters are not initialized by param_init_fn."
            )
def kaiming_normal_param_init_fn_(
module: nn.Module,
n_layers: int,
d_model: Optional[int] = None,
init_div_is_residual: Union[int, float, str, bool] = True,
emb_init_std: Optional[float] = None,
emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
init_gain: float = 0,
fan_mode: str = "fan_in",
init_nonlinearity: str = "leaky_relu",
verbose: int = 0,
**kwargs,
):
del kwargs
if verbose > 1:
warnings.warn(
f"Using nn.init.kaiming_normal_ init fn with parameters: "
+ f"a={init_gain}, mode={fan_mode}, nonlinearity={init_nonlinearity}"
)
kaiming_normal_ = partial(
torch.nn.init.kaiming_normal_,
a=init_gain,
mode=fan_mode,
nonlinearity=init_nonlinearity,
)
generic_param_init_fn_(
module=module,
init_fn_=kaiming_normal_,
d_model=d_model,
n_layers=n_layers,
init_div_is_residual=init_div_is_residual,
emb_init_std=emb_init_std,
emb_init_uniform_lim=emb_init_uniform_lim,
verbose=verbose,
) | null |
178,684 | import math
import warnings
from collections.abc import Sequence
from functools import partial
from typing import Optional, Tuple, Union
import torch
from torch import nn
from .norm import NORM_CLASS_REGISTRY
def generic_param_init_fn_(
module: nn.Module,
init_fn_,
n_layers: int,
d_model: Optional[int] = None,
init_div_is_residual: Union[int, float, str, bool] = True,
emb_init_std: Optional[float] = None,
emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
verbose: int = 0,
**kwargs,
):
def xavier_uniform_param_init_fn_(
module: nn.Module,
n_layers: int,
d_model: Optional[int] = None,
init_div_is_residual: Union[int, float, str, bool] = True,
emb_init_std: Optional[float] = None,
emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
init_gain: float = 0,
verbose: int = 0,
**kwargs,
):
del kwargs
xavier_uniform_ = partial(torch.nn.init.xavier_uniform_, gain=init_gain)
if verbose > 1:
warnings.warn(
f"Using torch.nn.init.xavier_uniform_ init fn with parameters: "
+ f"gain={init_gain}"
)
generic_param_init_fn_(
module=module,
init_fn_=xavier_uniform_,
d_model=d_model,
n_layers=n_layers,
init_div_is_residual=init_div_is_residual,
emb_init_std=emb_init_std,
emb_init_uniform_lim=emb_init_uniform_lim,
verbose=verbose,
) | null |
178,685 | import math
import warnings
from collections.abc import Sequence
from functools import partial
from typing import Optional, Tuple, Union
import torch
from torch import nn
from .norm import NORM_CLASS_REGISTRY
def generic_param_init_fn_(
    module: nn.Module,
    init_fn_,
    n_layers: int,
    d_model: Optional[int] = None,
    init_div_is_residual: Union[int, float, str, bool] = True,
    emb_init_std: Optional[float] = None,
    emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
    verbose: int = 0,
    **kwargs,
):
    """Initialize ``module``'s own parameters in place, dispatching on type.

    Supported module types: ``nn.Linear`` (weights via ``init_fn_``),
    ``nn.Embedding`` (optionally overridden by ``emb_init_std`` or
    ``emb_init_uniform_lim``), the norm classes in ``NORM_CLASS_REGISTRY``
    (weight -> 1, bias -> 0), and ``nn.MultiheadAttention``.  All bias
    parameters are zeroed.  Weights of modules flagged ``_is_residual``
    are divided by a residual rescaling factor.

    Args:
        module: module whose direct (non-recursive) parameters to initialize.
        init_fn_: in-place initializer applied to weight tensors.
        n_layers: network depth; the default residual rescale is
            ``sqrt(2 * n_layers)``.
        d_model: model width; only required to split the fused in-proj
            weight of ``nn.MultiheadAttention``.
        init_div_is_residual: ``True`` for the default rescale, ``False``
            to disable it, or a numeric value / numeric string override.
        emb_init_std: if set, embeddings are drawn from normal(0, std).
        emb_init_uniform_lim: if set, embeddings are drawn uniformly; either
            a (min, max) pair or a scalar ``b`` meaning ``(-b, b)``.
        verbose: emit explanatory warnings when > 1.

    Raises:
        ValueError: for an unparseable ``init_div_is_residual`` or an
            ``emb_init_uniform_lim`` with more than two entries.
        NotImplementedError: if ``module`` directly owns parameters but its
            type is not handled above.
    """
    del kwargs  # unused; accepted so one config dict can drive every init fn
    if verbose > 1:
        warnings.warn("If model has bias parameters they are initialized to 0.")
    # Resolve the divisor applied to `_is_residual` weights.  (The original
    # code also contained a no-op self-assignment and a dead store before the
    # raise below; both removed.)
    if init_div_is_residual is False:
        div_is_residual = 1.0
    elif init_div_is_residual is True:
        div_is_residual = math.sqrt(2 * n_layers)
    elif isinstance(init_div_is_residual, (float, int)):
        div_is_residual = init_div_is_residual
    elif isinstance(init_div_is_residual, str) and init_div_is_residual.isnumeric():
        div_is_residual = float(init_div_is_residual)
    else:
        raise ValueError(
            f"Expected init_div_is_residual to be boolean or numeric, got {init_div_is_residual}"
        )
    if init_div_is_residual is not False:
        if verbose > 1:
            warnings.warn(
                f"Initializing _is_residual layers then dividing them by {div_is_residual:.3f}. "
                + "Set `init_div_is_residual: false` in init config to disable this."
            )
    if isinstance(module, nn.Linear):
        # Fused layers (e.g. packed QKV projections) are initialized
        # slice-wise by the helper.
        if hasattr(module, "_fused"):
            fused_init_helper_(module, init_fn_)
        else:
            init_fn_(module.weight)
        if module.bias is not None:
            torch.nn.init.zeros_(module.bias)
        if init_div_is_residual is not False and getattr(module, "_is_residual", False):
            with torch.no_grad():
                module.weight.div_(div_is_residual)
    elif isinstance(module, nn.Embedding):
        if emb_init_std is not None:
            std = emb_init_std
            if std == 0:
                warnings.warn("Embedding layer initialized to 0.")
            emb_init_fn_ = partial(torch.nn.init.normal_, mean=0.0, std=std)
            if verbose > 1:
                warnings.warn(
                    f"Embedding layer initialized using normal distribution with mean=0 and std={std!r}."
                )
        elif emb_init_uniform_lim is not None:
            lim = emb_init_uniform_lim
            if isinstance(lim, Sequence):
                if len(lim) > 2:
                    raise ValueError(
                        f"Uniform init requires a min and a max limit. User input: {lim}."
                    )
                if lim[0] == lim[1]:
                    warnings.warn(f"Embedding layer initialized to {lim[0]}.")
            else:
                # A scalar limit b is interpreted as the symmetric range (-b, b).
                if lim == 0:
                    warnings.warn("Embedding layer initialized to 0.")
                lim = [-lim, lim]
            (a, b) = lim
            emb_init_fn_ = partial(torch.nn.init.uniform_, a=a, b=b)
            if verbose > 1:
                warnings.warn(
                    f"Embedding layer initialized using uniform distribution in range {lim}."
                )
        else:
            emb_init_fn_ = init_fn_
        emb_init_fn_(module.weight)
    elif isinstance(module, tuple(set(NORM_CLASS_REGISTRY.values()))):
        if verbose > 1:
            warnings.warn(
                "Norm weights are set to 1. If norm layer has a bias it is initialized to 0."
            )
        if hasattr(module, "weight") and module.weight is not None:
            torch.nn.init.ones_(module.weight)
        if hasattr(module, "bias") and module.bias is not None:
            torch.nn.init.zeros_(module.bias)
    elif isinstance(module, nn.MultiheadAttention):
        if module._qkv_same_embed_dim:
            # Fused QKV: initialize each d_model-sized slice of in_proj_weight
            # separately so the per-matrix fan-in is respected.
            assert module.in_proj_weight is not None
            assert (
                module.q_proj_weight is None
                and module.k_proj_weight is None
                and (module.v_proj_weight is None)
            )
            assert d_model is not None
            _d = d_model
            splits = (0, _d, 2 * _d, 3 * _d)
            for s, e in zip(splits[:-1], splits[1:]):
                init_fn_(module.in_proj_weight[s:e])
        else:
            assert (
                module.q_proj_weight is not None
                and module.k_proj_weight is not None
                and (module.v_proj_weight is not None)
            )
            assert module.in_proj_weight is None
            init_fn_(module.q_proj_weight)
            init_fn_(module.k_proj_weight)
            init_fn_(module.v_proj_weight)
        if module.in_proj_bias is not None:
            torch.nn.init.zeros_(module.in_proj_bias)
        if module.bias_k is not None:
            torch.nn.init.zeros_(module.bias_k)
        if module.bias_v is not None:
            torch.nn.init.zeros_(module.bias_v)
        init_fn_(module.out_proj.weight)
        if init_div_is_residual is not False and getattr(
            module.out_proj, "_is_residual", False
        ):
            with torch.no_grad():
                module.out_proj.weight.div_(div_is_residual)
        if module.out_proj.bias is not None:
            torch.nn.init.zeros_(module.out_proj.bias)
    else:
        # Only fail when the module itself directly owns parameters;
        # containers whose parameters all live in children pass silently.
        for _ in module.parameters(recurse=False):
            raise NotImplementedError(
                f"{module.__class__.__name__} parameters are not initialized by param_init_fn."
            )
def xavier_normal_param_init_fn_(
module: nn.Module,
n_layers: int,
d_model: Optional[int] = None,
init_div_is_residual: Union[int, float, str, bool] = True,
emb_init_std: Optional[float] = None,
emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
init_gain: float = 0,
verbose: int = 0,
**kwargs,
):
xavier_normal_ = partial(torch.nn.init.xavier_normal_, gain=init_gain)
if verbose > 1:
warnings.warn(
f"Using torch.nn.init.xavier_normal_ init fn with parameters: "
+ f"gain={init_gain}"
)
generic_param_init_fn_(
module=module,
init_fn_=xavier_normal_,
d_model=d_model,
n_layers=n_layers,
init_div_is_residual=init_div_is_residual,
emb_init_std=emb_init_std,
emb_init_uniform_lim=emb_init_uniform_lim,
verbose=verbose,
) | null |
178,686 | import math
import warnings
from types import MethodType
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from transformers.models.bloom.modeling_bloom import (
BaseModelOutputWithPastAndCrossAttentions, BloomForCausalLM, BloomModel,
CausalLMOutputWithCrossAttentions, CrossEntropyLoss)
from transformers.models.bloom.modeling_bloom import \
_expand_mask as _expand_mask_bloom
from transformers.models.bloom.modeling_bloom import \
_make_causal_mask as _make_causal_mask_bloom
from transformers.models.bloom.modeling_bloom import logging
from transformers.models.gpt2.modeling_gpt2 import GPT2LMHeadModel
from transformers.models.gpt_neo.modeling_gpt_neo import GPTNeoForCausalLM
from transformers.models.gpt_neox.modeling_gpt_neox import GPTNeoXForCausalLM
from transformers.models.gptj.modeling_gptj import GPTJForCausalLM
from transformers.models.opt.modeling_opt import OPTForCausalLM
from transformers.models.opt.modeling_opt import \
_expand_mask as _expand_mask_opt
from transformers.models.opt.modeling_opt import \
_make_causal_mask as _make_causal_mask_opt
# GPT-family causal-LM classes that _convert_gpt_causal_lm_to_prefix_lm accepts.
_SUPPORTED_GPT_MODELS = (
    GPT2LMHeadModel,
    GPTJForCausalLM,
    GPTNeoForCausalLM,
    GPTNeoXForCausalLM,
)
def _convert_gpt_causal_lm_to_prefix_lm(model: CAUSAL_GPT_TYPES) -> CAUSAL_GPT_TYPES:
    """Converts a GPT-style Causal LM to a Prefix LM, in place.

    Wraps the model's ``forward``/``generate`` so that, when a
    ``bidirectional_mask`` is supplied, each attention layer's causal ``bias``
    buffer is temporarily widened to allow attention within the prefix, and
    restored to strictly lower-triangular afterwards.

    Supported HuggingFace model classes:
        - `GPT2LMHeadModel`
        - `GPTNeoForCausalLM`
        - `GPTNeoXForCausalLM`
        - `GPTJForCausalLM`
    See `convert_hf_causal_lm_to_prefix_lm` for more details.
    """
    # Idempotent: a model that was already converted is returned unchanged.
    if hasattr(model, "_prefix_lm_converted"):
        return model
    assert isinstance(model, _SUPPORTED_GPT_MODELS)
    assert (
        model.config.add_cross_attention == False
    ), "Only supports GPT-style decoder-only models"
    def _get_attn_modules(model: CAUSAL_GPT_TYPES) -> List[torch.nn.Module]:
        """Helper that gets a list of the model's attention modules.
        Each module has a `bias` buffer used for causal masking. The Prefix LM
        conversion adds logic to dynamically manipulate these biases to support
        Prefix LM attention masking.
        """
        attn_modules = []
        # GPT-NeoX nests its transformer blocks differently from GPT-2/J/Neo.
        if isinstance(model, GPTNeoXForCausalLM):
            blocks = model.gpt_neox.layers
        else:
            blocks = model.transformer.h
        for block in blocks:
            if isinstance(model, GPTNeoForCausalLM):
                # GPT-Neo mixes attention types; only "global" layers are
                # collected here (local layers are skipped).
                if block.attn.attention_type != "global":
                    continue
                attn_module = block.attn.attention
            elif isinstance(model, GPTNeoXForCausalLM):
                attn_module = block.attention
            else:
                attn_module = block.attn
            attn_modules.append(attn_module)
        return attn_modules
    # Stash the originals so the wrappers below can delegate to them.
    setattr(model, "_original_forward", getattr(model, "forward"))
    setattr(model, "_original_generate", getattr(model, "generate"))
    def forward(
        self: CAUSAL_GPT_TYPES,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        bidirectional_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        """Wraps original forward to enable PrefixLM attention."""
        def call_og_forward():
            # GPT-NeoX's forward does not accept token_type_ids/position_ids,
            # so those are forwarded only for the other model families.
            if isinstance(self, GPTNeoXForCausalLM):
                return self._original_forward(
                    input_ids=input_ids,
                    past_key_values=past_key_values,
                    attention_mask=attention_mask,
                    head_mask=head_mask,
                    inputs_embeds=inputs_embeds,
                    labels=labels,
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                    output_hidden_states=output_hidden_states,
                    return_dict=return_dict,
                )
            else:
                return self._original_forward(
                    input_ids=input_ids,
                    past_key_values=past_key_values,
                    attention_mask=attention_mask,
                    token_type_ids=token_type_ids,
                    position_ids=position_ids,
                    head_mask=head_mask,
                    inputs_embeds=inputs_embeds,
                    labels=labels,
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                    output_hidden_states=output_hidden_states,
                    return_dict=return_dict,
                )
        # Without a bidirectional mask, behave as a plain causal LM.
        if bidirectional_mask is None:
            return call_og_forward()
        assert isinstance(bidirectional_mask, torch.Tensor)
        attn_modules = _get_attn_modules(model)
        (b, s) = bidirectional_mask.shape
        # The registered causal `bias` buffer fixes the maximum sequence length.
        max_length = attn_modules[0].bias.shape[-1]
        if s > max_length:
            raise ValueError(
                f"bidirectional_mask sequence length (={s}) exceeds the "
                + f"max length allowed by the model ({max_length})."
            )
        assert s <= max_length
        if s < max_length:
            # Right-pad the mask with zeros out to the bias buffer size.
            pad = torch.zeros(
                (int(b), int(max_length - s)),
                dtype=bidirectional_mask.dtype,
                device=bidirectional_mask.device,
            )
            bidirectional_mask = torch.cat([bidirectional_mask, pad], dim=1)
        # Add broadcast dims: (b, s) -> (b, 1, 1, s).
        bidirectional = bidirectional_mask.unsqueeze(1).unsqueeze(1)
        # Temporarily OR the bidirectional mask into each causal bias so the
        # prefix positions may attend to each other.
        for attn_module in attn_modules:
            attn_module.bias.data = torch.logical_or(
                attn_module.bias.data, bidirectional
            )
        output = call_og_forward()
        # Restore the strictly lower-triangular causal masks afterwards.
        for attn_module in attn_modules:
            attn_module.bias.data = torch.tril(attn_module.bias.data[0, 0])[None, None]
        return output
    def generate(self: CAUSAL_GPT_TYPES, *args: tuple, **kwargs: Dict[str, Any]):
        """Wraps original generate to enable PrefixLM attention."""
        attn_modules = _get_attn_modules(model)
        # Set every attention bias to all-ones for the duration of generation...
        for attn_module in attn_modules:
            attn_module.bias.data[:] = 1
        output = self._original_generate(*args, **kwargs)
        # ...then restore the lower-triangular causal masks.
        for attn_module in attn_modules:
            attn_module.bias.data = torch.tril(attn_module.bias.data[0, 0])[None, None]
        return output
    # Bind the wrappers as methods and mark the model as converted.
    setattr(model, "forward", MethodType(forward, model))
    setattr(model, "generate", MethodType(generate, model))
    setattr(model, "_prefix_lm_converted", True)
    return model
def _convert_bloom_causal_lm_to_prefix_lm(model: BloomForCausalLM) -> BloomForCausalLM:
"""Converts a BLOOM Causal LM to a Prefix LM.
Supported HuggingFace model classes:
- `BloomForCausalLM`
See `convert_hf_causal_lm_to_prefix_lm` for more details.
"""
if hasattr(model, "_prefix_lm_converted"):
return model
assert isinstance(model, BloomForCausalLM)
assert (
model.config.add_cross_attention == False
), "Only supports BLOOM decoder-only models"
def _prepare_attn_mask(
self: BloomModel,
attention_mask: torch.Tensor,
bidirectional_mask: Optional[torch.Tensor],
input_shape: Tuple[int, int],
past_key_values_length: int,
) -> torch.BoolTensor:
combined_attention_mask = None
device = attention_mask.device
(_, src_length) = input_shape
if src_length > 1:
combined_attention_mask = _make_causal_mask_bloom(
input_shape,
device=device,
past_key_values_length=past_key_values_length,
)
if bidirectional_mask is not None:
assert attention_mask.shape == bidirectional_mask.shape
expanded_bidirectional_mask = _expand_mask_bloom(
bidirectional_mask, tgt_length=src_length
)
combined_attention_mask = torch.logical_and(
combined_attention_mask, expanded_bidirectional_mask
)
expanded_attn_mask = _expand_mask_bloom(attention_mask, tgt_length=src_length)
combined_attention_mask = (
expanded_attn_mask
if combined_attention_mask is None
else expanded_attn_mask | combined_attention_mask
)
return combined_attention_mask
def _build_alibi_tensor(
self: BloomModel,
batch_size: int,
query_length: int,
key_length: int,
dtype: torch.dtype,
device: torch.device,
) -> torch.Tensor:
num_heads = self.config.n_head
closest_power_of_2 = 2 ** math.floor(math.log2(num_heads))
base = torch.tensor(
2 ** (-(2 ** (-(math.log2(closest_power_of_2) - 3)))),
device=device,
dtype=torch.float32,
)
powers = torch.arange(
1, 1 + closest_power_of_2, device=device, dtype=torch.int32
)
slopes = torch.pow(base, powers)
if closest_power_of_2 != num_heads:
extra_base = torch.tensor(
2 ** (-(2 ** (-(math.log2(2 * closest_power_of_2) - 3)))),
device=device,
dtype=torch.float32,
)
num_remaining_heads = min(
closest_power_of_2, num_heads - closest_power_of_2
)
extra_powers = torch.arange(
1, 1 + 2 * num_remaining_heads, 2, device=device, dtype=torch.int32
)
slopes = torch.cat([slopes, torch.pow(extra_base, extra_powers)], dim=0)
qa = torch.arange(query_length, device=device, dtype=torch.int32).view(-1, 1)
ka = torch.arange(key_length, device=device, dtype=torch.int32).view(1, -1)
diffs = qa - ka + key_length - query_length
diffs = -diffs.abs()
alibi = slopes.view(1, num_heads, 1, 1) * diffs.view(
1, 1, query_length, key_length
)
alibi = alibi.expand(batch_size, -1, -1, -1).reshape(
-1, query_length, key_length
)
return alibi.to(dtype)
KeyValueT = Tuple[torch.Tensor, torch.Tensor]
def forward(
self: BloomModel,
input_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Tuple[KeyValueT, ...]] = None,
attention_mask: Optional[torch.Tensor] = None,
bidirectional_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**deprecated_arguments,
) -> Union[Tuple[torch.Tensor, ...], BaseModelOutputWithPastAndCrossAttentions]:
if deprecated_arguments.pop("position_ids", False) is not False:
warnings.warn(
"`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. "
+ "You can safely ignore passing `position_ids`.",
FutureWarning,
)
if len(deprecated_arguments) > 0:
raise ValueError(f"Got unexpected arguments: {deprecated_arguments}")
output_attentions = (
output_attentions
if output_attentions is not None
else self.config.output_attentions
)
output_hidden_states = (
output_hidden_states
if output_hidden_states is not None
else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
if input_ids is not None and inputs_embeds is not None:
raise ValueError(
"You cannot specify both input_ids and inputs_embeds at the same time"
)
elif input_ids is not None:
(batch_size, seq_length) = input_ids.shape
elif inputs_embeds is not None:
(batch_size, seq_length, _) = inputs_embeds.shape
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if past_key_values is None:
past_key_values = tuple([None] * len(self.h))
head_mask = self.get_head_mask(head_mask, self.config.n_layer)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
hidden_states = self.word_embeddings_layernorm(inputs_embeds)
presents = () if use_cache else None
all_self_attentions = () if output_attentions else None
all_hidden_states = () if output_hidden_states else None
seq_length_with_past = seq_length
past_key_values_length = 0
if past_key_values[0] is not None:
tmp = past_key_values[0][0]
past_key_values_length = tmp.shape[2]
seq_length_with_past = seq_length_with_past + past_key_values_length
if attention_mask is None:
attention_mask = torch.ones(
(batch_size, seq_length_with_past), device=hidden_states.device
)
else:
attention_mask = attention_mask.to(hidden_states.device)
alibi = self._build_alibi_tensor(
batch_size=batch_size,
query_length=seq_length,
key_length=seq_length_with_past,
dtype=hidden_states.dtype,
device=hidden_states.device,
)
causal_mask = self._prepare_attn_mask(
attention_mask,
bidirectional_mask,
input_shape=(batch_size, seq_length),
past_key_values_length=past_key_values_length,
)
for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
if output_hidden_states:
hst = (hidden_states,)
all_hidden_states = all_hidden_states + hst
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return module(
*inputs,
use_cache=use_cache,
output_attentions=output_attentions,
)
return custom_forward
outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(block),
hidden_states,
alibi,
causal_mask,
head_mask[i],
)
else:
outputs = block(
hidden_states,
layer_past=layer_past,
attention_mask=causal_mask,
head_mask=head_mask[i],
use_cache=use_cache,
output_attentions=output_attentions,
alibi=alibi,
)
hidden_states = outputs[0]
if use_cache is True:
presents = presents + (outputs[1],)
if output_attentions:
oa = (outputs[2 if use_cache else 1],)
all_self_attentions = all_self_attentions + oa
hidden_states = self.ln_f(hidden_states)
if output_hidden_states:
hst = (hidden_states,)
all_hidden_states = all_hidden_states + hst
if not return_dict:
return tuple(
(
v
for v in [
hidden_states,
presents,
all_hidden_states,
all_self_attentions,
]
if v is not None
)
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=presents,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
setattr(
model.transformer,
"_prepare_attn_mask",
MethodType(_prepare_attn_mask, model.transformer),
)
setattr(
model.transformer,
"_build_alibi_tensor",
MethodType(_build_alibi_tensor, model.transformer),
)
setattr(model.transformer, "forward", MethodType(forward, model.transformer))
KeyValueT = Tuple[torch.Tensor, torch.Tensor]
def forward(
    self: BloomForCausalLM,
    input_ids: Optional[torch.LongTensor] = None,
    past_key_values: Optional[Tuple[KeyValueT, ...]] = None,
    attention_mask: Optional[torch.Tensor] = None,
    bidirectional_mask: Optional[torch.Tensor] = None,
    head_mask: Optional[torch.Tensor] = None,
    inputs_embeds: Optional[torch.Tensor] = None,
    labels: Optional[torch.Tensor] = None,
    use_cache: Optional[bool] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
    **deprecated_arguments,
) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
    """Replacement forward method for BloomCausalLM.

    Identical to the stock BLOOM forward except that the extra
    `bidirectional_mask` argument is threaded through to the (patched)
    transformer, which folds it into the causal mask so prefix tokens
    can attend to one another bidirectionally.
    """
    # Mirror upstream BLOOM's deprecation handling for `position_ids`.
    if deprecated_arguments.pop("position_ids", False) is not False:
        warnings.warn(
            "`position_ids` have no functionality in BLOOM and will be removed "
            + "in v5.0.0. You can safely ignore passing `position_ids`.",
            FutureWarning,
        )
    if len(deprecated_arguments) > 0:
        raise ValueError(f"Got unexpected arguments: {deprecated_arguments}")
    return_dict = (
        return_dict if return_dict is not None else self.config.use_return_dict
    )
    # The patched transformer forward accepts `bidirectional_mask`.
    transformer_outputs = self.transformer(
        input_ids,
        past_key_values=past_key_values,
        attention_mask=attention_mask,
        bidirectional_mask=bidirectional_mask,
        head_mask=head_mask,
        inputs_embeds=inputs_embeds,
        use_cache=use_cache,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
    )
    hidden_states = transformer_outputs[0]
    lm_logits = self.lm_head(hidden_states)
    loss = None
    if labels is not None:
        # Standard next-token objective: logits at position t are scored
        # against the label at position t+1.
        shift_logits = lm_logits[..., :-1, :].contiguous()
        shift_labels = labels[..., 1:].contiguous()
        (batch_size, seq_length, vocab_size) = shift_logits.shape
        loss_fct = CrossEntropyLoss()
        loss = loss_fct(
            shift_logits.view(batch_size * seq_length, vocab_size),
            shift_labels.view(batch_size * seq_length),
        )
    if not return_dict:
        output = (lm_logits,) + transformer_outputs[1:]
        return (loss,) + output if loss is not None else output
    return CausalLMOutputWithCrossAttentions(
        loss=loss,
        logits=lm_logits,
        past_key_values=transformer_outputs.past_key_values,
        hidden_states=transformer_outputs.hidden_states,
        attentions=transformer_outputs.attentions,
    )
def prepare_inputs_for_generation(
    self: BloomForCausalLM,
    input_ids: torch.LongTensor,
    past: Optional[torch.Tensor] = None,
    attention_mask: Optional[torch.Tensor] = None,
    **kwargs,
) -> dict:
    """Build the model inputs for one generation step.

    On the first step (no cache yet) the whole prompt is treated as the
    prefix, so an all-ones `bidirectional_mask` is supplied; on later steps
    only the newest token is fed and no bidirectional mask is needed.
    """
    if past:
        # A cache exists: only the most recent token needs processing.
        input_ids = input_ids[:, -1].unsqueeze(-1)
        bidirectional_mask = None
        # Convert the cache layout to BLOOM's format when it still has the
        # per-batch shape produced on the first step.
        if past[0][0].shape[0] == input_ids.shape[0]:
            past = self._convert_to_bloom_cache(past)
    else:
        # First step: the entire prompt is the (fully bidirectional) prefix.
        bidirectional_mask = torch.ones_like(input_ids)
    return {
        "input_ids": input_ids,
        "past_key_values": past,
        "use_cache": True,
        "attention_mask": attention_mask,
        "bidirectional_mask": bidirectional_mask,
    }
setattr(model, "forward", MethodType(forward, model))
setattr(
model,
"prepare_inputs_for_generation",
MethodType(prepare_inputs_for_generation, model),
)
setattr(model, "_prefix_lm_converted", True)
return model
def _convert_opt_causal_lm_to_prefix_lm(model: OPTForCausalLM) -> OPTForCausalLM:
    """Converts an OPT Causal LM to a Prefix LM.

    Supported HuggingFace model classes:
        - `OPTForCausalLM`

    See `convert_hf_causal_lm_to_prefix_lm` for more details.
    """
    # Idempotent: a model is only converted once.
    if hasattr(model, "_prefix_lm_converted"):
        return model
    assert isinstance(model, OPTForCausalLM)
    assert (
        model.config.add_cross_attention == False
    ), "Only supports OPT decoder-only models"
    # Keep the stock implementations around; the wrappers below delegate to them.
    setattr(model, "_original_forward", getattr(model, "forward"))
    setattr(model, "_original_generate", getattr(model, "generate"))
    # Side-channel used to pass the prefix mask into the decoder's mask builder:
    # None = plain causal, a tensor = prefix-LM forward, "g" = fully
    # bidirectional (used while encoding the prompt during generation).
    model.model.decoder.bidirectional_mask = None

    def _prepare_decoder_attention_mask(
        self, attention_mask, input_shape, inputs_embeds, past_key_values_length
    ):
        # Replacement mask builder that honors `self.bidirectional_mask`.
        combined_attention_mask = None
        if input_shape[-1] > 1:
            if self.bidirectional_mask == "g":
                # Prompt encoding during generate(): no causal masking at all.
                (bsz, src_length) = input_shape
                combined_attention_mask = torch.zeros(
                    (bsz, 1, src_length, src_length + past_key_values_length),
                    dtype=inputs_embeds.dtype,
                    device=inputs_embeds.device,
                )
            else:
                combined_attention_mask = _make_causal_mask_opt(
                    input_shape,
                    inputs_embeds.dtype,
                    past_key_values_length=past_key_values_length,
                ).to(inputs_embeds.device)
                if self.bidirectional_mask is not None:
                    # Un-mask prefix positions on top of the causal mask
                    # (maximum of the two additive masks keeps the less
                    # restrictive entry).
                    assert attention_mask.shape == self.bidirectional_mask.shape
                    expanded_bidirectional_mask = _expand_mask_opt(
                        self.bidirectional_mask,
                        inputs_embeds.dtype,
                        tgt_len=input_shape[-1],
                    ).to(inputs_embeds.device)
                    combined_attention_mask = torch.maximum(
                        expanded_bidirectional_mask, combined_attention_mask
                    )
        if attention_mask is not None:
            # Fold in the padding mask as upstream OPT does.
            expanded_attn_mask = _expand_mask_opt(
                attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
            ).to(inputs_embeds.device)
            combined_attention_mask = (
                expanded_attn_mask
                if combined_attention_mask is None
                else expanded_attn_mask + combined_attention_mask
            )
        return combined_attention_mask

    setattr(
        model.model.decoder,
        "_prepare_decoder_attention_mask",
        MethodType(_prepare_decoder_attention_mask, model.model.decoder),
    )

    def forward(
        self: OPTForCausalLM,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        bidirectional_mask: Optional[torch.ByteTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        """Wrapper forward: stash `bidirectional_mask` on the decoder, call the
        stock forward, and always clear the stash afterwards."""

        def call_og_forward():
            return self._original_forward(
                input_ids=input_ids,
                attention_mask=attention_mask,
                head_mask=head_mask,
                past_key_values=past_key_values,
                inputs_embeds=inputs_embeds,
                labels=labels,
                use_cache=use_cache,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )

        if bidirectional_mask is None:
            # No prefix information supplied: behave exactly like a causal LM.
            return call_og_forward()
        self.model.decoder.bidirectional_mask = bidirectional_mask
        # Bare except is deliberate: always reset the stash, then re-raise.
        try:
            outputs = call_og_forward()
        except:
            self.model.decoder.bidirectional_mask = None
            raise
        self.model.decoder.bidirectional_mask = None
        return outputs

    def generate(self: OPTForCausalLM, *args: tuple, **kwargs: Dict[str, Any]):
        """Wraps original generate to enable PrefixLM-style attention."""
        # "g" makes the prompt encoding fully bidirectional; reset on exit.
        self.model.decoder.bidirectional_mask = "g"
        try:
            output = self._original_generate(*args, **kwargs)
        except:
            self.model.decoder.bidirectional_mask = None
            raise
        self.model.decoder.bidirectional_mask = None
        return output

    setattr(model, "forward", MethodType(forward, model))
    setattr(model, "generate", MethodType(generate, model))
    # Mark as converted so repeated calls are no-ops.
    setattr(model, "_prefix_lm_converted", True)
    return model
# All HF model classes that convert_hf_causal_lm_to_prefix_lm can convert
# (the GPT family plus BLOOM and OPT).
_SUPPORTED_HF_MODELS = _SUPPORTED_GPT_MODELS + (BloomForCausalLM, OPTForCausalLM)
# Type alias for any convertible causal-LM model, used in the converter's
# signature below.
CAUSAL_LM_TYPES = Union[
    GPT2LMHeadModel,
    GPTJForCausalLM,
    GPTNeoForCausalLM,
    GPTNeoXForCausalLM,
    BloomForCausalLM,
    OPTForCausalLM,
]
The provided code snippet includes necessary dependencies for implementing the `convert_hf_causal_lm_to_prefix_lm` function. Write a Python function `def convert_hf_causal_lm_to_prefix_lm(model: CAUSAL_LM_TYPES) -> CAUSAL_LM_TYPES` to solve the following problem:
Converts a HuggingFace Causal LM to a Prefix LM. Supported HuggingFace model classes: - `GPT2LMHeadModel` - `GPTNeoForCausalLM` - `GPTNeoXForCausalLM` - `GPTJForCausalLM` - `BloomForCausalLM` - `OPTForCausalLM` Conversion to a Prefix LM is done by modifying the `forward` method, and possibly also the `generate` method and/or select underlying methods depending on the model class. These changes preserve the model API, but add a new input to `forward`: "bidirectional_mask". Notes on training: To actually train the converted model as a Prefix LM, training batches will need to indicate the prefix/target structure by including `bidirectional_mask` as part of the batch inputs. **This is not a standard input and requires custom layers either within or after your dataloader.** In addition to adding `bidirectional_mask` to the batch, this custom code should modify `labels` such that `batch['labels'][batch['bidirectional_mask'] == 1] == -100`. That is, the prefix portion of the sequence should not generate any loss. Loss should only be generated by the target portion of the sequence. Notes on `GPTNeoForCausalLM`: To simplify the implementation, "global" and "local" attention layers are handled differently. For "global" layers, we handle conversion as described above. For "local" layers, which use a causal attention mask within a restricted local window, we do not alter the masking. Notes on `forward` method conversion: After conversion, the `forward` method will handle a new input, `bidirectional_mask`, which should be a [batch_size, seq_length] byte tensor, where 1 indicates token positions belonging to the prefix (prefix tokens can attend to one another bidirectionally), and 0 indicates token positions belonging to the target. The new `forward` method will incorporate `bidirectional_mask` (if supplied) into the existing causal mask, call the original `forward` method, and (if the causal mask is a buffer) reset the causal masks before returning the result.
Notes on `generate` method conversion: After conversion, the `generate` method will have the same signature but will internally convert all causal masks to be purely bidirectional, call the original `generate` method, and (where appropriate) reset the causal masks before returning the result. This works thanks to the logic of the HuggingFace `generate` API, which first encodes the token "prompt" passed to `generate` (which is treated as the prefix) and then sequentially generates each new token. Encodings are cached as generation happens, so all prefix tokens can attend to one another (as expected in a Prefix LM) and generated tokens can only attend to prefix tokens and previously-generated tokens (also as expected in a Prefix LM). To preserve the API, the original methods are renamed to `_original_forward` and `_original_generate`, and replaced with new `forward` and `generate` methods that wrap them, respectively. Although implementation details vary by model class.
Here is the function:
def convert_hf_causal_lm_to_prefix_lm(model: CAUSAL_LM_TYPES) -> CAUSAL_LM_TYPES:
    """Convert a supported HuggingFace causal LM into a Prefix LM, in place.

    Dispatches on the model class to the matching converter. The converted
    model keeps its public API, but `forward` gains a new input,
    `bidirectional_mask`: a [batch_size, seq_length] byte tensor where 1 marks
    prefix positions (which may attend to each other bidirectionally) and 0
    marks target positions (causal attention only). For training, batches must
    supply this mask themselves, and prefix positions should be excluded from
    the loss (``batch['labels'][batch['bidirectional_mask'] == 1] == -100``).
    `generate` is wrapped (where needed) so that the prompt is encoded
    bidirectionally while newly generated tokens remain causal; the original
    methods are kept as `_original_forward` / `_original_generate` by the
    per-class converters. For `GPTNeoForCausalLM`, only "global" attention
    layers are converted; "local" windowed layers keep their causal masking.

    Raises:
        TypeError: if `model` is not one of the supported classes
            (GPT2/GPT-J/GPT-Neo/GPT-NeoX/BLOOM/OPT causal LMs).
    """
    # Guard-clause dispatch: first matching converter wins.
    if isinstance(model, _SUPPORTED_GPT_MODELS):
        return _convert_gpt_causal_lm_to_prefix_lm(model)
    if isinstance(model, BloomForCausalLM):
        return _convert_bloom_causal_lm_to_prefix_lm(model)
    if isinstance(model, OPTForCausalLM):
        return _convert_opt_causal_lm_to_prefix_lm(model)
    raise TypeError(
        "Cannot convert model to Prefix LM. "
        "Model does not belong to set of supported HF models:"
        f"\n{_SUPPORTED_HF_MODELS}"
    )
Notes on `generate` method conversion: After conversion, the `generate` method will have the same signature but will internally convert all causal masks to be purely bidirectional, call the original `generate` method, and (where appropriate) reset the causal masks before returning the result. This works thanks to the logic of the HuggingFace `generate` API, which first encodes the token "prompt" passed to `generate` (which is treated as the prefix) and then sequentially generates each new token. Encodings are cached as generation happens, so all prefix tokens can attend to one another (as expected in a Prefix LM) and generated tokens can only attend to prefix tokens and previously-generated tokens (also as expected in a Prefix LM). To preserve the API, the original methods are renamed to `_original_forward` and `_original_generate`, and replaced with new `forward` and `generate` methods that wrap them, respectively. Although implementation details vary by model class. |
178,687 | import math
import warnings
from types import MethodType
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from transformers.models.bloom.modeling_bloom import (
BaseModelOutputWithPastAndCrossAttentions, BloomForCausalLM, BloomModel,
CausalLMOutputWithCrossAttentions, CrossEntropyLoss)
from transformers.models.bloom.modeling_bloom import \
_expand_mask as _expand_mask_bloom
from transformers.models.bloom.modeling_bloom import \
_make_causal_mask as _make_causal_mask_bloom
from transformers.models.bloom.modeling_bloom import logging
from transformers.models.gpt2.modeling_gpt2 import GPT2LMHeadModel
from transformers.models.gpt_neo.modeling_gpt_neo import GPTNeoForCausalLM
from transformers.models.gpt_neox.modeling_gpt_neox import GPTNeoXForCausalLM
from transformers.models.gptj.modeling_gptj import GPTJForCausalLM
from transformers.models.opt.modeling_opt import OPTForCausalLM
from transformers.models.opt.modeling_opt import \
_expand_mask as _expand_mask_opt
from transformers.models.opt.modeling_opt import \
_make_causal_mask as _make_causal_mask_opt
The provided code snippet includes necessary dependencies for implementing the `add_bidirectional_mask_if_missing` function. Write a Python function `def add_bidirectional_mask_if_missing(batch: Dict[str, Any])` to solve the following problem:
Attempts to add bidirectional_mask to batch if missing. Raises: KeyError if bidirectional_mask is missing and can't be inferred
Here is the function:
def add_bidirectional_mask_if_missing(batch: Dict[str, Any]):
    """Attempts to add bidirectional_mask to batch if missing.

    For ICL-task batches the mask is the attention mask with the continuation
    (target) positions zeroed out; otherwise it is inferred as the attended
    positions whose label is -100 (i.e. the loss-masked prefix).

    Raises:
        KeyError if bidirectional_mask is missing and can't be inferred
    """
    if "bidirectional_mask" in batch:
        return
    if batch.get("mode", None) == "icl_task":
        mask = batch["attention_mask"].clone()
        for row, continuation_indices in enumerate(batch["continuation_indices"]):
            mask[row, continuation_indices] = 0
        batch["bidirectional_mask"] = mask
    elif "labels" in batch and "attention_mask" in batch:
        attended = torch.eq(batch["attention_mask"], 1)
        loss_masked = torch.eq(batch["labels"], -100)
        batch["bidirectional_mask"] = torch.logical_and(attended, loss_masked).type_as(
            batch["attention_mask"]
        )
    else:
        raise KeyError(
            "No bidirectional_mask in batch and not sure how to construct one."
        )
178,688 | from typing import Union
from transformers import (AutoTokenizer, PreTrainedTokenizer,
PreTrainedTokenizerFast)
# Either flavor of HF tokenizer (slow or fast) is accepted.
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
# Number of <extra_id_i> sentinel tokens added for T5/UL2-style denoising.
NUM_SENTINEL_TOKENS: int = 100
The provided code snippet includes necessary dependencies for implementing the `adapt_tokenizer_for_denoising` function. Write a Python function `def adapt_tokenizer_for_denoising(tokenizer: Tokenizer)` to solve the following problem:
Adds sentinel tokens and padding token (if missing). Expands the tokenizer vocabulary to include sentinel tokens used in mixture-of-denoiser tasks as well as a padding token. All added tokens are added as special tokens. No tokens are added if sentinel tokens and padding token already exist.
Here is the function:
def adapt_tokenizer_for_denoising(tokenizer: Tokenizer):
    """Adds sentinel tokens and padding token (if missing).

    Registers the <extra_id_0>..<extra_id_{N-1}> sentinel tokens used by
    mixture-of-denoiser objectives, plus a <pad> token when absent, all as
    special tokens (no-ops for tokens that already exist). Also caches the
    sentinel ids on `tokenizer.sentinel_token_ids`.
    """
    sentinel_tokens = [f"<extra_id_{i}>" for i in range(NUM_SENTINEL_TOKENS)]
    tokenizer.add_tokens(sentinel_tokens, special_tokens=True)
    if tokenizer.pad_token is None:
        tokenizer.add_tokens("<pad>", special_tokens=True)
        tokenizer.pad_token = "<pad>"
        assert tokenizer.pad_token_id is not None
    # Tokenize every sentinel back-to-back to recover their ids.
    joined_sentinels = "".join(f"<extra_id_{i}>" for i in range(NUM_SENTINEL_TOKENS))
    tokenizer.sentinel_token_ids = tokenizer(
        joined_sentinels, add_special_tokens=False
    ).input_ids
178,689 | import argparse
import torch
from llava.model.utils import auto_upgrade
from tqdm import tqdm
from transformers import AutoModelForCausalLM, AutoTokenizer
def auto_upgrade(config):
    """Interactively upgrade an old LLaVA v0 checkpoint config in place.

    If `config` points at a checkpoint whose path contains "llava" but whose
    saved `model_type` predates the "llava" type, prompt the user and rewrite
    the saved config on disk. Exits the process if the user declines.
    """
    # NOTE(review): `AutoConfig` is presumably imported from `transformers`
    # at module level — not visible in this snippet's imports; confirm.
    cfg = AutoConfig.from_pretrained(config)
    if "llava" in config and "llava" not in cfg.model_type:
        # Old v0 checkpoints were saved with model_type == "llama".
        assert cfg.model_type == "llama"
        print(
            "You are using newer LLaVA code base, while the checkpoint of v0 is from older code base."
        )
        print(
            "You must upgrade the checkpoint to the new code base (this can be done automatically)."
        )
        confirm = input("Please confirm that you want to upgrade the checkpoint. [Y/N]")
        if confirm.lower() in ["y", "yes"]:
            print("Upgrading checkpoint...")
            assert len(cfg.architectures) == 1
            # Patch the class attribute so save_pretrained writes the new type.
            setattr(cfg.__class__, "model_type", "llava")
            cfg.architectures[0] = "LlavaLlamaForCausalLM"
            cfg.save_pretrained(config)
            print("Checkpoint upgraded.")
        else:
            print("Checkpoint upgrade aborted.")
            exit(1)
def make_delta(base_model_path, target_model_path, delta_path, hub_repo_id):
    """Compute a weight delta (target - base) and save it to `delta_path`.

    The delta can later be added back onto the base model to reconstruct the
    target, allowing fine-tuned weights to be distributed without the base
    model's weights.

    Args:
        base_model_path: HF path/dir of the base model.
        target_model_path: HF path/dir of the fine-tuned target model.
        delta_path: output directory for the delta weights and tokenizer.
        hub_repo_id: if truthy, also push the delta to this HF Hub repo.
    """
    print("Loading base model")
    base = AutoModelForCausalLM.from_pretrained(
        base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True
    )
    print("Loading target model")
    # Upgrade old-style v0 checkpoints to the current config format first.
    auto_upgrade(target_model_path)
    target = AutoModelForCausalLM.from_pretrained(
        target_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True
    )
    print("Calculating delta")
    for name, param in tqdm(target.state_dict().items(), desc="Calculating delta"):
        if name not in base.state_dict():
            # Only the multimodal projector may be new in the target.
            assert name in [
                "model.mm_projector.weight",
                "model.mm_projector.bias",
            ], f"{name} not in base model"
            continue
        if param.data.shape == base.state_dict()[name].shape:
            # Same shape: subtract in place to turn the target into a delta.
            param.data -= base.state_dict()[name]
        else:
            # Embeddings / LM head may have grown (extra tokens); subtract only
            # the overlapping slice so the new rows keep their full values.
            assert name in [
                "model.embed_tokens.weight",
                "lm_head.weight",
            ], f"{name} dimension mismatch: {param.data.shape} vs {base.state_dict()[name].shape}"
            bparam = base.state_dict()[name]
            param.data[: bparam.shape[0], : bparam.shape[1]] -= bparam
    print("Saving delta")
    if hub_repo_id:
        kwargs = {"push_to_hub": True, "repo_id": hub_repo_id}
    else:
        kwargs = {}
    target.save_pretrained(delta_path, **kwargs)
    target_tokenizer = AutoTokenizer.from_pretrained(target_model_path)
    target_tokenizer.save_pretrained(delta_path, **kwargs)
178,690 | import argparse
import torch
from llava.model import *
from llava.model.utils import auto_upgrade
from transformers import AutoModelForCausalLM, AutoTokenizer
def auto_upgrade(config):
    """Interactively upgrade an old LLaVA v0 checkpoint config in place.

    If `config` points at a checkpoint whose path contains "llava" but whose
    saved `model_type` predates the "llava" type, prompt the user and rewrite
    the saved config on disk. Exits the process if the user declines.
    """
    # NOTE(review): `AutoConfig` is presumably imported from `transformers`
    # at module level — not visible in this snippet's imports; confirm.
    cfg = AutoConfig.from_pretrained(config)
    if "llava" in config and "llava" not in cfg.model_type:
        # Old v0 checkpoints were saved with model_type == "llama".
        assert cfg.model_type == "llama"
        print(
            "You are using newer LLaVA code base, while the checkpoint of v0 is from older code base."
        )
        print(
            "You must upgrade the checkpoint to the new code base (this can be done automatically)."
        )
        confirm = input("Please confirm that you want to upgrade the checkpoint. [Y/N]")
        if confirm.lower() in ["y", "yes"]:
            print("Upgrading checkpoint...")
            assert len(cfg.architectures) == 1
            # Patch the class attribute so save_pretrained writes the new type.
            setattr(cfg.__class__, "model_type", "llava")
            cfg.architectures[0] = "LlavaLlamaForCausalLM"
            cfg.save_pretrained(config)
            print("Checkpoint upgraded.")
        else:
            print("Checkpoint upgrade aborted.")
            exit(1)
def consolidate_ckpt(src_path, dst_path):
    """Re-save a (possibly old-format) checkpoint and its tokenizer to dst_path.

    Runs `auto_upgrade` on the source first so v0 configs are migrated before
    the model is loaded and written back out in consolidated form.
    """
    print("Loading model")
    auto_upgrade(src_path)
    model = AutoModelForCausalLM.from_pretrained(
        src_path, torch_dtype=torch.float16, low_cpu_mem_usage=True
    )
    tokenizer = AutoTokenizer.from_pretrained(src_path, use_fast=False)
    model.save_pretrained(dst_path)
    tokenizer.save_pretrained(dst_path)
178,691 | import json
import os
import random
import cv2
import torch
import torch.nn.functional as F
from transformers import CLIPImageProcessor
from model.llava import conversation as conversation_lib
from model.segment_anything.utils.transforms import ResizeLongestSide
from .utils import DEFAULT_IMAGE_TOKEN
# Placeholder token marking where image features are spliced into the text.
DEFAULT_IMAGE_TOKEN = "<image>"
def preprocess_multimodal(source, mm_use_im_start_end):
    """Normalize <image> token placement in a list of conversation turns.

    For every turn containing the image token, the token is moved to the very
    front (followed by a newline), and — when the active conversation version
    uses "mmtag" — wrapped as <Image><image></Image>. Turns are modified in
    place and the (same) list is returned.

    NOTE(review): `mm_use_im_start_end` is currently unused in this body —
    confirm whether start/end-token wrapping was intended here.
    """
    for turn in source:
        text = turn["value"]
        if DEFAULT_IMAGE_TOKEN not in text:
            continue
        # Strip the token wherever it was, then re-prepend it on its own line.
        text = DEFAULT_IMAGE_TOKEN + "\n" + text.replace(DEFAULT_IMAGE_TOKEN, "").strip()
        text = text.strip()
        if "mmtag" in conversation_lib.default_conversation.version:
            text = text.replace(
                DEFAULT_IMAGE_TOKEN, "<Image>" + DEFAULT_IMAGE_TOKEN + "</Image>"
            )
        turn["value"] = text
    return source
178,692 | import glob
import json
import os
import cv2
import numpy as np
def get_mask_from_json(json_path, img):
    """Rasterize polygon annotations from a LabelMe-style JSON file.

    Polygons are filled largest-area-first so that smaller overlapping regions
    keep their own label value. Polygons whose label contains "ignore" are
    burned in with 255 (ignored during evaluation); all other targets with 1.
    Polygons labeled "flag" are deprecated and skipped entirely.

    Args:
        json_path: path to the annotation JSON (keys: "shapes", "text",
            "is_sentence").
        img: image array; only its height/width are used to size the mask.

    Returns:
        (mask, comments, is_sentence): a uint8 HxW mask, the free-form text
        annotation, and the is-sentence flag.
    """
    # Some annotation files are not UTF-8; retry with cp1252. This was a bare
    # `except:`, which also swallowed KeyboardInterrupt/SystemExit — narrowed
    # to the decode/parse failures the fallback is actually meant to handle.
    try:
        with open(json_path, "r") as r:
            anno = json.loads(r.read())
    except (UnicodeDecodeError, json.JSONDecodeError):
        with open(json_path, "r", encoding="cp1252") as r:
            anno = json.loads(r.read())
    inform = anno["shapes"]
    comments = anno["text"]
    is_sentence = anno["is_sentence"]
    height, width = img.shape[:2]
    ### sort polies by area
    area_list = []
    valid_poly_list = []
    for i in inform:
        label_id = i["label"]
        points = i["points"]
        if "flag" == label_id.lower():  ## meaningless deprecated annotations
            continue
        # Rasterize this polygon alone just to measure its filled area.
        tmp_mask = np.zeros((height, width), dtype=np.uint8)
        cv2.polylines(tmp_mask, np.array([points], dtype=np.int32), True, 1, 1)
        cv2.fillPoly(tmp_mask, np.array([points], dtype=np.int32), 1)
        tmp_area = tmp_mask.sum()
        area_list.append(tmp_area)
        valid_poly_list.append(i)
    ### ground-truth mask: draw big polygons first so small ones stay on top
    sort_index = np.argsort(area_list)[::-1].astype(np.int32)
    sort_index = list(sort_index)
    sort_inform = []
    for s_idx in sort_index:
        sort_inform.append(valid_poly_list[s_idx])
    mask = np.zeros((height, width), dtype=np.uint8)
    for i in sort_inform:
        label_id = i["label"]
        points = i["points"]
        if "ignore" in label_id.lower():
            label_value = 255  # ignored during evaluation
        else:
            label_value = 1  # target
        cv2.polylines(mask, np.array([points], dtype=np.int32), True, label_value, 1)
        cv2.fillPoly(mask, np.array([points], dtype=np.int32), label_value)
    return mask, comments, is_sentence
178,693 | import contextlib
import copy
import io
import logging
import os
import random
import numpy as np
import pycocotools.mask as mask_util
from detectron2.structures import Boxes, BoxMode, PolygonMasks, RotatedBoxes
from detectron2.utils.file_io import PathManager
from fvcore.common.timer import Timer
from PIL import Image
# Module-level logger for this dataset loader.
logger = logging.getLogger(__name__)
class G_REFER:
    def __init__(self, data_root, dataset="grefcoco", splitBy="unc"):
        """Load the gRefCOCO referring-expression dataset into memory.

        Args:
            data_root: folder containing the `grefcoco` subfolder and the
                COCO `images/train2014` directory.
            dataset: dataset name; only "grefcoco" is supported.
            splitBy: annotation split variant (e.g. "unc").
        """
        # NOTE(review): `osp`, `pickle`, `json` and `time` are presumably
        # imported at module level (`import os.path as osp`, ...) — those
        # imports are not visible in this chunk; confirm.
        # provide data_root folder which contains grefcoco
        print("loading dataset %s into memory..." % dataset)
        self.ROOT_DIR = osp.abspath(osp.dirname(__file__))
        self.DATA_DIR = osp.join(data_root, dataset)
        if dataset in ["grefcoco"]:
            self.IMAGE_DIR = osp.join(data_root, "images/train2014")
        else:
            raise KeyError("No refer dataset is called [%s]" % dataset)
        tic = time.time()
        # load refs from data/dataset/refs(dataset).json
        self.data = {}
        self.data["dataset"] = dataset
        ref_file = osp.join(self.DATA_DIR, f"grefs({splitBy}).p")
        if osp.exists(ref_file):
            # Prefer the pickled refs when present; fall back to JSON.
            self.data["refs"] = pickle.load(open(ref_file, "rb"), fix_imports=True)
        else:
            ref_file = osp.join(self.DATA_DIR, f"grefs({splitBy}).json")
            if osp.exists(ref_file):
                self.data["refs"] = json.load(open(ref_file, "rb"))
            else:
                raise FileNotFoundError("JSON file not found")
        # load annotations from data/dataset/instances.json
        instances_file = osp.join(self.DATA_DIR, "instances.json")
        instances = json.load(open(instances_file, "r"))
        self.data["images"] = instances["images"]
        self.data["annotations"] = instances["annotations"]
        self.data["categories"] = instances["categories"]
        # create index
        self.createIndex()
        print("DONE (t=%.2fs)" % (time.time() - tic))
def _toList(x):
return x if isinstance(x, list) else [x]
def match_any(a, b):
a = a if isinstance(a, list) else [a]
b = b if isinstance(b, list) else [b]
return set(a) & set(b)
    def createIndex(self):
        """Build the lookup tables used by the get*/load* query methods.

        Called once from __init__ after the raw refs/instances are loaded.
        """
        # create sets of mapping
        # 1) Refs: {ref_id: ref}
        # 2) Anns: {ann_id: ann}
        # 3) Imgs: {image_id: image}
        # 4) Cats: {category_id: category_name}
        # 5) Sents: {sent_id: sent}
        # 6) imgToRefs: {image_id: refs}
        # 7) imgToAnns: {image_id: anns}
        # 8) refToAnn: {ref_id: ann}
        # 9) annToRef: {ann_id: ref}
        # 10) catToRefs: {category_id: refs}
        # 11) sentToRef: {sent_id: ref}
        # 12) sentToTokens: {sent_id: tokens}
        print("creating index...")
        # fetch info from instances
        Anns, Imgs, Cats, imgToAnns = {}, {}, {}, {}
        # gRefCOCO uses ann_id -1 for "no target" refs; map it to None.
        Anns[-1] = None
        for ann in self.data["annotations"]:
            Anns[ann["id"]] = ann
            imgToAnns[ann["image_id"]] = imgToAnns.get(ann["image_id"], []) + [ann]
        for img in self.data["images"]:
            Imgs[img["id"]] = img
        for cat in self.data["categories"]:
            Cats[cat["id"]] = cat["name"]
        # fetch info from refs
        Refs, imgToRefs, refToAnn, annToRef, catToRefs = {}, {}, {}, {}, {}
        Sents, sentToRef, sentToTokens = {}, {}, {}
        availableSplits = []
        for ref in self.data["refs"]:
            # ids
            ref_id = ref["ref_id"]
            ann_id = ref["ann_id"]
            category_id = ref["category_id"]
            image_id = ref["image_id"]
            if ref["split"] not in availableSplits:
                availableSplits.append(ref["split"])
            # add mapping related to ref
            if ref_id in Refs:
                print("Duplicate ref id")
            Refs[ref_id] = ref
            imgToRefs[image_id] = imgToRefs.get(image_id, []) + [ref]
            # category_id / ann_id may be a scalar or a list in gRefCOCO;
            # normalize to lists and de-duplicate categories per ref.
            category_id = self._toList(category_id)
            added_cats = []
            for cat in category_id:
                if cat not in added_cats:
                    added_cats.append(cat)
                    catToRefs[cat] = catToRefs.get(cat, []) + [ref]
            ann_id = self._toList(ann_id)
            refToAnn[ref_id] = [Anns[ann] for ann in ann_id]
            for ann_id_n in ann_id:
                annToRef[ann_id_n] = annToRef.get(ann_id_n, []) + [ref]
            # add mapping of sent
            for sent in ref["sentences"]:
                Sents[sent["sent_id"]] = sent
                sentToRef[sent["sent_id"]] = ref
                sentToTokens[sent["sent_id"]] = sent["tokens"]
        # create class members
        self.Refs = Refs
        self.Anns = Anns
        self.Imgs = Imgs
        self.Cats = Cats
        self.Sents = Sents
        self.imgToRefs = imgToRefs
        self.imgToAnns = imgToAnns
        self.refToAnn = refToAnn
        self.annToRef = annToRef
        self.catToRefs = catToRefs
        self.sentToRef = sentToRef
        self.sentToTokens = sentToTokens
        self.availableSplits = availableSplits
        print("index created.")
def getRefIds(self, image_ids=[], cat_ids=[], split=[]):
    """Return ref ids, optionally filtered by image ids, category ids and split."""
    image_ids = self._toList(image_ids)
    cat_ids = self._toList(cat_ids)
    split = self._toList(split)
    # Reject unknown split names up front.
    for name in split:
        if name not in self.availableSplits:
            raise ValueError(f"Invalid split name: {name}")
    candidates = self.data["refs"]
    if image_ids:
        per_image = (self.imgToRefs[img] for img in image_ids)
        candidates = list(itertools.chain.from_iterable(per_image))
    if cat_ids:
        candidates = [
            r for r in candidates if self.match_any(r["category_id"], cat_ids)
        ]
    if split:
        candidates = [r for r in candidates if r["split"] in split]
    return [r["ref_id"] for r in candidates]
def getAnnIds(self, image_ids=[], ref_ids=[]):
    """Return annotation ids, optionally restricted to images and/or refs."""
    image_ids = self._toList(image_ids)
    ref_ids = self._toList(ref_ids)
    if not image_ids and not ref_ids:
        # No filters at all: every annotation id in the dataset.
        return [a["id"] for a in self.data["annotations"]]
    if image_ids:
        per_image = (
            self.imgToAnns[img] for img in image_ids if img in self.imgToAnns
        )
        anns = list(itertools.chain.from_iterable(per_image))
    else:
        anns = self.data["annotations"]
    ann_ids = [a["id"] for a in anns]
    if ref_ids:
        per_ref = (self.Refs[rid]["ann_id"] for rid in ref_ids)
        ref_ann_ids = set(itertools.chain.from_iterable(per_ref))
        ann_ids = list(set(ann_ids).intersection(ref_ann_ids))
    return ann_ids
def getImgIds(self, ref_ids=[]):
    """Return image ids; restricted to the given refs' images when *ref_ids* is set.

    Note: returns a dict key view when unfiltered, a de-duplicated list otherwise
    (matching the original contract).
    """
    ref_ids = self._toList(ref_ids)
    if not ref_ids:
        return self.Imgs.keys()
    return list({self.Refs[rid]["image_id"] for rid in ref_ids})
def getCatIds(self):
    # All category ids known to the dataset (a dict key view, not a list).
    return self.Cats.keys()
def loadRefs(self, ref_ids=[]):
    # Map one ref id (or a list of ids) to the full ref record(s).
    return [self.Refs[ref_id] for ref_id in self._toList(ref_ids)]
def loadAnns(self, ann_ids=[]):
    # Accept a stringified id for convenience; normalise it to int first.
    if isinstance(ann_ids, str):
        ann_ids = int(ann_ids)
    return [self.Anns[ann_id] for ann_id in self._toList(ann_ids)]
def loadImgs(self, image_ids=[]):
    # Map one image id (or a list of ids) to the image record(s).
    return [self.Imgs[image_id] for image_id in self._toList(image_ids)]
def loadCats(self, cat_ids=[]):
    # Map one category id (or a list of ids) to the category name(s).
    return [self.Cats[cat_id] for cat_id in self._toList(cat_ids)]
def getRefBox(self, ref_id):
    # One bounding box per annotation attached to this ref (a ref may map to
    # several annotations in gRefCOCO), so the result is a *list* of boxes.
    anns = self.refToAnn[ref_id]
    return [ann["bbox"] for ann in anns]  # [x, y, w, h]
def showRef(self, ref, seg_box="seg"):
    """Visualise a referring expression on the current matplotlib axes.

    Prints the ref's sentences and overlays either the segmentation
    (``seg_box="seg"``) or the bounding box (``seg_box="box"``) on the image.
    Fix: polygon reshape used float division (`len(seg) / 2`), which raises
    ``TypeError: 'float' object cannot be interpreted as an integer`` on
    Python 3 — changed to integer division.
    """
    ax = plt.gca()
    # show image
    image = self.Imgs[ref["image_id"]]
    I = io.imread(osp.join(self.IMAGE_DIR, image["file_name"]))
    ax.imshow(I)
    # show refer expression
    for sid, sent in enumerate(ref["sentences"]):
        print("%s. %s" % (sid + 1, sent["sent"]))
    # show segmentations
    if seg_box == "seg":
        # NOTE(review): in gRefCOCO ref["ann_id"] may be a list; this lookup
        # assumes a single annotation id — confirm callers only pass such refs.
        ann_id = ref["ann_id"]
        ann = self.Anns[ann_id]
        polygons = []
        color = []
        c = "none"
        if type(ann["segmentation"][0]) == list:
            # polygon used for refcoco*
            for seg in ann["segmentation"]:
                # FIX: integer division — reshape rejects float dimensions.
                poly = np.array(seg).reshape((len(seg) // 2, 2))
                polygons.append(Polygon(poly, True, alpha=0.4))
                color.append(c)
            p = PatchCollection(
                polygons,
                facecolors=color,
                edgecolors=(1, 1, 0, 0),
                linewidths=3,
                alpha=1,
            )
            ax.add_collection(p)  # thick yellow polygon
            p = PatchCollection(
                polygons,
                facecolors=color,
                edgecolors=(1, 0, 0, 0),
                linewidths=1,
                alpha=1,
            )
            ax.add_collection(p)  # thin red polygon
        else:
            # mask used for refclef
            rle = ann["segmentation"]
            m = mask.decode(rle)
            img = np.ones((m.shape[0], m.shape[1], 3))
            color_mask = np.array([2.0, 166.0, 101.0]) / 255
            for i in range(3):
                img[:, :, i] = color_mask[i]
            # Alpha-blend the colour mask over the image.
            ax.imshow(np.dstack((img, m * 0.5)))
    # show bounding-box
    elif seg_box == "box":
        ann_id = ref["ann_id"]
        ann = self.Anns[ann_id]
        # NOTE(review): getRefBox returns a *list* of boxes; indexing it with
        # bbox[0..3] assumes exactly one annotation — confirm for multi-ann refs.
        bbox = self.getRefBox(ref["ref_id"])
        box_plot = Rectangle(
            (bbox[0], bbox[1]),
            bbox[2],
            bbox[3],
            fill=False,
            edgecolor="green",
            linewidth=3,
        )
        ax.add_patch(box_plot)
def getMask(self, ann):
    """Decode the binary mask of one annotation.

    Returns None for a null annotation and raises ValueError for crowd
    annotations. Otherwise returns {"mask": HxW uint8 array, "area": pixel
    count summed over the RLE(s)}.
    """
    if not ann:
        return None
    if ann["iscrowd"]:
        raise ValueError("Crowd object")
    image = self.Imgs[ann["image_id"]]
    if type(ann["segmentation"][0]) == list:  # polygon
        # Convert polygon list(s) to RLE at the image's resolution.
        rle = mask.frPyObjects(ann["segmentation"], image["height"], image["width"])
    else:
        # Already RLE-encoded (e.g. refclef-style masks).
        rle = ann["segmentation"]
    m = mask.decode(rle)
    m = np.sum(
        m, axis=2
    )  # sometimes there are multiple binary map (corresponding to multiple segs)
    m = m.astype(np.uint8)  # convert to np.uint8
    # compute area
    area = sum(mask.area(rle))  # should be close to ann['area']
    return {"mask": m, "area": area}
def getMaskByRef(self, ref=None, ref_id=None, merge=False):
    """Return mask(s) for a ref, given either the ref record or its id.

    For a no-target ref (ann ids == [-1]) returns an all-zero mask with
    ``"empty": True``. Otherwise returns either the list of per-annotation
    mask dicts, or — when *merge* is True — a single binarised union mask.
    Fix: the guard used to raise a bare ``ValueError`` with no message.
    """
    if not ref and not ref_id:
        raise ValueError("Either 'ref' or 'ref_id' must be provided")
    if ref:
        ann_ids = ref["ann_id"]
        ref_id = ref["ref_id"]
    else:
        ann_ids = self.getAnnIds(ref_ids=ref_id)

    if ann_ids == [-1]:
        # No-target sample: an all-zero mask at the image's size.
        img = self.Imgs[self.Refs[ref_id]["image_id"]]
        return {
            "mask": np.zeros([img["height"], img["width"]], dtype=np.uint8),
            "empty": True,
        }

    anns = self.loadAnns(ann_ids)
    # getMask raises on crowd annotations, so skip them here.
    mask_list = [self.getMask(ann) for ann in anns if not ann["iscrowd"]]

    if merge:
        merged_masks = sum([mask["mask"] for mask in mask_list])
        merged_masks[np.where(merged_masks > 1)] = 1  # binarise overlaps
        return {"mask": merged_masks, "empty": False}
    else:
        return mask_list
def showMask(self, ref):
    # Display the decoded mask on the current matplotlib axes.
    # NOTE(review): getMask expects an *annotation* dict (it reads
    # ann["iscrowd"]/ann["segmentation"]), but a ref record is passed here —
    # confirm this method is actually exercised / receives the right object.
    M = self.getMask(ref)
    msk = M["mask"]
    ax = plt.gca()
    ax.imshow(msk)
def load_grefcoco_json(
    refer_root,
    dataset_name,
    splitby,
    split,
    image_root,
    extra_annotation_keys=None,
    extra_refer_keys=None,
):
    """Load one gRefCOCO split as a list of detectron2-style dataset dicts.

    One record is emitted per referring *sentence*; each record carries the
    image info, its (possibly empty) annotation list and the sentence.

    Fixes:
    * ``splitby == "unc"`` was a no-op comparison — it is now asserted, in
      line with the refcocog check below.
    * ``num_instances_without_valid_segmentation`` was incremented but never
      initialized (a latent NameError) — it is now initialized and reported.
    * Dead commented-out debug counters were removed.
    """
    if dataset_name == "refcocop":
        dataset_name = "refcoco+"
    if dataset_name == "refcoco" or dataset_name == "refcoco+":
        # FIX: was `splitby == "unc"` (a discarded comparison); enforce it.
        assert splitby == "unc"
    if dataset_name == "refcocog":
        assert splitby == "umd" or splitby == "google"
    dataset_id = "_".join([dataset_name, splitby, split])

    from .grefer import G_REFER

    logger.info("Loading dataset {} ({}-{}) ...".format(dataset_name, splitby, split))
    logger.info("Refcoco root: {}".format(refer_root))
    timer = Timer()
    refer_root = PathManager.get_local_path(refer_root)
    # G_REFER prints progress; silence it.
    with contextlib.redirect_stdout(io.StringIO()):
        refer_api = G_REFER(data_root=refer_root, dataset=dataset_name, splitBy=splitby)
    if timer.seconds() > 1:
        logger.info(
            "Loading {} takes {:.2f} seconds.".format(dataset_id, timer.seconds())
        )
    ref_ids = refer_api.getRefIds(split=split)
    img_ids = refer_api.getImgIds(ref_ids)
    refs = refer_api.loadRefs(ref_ids)
    imgs = [refer_api.loadImgs(ref["image_id"])[0] for ref in refs]
    anns = [refer_api.loadAnns(ref["ann_id"]) for ref in refs]
    imgs_refs_anns = list(zip(imgs, refs, anns))
    logger.info(
        "Loaded {} images, {} referring object sets in G_RefCOCO format from {}".format(
            len(img_ids), len(ref_ids), dataset_id
        )
    )
    dataset_dicts = []
    ann_keys = ["iscrowd", "bbox", "category_id"] + (extra_annotation_keys or [])
    ref_keys = ["raw", "sent_id"] + (extra_refer_keys or [])
    ann_lib = {}  # cache: one shared ann dict per annotation id
    # FIX: was referenced below without ever being initialized.
    num_instances_without_valid_segmentation = 0
    for img_dict, ref_dict, anno_dicts in imgs_refs_anns:
        record = {}
        record["source"] = "grefcoco"
        record["file_name"] = os.path.join(image_root, img_dict["file_name"])
        record["height"] = img_dict["height"]
        record["width"] = img_dict["width"]
        image_id = record["image_id"] = img_dict["id"]
        # Check that information of image, ann and ref match each other
        # This fails only when the data parsing logic or the annotation file is buggy.
        assert ref_dict["image_id"] == image_id
        assert ref_dict["split"] == split
        if not isinstance(ref_dict["ann_id"], list):
            ref_dict["ann_id"] = [ref_dict["ann_id"]]
        # No target samples
        if None in anno_dicts:
            assert anno_dicts == [None]
            assert ref_dict["ann_id"] == [-1]
            record["empty"] = True
            obj = {key: None for key in ann_keys}
            obj["bbox_mode"] = BoxMode.XYWH_ABS
            obj["empty"] = True
            obj = [obj]
        # Multi target samples
        else:
            record["empty"] = False
            obj = []
            for anno_dict in anno_dicts:
                ann_id = anno_dict["id"]
                if anno_dict["iscrowd"]:
                    continue
                assert anno_dict["image_id"] == image_id
                assert ann_id in ref_dict["ann_id"]
                if ann_id in ann_lib:
                    ann = ann_lib[ann_id]
                else:
                    ann = {key: anno_dict[key] for key in ann_keys if key in anno_dict}
                    ann["bbox_mode"] = BoxMode.XYWH_ABS
                    ann["empty"] = False
                    segm = anno_dict.get("segmentation", None)
                    assert segm  # either list[list[float]] or dict(RLE)
                    if isinstance(segm, dict):
                        if isinstance(segm["counts"], list):
                            # convert to compressed RLE
                            segm = mask_util.frPyObjects(segm, *segm["size"])
                    else:
                        # filter out invalid polygons (< 3 points)
                        segm = [
                            poly
                            for poly in segm
                            if len(poly) % 2 == 0 and len(poly) >= 6
                        ]
                        if len(segm) == 0:
                            num_instances_without_valid_segmentation += 1
                            continue  # ignore this instance
                    ann["segmentation"] = segm
                    ann_lib[ann_id] = ann
                obj.append(ann)
        record["annotations"] = obj
        # Process referring expressions: one record per sentence.
        sents = ref_dict["sentences"]
        for sent in sents:
            ref_record = record.copy()
            ref = {key: sent[key] for key in ref_keys if key in sent}
            ref["ref_id"] = ref_dict["ref_id"]
            ref_record["sentence"] = ref
            dataset_dicts.append(ref_record)
    if num_instances_without_valid_segmentation > 0:
        logger.warning(
            "Filtered out {} instances without valid segmentation.".format(
                num_instances_without_valid_segmentation
            )
        )
    return dataset_dicts
178,694 | import dataclasses
from enum import Enum, auto
from typing import Any, List
# Conversation templates. Each instance bundles a system prompt, role names,
# seed messages, and the separator style/strings used when rendering prompts.

# One-shot template: seeds the chat with a single worked Q/A example.
conv_one_shot = Conversation(
    system="A chat between a curious human and an artificial intelligence assistant. "
    "The assistant gives helpful, detailed, and polite answers to the human's questions.",
    roles=("Human", "Assistant"),
    messages=(
        (
            "Human",
            "What are the key differences between renewable and non-renewable energy sources?",
        ),
        (
            "Assistant",
            "Renewable energy sources are those that can be replenished naturally in a relatively "
            "short amount of time, such as solar, wind, hydro, geothermal, and biomass. "
            "Non-renewable energy sources, on the other hand, are finite and will eventually be "
            "depleted, such as coal, oil, and natural gas. Here are some key differences between "
            "renewable and non-renewable energy sources:\n"
            "1. Availability: Renewable energy sources are virtually inexhaustible, while non-renewable "
            "energy sources are finite and will eventually run out.\n"
            "2. Environmental impact: Renewable energy sources have a much lower environmental impact "
            "than non-renewable sources, which can lead to air and water pollution, greenhouse gas emissions, "
            "and other negative effects.\n"
            "3. Cost: Renewable energy sources can be more expensive to initially set up, but they typically "
            "have lower operational costs than non-renewable sources.\n"
            "4. Reliability: Renewable energy sources are often more reliable and can be used in more remote "
            "locations than non-renewable sources.\n"
            "5. Flexibility: Renewable energy sources are often more flexible and can be adapted to different "
            "situations and needs, while non-renewable sources are more rigid and inflexible.\n"
            "6. Sustainability: Renewable energy sources are more sustainable over the long term, while "
            "non-renewable sources are not, and their depletion can lead to economic and social instability.",
        ),
    ),
    offset=2,  # skip the two seed messages when counting user turns
    sep_style=SeparatorStyle.ADD_COLON_SINGLE,
    sep="\n### ",
    stop_str="###",
)

# Vicuna v1.1: "USER"/"ASSISTANT" turns, assistant replies end with "</s>".
conv_vicuna_v1_1 = Conversation(
    system="A chat between a curious user and an artificial intelligence assistant. "
    "The assistant gives helpful, detailed, and polite answers to the user's questions.",
    roles=("USER", "ASSISTANT"),
    messages=(),
    offset=0,
    sep_style=SeparatorStyle.ADD_COLON_TWO,
    sep=" ",
    sep2="</s>",
)

# Koala v1: same two-separator scheme with "USER"/"GPT" roles.
conv_koala_v1 = Conversation(
    system="BEGINNING OF CONVERSATION:",
    roles=("USER", "GPT"),
    messages=(),
    offset=0,
    sep_style=SeparatorStyle.ADD_COLON_TWO,
    sep=" ",
    sep2="</s>",
)

# Dolly v2: instruction/response headers, blocks end with "### End".
conv_dolly = Conversation(
    system="Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n",
    roles=("### Instruction", "### Response"),
    messages=(),
    offset=0,
    sep_style=SeparatorStyle.DOLLY,
    sep="\n\n",
    sep2="### End",
)

# OpenAssistant: special role tokens, no colon after role names.
conv_oasst = Conversation(
    system="",
    roles=("<|prompter|>", "<|assistant|>"),
    messages=(),
    offset=0,
    sep_style=SeparatorStyle.NO_COLON_SINGLE,
    sep="<|endoftext|>",
)

# StableLM Tuned (alpha): role tokens plus explicit stop token ids.
conv_stablelm = Conversation(
    system="""<|SYSTEM|># StableLM Tuned (Alpha version)
- StableLM is a helpful and harmless open-source AI language model developed by StabilityAI.
- StableLM is excited to be able to help the user, but will refuse to do anything that could be considered harmful to the user.
- StableLM is more than just an information source, StableLM is also able to write poetry, short stories, and make jokes.
- StableLM will refuse to participate in anything that could harm a human.
""",
    roles=("<|USER|>", "<|ASSISTANT|>"),
    messages=(),
    offset=0,
    sep_style=SeparatorStyle.NO_COLON_SINGLE,
    sep="",
    stop_token_ids=[50278, 50279, 50277, 1, 0],
)

# Baize: bracketed role markers; generation stops at the next "[|Human|]".
conv_baize = Conversation(
    system="The following is a conversation between a human and an AI assistant named Baize (named after a mythical creature in Chinese folklore). Baize is an open-source AI assistant developed by UCSD and Sun Yat-Sen University. The human and the AI assistant take turns chatting. Human statements start with [|Human|] and AI assistant statements start with [|AI|]. The AI assistant always provides responses in as much detail as possible, and in Markdown format. The AI assistant always declines to engage with topics, questions and instructions related to unethical, controversial, or sensitive issues. Complete the transcript in exactly that format.",
    roles=("[|Human|]", "[|AI|]"),
    messages=(
        ("[|Human|]", "Hello!"),
        ("[|AI|]", "Hi!"),
    ),
    offset=2,
    sep_style=SeparatorStyle.BAIZE,
    sep="[|Human|]",
    stop_str="[|Human|]",
)

# RWKV-4: "Bob"/"Alice" roles; a blank line ends a reply.
conv_rwkv = Conversation(
    system="",
    roles=("Bob", "Alice"),
    messages=(),
    offset=0,
    sep_style=SeparatorStyle.RWKV,
    sep="",
    stop_str="\n\n",
)
def get_default_conv_template(model_name):
    """Pick the conversation template matching *model_name* (case-insensitive).

    Rules are checked in order; the one-shot template is the fallback.
    """
    name = model_name.lower()
    rules = (
        (lambda n: "vicuna" in n or "output" in n, conv_vicuna_v1_1),
        (lambda n: "koala" in n, conv_koala_v1),
        (lambda n: "dolly-v2" in n, conv_dolly),
        (lambda n: "oasst" in n and "pythia" in n, conv_oasst),
        (lambda n: "baize" in n, conv_baize),
        (lambda n: "stablelm" in n, conv_stablelm),
        (lambda n: "rwkv-4" in n, conv_rwkv),
    )
    for matches, template in rules:
        if matches(name):
            return template
    return conv_one_shot
178,695 | import glob
import os
import random
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from pycocotools import mask
from transformers import CLIPImageProcessor
from model.llava import conversation as conversation_lib
from model.llava.constants import (DEFAULT_IMAGE_TOKEN, IGNORE_INDEX,
IMAGE_TOKEN_INDEX)
from model.llava.mm_utils import tokenizer_image_token
from model.segment_anything.utils.transforms import ResizeLongestSide
from .conversation import get_default_conv_template
from .data_processing import get_mask_from_json
from .reason_seg_dataset import ReasonSegDataset
from .refer import REFER
from .refer_seg_dataset import ReferSegDataset
from .sem_seg_dataset import SemSegDataset
from .utils import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN,
DEFAULT_IMAGE_TOKEN)
from .vqa_dataset import VQADataset
# Label value ignored by the language-model cross-entropy loss (HF convention).
IGNORE_INDEX = -100
# Placeholder string marking where image features are spliced into a prompt.
DEFAULT_IMAGE_TOKEN = "<image>"
def tokenizer_image_token(
    prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None
):
    """Tokenize *prompt*, replacing each "<image>" placeholder with
    *image_token_index* and keeping at most one leading BOS token."""
    chunks = [tokenizer(piece).input_ids for piece in prompt.split("<image>")]

    skip = 0
    token_ids = []
    if chunks and chunks[0] and chunks[0][0] == tokenizer.bos_token_id:
        # Keep a single BOS up front; strip the duplicate BOS each chunk carries.
        skip = 1
        token_ids.append(chunks[0][0])

    # Interleave chunks with separators, then drop the trailing separator.
    separator = [image_token_index] * (skip + 1)
    interleaved = []
    for chunk in chunks:
        interleaved.append(chunk)
        interleaved.append(separator)
    for piece in interleaved[:-1]:
        token_ids.extend(piece[skip:])

    if return_tensors is not None:
        if return_tensors == "pt":
            return torch.tensor(token_ids, dtype=torch.long)
        raise ValueError(f"Unsupported tensor type: {return_tensors}")
    return token_ids
# Prompt placeholder for image features, plus the sentinel tokens that wrap it
# when `use_mm_start_end` is enabled.
DEFAULT_IMAGE_TOKEN = "<image>"
DEFAULT_IM_START_TOKEN = "<im_start>"
DEFAULT_IM_END_TOKEN = "<im_end>"
def collate_fn(
    batch, tokenizer=None, conv_type="llava_v1", use_mm_start_end=True, local_rank=-1
):
    """Collate dataset samples into a single training/inference batch dict.

    Each element of *batch* is a 10-tuple: (image_path, images, images_clip,
    conversations, masks, label, resize, questions, sampled_classes,
    inference). Conversations from all samples are flattened into one list;
    ``offset`` records where each sample's conversations start/end in that
    flat list. Loss labels are the input ids with everything except the
    assistant replies masked to IGNORE_INDEX.
    """
    image_path_list = []
    images_list = []
    images_clip_list = []
    conversation_list = []
    masks_list = []
    label_list = []
    resize_list = []
    questions_list = []
    sampled_classes_list = []
    offset_list = [0]
    cnt = 0
    inferences = []
    # Gather per-sample fields; conversations are flattened across samples.
    for (
        image_path,
        images,
        images_clip,
        conversations,
        masks,
        label,
        resize,
        questions,
        sampled_classes,
        inference,
    ) in batch:
        image_path_list.append(image_path)
        images_list.append(images)
        images_clip_list.append(images_clip)
        conversation_list.extend(conversations)
        label_list.append(label)
        masks_list.append(masks.float())
        resize_list.append(resize)
        questions_list.append(questions)
        sampled_classes_list.append(sampled_classes)
        cnt += len(conversations)
        offset_list.append(cnt)  # cumulative conversation count per sample
        inferences.append(inference)

    if use_mm_start_end:
        # replace <image> token with <im_start><image><im_end>
        for i in range(len(conversation_list)):
            replace_token = DEFAULT_IMAGE_TOKEN
            replace_token = (
                DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN
            )
            conversation_list[i] = conversation_list[i].replace(
                DEFAULT_IMAGE_TOKEN, replace_token
            )
    # Tokenize every conversation and right-pad to a rectangular tensor.
    input_ids = [
        tokenizer_image_token(prompt, tokenizer, return_tensors="pt")
        for prompt in conversation_list
    ]
    input_ids = torch.nn.utils.rnn.pad_sequence(
        input_ids, batch_first=True, padding_value=tokenizer.pad_token_id
    )
    attention_masks = input_ids.ne(tokenizer.pad_token_id)

    conv = conversation_lib.default_conversation.copy()
    targets = input_ids.clone()

    # Separator that precedes the assistant's reply within a round.
    if conv_type == "llava_v1":
        sep = conv.sep + conv.roles[1] + ": "
    else:
        sep = "[/INST] "
    # Mask out everything except the assistant replies in the loss targets.
    for conversation, target in zip(conversation_list, targets):
        total_len = int(target.ne(tokenizer.pad_token_id).sum())

        rounds = conversation.split(conv.sep2)
        cur_len = 1  # position 0 is BOS; always ignored
        target[:cur_len] = IGNORE_INDEX
        for i, rou in enumerate(rounds):
            if rou == "":
                break

            parts = rou.split(sep)
            # if len(parts) != 2:
            #     break
            assert len(parts) == 2, (len(parts), rou)
            parts[0] += sep

            # Token lengths differ when the round contains an <image> token.
            if DEFAULT_IMAGE_TOKEN in conversation:
                round_len = len(tokenizer_image_token(rou, tokenizer))
                # -2: drop BOS and the role-colon token from the instruction span.
                instruction_len = len(tokenizer_image_token(parts[0], tokenizer)) - 2
            else:
                round_len = len(tokenizer(rou).input_ids)
                instruction_len = len(tokenizer(parts[0]).input_ids) - 2

            # Ignore the instruction part; keep the reply part for the loss.
            target[cur_len : cur_len + instruction_len] = IGNORE_INDEX

            cur_len += round_len
        # Ignore padding (and any trailing remainder) after the last round.
        target[cur_len:] = IGNORE_INDEX

        if False:  # debug: decode the unmasked targets for inspection
            z = target.clone()
            z = torch.where(z == IGNORE_INDEX, tokenizer.unk_token_id, z)
            if local_rank == 0:
                print(
                    "conversation: ",
                    conversation,
                    "tokenizer.decode(z): ",
                    tokenizer.decode(z),
                )

        if cur_len < tokenizer.model_max_length:
            # Sanity check: offsets must exactly cover the unpadded sequence.
            assert cur_len == total_len

    if inferences[0] == False:
        # Training: truncate to leave room (255 tokens) for image embeddings.
        truncate_len = tokenizer.model_max_length - 255

        if input_ids.shape[1] > truncate_len:
            input_ids = input_ids[:, :truncate_len]
            targets = targets[:, :truncate_len]
            attention_masks = attention_masks[:, :truncate_len]

    return {
        "image_paths": image_path_list,
        "images": torch.stack(images_list, dim=0),
        "images_clip": torch.stack(images_clip_list, dim=0),
        "input_ids": input_ids,
        "labels": targets,
        "attention_masks": attention_masks,
        "masks_list": masks_list,
        "label_list": label_list,
        "resize_list": resize_list,
        "offset": torch.LongTensor(offset_list),
        "questions_list": questions_list,
        "sampled_classes_list": sampled_classes_list,
        "inference": inferences[0],
        "conversation_list": conversation_list,
    }
178,696 | import glob
import json
import os
import random
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image
from pycocotools.coco import COCO
from transformers import CLIPImageProcessor
from model.llava import conversation as conversation_lib
from model.segment_anything.utils.transforms import ResizeLongestSide
from .utils import ANSWER_LIST, SHORT_QUESTION_LIST
def init_mapillary(base_image_dir):
    """Collect Mapillary Vistas v2.0 class names, training images and label maps."""
    data_root = os.path.join(base_image_dir, "mapillary")
    with open(os.path.join(data_root, "config_v2.0.json")) as f:
        label_entries = json.load(f)["labels"]
    mapillary_classes = np.array([entry["readable"].lower() for entry in label_entries])
    mapillary_labels = sorted(
        glob.glob(os.path.join(data_root, "training", "v2.0", "labels", "*.png"))
    )
    # Image paths mirror the label paths with a different folder and extension.
    mapillary_images = [
        path.replace(".png", ".jpg").replace("v2.0/labels", "images")
        for path in mapillary_labels
    ]
    print("mapillary: ", len(mapillary_images))
    return mapillary_classes, mapillary_images, mapillary_labels
178,697 | import glob
import json
import os
import random
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image
from pycocotools.coco import COCO
from transformers import CLIPImageProcessor
from model.llava import conversation as conversation_lib
from model.segment_anything.utils.transforms import ResizeLongestSide
from .utils import ANSWER_LIST, SHORT_QUESTION_LIST
def init_ade20k(base_image_dir):
    """Collect ADE20K class names, training images and annotation maps."""
    with open("utils/ade20k_classes.json", "r") as f:
        ade20k_classes = np.array(json.load(f))
    train_dir = os.path.join(base_image_dir, "ade20k/images", "training")
    # Keep only .jpg files, stripped of their extension.
    image_ids = [
        name[:-4] for name in sorted(os.listdir(train_dir)) if name.endswith(".jpg")
    ]
    ade20k_images = [
        os.path.join(
            base_image_dir,
            "ade20k",
            "images",
            "training",
            "{}.jpg".format(image_id),
        )
        for image_id in image_ids
    ]
    # Annotation maps live next to the images, as PNGs under "annotations".
    ade20k_labels = [
        path.replace(".jpg", ".png").replace("images", "annotations")
        for path in ade20k_images
    ]
    print("ade20k: ", len(ade20k_images))
    return ade20k_classes, ade20k_images, ade20k_labels
178,698 | import glob
import json
import os
import random
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image
from pycocotools.coco import COCO
from transformers import CLIPImageProcessor
from model.llava import conversation as conversation_lib
from model.segment_anything.utils.transforms import ResizeLongestSide
from .utils import ANSWER_LIST, SHORT_QUESTION_LIST
def init_cocostuff(base_image_dir):
    """Collect COCO-Stuff class names, training images and label maps."""
    with open("utils/cocostuff_classes.txt") as f:
        # First line is a header; each remaining line looks like "<id>: <name>".
        cocostuff_classes = [line.strip().split(": ")[-1] for line in f.readlines()[1:]]
    cocostuff_classes = np.array(cocostuff_classes)
    cocostuff_labels = glob.glob(
        os.path.join(base_image_dir, "cocostuff", "train2017", "*.png")
    )
    # Images are the matching JPEGs under the plain "coco" tree.
    cocostuff_images = [
        path.replace(".png", ".jpg").replace("cocostuff", "coco")
        for path in cocostuff_labels
    ]
    print("cocostuff: ", len(cocostuff_images))
    return cocostuff_classes, cocostuff_images, cocostuff_labels
178,699 | import glob
import json
import os
import random
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image
from pycocotools.coco import COCO
from transformers import CLIPImageProcessor
from model.llava import conversation as conversation_lib
from model.segment_anything.utils.transforms import ResizeLongestSide
from .utils import ANSWER_LIST, SHORT_QUESTION_LIST
def init_paco_lvis(base_image_dir):
    """Load the PACO-LVIS COCO API and a {category_id: name} map.

    Category names are either "obj" or "obj:part"; the parenthesised
    disambiguation suffix ("_(...)") is stripped from each component.
    """
    coco_api_paco_lvis = COCO(
        os.path.join(
            base_image_dir, "vlpart", "paco", "annotations", "paco_lvis_v1_train.json"
        )
    )
    class_map_paco_lvis = {}
    for cat in coco_api_paco_lvis.loadCats(coco_api_paco_lvis.getCatIds()):
        pieces = cat["name"].strip().split(":")
        if len(pieces) == 1:
            name = pieces[0].split("_(")[0]
        else:
            assert len(pieces) == 2
            obj, part = pieces
            name = (obj.split("_(")[0], part.split("_(")[0])
        class_map_paco_lvis[cat["id"]] = name
    img_ids = coco_api_paco_lvis.getImgIds()
    print("paco_lvis: ", len(img_ids))
    return class_map_paco_lvis, img_ids, coco_api_paco_lvis
178,700 | import glob
import json
import os
import random
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image
from pycocotools.coco import COCO
from transformers import CLIPImageProcessor
from model.llava import conversation as conversation_lib
from model.segment_anything.utils.transforms import ResizeLongestSide
from .utils import ANSWER_LIST, SHORT_QUESTION_LIST
def init_pascal_part(base_image_dir):
    """Load the PASCAL-Part COCO API and a {category_id: (object, part)} map."""
    coco_api_pascal_part = COCO(
        os.path.join(base_image_dir, "vlpart", "pascal_part", "train.json")
    )
    class_map_pascal_part = {}
    for cat in coco_api_pascal_part.loadCats(coco_api_pascal_part.getCatIds()):
        # Every category name is "<object>:<part>".
        cat_main, cat_part = cat["name"].strip().split(":")
        class_map_pascal_part[cat["id"]] = (cat_main, cat_part)
    img_ids = coco_api_pascal_part.getImgIds()
    print("pascal_part: ", len(img_ids))
    return class_map_pascal_part, img_ids, coco_api_pascal_part
178,701 |
def lcs(s1, s2):
    """Length of the longest common *contiguous* substring of s1 and s2.

    Classic O(len(s1)*len(s2)) DP, using two rolling rows instead of the
    full table.
    """
    width = len(s1) + 1
    best = 0
    prev = [0] * width
    for ch2 in s2:
        cur = [0] * width
        for j, ch1 in enumerate(s1, start=1):
            if ch1 == ch2:
                cur[j] = prev[j - 1] + 1
                if cur[j] > best:
                    best = cur[j]
        prev = cur
    return best
178,702 |
The provided code snippet includes necessary dependencies for implementing the `prefix_sums` function. Write a Python function `def prefix_sums(ls: [int]) -> [int]` to solve the following problem:
Returns list of prefix sums for given list of integers.
Here is the function:
def prefix_sums(ls: [int]) -> [int]:
    """
    Return the running (prefix) sums of *ls*: element i is sum(ls[:i+1]).
    """
    sums = []
    running = 0
    for value in ls:
        running += value
        sums.append(running)
    return sums
178,703 |
The provided code snippet includes necessary dependencies for implementing the `prefix_function` function. Write a Python function `def prefix_function(s: str) -> [int]` to solve the following problem:
The prefix function for string s is defined as an array pi of length n, where pi[i] is the length of the longest proper prefix of the substring s[0...i] which is also a suffix of this substring. A proper prefix of a string is a prefix that is not equal to the string itself. By definition, pi[0] = 0.
Here is the function:
def prefix_function(s: str) -> [int]:
    """
    KMP failure function: pi[i] is the length of the longest proper prefix
    of s[:i+1] that is also a suffix of it. pi[0] == 0 by definition.
    """
    n = len(s)
    pi = [0] * n
    matched = 0  # invariant: matched == pi[i - 1] at the top of each iteration
    for i in range(1, n):
        # Fall back through shorter borders until the next character matches.
        while matched > 0 and s[i] != s[matched]:
            matched = pi[matched - 1]
        if s[i] == s[matched]:
            matched += 1
        pi[i] = matched
    return pi
178,704 |
def find_seq(arr):
    """Return the length of the longest run of consecutive integers in *arr*.

    Fixes two defects of the original dict-walk version:
    * isolated/single elements were counted as 0 (the count was only updated
      when a predecessor had already been seen, so find_seq([5]) returned 0);
    * runs were missed when the input was not sorted ascending (e.g.
      [100, 4, 200, 1, 3, 2] returned 2 instead of 4).
    Uses the standard set-based O(n) approach: only expand from run starts.
    """
    values = set(arr)
    best = 0
    for v in values:
        if v - 1 in values:
            continue  # not the start of a run
        length = 1
        while v + length in values:
            length += 1
        best = max(best, length)
    return best
178,705 |
def LIS(arr):
    """Length of the longest strictly increasing subsequence (O(n^2) DP).

    best_ending_at[i] is the LIS length among subsequences ending at index i.
    """
    if not arr:
        return 0
    best_ending_at = [1] * len(arr)
    for i, value in enumerate(arr):
        for j in range(i):
            if value > arr[j] and best_ending_at[j] + 1 > best_ending_at[i]:
                best_ending_at[i] = best_ending_at[j] + 1
    return max(best_ending_at)
178,706 |
def longest_palindromic_substring_DP(s):
    """Longest palindromic substring via O(n^2) interval DP.

    is_pal[i][j] is True when s[i:j+1] is a palindrome; spans of length <= 3
    only need their end characters to match, longer spans also require the
    inner span to be a palindrome.
    """
    n = len(s)
    is_pal = [[False] * n for _ in range(n)]
    best = ""
    # Walk start indices right-to-left so is_pal[i + 1][j - 1] is ready.
    for i in reversed(range(n)):
        for j in range(i, n):
            if s[i] == s[j] and (j - i < 3 or is_pal[i + 1][j - 1]):
                is_pal[i][j] = True
                if j - i + 1 > len(best):
                    best = s[i:j + 1]
    return best
178,707 |
def longest_palindromic_substring_expansion(s):
    """Longest palindromic substring by expanding around every center.

    Each character is an odd-length center; each adjacent pair is an
    even-length center. The best (strictly longest, earliest-found) match
    is kept while expanding outward.
    """
    n = len(s)
    best = ""

    def _expand(lo, hi):
        nonlocal best
        while lo >= 0 and hi < n and s[lo] == s[hi]:
            if hi - lo + 1 > len(best):
                best = s[lo:hi + 1]
            lo -= 1
            hi += 1

    for center in range(n):
        _expand(center, center)      # odd-length, centered on a character
        _expand(center, center + 1)  # even-length, centered between characters
    return best
178,708 |
def lcs(s1, s2):
    """Length of the longest common *subsequence* of s1 and s2.

    Classic DP with two rolling rows; the final cell holds the answer
    because the table is monotone in both directions.
    """
    width = len(s1) + 1
    prev = [0] * width
    for ch2 in s2:
        cur = [0] * width
        for j, ch1 in enumerate(s1, start=1):
            if ch1 == ch2:
                cur[j] = prev[j - 1] + 1
            else:
                cur[j] = max(prev[j], cur[j - 1])
        prev = cur
    return prev[-1]
178,709 |
def find_partiion(arr, n):
    """Return 1 if arr[:n] can be split into two subsets of equal sum, else 0.

    (Name typo preserved for caller compatibility.) Subset-sum DP over
    half the total: reachable[j] == 1 iff some subset sums to j.
    """
    total = sum(arr[:n])
    # An odd total can never be split evenly.
    if total % 2 != 0:
        return 0
    half = total // 2
    reachable = [0] * (half + 1)
    for i in range(n):
        # Walk sums downward so each element is used at most once.
        for j in range(half, arr[i] - 1, -1):
            if j == arr[i] or reachable[j - arr[i]] == 1:
                reachable[j] = 1
    return reachable[half]
178,710 | ans, elements = find_seq(arr, len(arr))
def find_seq(arr, n):
    """Longest run of consecutive integers among the values of *arr*.

    Returns (length, elements) where *elements* is the run itself in
    ascending order. Scans arr[:n], expanding only from run starts
    (values whose predecessor is absent), for overall O(n) set lookups.
    """
    values = set(arr)
    best_len = 0
    best_run = []
    for i in range(n):
        start = arr[i]
        if start - 1 in values:
            continue  # not the beginning of a run
        run = []
        cur = start
        while cur in values:
            run.append(cur)
            cur += 1
        if cur - start > best_len:
            best_len = cur - start
            best_run = run
    return best_len, best_run
178,711 |
def find_length(arr, k):
    """Return (length, subarray) of the longest contiguous subarray of *arr*
    whose sum is divisible by *k*.

    Uses the prefix-sum-mod trick: if two prefixes share the same residue
    mod k, the elements between them sum to a multiple of k.

    Fixes vs. the original:
    * a zero residue at index i means the whole prefix arr[:i+1] qualifies —
      the original merely incremented a counter (`length += 1`);
    * the candidate length was computed as `i - mod_arr[i]` (index minus the
      *residue value*) instead of the distance to the residue's first index;
    * when no subarray qualifies, ([], 0) is returned instead of arr[0:1].
    """
    first_index = {}  # residue -> earliest prefix index with that residue
    prefix_mod = 0
    length = 0
    start, end = 0, -1  # inclusive bounds of the best subarray (empty by default)
    for i, value in enumerate(arr):
        prefix_mod = (prefix_mod + value) % k
        if prefix_mod == 0:
            # The whole prefix arr[:i+1] sums to a multiple of k.
            if i + 1 > length:
                length = i + 1
                start, end = 0, i
        elif prefix_mod in first_index:
            j = first_index[prefix_mod]
            # arr[j+1:i+1] sums to a multiple of k.
            if i - j > length:
                length = i - j
                start, end = j + 1, i
        else:
            first_index[prefix_mod] = i
    return length, arr[start:end + 1]
178,712 | import functools
def hamilton_cycle(graph, n):
    """Return True iff *graph* (n x n adjacency matrix) has a Hamiltonian path.

    (Despite the name, this checks for a path visiting every vertex once, not
    a closed cycle.) Bitmask DP: dp[mask][v] is True when some simple path
    visits exactly the vertices in *mask* and ends at v.
    """
    full = 1 << n
    dp = [[False] * n for _ in range(full)]
    # Base case: each single vertex is a path of length one.
    for v in range(n):
        dp[1 << v][v] = True
    for mask in range(full):
        members = [v for v in range(n) if mask & (1 << v)]
        outside = [v for v in range(n) if not mask & (1 << v)]
        for end in members:
            if not dp[mask][end]:
                continue
            # Extend the path by any adjacent vertex not yet visited.
            for nxt in outside:
                if graph[end][nxt]:
                    dp[mask | (1 << nxt)][nxt] = True
    return any(dp[full - 1])
178,713 |
def min_coins(coins, total):
    """Print the minimum number of coins summing to *total*, and one such
    combination (unbounded coin change, DP over amounts).

    Fix: when *total* could not be formed, the original reconstruction loop
    read coins_used[total] == -1, picked coins[-1] and subtracted it forever
    (or went negative) — an unformable total is now reported cleanly.
    """
    INF = float('inf')
    fewest = [INF] * (total + 1)
    coin_used_at = [-1] * (total + 1)  # index of the coin chosen for each amount
    fewest[0] = 0  # zero coins are needed to form 0
    for idx in range(len(coins)):
        coin = coins[idx]
        for amount in range(coin, total + 1):
            if 1 + fewest[amount - coin] < fewest[amount]:
                fewest[amount] = 1 + fewest[amount - coin]
                coin_used_at[amount] = idx
    if fewest[total] == INF:
        # Total cannot be formed from the given coins.
        print('Min coins needed - ', INF)
        print('Coins used - ', [])
        return
    # Walk the choice table backwards to recover one optimal combination.
    picked_coins = []
    remaining = total
    while remaining > 0:
        coin = coins[coin_used_at[remaining]]
        picked_coins.append(coin)
        remaining -= coin
    print('Min coins needed - ', fewest[total])
    print('Coins used - ', picked_coins)
178,714 |
The provided code snippet includes necessary dependencies for implementing the `find_subarray` function. Write a Python function `def find_subarray(arr, k)` to solve the following problem:
True means divisible by k
Here is the function:
def find_subarray(arr, k):
    """Print the longest window of *arr* in which no two elements' sum is
    divisible by *k* (i.e. no two residues mod k sum to a multiple of k).

    Sliding window over residue counts. Fixes vs. the original:
    * the residue-count buffer was sized len(arr) instead of k, raising
      IndexError (and `k - mod` indexed one past the end when mod == 0);
    * the final print loop iterated the 2-tuple (max_start, max_end + 1)
      instead of range(max_start, max_end + 1);
    * the reported size was end - start, one short of the window length.
    """
    n = len(arr)
    if n == 0 or k <= 0:
        print('Max size is 0')
        return
    count = [0] * k  # count[r] = elements with value % k == r inside the window
    start = 0
    best_start, best_end = 0, 0  # inclusive bounds of the best window
    count[arr[0] % k] += 1
    for end in range(1, n):
        mod = arr[end] % k
        # Residue that would pair with `mod` to make a multiple of k
        # (covers mod == 0 pairing with another 0 via the outer % k).
        partner = (k - mod) % k
        # Shrink from the left until arr[end] can join without a bad pair.
        while count[partner] != 0:
            count[arr[start] % k] -= 1
            start += 1
        count[mod] += 1
        if end - start > best_end - best_start:
            best_start, best_end = start, end
    print('Max size is {}'.format(best_end - best_start + 1))
    for i in range(best_start, best_end + 1):
        print(arr[i], end=" ")
178,715 |
def knapsack(values, weights, total):
    """0/1 knapsack: maximum value achievable with capacity `total`.

    Classic DP table where t[i][cap] is the best value obtainable using
    only the first i items with remaining capacity cap.
    """
    item_count = len(weights)
    # (item_count + 1) rows x (total + 1) columns, zero-initialised.
    t = [[0] * (total + 1) for _ in range(item_count + 1)]
    for i in range(1, item_count + 1):
        value, weight = values[i - 1], weights[i - 1]
        for cap in range(1, total + 1):
            if weight > cap:
                # Item doesn't fit: inherit the best without it.
                t[i][cap] = t[i - 1][cap]
            else:
                # Either skip the item or take it and pay its weight.
                t[i][cap] = max(t[i - 1][cap], value + t[i - 1][cap - weight])
    return t[item_count][total]
178,716 |
The provided code snippet includes necessary dependencies for implementing the `check_luhn` function. Write a Python function `def check_luhn(card_number)` to solve the following problem:
The Luhn algorithm (Luhn formula) is a simple checksum formula used to validate a variety of identification numbers, such as credit card numbers, IMEI numbers, and National Provider Identifier numbers in some countries. It takes a number as input (assuming the card number is given as a string) and returns true or false based upon whether the number is valid or not :param card_number: :return: bool: valid or not Examples: >>> check_luhn("950123440000") False >>> check_luhn("490154203237518") True
Here is the function:
def check_luhn(card_number):
    """
    Validate an identification number (credit card, IMEI, NPI, ...)
    with the Luhn checksum: walking from the rightmost digit, every
    second digit is doubled (subtracting 9 when the double exceeds 9)
    and the grand total must be a multiple of 10.

    :param card_number: the number to validate, as a string of digits
    :return: bool: valid or not

    Examples:
    >>> check_luhn("950123440000")
    False
    >>> check_luhn("490154203237518")
    True
    """
    total = 0
    double_this = False  # rightmost digit is never doubled
    for ch in reversed(card_number):
        value = int(ch)
        if double_this:
            value *= 2
            if value > 9:
                value -= 9
        total += value
        double_this = not double_this
    return total % 10 == 0
178,717 |
The provided code snippet includes necessary dependencies for implementing the `front_and_back_search` function. Write a Python function `def front_and_back_search(lst, item)` to solve the following problem:
args: lst: an array of integers sorted in ascending order (the two-pointer scan moves `rear` right only when item > lst[rear] and `front` left only when item < lst[front], which is only sound on sorted input) item: data to be found return: True if the item is found, else False
Here is the function:
def front_and_back_search(lst, item):
    '''
    Two-pointer search from both ends of the list.

    args:
        lst: an array of integers sorted in ascending order — the
             pointer-advance rules below rely on that order; despite the
             original comment, this is not sound on unsorted input
        item: data to be found
    return:
        True if the item is found, else False
    '''
    rear = 0
    front = len(lst) - 1
    while rear <= front:
        # Found at either end of the shrinking window.
        if item == lst[rear] or item == lst[front]:
            return True
        moved = False
        if item > lst[rear]:   # item can only be further right
            rear += 1
            moved = True
        if item < lst[front]:  # item can only be further left
            front -= 1
            moved = True
        if not moved:
            # Bug fix: the old version spun forever here whenever neither
            # pointer could advance (possible on unsorted data, e.g.
            # lst=[3, 1], item=2).
            return False
    return False
178,718 | import codecs
import random
def processSeed(seed, mostFreqSeed, mostFreq, ch, k, mp):
    """Slide the k-character seed window forward by one character.

    Appends ch to the window; once the window exceeds k characters,
    records ch as a follower of the previous k-character seed in mp,
    updates the most-frequent-seed bookkeeping, and trims the window.
    Returns the updated (seed, mostFreq, mostFreqSeed) triple.
    """
    seed = seed + ch
    if len(seed) > k:  # window is now k+1 chars: record and trim
        prev = seed[:-1]
        followers = mp.setdefault(prev, [])
        followers.append(ch)
        if len(followers) > mostFreq:
            mostFreq, mostFreqSeed = len(followers), prev
        seed = seed[1:]
    return seed, mostFreq, mostFreqSeed

def readFile(f, mp, k):
    """Build an order-k Markov follower table from the lines of f.

    Fills mp in place (seed -> list of following characters, with
    repeats) and returns the seed with the most recorded followers.
    """
    window = ''
    best_seed, best_count = window, 1
    for line in f:
        for ch in line:
            window, best_count, best_seed = processSeed(
                window, best_seed, best_count, ch, k, mp)
    return best_seed
178,719 | import codecs
import random
MAX_LETTERS = 2000  # length of the generated text

def generateText(mp, mostFreqSeed):
    """Generate MAX_LETTERS characters of Markov-chain text.

    Starting from mostFreqSeed, repeatedly picks a random follower of
    the current seed from mp and slides the seed window forward.
    Assumes every seed reached has an entry in mp — TODO confirm.
    """
    output = mostFreqSeed
    window = mostFreqSeed
    while len(output) < MAX_LETTERS:
        nxt = random.choice(mp[window])
        output += nxt
        window = (window + nxt)[1:]  # slide the window by one char
    return output
178,720 |
def merge_sort(arr):
    """Sort arr in place using recursive merge sort (returns None)."""
    if len(arr) <= 1:
        return
    mid = len(arr) // 2
    left, right = arr[:mid], arr[mid:]  # split into two halves
    merge_sort(left)
    merge_sort(right)
    # Merge the two sorted halves back into arr.
    i = j = 0
    for k in range(len(arr)):
        take_left = j >= len(right) or (i < len(left) and left[i] < right[j])
        if take_left:
            arr[k] = left[i]
            i += 1
        else:
            arr[k] = right[j]
            j += 1
# NOTE(review): the lines below appear to be an orphaned, def-less copy of
# the body of pigeonhole_sort (defined immediately after) that lost its
# function header during dataset extraction; they reference an undefined
# name `a` and are not runnable as-is — TODO confirm against the original
# source and remove or re-wrap them.
178,721 | my_min = min(a)
my_max = max(a)
size = my_max - my_min + 1
holes = [0] * size
for x in a:
    assert type(x) is int, "integers only please"
    holes[x - my_min] += 1
i = 0
for count in range(size):
    while holes[count] > 0:
        holes[count] -= 1
        a[i] = count + my_min
        i += 1
for i in range(0, len(a)):
    print(a[i], end = ' ')
def pigeonhole_sort(a):
    """Sort a list of integers in place via pigeonhole counting.

    Builds one counter per value in [min(a), max(a)], then writes the
    values back in order.  Raises ValueError on an empty list (min/max).
    """
    lo, hi = min(a), max(a)
    counts = [0] * (hi - lo + 1)  # one pigeonhole per possible value
    for value in a:
        assert type(value) is int, "integers only please"
        counts[value - lo] += 1
    # Drain the pigeonholes back into the list, smallest value first.
    pos = 0
    for offset, c in enumerate(counts):
        for _ in range(c):
            a[pos] = offset + lo
            pos += 1
def find_smallest(arr):
    """Return the index of the first occurrence of the minimum of arr.

    Raises IndexError on an empty list.
    """
    best_val, best_idx = arr[0], 0
    for idx, val in enumerate(arr[1:], start=1):
        if val < best_val:
            best_val, best_idx = val, idx
    return best_idx

def selection_sort(arr):
    """Return a new list with arr's elements in ascending order.

    NOTE: consumes the input — arr is emptied by repeated pops.
    """
    ordered = []
    while arr:
        ordered.append(arr.pop(find_smallest(arr)))
    return ordered
178,723 |
def counting_sort(arr):
    """Sort a list of integers in place using counting sort.

    Counts occurrences of each value in [min(arr), max(arr)] and writes
    the values back in order.  Raises ValueError on an empty list.
    """
    lo = min(arr)
    counts = [0] * (max(arr) - lo + 1)  # one counter per possible value
    for value in arr:
        counts[value - lo] += 1
    # Rewrite arr run-by-run from the counters.
    pos = 0
    for offset, c in enumerate(counts):
        arr[pos:pos + c] = [lo + offset] * c
        pos += c
def heapify(nums, heap_size, root_index):
    """Sift nums[root_index] down within nums[:heap_size] (max-heap).

    Compares the root with its left and right children and swaps it with
    the larger child, recursing until the heap property holds.
    """
    biggest = root_index
    # Check the left child first, then the right — same comparison order
    # as a sequential left/right test.
    for child in (2 * root_index + 1, 2 * root_index + 2):
        if child < heap_size and nums[child] > nums[biggest]:
            biggest = child
    if biggest != root_index:
        nums[root_index], nums[biggest] = nums[biggest], nums[root_index]
        heapify(nums, heap_size, biggest)

def heap_sort(nums):
    """Sort nums in place with heapsort (returns None)."""
    count = len(nums)
    # Build a max heap bottom-up (starting at `count` is a harmless
    # no-op pass, kept from the original).
    for idx in range(count, -1, -1):
        heapify(nums, count, idx)
    # Repeatedly move the current maximum to the end and re-heapify.
    for end in range(count - 1, 0, -1):
        nums[end], nums[0] = nums[0], nums[end]
        heapify(nums, end, 0)
178,725 | import random
def is_sorted(a):
    """Return True if a is in non-decreasing order."""
    return all(a[idx] <= a[idx + 1] for idx in range(len(a) - 1))

def shuffle(a):
    """Naively permute a in place: each slot is swapped with a
    uniformly random slot (same random.randint call sequence as the
    original, so RNG-dependent behavior is unchanged)."""
    count = len(a)
    for idx in range(count):
        other = random.randint(0, count - 1)
        a[idx], a[other] = a[other], a[idx]

def bogo_sort(a):
    """Sort a in place by shuffling until it happens to be sorted."""
    while not is_sorted(a):
        shuffle(a)
178,726 |
def insertion_sort(lst):
    """Sort lst in place by insertion sort and return it."""
    for pos in range(1, len(lst)):
        # Bubble lst[pos] left until it sits after a smaller-or-equal key.
        cur = pos
        while cur > 0 and lst[cur] < lst[cur - 1]:
            lst[cur - 1], lst[cur] = lst[cur], lst[cur - 1]
            cur -= 1
    return lst
178,727 |
The provided code snippet includes necessary dependencies for implementing the `gnome_sort` function. Write a Python function `def gnome_sort(arr)` to solve the following problem:
Examples: >>> gnome_sort([0, 5, 2, 3, 2]) [0, 2, 2, 3, 5] >>> gnome_sort([]) [] >>> gnome_sort([-2, -45, -5]) [-45, -5, -2]
Here is the function:
def gnome_sort(arr):
    """
    Sort arr in place with gnome sort and return it: walk forward while
    adjacent pairs are ordered, swap and step back when they are not.

    Examples:
    >>> gnome_sort([0, 5, 2, 3, 2])
    [0, 2, 2, 3, 5]
    >>> gnome_sort([])
    []
    >>> gnome_sort([-2, -45, -5])
    [-45, -5, -2]
    """
    if len(arr) <= 1:  # nothing to do for empty / single element
        return arr
    pos = 0
    while pos < len(arr):
        if pos == 0 or arr[pos] >= arr[pos - 1]:
            pos += 1
        else:
            arr[pos], arr[pos - 1] = arr[pos - 1], arr[pos]
            pos -= 1
    return arr
178,728 |
def qsort(arr):
    """Return a new sorted list (quicksort, last element as pivot).

    Bug fix: the previous version called arr.pop(), permanently removing
    the last element from the *caller's* list on every top-level call.
    The input is now left untouched.
    """
    if len(arr) <= 1:
        # Copy so the caller never shares/loses its list.
        return list(arr)
    pivot = arr[-1]
    rest = arr[:-1]
    # Same partition rule as before: strictly greater right, rest left.
    greater = [item for item in rest if item > pivot]
    lesser = [item for item in rest if item <= pivot]
    return qsort(lesser) + [pivot] + qsort(greater)
178,729 |
def shell_sort(arr):
    """Sort arr in place using Shell sort with the gap sequence
    n//2, n//4, ..., 1 (gapped insertion sort at each gap)."""
    length = len(arr)
    gap = length // 2
    while gap > 0:
        # Insertion-sort each gap-separated subsequence.
        for idx in range(gap, length):
            pending = arr[idx]
            slot = idx
            # Shift larger gap-sorted elements right until pending fits.
            while slot >= gap and arr[slot - gap] > pending:
                arr[slot] = arr[slot - gap]
                slot -= gap
            arr[slot] = pending
        gap //= 2
178,730 |
def bubble_sort(array):
    """Sort array in place with bubble sort and return it."""
    length = len(array)
    for pass_num in range(length):
        # After each pass the largest remaining value is in place,
        # so the inner sweep shrinks by one.
        for idx in range(length - pass_num - 1):
            if array[idx] > array[idx + 1]:
                array[idx], array[idx + 1] = array[idx + 1], array[idx]
    return array
178,731 |
The provided code snippet includes necessary dependencies for implementing the `bubble_sort_optimized` function. Write a Python function `def bubble_sort_optimized(array)` to solve the following problem:
Optimizes on bubble sort by taking care of already swapped cases Reference - https://github.com/prabhupant/python-ds/pull/346
Here is the function:
def bubble_sort_optimized(array):
    """
    Bubble sort (in place) that stops as soon as a full pass makes no
    swap, i.e. the list is already sorted.
    Reference - https://github.com/prabhupant/python-ds/pull/346
    """
    passes_done = 0
    swapped = True
    while swapped:
        swapped = False
        # The tail of length passes_done is already in final position.
        for idx in range(len(array) - passes_done - 1):
            if array[idx] > array[idx + 1]:
                array[idx], array[idx + 1] = array[idx + 1], array[idx]
                swapped = True
        passes_done += 1
178,732 |
The provided code snippet includes necessary dependencies for implementing the `getSum` function. Write a Python function `def getSum(a, b)` to solve the following problem:
:type a: int :type b: int :rtype: int
Here is the function:
def getSum(a, b):
    """
    Add two integers using only bitwise operations, simulating 32-bit
    carry propagation (Python ints are arbitrary precision, so the mask
    bounds the loop).

    Bug fix: the previous version called `trunc`, which was never
    imported, so every call with a nonzero carry raised NameError.
    (math.trunc on an int is the identity, so dropping it is a pure fix.)

    :type a: int
    :type b: int
    :rtype: int
    """
    mask = 0xffffffff
    # Repeat until no carry bits remain within the 32-bit window.
    while b & mask:
        carry = (a & b) << 1  # carry bits, shifted into position
        a = a ^ b             # sum without carries
        b = carry
    # If the carry escaped above 32 bits (b > 0), truncate to 32 bits,
    # matching the original's overflow handling.
    if b > 0:
        return a & mask
    else:
        return a
178,733 |
The provided code snippet includes necessary dependencies for implementing the `countBits` function. Write a Python function `def countBits(n)` to solve the following problem:
Consider a number x and half of the number (x//2). The binary representation of x has all the digits as the binary representation of x//2 followed by an additional digit at the last position. Therefore, we can find the number of set bits in x by finding the number of set bits in x//2 and determining whether the last digit in x is 0 or 1.
Here is the function:
def countBits(n):
""" Consider a number x and half of the number (x//2).
The binary representation of x has all the digits as
the binary representation of x//2 followed by an additional
digit at the last position. Therefore, we can find the number
of set bits in x by finding the number of set bits in x//2
and determining whether the last digit in x is 0 or 1. """
res = [0] * (n+1)
for i in range(1, n+1):
res[i] = res[i//2] + (i & 1)
return sum(res) | Consider a number x and half of the number (x//2). The binary representation of x has all the digits as the binary representation of x//2 followed by an additional digit at the last position. Therefore, we can find the number of set bits in x by finding the number of set bits in x//2 and determining whether the last digit in x is 0 or 1. |
178,734 |
def cost(arr, A, B):
    """Print the minimum cost of tiling the '.' cells of each row,
    using 1x1 tiles of cost A and horizontal 1x2 tiles of cost B;
    '*' cells are already tiled.

    Note: like the original, the width of the first row is used for
    every row.
    """
    row_count = len(arr)
    width = len(arr[0])
    total = 0
    for r in range(row_count):
        col = 0
        while col < width:
            if arr[r][col] == '*':  # already tiled
                col += 1
                continue
            if col + 1 < width and arr[r][col + 1] == '.':
                # Two open cells: one 1x2 tile or two 1x1 tiles.
                total += min(2 * A, B)
                col += 2
            else:
                # Lone open cell: only a 1x1 tile fits.
                total += A
                col += 1
    print('Cost of tiling is - ', total)
178,735 |
def find_platforms(arrival, departure):
    """Return the minimum number of platforms needed so no train waits.

    Sorts both time lists (in place, as before) and sweeps them with two
    pointers, tracking how many trains are present simultaneously.
    """
    count = len(arrival)
    arrival.sort()
    departure.sort()
    best = 1  # at least one platform is required
    current = 1
    arr_idx, dep_idx = 1, 0
    while arr_idx < count and dep_idx < count:
        if arrival[arr_idx] <= departure[dep_idx]:
            # Next train arrives before the earliest one departs.
            current += 1
            arr_idx += 1
        else:
            current -= 1
            dep_idx += 1
        best = max(best, current)
    return best
178,736 | import math
def egyptian_fraction(nr, dr):
    """Return the unit-fraction denominators of the greedy Egyptian
    fraction expansion of nr/dr, i.e. nr/dr = 1/ef[0] + 1/ef[1] + ...

    Bug fix: the previous version built `ef` but never returned it, so
    the function had no observable effect.

    :param nr: numerator (positive int)
    :param dr: denominator (positive int)
    :return: list of denominators
    """
    ef = []
    while nr != 0:
        # Greedy step: largest unit fraction 1/x not exceeding nr/dr.
        x = math.ceil(dr / nr)
        ef.append(x)
        nr = x * nr - dr
        dr = dr * x
    return ef
178,737 |
def find_activities(arr):
    """Greedy activity selection: return a maximum-size list of
    mutually non-overlapping (start, end) activities.

    Sorts arr in place by finish time (as before), always takes the
    first activity, then each activity starting no earlier than the
    previously chosen one's end.
    """
    arr.sort(key=lambda activity: activity[1])
    # Greedy choice: the earliest-finishing activity is always taken.
    chosen = [arr[0]]
    last_end = arr[0][1]
    for candidate in arr[1:]:
        if candidate[0] >= last_end:
            chosen.append(candidate)
            last_end = candidate[1]
    return chosen
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.