| id (int64, 0-190k) | prompt (string, length 21-13.4M) | docstring (string, length 1-12k, nullable ⌀) |
|---|---|---|
179,576 | import argparse
import asyncio
import dataclasses
from enum import Enum, auto
import json
import logging
import time
from typing import List, Union
import threading
from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
import numpy as np
import requests
import uvicorn
from videollava.cons... | null |
179,577 | import argparse
import asyncio
import dataclasses
from enum import Enum, auto
import json
import logging
import time
from typing import List, Union
import threading
from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
import numpy as np
import requests
import uvicorn
from videollava.cons... | null |
179,578 | import argparse
import asyncio
import dataclasses
from enum import Enum, auto
import json
import logging
import time
from typing import List, Union
import threading
from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
import numpy as np
import requests
import uvicorn
from videollava.cons... | null |
179,579 | import argparse
import asyncio
import dataclasses
from enum import Enum, auto
import json
import logging
import time
from typing import List, Union
import threading
from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
import numpy as np
import requests
import uvicorn
from videollava.cons... | null |
179,580 | import shutil
import subprocess
import torch
import gradio as gr
from fastapi import FastAPI
import os
from PIL import Image
import tempfile
from decord import VideoReader, cpu
from transformers import TextStreamer
from videollava.constants import DEFAULT_IMAGE_TOKEN
from videollava.conversation import conv_templates, ... | null |
179,581 | import shutil
import subprocess
import torch
import gradio as gr
from fastapi import FastAPI
import os
from PIL import Image
import tempfile
from decord import VideoReader, cpu
from transformers import TextStreamer
from videollava.constants import DEFAULT_IMAGE_TOKEN
from videollava.conversation import conv_templates, ... | null |
179,582 | import shutil
import subprocess
import torch
import gradio as gr
from fastapi import FastAPI
import os
from PIL import Image
import tempfile
from decord import VideoReader, cpu
from transformers import TextStreamer
from videollava.constants import DEFAULT_IMAGE_TOKEN
from videollava.conversation import conv_templates, ... | null |
179,583 | import os
import copy
import random
from dataclasses import dataclass, field
import json
import logging
import pathlib
from typing import Dict, Optional, Sequence, List
import torch
import transformers
from videollava.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, \
D... | null |
179,584 | import os
import copy
import random
from dataclasses import dataclass, field
import json
import logging
import pathlib
from typing import Dict, Optional, Sequence, List
import torch
import transformers
from videollava.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, \
D... | null |
179,585 | import os
import copy
import random
from dataclasses import dataclass, field
import json
import logging
import pathlib
from typing import Dict, Optional, Sequence, List
import torch
import transformers
from videollava.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, \
D... | null |
179,589 | import os
from .clip_encoder import CLIPVisionTower
from .languagebind import LanguageBindImageTower, LanguageBindVideoTower
class CLIPVisionTower(nn.Module):
def __init__(self, vision_tower, args, delay_load=False):
super().__init__()
self.is_loaded = False
self.vision_tower_name = visio... | null |
179,590 | import os
from .clip_encoder import CLIPVisionTower
from .languagebind import LanguageBindImageTower, LanguageBindVideoTower
class LanguageBindVideoTower(nn.Module):
def __init__(self, video_tower, args, delay_load=False, cache_dir='./cache_dir'):
super().__init__()
self.is_loaded = False
... | null |
179,591 | import cv2
import torch
from PIL import Image
from torch import nn
from torchvision import transforms
from transformers import ProcessorMixin, BatchEncoding
from transformers.image_processing_utils import BatchFeature
def make_list_of_images(x):
if not isinstance(x, list):
return [x]
return x | null |
179,592 | import cv2
import torch
from PIL import Image
from torch import nn
from torchvision import transforms
from transformers import ProcessorMixin, BatchEncoding
from transformers.image_processing_utils import BatchFeature
OPENAI_DATASET_MEAN = (0.48145466, 0.4578275, 0.40821073)
OPENAI_DATASET_STD = (0.26862954, 0.26130258... | null |
179,593 | import cv2
import torch
from PIL import Image
from torch import nn
from torchvision import transforms
from transformers import ProcessorMixin, BatchEncoding
from transformers.image_processing_utils import BatchFeature
def opencv_loader(path):
return cv2.imread(path, cv2.IMREAD_UNCHANGED).astype('float32')
def load... | null |
179,594 | import math
from typing import Optional, Tuple, Union
import torch
from einops import rearrange
from peft import LoraConfig, get_peft_model
from torch import nn
from torch.nn import functional as F
from transformers import PreTrainedModel, add_start_docstrings
from transformers.modeling_outputs import BaseModelOutput, ... | Make causal mask used for bi-directional self-attention. |
179,595 | import math
from typing import Optional, Tuple, Union
import torch
from einops import rearrange
from peft import LoraConfig, get_peft_model
from torch import nn
from torch.nn import functional as F
from transformers import PreTrainedModel, add_start_docstrings
from transformers.modeling_outputs import BaseModelOutput, ... | Make causal mask used for bi-directional self-attention. |
179,596 | import cv2
import numpy as np
import torch
from torchvision import transforms
from transformers import ProcessorMixin, BatchEncoding
from transformers.image_processing_utils import BatchFeature
from torch.nn import functional as F
def make_list_of_images(x):
if not isinstance(x, list):
return [x]
retur... | null |
179,597 | import cv2
import numpy as np
import torch
from torchvision import transforms
from transformers import ProcessorMixin, BatchEncoding
from transformers.image_processing_utils import BatchFeature
from torch.nn import functional as F
def int16_to_float32_torch(x):
return (x / 32767.0).type(torch.float32) | null |
179,598 | import cv2
import numpy as np
import torch
from torchvision import transforms
from transformers import ProcessorMixin, BatchEncoding
from transformers.image_processing_utils import BatchFeature
from torch.nn import functional as F
def float32_to_int16_torch(x):
x = torch.clamp(x, min=-1., max=1.)
return (x * 3... | null |
179,599 | import cv2
import numpy as np
import torch
from torchvision import transforms
from transformers import ProcessorMixin, BatchEncoding
from transformers.image_processing_utils import BatchFeature
from torch.nn import functional as F
class AudioTransform:
def __init__(self, config):
self.sample_rate = config.a... | null |
179,600 | import cv2
import numpy as np
import torch
from torchvision import transforms
from transformers import ProcessorMixin, BatchEncoding
from transformers.image_processing_utils import BatchFeature
from torch.nn import functional as F
def torchaudio_loader(path):
return torchaudio.load(path)
def load_and_transform_aud... | null |
179,601 | import torch
from PIL import Image
from torchvision import transforms
from transformers import ProcessorMixin, BatchEncoding
from transformers.image_processing_utils import BatchFeature
def make_list_of_images(x):
if not isinstance(x, list):
return [x]
return x | null |
179,602 | import torch
from PIL import Image
from torchvision import transforms
from transformers import ProcessorMixin, BatchEncoding
from transformers.image_processing_utils import BatchFeature
OPENAI_DATASET_MEAN = (0.48145466, 0.4578275, 0.40821073)
OPENAI_DATASET_STD = (0.26862954, 0.26130258, 0.27577711)
def get_thermal_t... | null |
179,603 | import torch
from PIL import Image
from torchvision import transforms
from transformers import ProcessorMixin, BatchEncoding
from transformers.image_processing_utils import BatchFeature
def load_and_transform_thermal(thermal_path, transform):
thermal = Image.open(thermal_path)
thermal_outputs = transform(therm... | null |
179,604 | import math
from typing import Optional, Tuple, Union
import torch
from einops import rearrange
from peft import LoraConfig, get_peft_model
from torch import nn
from torch.nn import functional as F
from transformers import PreTrainedModel, add_start_docstrings
from transformers.modeling_outputs import BaseModelOutput, ... | Make causal mask used for bi-directional self-attention. |
179,606 | import torch
from PIL import Image
from torchvision import transforms
from transformers import ProcessorMixin, BatchEncoding
from transformers.image_processing_utils import BatchFeature
OPENAI_DATASET_MEAN = (0.48145466, 0.4578275, 0.40821073)
OPENAI_DATASET_STD = (0.26862954, 0.26130258, 0.27577711)
def get_image_tra... | null |
179,607 | import torch
from PIL import Image
from torchvision import transforms
from transformers import ProcessorMixin, BatchEncoding
from transformers.image_processing_utils import BatchFeature
def load_and_transform_image(image_path, transform):
image = Image.open(image_path).convert('RGB') if isinstance(image_path, str)... | null |
179,608 | import math
from typing import Optional, Tuple, Union
import torch
from einops import rearrange
from peft import LoraConfig, get_peft_model
from torch import nn
from torch.nn import functional as F
from transformers import PreTrainedModel, add_start_docstrings
from transformers.modeling_outputs import BaseModelOutput, ... | Make causal mask used for bi-directional self-attention. |
179,609 | import math
from typing import Optional, Tuple, Union
import torch
from einops import rearrange
from peft import LoraConfig, get_peft_model
from torch import nn
from torch.nn import functional as F
from transformers import PreTrainedModel, add_start_docstrings
from transformers.modeling_outputs import BaseModelOutput, ... | Make causal mask used for bi-directional self-attention. |
179,610 | import torch
import cv2
import decord
import numpy as np
from PIL import Image
from decord import VideoReader, cpu
from torchvision import transforms
from transformers import ProcessorMixin, BatchEncoding
from transformers.image_processing_utils import BatchFeature
from pytorchvideo.data.encoded_video import EncodedVid... | null |
179,611 | import torch
import cv2
import decord
import numpy as np
from PIL import Image
from decord import VideoReader, cpu
from torchvision import transforms
from transformers import ProcessorMixin, BatchEncoding
from transformers.image_processing_utils import BatchFeature
from pytorchvideo.data.encoded_video import EncodedVid... | null |
179,612 | import torch
import cv2
import decord
import numpy as np
from PIL import Image
from decord import VideoReader, cpu
from torchvision import transforms
from transformers import ProcessorMixin, BatchEncoding
from transformers.image_processing_utils import BatchFeature
from pytorchvideo.data.encoded_video import EncodedVid... | null |
179,613 | import argparse
import torch
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModelForCausalLM
from videollava import LlavaLlamaForCausalLM
def apply_delta(base_model_path, target_model_path, delta_path):
print("Loading base model")
base = AutoModelForCausalLM.from_pretrained(
base_mod... | null |
179,622 | import math
import torch
import triton_pre_mlir as triton
import triton_pre_mlir.language as tl
def _fwd_kernel(Q, K, V, Bias, Out, Lse, TMP, softmax_scale, stride_qb, stride_qh, stride_qm, stride_kb, stride_kh, stride_kn, stride_vb, stride_vh, stride_vn, stride_bb, stride_bh, stride_bm, stride_ob, stride_oh, stride_om... | null |
179,629 | import math
import warnings
from collections.abc import Sequence
from functools import partial
from typing import Optional, Tuple, Union
import torch
from torch import nn
from .norm import NORM_CLASS_REGISTRY
def generic_param_init_fn_(module: nn.Module, init_fn_, n_layers: int, d_model: Optional[int]=None, init_div_is... | null |
179,631 | import math
import warnings
from collections.abc import Sequence
from functools import partial
from typing import Optional, Tuple, Union
import torch
from torch import nn
from .norm import NORM_CLASS_REGISTRY
def generic_param_init_fn_(module: nn.Module, init_fn_, n_layers: int, d_model: Optional[int]=None, init_div_is... | null |
179,635 | import argparse
import torch
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModelForCausalLM
from videollava.model.utils import auto_upgrade
def auto_upgrade(config):
cfg = AutoConfig.from_pretrained(config)
if 'llava' in config and 'llava' not in cfg.model_type:
assert cfg.model_typ... | null |
179,636 | import argparse
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from videollava.model import *
from videollava.model.utils import auto_upgrade
def auto_upgrade(config):
cfg = AutoConfig.from_pretrained(config)
if 'llava' in config and 'llava' not in cfg.model_type:
assert cfg.... | null |
179,638 | import argparse
import torch
from tqdm import tqdm
import json
from videollava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
from videollava.conversation import conv_templates, SeparatorStyle
from videollava.model.builder import load_pretrained_model
from videolla... | null |
179,642 | import os
import argparse
import json
import re
from videollava.eval.m4c_evaluator import TextVQAAccuracyEvaluator
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--annotation-file', type=str)
parser.add_argument('--result-file', type=str)
parser.add_argument('--result-dir', typ... | null |
179,643 | import os
import argparse
import json
import re
from videollava.eval.m4c_evaluator import TextVQAAccuracyEvaluator
def prompt_processor(prompt):
if prompt.startswith('OCR tokens: '):
pattern = r"Question: (.*?) Short answer:"
match = re.search(pattern, prompt, re.DOTALL)
question = match.gro... | null |
179,650 | import argparse
from transformers import AutoTokenizer, AutoModelForCausalLM, StoppingCriteria
import torch
import os
import json
from tqdm import tqdm
import shortuuid
from videollava.conversation import default_conversation
from videollava.utils import disable_torch_init
class KeywordsStoppingCriteria(StoppingCriteri... | null |
179,651 | import argparse
import torch
import os
import json
from tqdm import tqdm
import shortuuid
from videollava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
from videollava.conversation import conv_templates, SeparatorStyle
from videollava.model.builder import load_pre... | null |
179,652 | import argparse
import torch
import os
import json
import pandas as pd
from tqdm import tqdm
import shortuuid
from videollava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
from videollava.conversation import conv_templates, SeparatorStyle
from videollava.model.bui... | null |
179,653 | import argparse
import openai
import json
import os
from tqdm import tqdm
import pandas as pd
import numpy as np
from collections import Counter
import time
len_data = 0
num_run = 1
for k, v in grade_results.items():
if sub_set is not None and k not in sub_set:
continue
for i in range(num_run):
... | null |
179,654 | from collections import defaultdict
from tqdm import tqdm
import argparse
import os.path
import glob
import json
import math
def loadFile(name):
# load standard json file
if os.path.isfile(name):
with open(name) as file:
data = json.load(file)
# load file chunks if too big
elif os.p... | null |
179,655 | from collections import defaultdict
from tqdm import tqdm
import argparse
import os.path
import glob
import json
import math
def wavg(l, w):
if sum(w) == 0:
return None
return float(sum(l[i] * w[i] for i in range(len(l)))) / sum(w) | null |
179,656 | from collections import defaultdict
from tqdm import tqdm
import argparse
import os.path
import glob
import json
import math
def getWordsNum(question):
return len(question["question"].split()) | null |
179,657 | from collections import defaultdict
from tqdm import tqdm
import argparse
import os.path
import glob
import json
import math
def getStepsNum(question):
return len([c for c in question["semantic"] if not (any([o in "{}: {}".format(c["operation"], c["argument"])
... | null |
179,658 | from collections import defaultdict
from tqdm import tqdm
import argparse
import os.path
import glob
import json
import math
def toSlice(strSlice):
sliceLims = (int(n) for n in strSlice.split(':'))
return apply(slice, sliceLims) | null |
179,659 | from collections import defaultdict
from tqdm import tqdm
import argparse
import os.path
import glob
import json
import math
def intsFromSlice(strSlice):
slice_obj = get_slice_obj(slicearg)
return (range(slice_obj.start or 0, slice_obj.stop or -1, slice_obj.step or 1)) | null |
179,660 | from collections import defaultdict
from tqdm import tqdm
import argparse
import os.path
import glob
import json
import math
def belongs(element, group, question):
# normalization ()
if "Common" in question["types"]["detailed"]:
group = ["color", "material", "shape"]
return element in group | null |
179,661 | from collections import defaultdict
from tqdm import tqdm
import argparse
import os.path
import glob
import json
import math
predictions = loadFile(args.predictions.format(tier=args.tier))
predictions = {p["questionId"]: p["prediction"] for p in predictions}
def toScore(b):
return float(1 if b else 0)
def avg(l):
... | null |
179,662 | from collections import defaultdict
from tqdm import tqdm
import argparse
import os.path
import glob
import json
import math
args = parser.parse_args()
if not args.consistency:
print("Please consider using --consistency to compute consistency scores for entailed questions.")
print("If you do so, please provide ... | null |
179,663 | from collections import defaultdict
from tqdm import tqdm
import argparse
import os.path
import glob
import json
import math
def chiSquare(goldDist, predictedDist):
sumScore, sumOverall = 0, 0
for group in goldDist:
score, overall = 0, 0
for ans in goldDist[group]:
e = goldDist[gr... | null |
179,664 | import os
import argparse
import json
from tqdm import tqdm
from videollava.eval.video.run_inference_video_qa import get_model_output
from videollava.mm_utils import get_model_name_from_path
from videollava.model.builder import load_pretrained_model
The provided code snippet includes necessary dependencies for impleme... | Parse command-line arguments. |
179,665 | import os
import argparse
import json
from tqdm import tqdm
from videollava.eval.video.run_inference_video_qa import get_model_output
from videollava.mm_utils import get_model_name_from_path
from videollava.model.builder import load_pretrained_model
def get_model_output(model, video_processor, tokenizer, video, qs, ar... | Run inference on a set of video files using the provided model. Args: args: Command-line arguments. |
179,666 | import openai
import os
import argparse
import json
import ast
from multiprocessing.pool import Pool
def parse_args():
parser = argparse.ArgumentParser(description="question-answer-generation-using-gpt-3")
parser.add_argument("--pred_path", required=True, help="The path to file containing prediction.")
par... | null |
179,667 | import openai
import os
import argparse
import json
import ast
from multiprocessing.pool import Pool
The provided code snippet includes necessary dependencies for implementing the `annotate` function. Write a Python function `def annotate(prediction_set, caption_files, output_dir, args)` to solve the following problem... | Evaluates question and answer pairs using GPT-3 and returns a score for temporal understanding. |
179,669 | import openai
import os
import argparse
import json
import ast
from multiprocessing.pool import Pool
The provided code snippet includes necessary dependencies for implementing the `annotate` function. Write a Python function `def annotate(prediction_set, caption_files, output_dir, args)` to solve the following problem... | Evaluates question and answer pairs using GPT-3 Returns a score for correctness. |
179,671 | import os
import argparse
import json
from tqdm import tqdm
from videollava.eval.video.run_inference_video_qa import get_model_output
from videollava.mm_utils import get_model_name_from_path
from videollava.model.builder import load_pretrained_model
def get_model_output(model, video_processor, tokenizer, video, qs, ar... | Run inference on a set of video files using the provided model. Args: args: Command-line arguments. |
179,673 | import openai
import os
import argparse
import json
import ast
from multiprocessing.pool import Pool
The provided code snippet includes necessary dependencies for implementing the `annotate` function. Write a Python function `def annotate(prediction_set, caption_files, output_dir, args)` to solve the following problem... | Evaluates question and answer pairs using GPT-3 and returns a score for consistency. |
179,675 | import openai
import os
import argparse
import json
import ast
from multiprocessing.pool import Pool
The provided code snippet includes necessary dependencies for implementing the `annotate` function. Write a Python function `def annotate(prediction_set, caption_files, output_dir, args)` to solve the following problem... | Evaluates question and answer pairs using GPT-3 and returns a score for detailed orientation. |
179,677 | import openai
import os
import argparse
import json
import ast
from multiprocessing.pool import Pool
The provided code snippet includes necessary dependencies for implementing the `annotate` function. Write a Python function `def annotate(prediction_set, caption_files, output_dir, args)` to solve the following problem... | Evaluates question and answer pairs using GPT-3 and returns a score for contextual understanding. |
179,678 | import math
import os
import argparse
import json
import torch
import transformers
from tqdm import tqdm
from videollava.conversation import conv_templates, SeparatorStyle
from videollava.constants import DEFAULT_IM_START_TOKEN, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_END_TOKEN, IMAGE_TOKEN_INDEX, DEFAULT_VID_START_TOKEN, DEFA... | Parse command-line arguments. |
179,679 | import math
import os
import argparse
import json
import torch
import transformers
from tqdm import tqdm
from videollava.conversation import conv_templates, SeparatorStyle
from videollava.constants import DEFAULT_IM_START_TOKEN, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_END_TOKEN, IMAGE_TOKEN_INDEX, DEFAULT_VID_START_TOKEN, DEFA... | Run inference on ActivityNet QA DataSet using the Video-ChatGPT model. Args: args: Command-line arguments. |
179,681 | import math
import os
import argparse
import json
import torch
import transformers
from tqdm import tqdm
from videollava.conversation import conv_templates, SeparatorStyle
from videollava.constants import DEFAULT_IM_START_TOKEN, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_END_TOKEN, IMAGE_TOKEN_INDEX, DEFAULT_VID_START_TOKEN, DEFA... | Run inference on ActivityNet QA DataSet using the Video-ChatGPT model. Args: args: Command-line arguments. |
179,682 | import openai
import os
import argparse
import json
import ast
from multiprocessing.pool import Pool
from tqdm import tqdm
def parse_args():
parser = argparse.ArgumentParser(description="question-answer-generation-using-gpt-3")
parser.add_argument("--pred_path", default=r'', help="The path to file containing p... | null |
179,683 | import openai
import os
import argparse
import json
import ast
from multiprocessing.pool import Pool
from tqdm import tqdm
The provided code snippet includes necessary dependencies for implementing the `annotate` function. Write a Python function `def annotate(prediction_set, caption_files, output_dir, args)` to solve... | Evaluates question and answer pairs using GPT-3 Returns a score for correctness. |
179,687 | import argparse
import torch
from videollava.constants import (
IMAGE_TOKEN_INDEX,
DEFAULT_IMAGE_TOKEN,
DEFAULT_IM_START_TOKEN,
DEFAULT_IM_END_TOKEN,
IMAGE_PLACEHOLDER,
)
from videollava.conversation import conv_templates, SeparatorStyle
from videollava.model.builder import load_pretrained_model
fro... | null |
179,688 | import argparse
import torch
import os
import json
from tqdm import tqdm
import shortuuid
from videollava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
from videollava.conversation import conv_templates, SeparatorStyle
from videollava.model.builder import load_pre... | null |
179,692 | import argparse
import torch
import os
import json
from tqdm import tqdm
import shortuuid
from videollava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
from videollava.conversation import conv_templates, SeparatorStyle
from videollava.model.builder import load_pre... | null |
179,698 | import argparse
import json
import os
import time
import pandas as pd
import tensor_parallel as tp
import torch
from tqdm import tqdm
from transformers import LlamaForCausalLM, LlamaTokenizer, AutoTokenizer, AutoModelForCausalLM
def load(ckpt_dir, model_type, cache_dir):
# n_gpus = torch.cuda.device_count()
n_g... | null |
179,699 | import argparse
import json
import os
import time
import pandas as pd
import tensor_parallel as tp
import torch
from tqdm import tqdm
from transformers import LlamaForCausalLM, LlamaTokenizer, AutoTokenizer, AutoModelForCausalLM
def format_subject(subject):
l = subject.split("_")
s = ""
for entry in l:
... | null |
179,700 | import argparse
import json
import os
import time
import pandas as pd
import tensor_parallel as tp
import torch
from tqdm import tqdm
from transformers import LlamaForCausalLM, LlamaTokenizer, AutoTokenizer, AutoModelForCausalLM
def prepare_input(tokenizer, prompts):
input_tokens = tokenizer.batch_encode_plus(promp... | null |
179,702 | def get_question_text(problem):
question = problem['question']
return question
def get_context_text(problem, use_caption):
txt_context = problem['hint']
img_context = problem['caption'] if use_caption else ""
context = " ".join([txt_context, img_context]).strip()
if context == "":
contex... | null |
179,708 | import argparse
from llava.model.builder import load_pretrained_model
from llava.mm_utils import get_model_name_from_path
def merge_lora(args):
model_name = get_model_name_from_path(args.model_path)
tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_n... | null |
179,712 | import logging
import sys
from datetime import datetime
from logging import getLogger, basicConfig
from pathlib import Path
from time import sleep
from django.conf import settings
from mwmbl.indexer import index_batches, historical
from mwmbl.indexer.batch_cache import BatchCache
from mwmbl.models import OldIndex
from ... | null |
179,713 | import os
import django
import uvicorn
from django.core.management import call_command
from redis import Redis
from mwmbl.indexer.update_urls import update_urls_continuously
from mwmbl.redis_url_queue import RedisURLQueue
def update_urls_continuously(data_path: str, new_item_queue: RedisURLQueue):
batch_cache = Ba... | null |
179,714 | from pandas import DataFrame, Series
from sklearn.base import BaseEstimator, RegressorMixin, TransformerMixin
from mwmbl.tinysearchengine.rank import get_features
def get_features(terms, title, url, extract, score, is_complete):
def get_features_as_series(item: Series):
terms = item['query'].lower().split()
f... | null |
179,715 | import math
import re
from abc import abstractmethod
from logging import getLogger
from operator import itemgetter
from typing import Optional
from urllib.parse import urlparse
from mwmbl.format import format_result_with_pattern, get_query_regex
from mwmbl.tokenizer import tokenize, get_bigrams
from mwmbl.tinysearcheng... | null |
179,716 | import math
import re
from abc import abstractmethod
from logging import getLogger
from operator import itemgetter
from typing import Optional
from urllib.parse import urlparse
from mwmbl.format import format_result_with_pattern, get_query_regex
from mwmbl.tokenizer import tokenize, get_bigrams
from mwmbl.tinysearcheng... | null |
179,717 | import math
import re
from abc import abstractmethod
from logging import getLogger
from operator import itemgetter
from typing import Optional
from urllib.parse import urlparse
from mwmbl.format import format_result_with_pattern, get_query_regex
from mwmbl.tokenizer import tokenize, get_bigrams
from mwmbl.tinysearcheng... | null |
179,718 | import json
import os
from dataclasses import dataclass, asdict, field
from enum import IntEnum
from io import UnsupportedOperation
from logging import getLogger
from mmap import mmap, PROT_READ, PROT_WRITE
from typing import TypeVar, Generic, Callable, List, Optional
import mmh3
from zstandard import ZstdDecompressor,... | null |
179,719 | from logging import getLogger
from ninja import NinjaAPI
from mwmbl.format import format_result
from mwmbl.tinysearchengine.rank import HeuristicRanker
def format_result(result, query):
tokens = tokenize(query)
pattern = get_query_regex(tokens, True, False)
return format_result_with_pattern(pattern, result... | null |
179,720 | import re
from django.template import Library
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
from mwmbl.format import get_query_regex, DOCUMENT_SOURCES, get_document_source
from mwmbl.tinysearchengine.indexer import DocumentState
from mwmbl.tokenizer import tokenize
def ... | null |
179,721 | import re
from django.template import Library
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
from mwmbl.format import get_query_regex, DOCUMENT_SOURCES, get_document_source
from mwmbl.tinysearchengine.indexer import DocumentState
from mwmbl.tokenizer import tokenize
def ... | null |
179,722 | from itertools import groupby
from urllib.parse import urlparse, parse_qs
from django.db import migrations
def create_curations_from_user_curation(apps, schema_editor):
Curation = apps.get_model('mwmbl', 'Curation')
UserCuration = apps.get_model('mwmbl', 'UserCuration')
# Order curations by timestamp
... | null |
179,723 | import gzip
from datetime import datetime, timedelta
from glob import glob
from itertools import islice
from logging import getLogger
from urllib.parse import urlparse
from pydantic import BaseModel
from redis import Redis
from mwmbl.crawler.batch import HashedBatch
from mwmbl.indexer.update_urls import get_datetime_fr... | null |
179,724 | import gzip
import hashlib
import json
import os
from datetime import datetime, timezone, date
from queue import Queue, Empty
from typing import Union
from uuid import uuid4
import boto3
import requests
from fastapi import HTTPException
from ninja import NinjaAPI
from redis import Redis
from mwmbl.crawler.batch import ... | null |
179,725 | from itertools import islice
from logging import getLogger
from django.conf import settings
from pybloomfilter import BloomFilter
from mwmbl.hn_top_domains_filtered import DOMAINS
def get_bloom_filter(domain_group: str) -> BloomFilter:
try:
bloom_filter = BloomFilter.open(settings.DOMAIN_LINKS_BLOOM_FILTER... | null |
179,726 | import glob
import gzip
import json
from collections import defaultdict
from urllib.parse import urlparse
from mwmbl.indexer.paths import CRAWL_GLOB, LINK_COUNT_PATH
def get_urls():
def collect_links(urls):
LINK_COUNT_PATH = MWMBL_DATA_DIR / 'crawl-counts.json'
def run():
url_links = get_urls()
collected = co... | null |
179,727 | import csv
import gzip
from mwmbl.indexer.fsqueue import FSQueue, ZstdJsonSerializer
from mwmbl.indexer.paths import DOMAINS_PATH, DOMAINS_QUEUE_NAME, TINYSEARCH_DATA_DIR
BATCH_SIZE = 250
def get_domains():
class ZstdJsonSerializer(Serializer):
def __init__(self):
def serialize(self, item) -> bytes:
def... | null |
179,728 | from multiprocessing import Process
from time import sleep
from urllib.parse import urlsplit, urlunsplit
import bs4
import requests
from mwmbl.indexer.fsqueue import FSQueue, ZstdJsonSerializer
from mwmbl.indexer.paths import TINYSEARCH_DATA_DIR, DOMAINS_QUEUE_NAME, DOMAINS_TITLES_QUEUE_NAME
NUM_PROCESSES = 10
def get_... | null |
179,729 | from typing import Iterable
from urllib.parse import unquote
from mwmbl.tinysearchengine.indexer import TokenizedDocument
from mwmbl.tokenizer import tokenize, get_bigrams
DEFAULT_SCORE = 0
def tokenize_document(url, title_cleaned, extract, score):
title_tokens = tokenize(title_cleaned)
prepared_url = prepare_u... | null |
179,730 | from datetime import date, timedelta
from mwmbl.crawler.app import get_batches_for_date
from mwmbl.database import Database
from mwmbl.indexer.indexdb import BatchInfo, BatchStatus, IndexDatabase
DAYS = 20
def get_user_id_hash_from_url(url):
return url.split('/')[9]
def get_batches_for_date(date_str):
check_da... | null |
179,731 | import glob
import gzip
import json
from itertools import islice
from typing import Iterator
from mwmbl.indexer.fsqueue import FSQueue, GzipJsonBlobSerializer
from mwmbl.indexer.paths import CRAWL_GLOB, TINYSEARCH_DATA_DIR
def get_deduped_pages():
seen_urls = set()
for path in sorted(glob.glob(CRAWL_GLOB), reve... | null |
179,732 | from logging import getLogger
from random import Random
from urllib.parse import urlparse
from redis import Redis
from mwmbl.crawler.domains import DomainLinkDatabase, TOP_DOMAINS
from mwmbl.crawler.urls import FoundURL
from mwmbl.hn_top_domains_filtered import DOMAINS
from mwmbl.settings import CORE_DOMAINS
MAX_URLS_P... | null |
179,733 | from dataclasses import asdict
from datetime import datetime
from logging import getLogger
from typing import Optional
from urllib.parse import urlencode
import justext
import requests
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseBadReque... | null |