sample_id string | instruction string | completion string | category string | label int64 | metadata string |
|---|---|---|---|---|---|
ray-project/ray:release/train_tests/benchmark/image_classification/s3_url/imagenet.py:_list_s3_image_files_cached | # Context:
from functools import lru_cache
from typing import Callable, Dict, List, Optional, Tuple
def _get_class_labels(bucket: str, prefix: str) -> List[str]: ...
def _list_files_for_label(bucket: str, prefix: str, label: str) -> List[Tuple[str, str]]: ...
def list_s3_image_files(data_dir: str) -> List[Dict[str, str]]: ...
def get_process_batch_fn(random_transforms: bool, label_to_id_map: Optional[Dict[str, int]]) -> Callable[[Dict[str, np.ndarray]], Dict[str, np.ndarray]]: ...
def create_s3_url_dataset(data_dir: str, random_transforms: bool, limit_rows: Optional[int]) -> ray.data.Dataset: ...
# Task:
Write a Python function `_list_s3_image_files_cached` to cached implementation of S3 file listing using Ray tasks for parallelism.
Parameters: data_dir: str
Returns: Tuple[Tuple[str, str], ...] | def _list_s3_image_files_cached(data_dir: str) -> Tuple[Tuple[str, str], ...]:
"""Cached implementation of S3 file listing using Ray tasks for parallelism.
Returns a tuple of tuples for hashability (required by lru_cache).
"""
logger.info(f"Listing JPEG files from {data_dir}...")
# Parse S3 URL: s3://bucket/prefix
s3_path = data_dir
if s3_path.startswith("s3://"):
s3_path = s3_path[5:]
parts = s3_path.split("/", 1)
bucket = parts[0]
prefix = parts[1].rstrip("/") if len(parts) > 1 else ""
# Get all class labels
labels = _get_class_labels(bucket, prefix)
logger.info(
f"Found {len(labels)} class labels, launching Ray tasks for parallel listing..."
)
# Launch Ray tasks for each label
futures = [_list_files_for_label.remote(bucket, prefix, label) for label in labels]
# Wait for all tasks to complete and aggregate results
results = ray.get(futures)
# Flatten the list of lists
file_records = []
for records in results:
file_records.extend(records)
logger.info(f"Listed and cached {len(file_records)} JPEG files")
return tuple(file_records) | function_simple | 0 | {"cognitive_complexity": 3, "loc": 34, "code_loc": 18, "docstring_loc": 4, "function_name": "_list_s3_image_files_cached", "class_name": null, "qualname": "_list_s3_image_files_cached", "file_path": "release/train_tests/benchmark/image_classification/s3_url/imagenet.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "file_runnable"} |
ray-project/ray:rllib/utils/metrics/stats/mean.py:MeanStats.push | # Context:
from typing import Any, Union
class MeanStats(SeriesStats):
stats_cls_identifier = "mean"
def _np_reduce_fn(self, values: np.ndarray) -> float: ...
def _torch_reduce_fn(self, values: 'torch.Tensor'): ...
def reduce(self, compile: bool) -> Union[Any, 'MeanStats']: ...
def __repr__(self) -> str: ...
# Task:
Write a Python method `push` for the class `MeanStats` to pushes a value into this Stats object.
Parameters: value: Any
Returns: None | def push(self, value: Any) -> None:
"""Pushes a value into this Stats object.
Args:
value: The value to be pushed. Can be of any type.
PyTorch GPU tensors are kept on GPU until reduce() or peek().
TensorFlow tensors are moved to CPU immediately.
"""
# Convert TensorFlow tensors to CPU immediately, keep PyTorch tensors as-is
if tf and tf.is_tensor(value):
value = value.numpy()
self.values.append(value) | function_simple | 0 | {"cognitive_complexity": 2, "loc": 13, "code_loc": 3, "docstring_loc": 7, "function_name": "push", "class_name": "MeanStats", "qualname": "MeanStats.push", "file_path": "rllib/utils/metrics/stats/mean.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "file_runnable"} |
ocrmypdf/OCRmyPDF:src/ocrmypdf/font/font_provider.py:BuiltinFontProvider._load_fonts | # Context:
from ocrmypdf.font.font_manager import FontManager
class FontProvider(Protocol): ...
class ChainedFontProvider: ...
class BuiltinFontProvider:
FONT_FILES = {
def __init__(self, font_dir: Path | None = None):
"""Initialize builtin font provider.
Args:
font_dir: Directory containing font files. If None, uses
the default ocrmypdf/data directory.
"""
if font_dir is None:
font_dir = Path(__file__).parent.parent / "data"
self.font_dir = font_dir
self._fonts: dict[str, FontManager] = {}
self._load_fonts()
def get_font(self, font_name: str) -> FontManager | None: ...
def get_available_fonts(self) -> list[str]: ...
def get_fallback_font(self) -> FontManager: ...
# Task:
Write a Python method `_load_fonts` for the class `BuiltinFontProvider` to load available fonts, logging warnings for missing ones.
Returns: None | def _load_fonts(self) -> None:
"""Load available fonts, logging warnings for missing ones."""
for font_name, font_file in self.FONT_FILES.items():
font_path = self.font_dir / font_file
if not font_path.exists():
if font_name == 'Occulta':
raise FileNotFoundError(
f"Required fallback font not found: {font_path}"
)
log.warning(
"Font %s not found at %s - OCR output quality for some "
"scripts may be affected",
font_name,
font_path,
)
continue
try:
self._fonts[font_name] = FontManager(font_path)
except Exception as e:
if font_name == 'Occulta':
raise ValueError(
f"Failed to load required fallback font {font_file}: {e}"
) from e
log.warning(
"Failed to load font %s: %s - OCR output quality may be affected",
font_name,
e,
) | function_complex | 1 | {"cognitive_complexity": 10, "loc": 29, "code_loc": 26, "docstring_loc": 1, "function_name": "_load_fonts", "class_name": "BuiltinFontProvider", "qualname": "BuiltinFontProvider._load_fonts", "file_path": "src/ocrmypdf/font/font_provider.py", "repo_id": "ocrmypdf/OCRmyPDF", "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:rllib/algorithms/tqc/tests/test_tqc.py:module_doc | Write a module-level docstring for the Python module `test_tqc` which contains class `SimpleEnv`, class `TestTQC`. | Tests for the TQC (Truncated Quantile Critics) algorithm. | documentation | 0 | {"doc_type": "module", "module_name": "test_tqc", "file_path": "rllib/algorithms/tqc/tests/test_tqc.py", "repo_id": "ray-project/ray", "char_length": 57} |
huggingface/transformers:src/transformers/models/gemma3n/modular_gemma3n.py:Gemma3nModel.forward | # Context:
import torch
from ...cache_utils import Cache, DynamicCache
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_compilable_check
class Gemma3nTextConfig(Gemma2Config, PreTrainedConfig): ...
class Gemma3nAudioConfig(PreTrainedConfig): ...
class Gemma3nVisionConfig(TimmWrapperConfig): ...
class Gemma3nConfig(PreTrainedConfig): ...
class Gemma3nAudioEncoderModelOutput(BaseModelOutputWithPooling): ...
class Gemma3nModelOutputWithPast(PaligemmaModelOutputWithPast): ...
class Gemma3nCausalLMOutputWithPast(PaliGemmaCausalLMOutputWithPast): ...
class Gemma3nRMSNorm(Gemma3RMSNorm): ...
class Gemma3nAudioRelativePositionEmbedding(nn.Module): ...
class Gemma3nAudioAttention(nn.Module): ...
class Gemma3nAudioCumulativeGroupNorm(nn.Module): ...
class Gemma3nAudioSSCPConvBlock(nn.Module): ...
class Gemma3nAudioSubSampleConvProjection(nn.Module): ...
class Gemma3nAudioConformerAttention(nn.Module): ...
class Gemma3nAudioConformerFeedForward(nn.Module): ...
class Gemma3nAudioConformerLightConv1d(nn.Module): ...
class Gemma3nAudioConformerBlock(nn.Module): ...
class Gemma3nTextScaledWordEmbedding(Gemma3TextScaledWordEmbedding): ...
class Gemma3nTextLaurelBlock(nn.Module): ...
class Gemma3nTextMLP(Gemma2MLP): ...
class Gemma3nTextAltUp(nn.Module): ...
def apply_rotary_pos_emb(x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor, unsqueeze_dim: int): ...
class Gemma3nTextAttention(Gemma3Attention): ...
class Gemma3nTextDecoderLayer(Gemma3DecoderLayer): ...
class Gemma3nPreTrainedModel(Gemma2PreTrainedModel): ...
class Gemma3nAudioEncoder(Gemma3nPreTrainedModel): ...
class Gemma3nRotaryEmbedding(Gemma3RotaryEmbedding): ...
class Gemma3nTextModel(Gemma3TextModel): ...
class Gemma3nForCausalLM(Gemma3ForCausalLM): ...
class Gemma3nMultimodalEmbedder(nn.Module): ...
class Gemma3nForConditionalGeneration(PaliGemmaForConditionalGeneration): ...
class Gemma3nModel(PaliGemmaModel):
_checkpoint_conversion_mapping = {}
def __init__(self, config: Gemma3nConfig):
super().__init__(config)
del self.multi_modal_projector # Replaced by Gemma3nVisionEmbedder
del self.text_config_dtype
self.vocab_size_per_layer_input = config.text_config.vocab_size_per_layer_input
self.audio_tower = AutoModel.from_config(config.audio_config)
self.embed_vision = Gemma3nMultimodalEmbedder(config.vision_config, config.text_config)
self.embed_audio = Gemma3nMultimodalEmbedder(config.audio_config, config.text_config)
def get_image_features(self, pixel_values: torch.FloatTensor, **kwargs) -> tuple | BaseModelOutputWithPooling: ...
def get_placeholder_mask(self, input_ids: torch.LongTensor | None, inputs_embeds: torch.FloatTensor | None, image_features: torch.FloatTensor | None, audio_features: torch.FloatTensor | None): ...
def get_audio_features(self, input_features: torch.Tensor, input_features_mask: torch.Tensor, **kwargs) -> tuple | Gemma3nAudioEncoderModelOutput: ...
# Task:
Write a Python method `forward` for the class `Gemma3nModel` to labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):.
Parameters: input_ids: torch.LongTensor | None, pixel_values: torch.FloatTensor | None, input_features: torch.FloatTensor | None, attention_mask: torch.Tensor | None, input_features_mask: torch.Tensor | None, position_ids: torch.LongTensor | None, past_key_values: Cache | None, token_type_ids: torch.LongTensor | None, cache_position: torch.LongTensor | None, inputs_embeds: torch.FloatTensor | None, labels: torch.LongTensor | None, use_cache: bool | None, output_attentions: bool | None, output_hidden_states: bool | None
Returns: Gemma3nModelOutputWithPast | def forward(
self,
input_ids: torch.LongTensor | None = None, # text inputs
pixel_values: torch.FloatTensor | None = None, # vision inputs
input_features: torch.FloatTensor | None = None, # audio inputs
attention_mask: torch.Tensor | None = None,
input_features_mask: torch.Tensor | None = None,
position_ids: torch.LongTensor | None = None,
past_key_values: Cache | None = None,
token_type_ids: torch.LongTensor | None = None,
cache_position: torch.LongTensor | None = None,
inputs_embeds: torch.FloatTensor | None = None,
labels: torch.LongTensor | None = None,
use_cache: bool | None = None,
output_attentions: bool | None = None,
output_hidden_states: bool | None = None,
**lm_kwargs: Unpack[TransformersKwargs],
) -> Gemma3nModelOutputWithPast:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.text_config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.text_config.vocab_size]`.
Example:
```python
>>> from PIL import Image
>>> import httpx
>>> from io import BytesIO
>>> from transformers import AutoProcessor, Gemma3nForConditionalGeneration
>>> model = Gemma3nForConditionalGeneration.from_pretrained("google/gemma3n2-3b-mix-224")
>>> processor = AutoProcessor.from_pretrained("google/gemma3n2-3b-mix-224")
>>> prompt = "Where is the cat standing?"
>>> url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"
>>> with httpx.stream("GET", url) as response:
... image = Image.open(BytesIO(response.read()))
>>> inputs = processor(images=image, text=prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(**inputs,)
>>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Where is the cat standing?\nsnow"
```
"""
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
if input_ids is not None:
inputs_embeds = self.get_input_embeddings()(input_ids)
# Prepare per-layer inputs from inputs_ids
per_layer_inputs_mask = torch.logical_and(input_ids >= 0, input_ids < self.vocab_size_per_layer_input)
per_layer_inputs_tokens = torch.where(per_layer_inputs_mask, input_ids, torch.zeros_like(input_ids))
per_layer_inputs = self.language_model.get_per_layer_inputs(per_layer_inputs_tokens)
# Handle vision tokens (>= embed_vision.vocab_offset and < embed_audio.vocab_offset)
vision_mask = torch.logical_and(
input_ids >= self.embed_vision.vocab_offset, input_ids < self.embed_audio.vocab_offset
)
dummy_vision_token_id = self.embed_vision.vocab_offset + self.embed_vision.vocab_size - 1
vision_input_ids = torch.where(vision_mask, input_ids, dummy_vision_token_id).to(inputs_embeds.device)
vision_embeds = self.embed_vision(input_ids=vision_input_ids)
vision_embeds = vision_embeds.to(inputs_embeds.device, inputs_embeds.dtype)
expanded_vision_mask = vision_mask.unsqueeze(-1).expand_as(inputs_embeds)
inputs_embeds = torch.where(expanded_vision_mask, vision_embeds, inputs_embeds)
# Handle audio tokens (>= embed_audio.vocab_offset)
audio_mask = input_ids >= self.embed_audio.vocab_offset
dummy_audio_token_id = self.embed_audio.vocab_offset + self.embed_audio.vocab_size - 1
audio_input_ids = torch.where(audio_mask, input_ids, dummy_audio_token_id).to(inputs_embeds.device)
audio_embeds = self.embed_audio(input_ids=audio_input_ids)
audio_embeds = audio_embeds.to(inputs_embeds.device, inputs_embeds.dtype)
expanded_audio_mask = audio_mask.unsqueeze(-1).expand_as(inputs_embeds)
inputs_embeds = torch.where(expanded_audio_mask, audio_embeds, inputs_embeds)
else:
per_layer_inputs = None
# Merge text and images
if pixel_values is not None:
image_features = self.get_image_features(pixel_values, return_dict=True).pooler_output
image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype)
special_image_mask, _ = self.get_placeholder_mask(
input_ids, inputs_embeds=inputs_embeds, image_features=image_features
)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)
# Merge text and audio
if input_features is not None and input_features_mask is not None:
audio_outputs = self.get_audio_features(input_features, ~input_features_mask, return_dict=True)
audio_features = audio_outputs.pooler_output
audio_mask = audio_outputs.audio_mel_mask
# The Gemma3nProcessor expects all audio will be 30s in length and inserts 188 audio soft tokens into the
# text to account for this. However, the audio preprocessing and encoder do not gurarantee they will
# produce 188 soft tokens; they will produce at most that many tokens, but they may produce fewer tokens
# depending on the length of the longest audio input in the batch. When we encounter this situation, we pad
# the audio feature out to 188 soft tokens with the emebedding of the last token in the embed_audio vocab.
audio_padding_toks = torch.tensor([[self.vocab_size - 1]], dtype=torch.long, device=audio_features.device)
audio_padding_embs = self.embed_audio(input_ids=audio_padding_toks)
audio_features = torch.where(audio_mask.unsqueeze(-1), audio_padding_embs, audio_features)
audio_batch_size, audio_seq_len, audio_embed_dim = audio_features.shape
extra_padding_tokens = self.config.audio_soft_tokens_per_image - audio_seq_len
extra_padding_features = audio_padding_embs.expand(audio_batch_size, extra_padding_tokens, audio_embed_dim)
audio_features = torch.cat((audio_features, extra_padding_features), dim=1)
audio_features = audio_features.to(inputs_embeds.device, inputs_embeds.dtype)
_, special_audio_mask = self.get_placeholder_mask(
input_ids, inputs_embeds=inputs_embeds, audio_features=audio_features
)
inputs_embeds = inputs_embeds.masked_scatter(special_audio_mask, audio_features)
outputs = self.language_model(
input_ids=None,
per_layer_inputs=per_layer_inputs,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
cache_position=cache_position,
**lm_kwargs,
)
return Gemma3nModelOutputWithPast(
last_hidden_state=outputs.last_hidden_state,
past_key_values=outputs.past_key_values if use_cache else None,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=image_features if pixel_values is not None else None,
audio_hidden_states=audio_features if input_features is not None else None,
) | function_complex | 0 | {"cognitive_complexity": 8, "loc": 144, "code_loc": 74, "docstring_loc": 30, "function_name": "forward", "class_name": "Gemma3nModel", "qualname": "Gemma3nModel.forward", "file_path": "src/transformers/models/gemma3n/modular_gemma3n.py", "repo_id": "huggingface/transformers", "has_docstring": true, "runnable_level": "project_runnable"} |
docling-project/docling:docs/examples/post_process_ocr_with_vlm.py:no_long_repeats | # Context:
import re
def is_empty_fast_with_lines_pil(pil_img: Image.Image, downscale_max_side: int, grad_threshold: float, min_line_coverage: float, max_allowed_lines: int, edge_fraction_threshold: float): ...
def remove_break_lines(text: str) -> str: ...
def safe_crop(img: Image.Image, bbox): ...
class PostOcrEnrichmentElement(BaseModel): ...
class PostOcrEnrichmentPipelineOptions(ConvertPipelineOptions): ...
class PostOcrEnrichmentPipeline(SimplePipeline): ...
class PostOcrApiEnrichmentModel(GenericEnrichmentModel[PostOcrEnrichmentElement], BaseModelWithOptions): ...
def convert_pdf(pdf_path: Path, out_intermediate_json: Path): ...
def post_process_json(in_json: Path, out_final_json: Path): ...
def process_pdf(pdf_path: Path, scratch_dir: Path, out_dir: Path): ...
def process_json(json_path: Path, out_dir: Path): ...
def filter_jsons_by_ocr_list(jsons, folder): ...
def run_jsons(in_path: Path, out_dir: Path): ...
def main(): ...
# Task:
Write a Python function `no_long_repeats` to returns False if the string `s` contains more than `threshold`.
Parameters: s: str, threshold: int
Returns: bool | def no_long_repeats(s: str, threshold: int) -> bool:
"""
Returns False if the string `s` contains more than `threshold`
identical characters in a row, otherwise True.
"""
pattern = r"(.)\1{" + str(threshold) + ",}"
return re.search(pattern, s) is None | function_simple | 1 | {"cognitive_complexity": 0, "loc": 7, "code_loc": 2, "docstring_loc": 4, "function_name": "no_long_repeats", "class_name": null, "qualname": "no_long_repeats", "file_path": "docs/examples/post_process_ocr_with_vlm.py", "repo_id": "docling-project/docling", "has_docstring": true, "runnable_level": "slib_runnable"} |
crewAIInc/crewAI:lib/crewai/src/crewai/a2a/utils/agent_card.py:inject_a2a_server_methods | # Context:
from types import MethodType
from a2a.types import AgentCapabilities, AgentCard, AgentSkill
from crewai.agent import Agent
def _get_tls_verify(auth: ClientAuthScheme | None) -> ssl.SSLContext | bool | str: ...
async def _prepare_auth_headers(auth: ClientAuthScheme | None, timeout: int) -> tuple[MutableMapping[str, str], ssl.SSLContext | bool | str]: ...
def _get_server_config(agent: Agent) -> A2AServerConfig | None: ...
def fetch_agent_card(endpoint: str, auth: ClientAuthScheme | None, timeout: int, use_cache: bool, cache_ttl: int) -> AgentCard: ...
async def afetch_agent_card(endpoint: str, auth: ClientAuthScheme | None, timeout: int, use_cache: bool) -> AgentCard: ...
def _fetch_agent_card_cached(endpoint: str, auth_hash: str, timeout: int, _ttl_hash: int) -> AgentCard: ...
async def _afetch_agent_card_cached(endpoint: str, auth_hash: str, timeout: int) -> AgentCard: ...
async def _afetch_agent_card_impl(endpoint: str, auth: ClientAuthScheme | None, timeout: int) -> AgentCard: ...
def _task_to_skill(task: Task) -> AgentSkill: ...
def _tool_to_skill(tool_name: str, tool_description: str) -> AgentSkill: ...
def _crew_to_agent_card(crew: Crew, url: str) -> AgentCard: ...
def _agent_to_agent_card(agent: Agent, url: str) -> AgentCard: ...
# Task:
Write a Python function `inject_a2a_server_methods` to inject A2A server methods onto an Agent instance.
Parameters: agent: Agent
Returns: None | def inject_a2a_server_methods(agent: Agent) -> None:
"""Inject A2A server methods onto an Agent instance.
Adds a `to_agent_card(url: str) -> AgentCard` method to the agent
that generates an A2A-compliant AgentCard.
Only injects if the agent has an A2AServerConfig.
Args:
agent: The Agent instance to inject methods onto.
"""
if _get_server_config(agent) is None:
return
def _to_agent_card(self: Agent, url: str) -> AgentCard:
return _agent_to_agent_card(self, url)
object.__setattr__(agent, "to_agent_card", MethodType(_to_agent_card, agent)) | function_simple | 0 | {"cognitive_complexity": 1, "loc": 18, "code_loc": 5, "docstring_loc": 10, "function_name": "inject_a2a_server_methods", "class_name": null, "qualname": "inject_a2a_server_methods", "file_path": "lib/crewai/src/crewai/a2a/utils/agent_card.py", "repo_id": "crewAIInc/crewAI", "has_docstring": true, "runnable_level": "project_runnable"} |
infiniflow/ragflow:test/testcases/test_sdk_api/test_chunk_management_within_dataset/test_delete_chunks.py:TestChunksDeletion.test_delete_1k | # Context:
import pytest
from common import batch_add_chunks
from time import sleep
class TestChunksDeletion:
def test_delete_partial_invalid_id(self, add_chunks_func, payload): ...
def test_repeated_deletion(self, add_chunks_func): ...
def test_duplicate_deletion(self, add_chunks_func): ...
def test_concurrent_deletion(self, add_document): ...
def test_basic_scenarios(self, add_chunks_func, payload, expected_message, remaining): ...
# Task:
Write a Python test method `test_delete_1k` in test class `TestChunksDeletion` to verify the behavior of `delete_1k`.
Module under test: concurrent.futures, common, time | def test_delete_1k(self, add_document):
count = 1_000
_, document = add_document
chunks = batch_add_chunks(document, count)
chunk_ids = [chunk.id for chunk in chunks]
from time import sleep
sleep(1)
document.delete_chunks(ids=chunk_ids)
remaining_chunks = document.list_chunks()
assert len(remaining_chunks) == 0, str(remaining_chunks) | test | 1 | {"function_name": "test_delete_1k", "class_name": "TestChunksDeletion", "qualname": "TestChunksDeletion.test_delete_1k", "file_path": "test/testcases/test_sdk_api/test_chunk_management_within_dataset/test_delete_chunks.py", "repo_id": "infiniflow/ragflow", "loc": 13, "tested_modules": ["concurrent.futures", "common", "time"], "has_docstring": false, "runnable_level": "project_runnable"} |
crewAIInc/crewAI:lib/crewai/tests/llms/test_multimodal_integration.py:TestLiteLLMMultimodalIntegration.test_describe_image_claude | # Context:
import pytest
from crewai.llm import LLM
from crewai_files import (
AudioFile,
File,
ImageFile,
PDFFile,
TextFile,
VideoFile,
format_multimodal_content,
)
def test_image_bytes() -> bytes: ...
def test_text_bytes() -> bytes: ...
def test_video_bytes() -> bytes: ...
def test_audio_bytes() -> bytes: ...
def _build_multimodal_message(llm: LLM, prompt: str, files: dict) -> list[dict]: ...
class TestOpenAIMultimodalIntegration: ...
class TestOpenAIO4MiniMultimodalIntegration: ...
class TestOpenAIGPT41MiniMultimodalIntegration: ...
class TestOpenAIGPT5MultimodalIntegration: ...
class TestOpenAIGPT5MiniMultimodalIntegration: ...
class TestOpenAIGPT5NanoMultimodalIntegration: ...
class TestAnthropicMultimodalIntegration: ...
class TestAzureMultimodalIntegration: ...
class TestBedrockMultimodalIntegration: ...
class TestGeminiMultimodalIntegration: ...
class TestMultipleFilesIntegration: ...
class TestGenericFileIntegration: ...
def _build_multimodal_message_with_upload(llm: LLM, prompt: str, files: dict) -> tuple[list[dict], list[dict]]: ...
def _build_responses_message_with_upload(llm: LLM, prompt: str, files: dict) -> tuple[list[dict], list[dict]]: ...
class TestAnthropicFileUploadIntegration: ...
class TestOpenAIResponsesFileUploadIntegration: ...
class TestLiteLLMMultimodalIntegration:
def test_describe_image_gpt4o(self, test_image_bytes: bytes) -> None: ...
# Task:
Write a Python test method `test_describe_image_claude` in test class `TestLiteLLMMultimodalIntegration` to test LiteLLM with Claude can describe an image.
Module under test: pathlib, crewai.llm, crewai_files | def test_describe_image_claude(self, test_image_bytes: bytes) -> None:
"""Test LiteLLM with Claude can describe an image."""
llm = LLM(model="anthropic/claude-3-5-haiku-20241022", is_litellm=True)
files = {"image": ImageFile(source=test_image_bytes)}
messages = _build_multimodal_message(
llm,
"Describe this image in one sentence. Be brief.",
files,
)
response = llm.call(messages)
assert response
assert isinstance(response, str)
assert len(response) > 0 | test | 0 | {"function_name": "test_describe_image_claude", "class_name": "TestLiteLLMMultimodalIntegration", "qualname": "TestLiteLLMMultimodalIntegration.test_describe_image_claude", "file_path": "lib/crewai/tests/llms/test_multimodal_integration.py", "repo_id": "crewAIInc/crewAI", "loc": 16, "tested_modules": ["pathlib", "crewai.llm", "crewai_files", "crewai_files.resolution.resolver", "crewai_files.formatting.anthropic"], "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/serve/tests/test_direct_ingress.py:TestDirectIngressBackpressure.test_requests_are_not_running_serially | # Context:
import asyncio
from concurrent.futures import ThreadPoolExecutor
import httpx
from ray import serve
from ray._common.test_utils import Semaphore, SignalActor, wait_for_condition
from ray.serve._private.test_utils import (
check_deployment_status,
check_num_replicas_gte,
check_num_replicas_lte,
get_application_url,
get_application_urls,
ping_grpc_list_applications,
send_signal_on_cancellation,
)
class Collector: ...
def _skip_if_ff_not_enabled(): ...
def _skip_if_haproxy_enabled(): ...
def _shared_serve_instance(): ...
def serve_instance(_shared_serve_instance): ...
class Hybrid: ...
def get_target_groups(app_name: str, from_proxy_manager: bool): ...
def test_proxy_is_started_on_head_only_mode(_skip_if_ff_not_enabled, serve_instance): ...
def get_http_ports(route_prefix, first_only): ...
def get_grpc_ports(route_prefix, first_only): ...
def test_basic(_skip_if_ff_not_enabled, serve_instance): ...
def test_internal_server_error(_skip_if_ff_not_enabled, serve_instance): ...
def test_fastapi_app(_skip_if_ff_not_enabled, serve_instance): ...
def test_http_request_id(_skip_if_ff_not_enabled, serve_instance, use_fastapi: bool): ...
def test_grpc_request_id(_skip_if_ff_not_enabled, serve_instance): ...
def test_multiplexed_model_id(_skip_if_ff_not_enabled, serve_instance): ...
def test_health_check(_skip_if_ff_not_enabled, serve_instance): ...
def test_port_retry_logic(_skip_if_ff_not_enabled, serve_instance): ...
def test_replica_gives_up_after_max_port_retries_for_http(_skip_if_ff_not_enabled, serve_instance): ...
def test_replica_gives_up_after_max_port_retries_for_grpc(_skip_if_ff_not_enabled, serve_instance): ...
def test_no_port_available(_skip_if_ff_not_enabled, serve_instance): ...
def test_replica_releases_ports_on_shutdown(_skip_if_ff_not_enabled, serve_instance): ...
def test_get_serve_instance_details(_skip_if_ff_not_enabled, serve_instance): ...
def test_only_ingress_deployment_replicas_are_used_for_target_groups(_skip_if_ff_not_enabled, serve_instance): ...
def test_crashed_replica_port_is_released_and_reused(_skip_if_ff_not_enabled, serve_instance): ...
def test_multiple_applications_on_same_node(_skip_if_ff_not_enabled, serve_instance): ...
def test_app_with_composite_deployments(_skip_if_ff_not_enabled, serve_instance): ...
def test_only_running_apps_are_used_for_target_groups(_skip_if_ff_not_enabled, serve_instance): ...
def test_some_replicas_not_running(_skip_if_ff_not_enabled, serve_instance): ...
def test_port_recovery_on_controller_restart(_skip_if_ff_not_enabled, serve_instance): ...
class TestDirectIngressAutoscaling: ...
def test_disconnect(_skip_if_ff_not_enabled, serve_instance): ...
def test_context_propagation(_skip_if_ff_not_enabled, serve_instance): ...
def test_context_propagation_with_child(_skip_if_ff_not_enabled, serve_instance): ...
def test_shutdown_replica_only_after_draining_requests(_skip_if_ff_not_enabled, serve_instance): ...
def test_http_routes_endpoint(_skip_if_ff_not_enabled, serve_instance): ...
def test_grpc_list_applications_endpoint(_skip_if_ff_not_enabled, _skip_if_haproxy_enabled, serve_instance): ...
def test_redeploy_start_time(_skip_if_ff_not_enabled, serve_instance): ...
def test_deploy_app_custom_exception(_skip_if_ff_not_enabled, serve_instance): ...
def test_get_serve_instance_details_json_serializable(_skip_if_ff_not_enabled, serve_instance, policy_name): ...
def test_get_deployment_config(_skip_if_ff_not_enabled, serve_instance): ...
def test_stuck_requests_are_force_killed(_skip_if_ff_not_enabled, serve_instance): ...
class TestDirectIngressBackpressure:
def _do_http_request(self, url: str) -> bool: ...
def _do_grpc_request(self, url: str) -> bool: ...
def test_max_ongoing_requests(self, _skip_if_ff_not_enabled, serve_instance): ...
def test_backpressure_queued_requests(self, _skip_if_ff_not_enabled, serve_instance): ...
def test_drop_after_max_queued_requests(self, _skip_if_ff_not_enabled, serve_instance): ...
def test_mixed_http_grpc_backpressure(self, _skip_if_ff_not_enabled, serve_instance): ...
def test_health_check_during_backpressure(self, _skip_if_ff_not_enabled, serve_instance): ...
def test_multiple_deployment_backpressure_isolation(self, _skip_if_ff_not_enabled, serve_instance): ...
def test_backpressure_with_composite_deployments(self, _skip_if_ff_not_enabled, serve_instance): ...
def test_client_disconnect_during_request(self, _skip_if_ff_not_enabled, serve_instance): ...
def test_graceful_shutdown_wait_loop(self, _skip_if_ff_not_enabled, serve_instance): ...
# Task:
Write a Python test method `test_requests_are_not_running_serially` in test class `TestDirectIngressBackpressure` to test that requests are processed concurrently, not serially.
Module under test: concurrent.futures, typing, uuid | def test_requests_are_not_running_serially(
self, _skip_if_ff_not_enabled, serve_instance
):
"""Test that requests are processed concurrently, not serially"""
@serve.deployment(
max_ongoing_requests=20,
)
class A:
async def __call__(self):
await asyncio.sleep(1)
return "ok"
serve.run(A.options(name="A").bind(), name="app-1", route_prefix="/app-1")
http_url = get_application_url("HTTP", app_name="app-1")
num_requests = 20
with ThreadPoolExecutor(num_requests) as tpe:
futures = [
tpe.submit(httpx.get, http_url, timeout=None)
for _ in range(num_requests)
]
def _func():
for future in futures:
assert future.result().status_code == 200
return True
wait_for_condition(_func, timeout=5) | test | 0 | {"function_name": "test_requests_are_not_running_serially", "class_name": "TestDirectIngressBackpressure", "qualname": "TestDirectIngressBackpressure.test_requests_are_not_running_serially", "file_path": "python/ray/serve/tests/test_direct_ingress.py", "repo_id": "ray-project/ray", "loc": 30, "tested_modules": ["concurrent.futures", "typing", "uuid", "fastapi", "starlette.requests"], "has_docstring": true, "runnable_level": "file_runnable"} |
ray-project/ray:python/ray/llm/_internal/common/utils/cloud_filesystem/pyarrow_filesystem.py:PyArrowFileSystem._create_azure_filesystem | # Context:
from typing import List, Optional, Tuple, Union
from urllib.parse import urlparse
import pyarrow.fs as pa_fs
import adlfs
from azure.identity import DefaultAzureCredential
class PyArrowFileSystem(BaseCloudFileSystem):
def get_fs_and_path(object_uri: str) -> Tuple[pa_fs.FileSystem, str]: ...
def _create_abfss_filesystem(object_uri: str) -> Tuple[pa_fs.FileSystem, str]: ...
def _filter_files(fs: pa_fs.FileSystem, source_path: str, destination_path: str, substrings_to_include: Optional[List[str]], suffixes_to_exclude: Optional[List[str]]) -> List[Tuple[str, str]]: ...
def get_file(object_uri: str, decode_as_utf_8: bool) -> Optional[Union[str, bytes]]: ...
def list_subfolders(folder_uri: str) -> List[str]: ...
def download_files(path: str, bucket_uri: str, substrings_to_include: Optional[List[str]], suffixes_to_exclude: Optional[List[str]], max_concurrency: int, chunk_size: int) -> None: ...
def upload_files(local_path: str, bucket_uri: str) -> None: ...
# Task:
Write a Python method `_create_azure_filesystem` for the class `PyArrowFileSystem` to create an Azure filesystem for Azure Blob Storage or ABFSS.
Parameters: object_uri: str
Returns: Tuple[pa_fs.FileSystem, str] | def _create_azure_filesystem(object_uri: str) -> Tuple[pa_fs.FileSystem, str]:
"""Create an Azure filesystem for Azure Blob Storage or ABFSS.
Args:
object_uri: Azure URI (azure://container@account.blob.core.windows.net/path or
abfss://container@account.dfs.core.windows.net/path)
Returns:
Tuple of (PyArrow FileSystem, path without scheme prefix)
Raises:
ImportError: If required dependencies are not installed.
ValueError: If the Azure URI format is invalid.
"""
try:
import adlfs
from azure.identity import DefaultAzureCredential
except ImportError:
raise ImportError(
"You must `pip install adlfs azure-identity` "
"to use Azure/ABFSS URIs. "
"Note that these must be preinstalled on all nodes in the Ray cluster."
)
# Parse and validate the Azure URI
parsed = urlparse(object_uri)
scheme = parsed.scheme.lower()
# Validate URI format: scheme://container@account.domain/path
if not parsed.netloc or "@" not in parsed.netloc:
raise ValueError(
f"Invalid {scheme.upper()} URI format - missing container@account: {object_uri}"
)
container_part, hostname_part = parsed.netloc.split("@", 1)
# Validate container name (must be non-empty)
if not container_part:
raise ValueError(
f"Invalid {scheme.upper()} URI format - empty container name: {object_uri}"
)
# Validate hostname format based on scheme
valid_hostname = False
if scheme == "abfss":
valid_hostname = hostname_part.endswith(".dfs.core.windows.net")
expected_domains = ".dfs.core.windows.net"
elif scheme == "azure":
valid_hostname = hostname_part.endswith(
".blob.core.windows.net"
) or hostname_part.endswith(".dfs.core.windows.net")
expected_domains = ".blob.core.windows.net or .dfs.core.windows.net"
if not hostname_part or not valid_hostname:
raise ValueError(
f"Invalid {scheme.upper()} URI format - invalid hostname (must end with {expected_domains}): {object_uri}"
)
# Extract and validate account name
azure_storage_account_name = hostname_part.split(".")[0]
if not azure_storage_account_name:
raise ValueError(
f"Invalid {scheme.upper()} URI format - empty account name: {object_uri}"
)
# Create the adlfs filesystem
adlfs_fs = adlfs.AzureBlobFileSystem(
account_name=azure_storage_account_name,
credential=DefaultAzureCredential(),
)
# Wrap with PyArrow's PyFileSystem for compatibility
fs = pa_fs.PyFileSystem(pa_fs.FSSpecHandler(adlfs_fs))
# Return the path without the scheme prefix
path = f"{container_part}{parsed.path}"
return fs, path | function_complex | 0 | {"cognitive_complexity": 10, "loc": 78, "code_loc": 45, "docstring_loc": 13, "function_name": "_create_azure_filesystem", "class_name": "PyArrowFileSystem", "qualname": "PyArrowFileSystem._create_azure_filesystem", "file_path": "python/ray/llm/_internal/common/utils/cloud_filesystem/pyarrow_filesystem.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/data/tests/test_limit_operator.py:test_per_block_limit_fn | # Context:
import pytest
from ray.data._internal.execution.interfaces.task_context import TaskContext
from ray.data._internal.execution.operators.map_operator import _per_block_limit_fn
import pandas as pd
def test_limit_operator(ray_start_regular_shared): ...
def test_limit_operator_memory_leak_fix(ray_start_regular_shared, tmp_path): ...
def test_limit_estimated_num_output_bundles(): ...
# Task:
Write a Python test function `test_per_block_limit_fn` to test the _per_block_limit_fn function with various inputs.
Module under test: ray.data._internal.execution.interfaces.task_context, ray.data._internal.execution.operators.input_data_buffer, ray.data._internal.execution.operators.limit_operator | def test_per_block_limit_fn(blocks_data, per_block_limit, expected_output):
"""Test the _per_block_limit_fn function with various inputs."""
import pandas as pd
# Convert test data to pandas blocks
blocks = [pd.DataFrame({"value": data}) for data in blocks_data]
# Create a mock TaskContext
ctx = TaskContext(op_name="test", task_idx=0, target_max_block_size_override=None)
# Call the function
result_blocks = list(_per_block_limit_fn(blocks, ctx, per_block_limit))
# Convert result back to lists for comparison
result_data = []
for block in result_blocks:
block_data = block["value"].tolist()
result_data.append(block_data)
assert result_data == expected_output | test | 0 | {"function_name": "test_per_block_limit_fn", "class_name": null, "qualname": "test_per_block_limit_fn", "file_path": "python/ray/data/tests/test_limit_operator.py", "repo_id": "ray-project/ray", "loc": 20, "tested_modules": ["ray.data._internal.execution.interfaces.task_context", "ray.data._internal.execution.operators.input_data_buffer", "ray.data._internal.execution.operators.limit_operator", "ray.data._internal.execution.operators.map_operator", "ray.data._internal.execution.streaming_executor"], "has_docstring": true, "runnable_level": "project_runnable"} |
langflow-ai/langflow:src/backend/tests/unit/components/bundles/agentics/test_semantic_map.py:TestSemanticMapComponent.test_should_have_schema_with_table_schema | # Context:
from lfx.components.agentics.semantic_map import SemanticMap
class TestSemanticMapComponent:
def test_should_have_correct_display_name(self): ...
def test_should_have_correct_icon(self): ...
def test_should_have_required_inputs(self): ...
def test_should_have_dataframe_output(self): ...
def test_should_have_provider_specific_inputs(self): ...
def test_should_have_model_input_with_real_time_refresh(self): ...
def test_should_have_append_to_input_columns_as_boolean(self): ...
# Task:
Write a Python test method `test_should_have_schema_with_table_schema` in test class `TestSemanticMapComponent` to test that schema input has table_schema defined.
Module under test: __future__, lfx.components.agentics.semantic_map | def test_should_have_schema_with_table_schema(self):
"""Test that schema input has table_schema defined."""
schema_input = next((i for i in SemanticMap.inputs if i.name == "schema"), None)
assert schema_input is not None
assert schema_input.table_schema is not None
assert len(schema_input.table_schema) > 0
field_names = {field["name"] for field in schema_input.table_schema}
assert "name" in field_names
assert "description" in field_names
assert "type" in field_names
assert "multiple" in field_names | test | 1 | {"function_name": "test_should_have_schema_with_table_schema", "class_name": "TestSemanticMapComponent", "qualname": "TestSemanticMapComponent.test_should_have_schema_with_table_schema", "file_path": "src/backend/tests/unit/components/bundles/agentics/test_semantic_map.py", "repo_id": "langflow-ai/langflow", "loc": 12, "tested_modules": ["__future__", "lfx.components.agentics.semantic_map"], "has_docstring": true, "runnable_level": "project_runnable"} |
huggingface/transformers:tests/models/dinov3_convnext/test_modeling_dinov3_convnext.py:DINOv3ConvNextModelIntegrationTest.test_inference_no_head | # Context:
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
import torch
from transformers import DINOv3ConvNextBackbone, DINOv3ConvNextModel
class DINOv3ConvNextModelTester: ...
class DINOv3ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): ...
def prepare_img(): ...
class DINOv3ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin): ...
class DINOv3ConvNextModelIntegrationTest(unittest.TestCase):
def default_image_processor(self): ...
# Task:
Write a Python test method `test_inference_no_head` in test class `DINOv3ConvNextModelIntegrationTest` to verify the behavior of `inference_no_head`.
Module under test: functools, transformers, transformers.testing_utils | def test_inference_no_head(self):
model = DINOv3ConvNextModel.from_pretrained("facebook/dinov3-convnext-tiny-pretrain-lvd1689m").to(torch_device)
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(image, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the last hidden states
_, _, height, width = inputs["pixel_values"].shape
expected_seq_length = (height * width) // 4 ** (model.config.num_stages + 1) + 1 # +1 for the "CLS" token
expected_shape = torch.Size((1, expected_seq_length, model.config.hidden_sizes[-1]))
self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
last_layer_cls_token = outputs.pooler_output
expected_slice = torch.tensor([-6.3721, 1.3008, 2.0743, -0.0800, 0.6072], device=torch_device)
torch.testing.assert_close(last_layer_cls_token[0, :5], expected_slice, rtol=1e-4, atol=1e-4)
last_layer_patch_tokens = outputs.last_hidden_state[:, 1:]
expected_slice = torch.tensor([0.4905, -3.7135, 1.8485, -1.0403, -1.0908], device=torch_device)
torch.testing.assert_close(last_layer_patch_tokens[0, 0, :5], expected_slice, rtol=1e-4, atol=1e-4) | test | 0 | {"function_name": "test_inference_no_head", "class_name": "DINOv3ConvNextModelIntegrationTest", "qualname": "DINOv3ConvNextModelIntegrationTest.test_inference_no_head", "file_path": "tests/models/dinov3_convnext/test_modeling_dinov3_convnext.py", "repo_id": "huggingface/transformers", "loc": 24, "tested_modules": ["functools", "transformers", "transformers.testing_utils", "transformers.utils", "test_backbone_common"], "has_docstring": false, "runnable_level": "file_runnable"} |
apache/airflow:airflow-core/src/airflow/models/callback.py:ExecutorCallback.__init__ | # Context:
from airflow.executors.workloads.callback import CallbackFetchMethod
class CallbackType(str, Enum): ...
class CallbackDefinitionProtocol(Protocol): ...
class ImportPathCallbackDefProtocol(CallbackDefinitionProtocol, Protocol): ...
class ImportPathExecutorCallbackDefProtocol(ImportPathCallbackDefProtocol, Protocol): ...
class Callback(Base, BaseWorkload): ...
class TriggererCallback(Callback): ...
class DagProcessorCallback(Callback): ...
class ExecutorCallback(Callback):
__mapper_args__ = {"polymorphic_identity": CallbackType.EXECUTOR}
def __repr__(self): ...
# Task:
Write a Python method `__init__` for the class `ExecutorCallback` to initialize an ExecutorCallback from a callback definition and fetch method.
Parameters: callback_def: ImportPathExecutorCallbackDefProtocol, fetch_method: CallbackFetchMethod | def __init__(
self, callback_def: ImportPathExecutorCallbackDefProtocol, fetch_method: CallbackFetchMethod, **kwargs
):
"""
Initialize an ExecutorCallback from a callback definition and fetch method.
:param callback_def: Callback definition with path, kwargs, and executor
:param fetch_method: Method to fetch the callback at runtime
:param kwargs: Passed to parent Callback.__init__ (see base class for details)
"""
super().__init__(**kwargs)
self.fetch_method = fetch_method
self.data |= callback_def.serialize() | function_simple | 1 | {"cognitive_complexity": 0, "loc": 13, "code_loc": 3, "docstring_loc": 7, "function_name": "__init__", "class_name": "ExecutorCallback", "qualname": "ExecutorCallback.__init__", "file_path": "airflow-core/src/airflow/models/callback.py", "repo_id": "apache/airflow", "has_docstring": true, "runnable_level": "project_runnable"} |
huggingface/transformers:src/transformers/models/glm_moe_dsa/modular_glm_moe_dsa.py:apply_rotary_pos_emb | # Context:
import torch
from ...models.llama.modeling_llama import rotate_half
class GlmMoeDsaConfig(PreTrainedConfig): ...
class GlmMoeDsaRMSNorm(Glm4MoeRMSNorm): ...
class GlmMoeDsaIndexer(nn.Module): ...
class GlmMoeDsaAttention(nn.Module): ...
class GlmMoeDsaDecoderLayer(Glm4MoeLiteDecoderLayer): ...
class GlmMoeDsaPreTrainedModel(Glm4MoePreTrainedModel): ...
class GlmMoeDsaModel(Glm4MoeModel): ...
class GlmMoeDsaForCausalLM(Glm4MoeForCausalLM): ...
# Task:
Write a Python function `apply_rotary_pos_emb` to applies Rotary Position Embedding to a single tensor.
Parameters: x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor, unsqueeze_dim: int
Returns: torch.Tensor | def apply_rotary_pos_emb(
x: torch.Tensor,
cos: torch.Tensor,
sin: torch.Tensor,
unsqueeze_dim: int = 1,
) -> torch.Tensor:
"""
Applies Rotary Position Embedding to a single tensor.
This is the transformers equivalent of DeepSeek V3.2's `apply_rotary_emb(x, freqs_cis, interleaved)`.
Instead of using complex-number `freqs_cis`, we use pre-split `(cos, sin)` tensors from RotaryEmbedding.
Args:
x (`torch.Tensor`): Input tensor of shape `[..., head_dim]`.
cos (`torch.Tensor`): Cosine part from RotaryEmbedding, shape `[batch, seq_len, head_dim]`.
sin (`torch.Tensor`): Sine part from RotaryEmbedding, shape `[batch, seq_len, head_dim]`.
unsqueeze_dim (`int`): Dimension along which to unsqueeze cos/sin for broadcasting.
Use `1` when x is `[B, H, S, D]` (BHSD) and `2` when x is `[B, S, H, D]` (BSHD).
Returns:
`torch.Tensor`: Tensor with rotary embeddings applied, same shape as input.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
# Split-half (NeoX/Llama style): (x[:d/2], x[d/2:])
# This matches llama's apply_rotary_pos_emb logic.
x_rotated = (x * cos) + (rotate_half(x) * sin)
return x_rotated | function_simple | 0 | {"cognitive_complexity": 0, "loc": 29, "code_loc": 4, "docstring_loc": 16, "function_name": "apply_rotary_pos_emb", "class_name": null, "qualname": "apply_rotary_pos_emb", "file_path": "src/transformers/models/glm_moe_dsa/modular_glm_moe_dsa.py", "repo_id": "huggingface/transformers", "has_docstring": true, "runnable_level": "project_runnable"} |
infiniflow/ragflow:test/testcases/test_http_api/test_file_management_within_dataset/test_upload_documents.py:TestDocumentsUpload.test_invalid_dataset_id | # Context:
import pytest
from common import FILE_API_URL, list_datasets, upload_documents
from utils.file_utils import create_txt_file
class TestAuthorization: ...
class TestDocumentsUpload:
def test_valid_single_upload(self, HttpApiAuth, add_dataset_func, tmp_path): ...
def test_file_type_validation(self, HttpApiAuth, add_dataset_func, generate_test_files, request): ...
def test_unsupported_file_type(self, HttpApiAuth, add_dataset_func, tmp_path, file_type): ...
def test_missing_file(self, HttpApiAuth, add_dataset_func): ...
def test_empty_file(self, HttpApiAuth, add_dataset_func, tmp_path): ...
def test_filename_empty(self, HttpApiAuth, add_dataset_func, tmp_path): ...
def test_filename_max_length(self, HttpApiAuth, add_dataset_func, tmp_path): ...
def test_duplicate_files(self, HttpApiAuth, add_dataset_func, tmp_path): ...
def test_same_file_repeat(self, HttpApiAuth, add_dataset_func, tmp_path): ...
def test_filename_special_characters(self, HttpApiAuth, add_dataset_func, tmp_path): ...
def test_multiple_files(self, HttpApiAuth, add_dataset_func, tmp_path): ...
def test_concurrent_upload(self, HttpApiAuth, add_dataset_func, tmp_path): ...
# Task:
Write a Python test method `test_invalid_dataset_id` in test class `TestDocumentsUpload` to verify the behavior of `invalid_dataset_id`.
Module under test: concurrent.futures, common, configs | def test_invalid_dataset_id(self, HttpApiAuth, tmp_path):
fp = create_txt_file(tmp_path / "ragflow_test.txt")
res = upload_documents(HttpApiAuth, "invalid_dataset_id", [fp])
assert res["code"] == 100
assert res["message"] == """LookupError("Can\'t find the dataset with ID invalid_dataset_id!")""" | test | 1 | {"function_name": "test_invalid_dataset_id", "class_name": "TestDocumentsUpload", "qualname": "TestDocumentsUpload.test_invalid_dataset_id", "file_path": "test/testcases/test_http_api/test_file_management_within_dataset/test_upload_documents.py", "repo_id": "infiniflow/ragflow", "loc": 5, "tested_modules": ["concurrent.futures", "common", "configs", "libs.auth", "requests_toolbelt"], "has_docstring": false, "runnable_level": "project_runnable"} |
huggingface/transformers:utils/modular_model_detector.py:license_header | Add a Apache-2.0 license header comment for the project 'transformers', authored by The HuggingFace Team, year 2025. | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# 🔴🔴🔴 THIS IS AN INTERNAL TOOL. It WILL interact with the hub and use significant local compute resources. Use at your own risk. | license | 0 | {"license_type": "Apache-2.0", "author": "The HuggingFace Team", "year": "2025", "source": "header", "repo_id": "huggingface/transformers"} |
crewAIInc/crewAI:lib/crewai/tests/mcp/test_amp_mcp.py:TestBuildMCPConfigFromDict.test_defaults_to_http | # Context:
from crewai.mcp.config import MCPServerHTTP, MCPServerSSE
from crewai.mcp.tool_resolver import MCPToolResolver
def agent(): ...
def resolver(agent): ...
def mock_tool_definitions(): ...
class TestFetchAmpMCPConfigs: ...
class TestParseAmpRef: ...
class TestGetMCPToolsAmpIntegration: ...
class TestBuildMCPConfigFromDict:
def test_builds_http_config(self): ...
def test_builds_sse_config(self): ...
def test_http_defaults(self): ...
# Task:
Write a Python test method `test_defaults_to_http` in test class `TestBuildMCPConfigFromDict` to verify the behavior of `defaults_to_http`.
Module under test: crewai.agent.core, crewai.mcp.config, crewai.mcp.tool_resolver | def test_defaults_to_http(self):
config_dict = {
"url": "https://mcp.example.com/api",
}
result = MCPToolResolver._build_mcp_config_from_dict(config_dict)
assert isinstance(result, MCPServerHTTP)
assert result.streamable is True | test | 0 | {"function_name": "test_defaults_to_http", "class_name": "TestBuildMCPConfigFromDict", "qualname": "TestBuildMCPConfigFromDict.test_defaults_to_http", "file_path": "lib/crewai/tests/mcp/test_amp_mcp.py", "repo_id": "crewAIInc/crewAI", "loc": 9, "tested_modules": ["crewai.agent.core", "crewai.mcp.config", "crewai.mcp.tool_resolver", "crewai.tools.base_tool"], "has_docstring": false, "runnable_level": "project_runnable"} |
crewAIInc/crewAI:lib/crewai/tests/test_flow_ask.py:TestAskMetadata.test_ask_provider_returns_string_with_metadata_sent | # Context:
from crewai.flow import Flow, flow_config, listen, start
class MockInputProvider: ...
class SlowMockProvider: ...
class TestAskBasic: ...
class TestAskTimeout: ...
class TestProviderResolution: ...
class TestAskEvents: ...
class TestAskCheckpoint: ...
class TestInputHistory: ...
class TestAskIntegration: ...
class TestConsoleProviderInput: ...
class TestInputProviderProtocol: ...
class TestAskErrorHandling: ...
class TestAskMetadata:
def test_ask_passes_metadata_to_provider(self) -> None: ...
def test_ask_metadata_none_by_default(self) -> None: ...
def test_ask_provider_returns_input_response(self) -> None: ...
def test_ask_metadata_in_requested_event(self) -> None: ...
def test_ask_metadata_in_received_event(self) -> None: ...
def test_ask_input_response_with_none_text(self) -> None: ...
def test_ask_metadata_thread_safe(self) -> None: ...
# Task:
Write a Python test method `test_ask_provider_returns_string_with_metadata_sent` in test class `TestAskMetadata` to provider returns plain string; history has metadata but no response_metadata.
Module under test: __future__, datetime, typing | def test_ask_provider_returns_string_with_metadata_sent(self) -> None:
"""Provider returns plain string; history has metadata but no response_metadata."""
class TestFlow(Flow):
input_provider = MockInputProvider(["answer"])
@start()
def my_method(self):
return self.ask("Q?", metadata={"channel": "#research"})
flow = TestFlow()
flow.kickoff()
entry = flow._input_history[0]
assert entry["metadata"] == {"channel": "#research"}
assert entry["response_metadata"] is None | test | 0 | {"function_name": "test_ask_provider_returns_string_with_metadata_sent", "class_name": "TestAskMetadata", "qualname": "TestAskMetadata.test_ask_provider_returns_string_with_metadata_sent", "file_path": "lib/crewai/tests/test_flow_ask.py", "repo_id": "crewAIInc/crewAI", "loc": 16, "tested_modules": ["__future__", "datetime", "typing", "crewai.flow", "crewai.flow.async_feedback.providers"], "has_docstring": true, "runnable_level": "project_runnable"} |
infiniflow/ragflow:agent/sandbox/tests/test_providers.py:module_doc | Write a module-level docstring for the Python module `test_providers` which contains class `TestSandboxDataclasses`, class `TestProviderManager`, class `TestSelfManagedProvider`, class `TestProviderInterface`. | Unit tests for sandbox provider abstraction layer. | documentation | 1 | {"doc_type": "module", "module_name": "test_providers", "file_path": "agent/sandbox/tests/test_providers.py", "repo_id": "infiniflow/ragflow", "char_length": 50} |
mem0ai/mem0:openmemory/api/app/models.py:after_memory_insert | # Context:
from sqlalchemy import (
JSON,
UUID,
Boolean,
Column,
DateTime,
Enum,
ForeignKey,
Index,
Integer,
String,
Table,
event,
)
from sqlalchemy.orm import Session, relationship
def get_current_utc_time(): ...
class MemoryState(enum.Enum): ...
class User(Base): ...
class App(Base): ...
class Config(Base): ...
class Memory(Base): ...
class Category(Base): ...
class AccessControl(Base): ...
class ArchivePolicy(Base): ...
class MemoryStatusHistory(Base): ...
class MemoryAccessLog(Base): ...
def categorize_memory(memory: Memory, db: Session) -> None: ...
def after_memory_update(mapper, connection, target): ...
# Task:
Write a Python function `after_memory_insert` to trigger categorization after a memory is inserted.
Parameters: mapper, connection, target | def after_memory_insert(mapper, connection, target):
"""Trigger categorization after a memory is inserted."""
db = Session(bind=connection)
categorize_memory(target, db)
db.close() | function_simple | 1 | {"cognitive_complexity": 0, "loc": 5, "code_loc": 3, "docstring_loc": 1, "function_name": "after_memory_insert", "class_name": null, "qualname": "after_memory_insert", "file_path": "openmemory/api/app/models.py", "repo_id": "mem0ai/mem0", "has_docstring": true, "runnable_level": "file_runnable"} |
huggingface/transformers:tests/cli/test_chat.py:test_new_chat_history | # Context:
from transformers.cli.chat import new_chat_history, parse_generate_flags, save_chat
def test_help(cli): ...
def test_save_and_clear_chat(): ...
def test_parse_generate_flags(): ...
# Task:
Write a Python test function `test_new_chat_history` to verify the behavior of `new_chat_history`.
Module under test: transformers.cli.chat | def test_new_chat_history():
assert new_chat_history() == []
assert new_chat_history("prompt") == [{"role": "system", "content": "prompt"}] | test | 0 | {"function_name": "test_new_chat_history", "class_name": null, "qualname": "test_new_chat_history", "file_path": "tests/cli/test_chat.py", "repo_id": "huggingface/transformers", "loc": 3, "tested_modules": ["transformers.cli.chat"], "has_docstring": false, "runnable_level": "plib_runnable"} |
langflow-ai/langflow:src/lfx/tests/unit/cli/test_validation.py:TestValidateGlobalVariablesForEnv.test_check_variables_option_in_execute | # Context:
from unittest.mock import MagicMock, patch
from lfx.cli.validation import is_valid_env_var_name, validate_global_variables_for_env
from lfx.graph.graph.base import Graph
from lfx.graph.vertex.base import Vertex
class TestIsValidEnvVarName: ...
class TestValidateGlobalVariablesForEnv:
def test_no_validation_when_database_available(self, mock_get_settings): ...
def test_validation_when_noop_database(self, mock_get_settings): ...
def test_multiple_invalid_fields(self, mock_get_settings): ...
def test_empty_or_none_values_ignored(self, mock_get_settings): ...
def test_vertex_without_load_from_db_fields(self, mock_get_settings): ...
def test_non_string_values_ignored(self, mock_get_settings): ...
def test_check_variables_option_in_serve(self, mock_get_settings): ...
# Task:
Write a Python test method `test_check_variables_option_in_execute` in test class `TestValidateGlobalVariablesForEnv` to test that check_variables option controls validation in execute command.
Module under test: lfx.cli.validation, lfx.graph.graph.base, lfx.graph.vertex.base | def test_check_variables_option_in_execute(self, mock_get_settings):
"""Test that check_variables option controls validation in execute command."""
# This test verifies the check_variables option works correctly
# when used with the execute command (--check-variables/--no-check-variables)
# Mock settings to indicate noop database
mock_settings_service = MagicMock()
mock_settings_service.settings.use_noop_database = True
mock_get_settings.return_value = mock_settings_service
# Create a mock graph with invalid variable
graph = MagicMock(spec=Graph)
vertex = MagicMock(spec=Vertex)
vertex.id = "vertex1"
vertex.display_name = "Test Component"
vertex.load_from_db_fields = ["api_key"]
vertex.params = {"api_key": "INVALID VAR NAME"} # Invalid: contains spaces # pragma: allowlist secret
graph.vertices = [vertex]
# When check_variables=True (default), validation should find errors
errors = validate_global_variables_for_env(graph)
assert len(errors) == 1
assert "INVALID VAR NAME" in errors[0] | test | 1 | {"function_name": "test_check_variables_option_in_execute", "class_name": "TestValidateGlobalVariablesForEnv", "qualname": "TestValidateGlobalVariablesForEnv.test_check_variables_option_in_execute", "file_path": "src/lfx/tests/unit/cli/test_validation.py", "repo_id": "langflow-ai/langflow", "loc": 23, "tested_modules": ["lfx.cli.validation", "lfx.graph.graph.base", "lfx.graph.vertex.base"], "has_docstring": true, "runnable_level": "project_runnable"} |
huggingface/transformers:tests/models/videomae/test_video_processing_videomae.py:VideoMAEVideoProcessingTest.test_video_processor_properties | # Context:
class VideoMAEVideoProcessingTester: ...
class VideoMAEVideoProcessingTest(VideoProcessingTestMixin, unittest.TestCase):
fast_video_processing_class = VideoMAEVideoProcessor if is_torchvision_available() else None
input_name = "pixel_values"
def setUp(self): ...
def video_processor_dict(self): ...
def test_pixel_value_identity(self): ...
# Task:
Write a Python test method `test_video_processor_properties` in test class `VideoMAEVideoProcessingTest` to verify the behavior of `video_processor_properties`.
Module under test: PIL, transformers.image_utils, transformers.testing_utils | def test_video_processor_properties(self):
video_processing = self.fast_video_processing_class(**self.video_processor_dict)
self.assertTrue(hasattr(video_processing, "do_resize"))
self.assertTrue(hasattr(video_processing, "size"))
self.assertTrue(hasattr(video_processing, "do_center_crop"))
self.assertTrue(hasattr(video_processing, "center_crop"))
self.assertTrue(hasattr(video_processing, "do_normalize"))
self.assertTrue(hasattr(video_processing, "image_mean"))
self.assertTrue(hasattr(video_processing, "image_std"))
self.assertTrue(hasattr(video_processing, "do_convert_rgb"))
self.assertTrue(hasattr(video_processing, "model_input_names"))
self.assertIn("pixel_values", video_processing.model_input_names) | test | 0 | {"function_name": "test_video_processor_properties", "class_name": "VideoMAEVideoProcessingTest", "qualname": "VideoMAEVideoProcessingTest.test_video_processor_properties", "file_path": "tests/models/videomae/test_video_processing_videomae.py", "repo_id": "huggingface/transformers", "loc": 12, "tested_modules": ["PIL", "transformers.image_utils", "transformers.testing_utils", "transformers.utils", "test_video_processing_common"], "has_docstring": false, "runnable_level": "class_runnable"} |
github/spec-kit:src/specify_cli/extensions.py:ExtensionManager.__init__ | # Context:
from pathlib import Path
class ExtensionError(Exception): ...
class ValidationError(ExtensionError): ...
class CompatibilityError(ExtensionError): ...
class ExtensionManifest: ...
class ExtensionRegistry: ...
def version_satisfies(current: str, required: str) -> bool: ...
class CommandRegistrar: ...
class ExtensionCatalog: ...
class ConfigManager: ...
class HookExecutor: ...
class ExtensionManager:
def check_compatibility(self, manifest: ExtensionManifest, speckit_version: str) -> bool: ...
def install_from_directory(self, source_dir: Path, speckit_version: str, register_commands: bool) -> ExtensionManifest: ...
def install_from_zip(self, zip_path: Path, speckit_version: str) -> ExtensionManifest: ...
def remove(self, extension_id: str, keep_config: bool) -> bool: ...
def list_installed(self) -> List[Dict[str, Any]]: ...
def get_extension(self, extension_id: str) -> Optional[ExtensionManifest]: ...
# Task:
Write a Python method `__init__` for the class `ExtensionManager` to initialize extension manager.
Parameters: project_root: Path | def __init__(self, project_root: Path):
"""Initialize extension manager.
Args:
project_root: Path to project root directory
"""
self.project_root = project_root
self.extensions_dir = project_root / ".specify" / "extensions"
self.registry = ExtensionRegistry(self.extensions_dir) | function_simple | 0 | {"cognitive_complexity": 0, "loc": 9, "code_loc": 3, "docstring_loc": 5, "function_name": "__init__", "class_name": "ExtensionManager", "qualname": "ExtensionManager.__init__", "file_path": "src/specify_cli/extensions.py", "repo_id": "github/spec-kit", "has_docstring": true, "runnable_level": "file_runnable"} |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/databricks_query_tool/databricks_query_tool.py:_has_data_array | # Context:
from typing import TYPE_CHECKING, Any, TypeGuard, TypedDict
class ExecutionContext(TypedDict): ...
class DatabricksQueryToolSchema(BaseModel): ...
class DatabricksQueryTool(BaseTool): ...
# Task:
Write a Python function `_has_data_array` to type guard to check if result has data_array attribute.
Parameters: result: Any
Returns: TypeGuard[Any] | def _has_data_array(result: Any) -> TypeGuard[Any]:
"""Type guard to check if result has data_array attribute.
Args:
result: The result object to check.
Returns:
True if result.result.data_array exists and is not None.
"""
return (
hasattr(result, "result")
and result.result is not None
and hasattr(result.result, "data_array")
and result.result.data_array is not None
) | function_simple | 0 | {"cognitive_complexity": 1, "loc": 15, "code_loc": 6, "docstring_loc": 8, "function_name": "_has_data_array", "class_name": null, "qualname": "_has_data_array", "file_path": "lib/crewai-tools/src/crewai_tools/tools/databricks_query_tool/databricks_query_tool.py", "repo_id": "crewAIInc/crewAI", "has_docstring": true, "runnable_level": "slib_runnable"} |
apache/airflow:shared/configuration/tests/configuration/test_parser.py:TestAirflowConfigParser.test_deprecated_options_lookup_disabled | # Context:
from configparser import ConfigParser
import pytest
from airflow_shared.configuration.exceptions import AirflowConfigException
class AirflowConfigParser(_SharedAirflowConfigParser): ...
class TestAirflowConfigParser:
def test_getboolean(self): ...
def test_getint(self): ...
def test_getfloat(self): ...
def test_getlist(self): ...
def test_getjson(self, config_str, expected): ...
def test_getenum(self): ...
def test_getenumlist(self): ...
def test_getjson_empty_with_fallback(self): ...
def test_getjson_fallback(self, fallback): ...
def test_has_option(self): ...
def test_remove_option(self): ...
def test_get_with_defaults(self): ...
def test_get_mandatory_value(self): ...
def test_sensitive_config_values(self): ...
def test_deprecated_options(self): ...
def test_deprecated_options_same_section(self): ...
def test_deprecated_options_precedence(self): ...
def test_deprecated_options_with_lookup_from_deprecated(self, deprecated_options_dict, kwargs, new_section_expected_value, old_section_expected_value): ...
def test_deprecated_options_cmd(self): ...
def test_cmd_from_env_var(self): ...
def test_cmd_from_config_file(self): ...
def test_secret_from_config_file(self): ...
def test_secret_from_env_var(self): ...
def test_deprecated_sections(self): ...
def test_gettimedelta(self): ...
def test_getimport(self): ...
def test_get_mandatory_list_value(self): ...
def test_set_case_insensitive(self): ...
def test_configure_parser_from_configuration_description_with_deprecated_options(self): ...
def test_get_default_value_deprecated(self): ...
def test_team_env_var_takes_priority(self): ...
def test_team_config_file_section(self): ...
def test_team_does_not_fallback_to_global_config(self): ...
def test_team_does_not_fallback_to_global_env_var(self): ...
def test_team_skips_cmd_lookup(self): ...
def test_team_skips_secret_lookup(self): ...
def test_team_falls_through_to_defaults(self): ...
def test_team_env_var_format(self): ...
# Task:
Write a Python test method `test_deprecated_options_lookup_disabled` in test class `TestAirflowConfigParser` to test deprecated options with lookup_from_deprecated=False.
Module under test: __future__, configparser, enum | def test_deprecated_options_lookup_disabled(self):
"""Test deprecated options with lookup_from_deprecated=False"""
class TestParserWithDeprecated(AirflowConfigParser):
deprecated_options = {
("new_section", "new_key"): ("old_section", "old_key", "2.0.0"),
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.configuration_description = {}
self._default_values = ConfigParser()
self._suppress_future_warnings = False
test_conf = TestParserWithDeprecated()
test_conf.add_section("old_section")
test_conf.set("old_section", "old_key", "old_value")
with pytest.raises(AirflowConfigException):
test_conf.get("new_section", "new_key", lookup_from_deprecated=False) | test | 1 | {"function_name": "test_deprecated_options_lookup_disabled", "class_name": "TestAirflowConfigParser", "qualname": "TestAirflowConfigParser.test_deprecated_options_lookup_disabled", "file_path": "shared/configuration/tests/configuration/test_parser.py", "repo_id": "apache/airflow", "loc": 20, "tested_modules": ["__future__", "configparser", "enum", "airflow_shared.configuration.exceptions", "airflow_shared.configuration.parser"], "has_docstring": true, "runnable_level": "project_runnable"} |
crewAIInc/crewAI:lib/crewai/tests/utilities/test_agent_utils.py:TestParseToolCallArgs.test_valid_json_with_nested_values | # Context:
from crewai.utilities.agent_utils import (
_asummarize_chunks,
_estimate_token_count,
_extract_summary_tags,
_format_messages_for_summary,
_split_messages_into_chunks,
convert_tools_to_openai_schema,
parse_tool_call_args,
summarize_messages,
)
class CalculatorInput(BaseModel): ...
class CalculatorTool(BaseTool): ...
class SearchInput(BaseModel): ...
class SearchTool(BaseTool): ...
class NoSchemaTool(BaseTool): ...
class TestConvertToolsToOpenaiSchema: ...
def _make_mock_i18n() -> MagicMock: ...
class MCPStyleInput(BaseModel): ...
class MCPStyleTool(BaseTool): ...
class TestOptionalFieldsPreserveNull: ...
class TestSummarizeMessages: ...
class TestFormatMessagesForSummary: ...
class TestExtractSummaryTags: ...
class TestSplitMessagesIntoChunks: ...
class TestEstimateTokenCount: ...
class TestParallelSummarization: ...
def _build_long_conversation() -> list[dict[str, Any]]: ...
class TestParallelSummarizationVCR: ...
class TestParseToolCallArgs:
def test_valid_json_string_returns_dict(self) -> None: ...
def test_malformed_json_returns_error_dict(self) -> None: ...
def test_malformed_json_preserves_original_tool(self) -> None: ...
def test_malformed_json_original_tool_defaults_to_none(self) -> None: ...
def test_dict_input_returned_directly(self) -> None: ...
def test_empty_dict_input_returned_directly(self) -> None: ...
def test_error_result_has_correct_keys(self) -> None: ...
# Task:
Write a Python test method `test_valid_json_with_nested_values` in test class `TestParseToolCallArgs` to verify the behavior of `valid_json_with_nested_values`.
Module under test: __future__, typing, pydantic | def test_valid_json_with_nested_values(self) -> None:
args_dict, error = parse_tool_call_args(
'{"query": "hello", "options": {"limit": 10}}', "search", "call_6"
)
assert error is None
assert args_dict == {"query": "hello", "options": {"limit": 10}} | test | 0 | {"function_name": "test_valid_json_with_nested_values", "class_name": "TestParseToolCallArgs", "qualname": "TestParseToolCallArgs.test_valid_json_with_nested_values", "file_path": "lib/crewai/tests/utilities/test_agent_utils.py", "repo_id": "crewAIInc/crewAI", "loc": 6, "tested_modules": ["__future__", "typing", "pydantic", "crewai.tools.base_tool", "crewai.utilities.agent_utils"], "has_docstring": false, "runnable_level": "project_runnable"} |
jax-ml/jax:jax/_src/pallas/pipelining/schedulers.py:check_async_done | # Context:
import operator
import jax
from jax import numpy as jnp
from jax._src.pallas.pipelining import internal
def compute_grid_indices(linear_index: jax.Array, grid_size: Sequence[int]): ...
def increment_grid(indices: Sequence[int | jax.Array], grid: Sequence[int], dynamic: bool): ...
class PipelineContext: ...
def check_pipeline(stages: Sequence[internal.PipelineStage]): ...
class Scoreboard: ...
class GridCarry: ...
def check_args_ready(stage: internal.PipelineStage, scoreboard: Scoreboard, new_scoreboard: Scoreboard, current_stage_counter: int | jax.Array, dynamic) -> bool | jax.Array: ...
def check_async_start(stage: internal.PipelineStage, scoreboard: Scoreboard, current_stage_counter: int | jax.Array, dynamic) -> bool | jax.Array: ...
class EvalStageFunc(Protocol): ...
def eval_stage(ctx: PipelineContext, stage: internal.PipelineStage, args) -> PipelineState: ...
def linearize_stages(stages: Sequence[internal.PipelineStage]) -> Sequence[internal.PipelineStage]: ...
def make_ctx(stage: internal.PipelineStage, stage_idx: int, scoreboard: Scoreboard, pipeline_state: PipelineState, grid_carry: GridCarry | None, grid: Sequence[int] | None, offset: int | jax.Array) -> PipelineContext: ...
def static_nd_loop_scheduler(nd_loop: internal.NDLoopStruct, args: Sequence[Any], initial_state: PipelineState | None, eval_fn: EvalStageFunc | None): ...
# Task:
Write a Python function `check_async_done` to returns whether the async done stage can run.
Parameters: stage: internal.PipelineStage, scoreboard: Scoreboard, num_itrs: int | jax.Array, current_stage_counter: int | jax.Array, dynamic
Returns: bool | jax.Array | def check_async_done(stage: internal.PipelineStage,
scoreboard: Scoreboard,
num_itrs: int | jax.Array,
current_stage_counter: int | jax.Array,
dynamic=False) -> bool | jax.Array:
"""Returns whether the async done stage can run."""
and_op = jnp.logical_and if dynamic else operator.and_
# For async done stages, we need to insert delays so that they
# happen as late as possible.
# First condition is that there are a full number of async starts
# in flight.
max_in_flight = stage.properties.max_in_flight
can_run = True
token_read_effs = internal.filter_tokens(
internal.filter_read_effects(stage.effects))
read_tokens = {effect.input_index for effect in token_read_effs}
assert len(read_tokens) == 1, stage.effects
read_token = tuple(read_tokens)[0]
async_start_stage_idx = scoreboard.which_stage_writes[read_token]
async_start_counter = scoreboard.get_stage_counter(
async_start_stage_idx)
async_done_counter = current_stage_counter
min_op = jnp.minimum if dynamic else min
start_full = (async_start_counter >=
min_op(async_done_counter + max_in_flight, num_itrs))
can_run = and_op(can_run, start_full)
# Second condition - the consumers of this stage's outputs will
# actually need the results on the next iteration.
for write_idx in stage.get_write_idxs():
which_stages_read = scoreboard.which_stages_read[write_idx]
for read_stage_idx in which_stages_read:
read_itr = scoreboard.stage_counters[read_stage_idx]
can_run = and_op(can_run, (current_stage_counter <= read_itr))
return can_run | function_simple | 1 | {"cognitive_complexity": 5, "loc": 34, "code_loc": 22, "docstring_loc": 1, "function_name": "check_async_done", "class_name": null, "qualname": "check_async_done", "file_path": "jax/_src/pallas/pipelining/schedulers.py", "repo_id": "jax-ml/jax", "has_docstring": true, "runnable_level": "file_runnable"} |
ray-project/ray:python/ray/llm/_internal/batch/processor/utils.py:extract_resource_kwargs | # Context:
from typing import Any, Dict, Optional, Tuple, Union
def get_value_or_fallback(value: Any, fallback: Any) -> Any: ...
def normalize_cpu_stage_concurrency(concurrency: Optional[Union[int, Tuple[int, int]]]) -> Dict[str, int]: ...
def build_cpu_stage_map_kwargs(stage_cfg: _StageConfigBase) -> Dict[str, Any]: ...
# Task:
Write a Python function `extract_resource_kwargs` to extract non-None resource kwargs for map_batches.
Parameters: runtime_env: Optional[Dict[str, Any]], num_cpus: Optional[float], memory: Optional[float]
Returns: Dict[str, Any] | def extract_resource_kwargs(
runtime_env: Optional[Dict[str, Any]],
num_cpus: Optional[float],
memory: Optional[float],
) -> Dict[str, Any]:
"""Extract non-None resource kwargs for map_batches."""
kwargs = {}
if runtime_env is not None:
kwargs["runtime_env"] = runtime_env
if num_cpus is not None:
kwargs["num_cpus"] = num_cpus
if memory is not None:
kwargs["memory"] = memory
return kwargs | function_simple | 0 | {"cognitive_complexity": 3, "loc": 14, "code_loc": 8, "docstring_loc": 1, "function_name": "extract_resource_kwargs", "class_name": null, "qualname": "extract_resource_kwargs", "file_path": "python/ray/llm/_internal/batch/processor/utils.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "slib_runnable"} |
huggingface/diffusers:src/diffusers/modular_pipelines/components_manager.py:ComponentsManager.remove | # Context:
import torch
import gc
class CustomOffloadHook(ModelHook): ...
class UserCustomOffloadHook: ...
def custom_offload_with_hook(model_id: str, model: torch.nn.Module, execution_device: str | int | torch.device, offload_strategy: 'AutoOffloadStrategy' | None): ...
class AutoOffloadStrategy: ...
def summarize_dict_by_value_and_parts(d: dict[str, Any]) -> dict[str, Any]: ...
class ComponentsManager:
_available_info_fields = [
def __init__(self):
self.components = OrderedDict()
# YiYi TODO: can remove once confirm we don't need this in mellon
self.added_time = OrderedDict() # Store when components were added
self.collections = OrderedDict() # collection_name -> set of component_names
self.model_hooks = None
self._auto_offload_enabled = False
def _lookup_ids(self, name: str | None, collection: str | None, load_id: str | None, components: OrderedDict | None): ...
def _id_to_name(component_id: str): ...
def add(self, name: str, component: Any, collection: str | None): ...
def remove_from_collection(self, component_id: str, collection: str): ...
def search_components(self, names: str | None, collection: str | None, load_id: str | None, return_dict_with_names: bool): ...
def enable_auto_cpu_offload(self, device: str | int | torch.device, memory_reserve_margin): ...
def disable_auto_cpu_offload(self): ...
def get_model_info(self, component_id: str, fields: str | list[str] | None) -> dict[str, Any] | None: ...
def __repr__(self): ...
def get_one(self, component_id: str | None, name: str | None, collection: str | None, load_id: str | None) -> Any: ...
def get_ids(self, names: str | list[str], collection: str | None): ...
def get_components_by_ids(self, ids: list[str], return_dict_with_names: bool | None): ...
def get_components_by_names(self, names: list[str], collection: str | None): ...
# Task:
Write a Python method `remove` for the class `ComponentsManager` to remove a component from the ComponentsManager.
Parameters: component_id: str | def remove(self, component_id: str = None):
"""
Remove a component from the ComponentsManager.
Args:
component_id (str): The ID of the component to remove
"""
if component_id not in self.components:
logger.warning(f"Component '{component_id}' not found in ComponentsManager")
return
component = self.components.pop(component_id)
self.added_time.pop(component_id)
for collection in self.collections:
if component_id in self.collections[collection]:
self.collections[collection].remove(component_id)
if self._auto_offload_enabled:
self.enable_auto_cpu_offload(self._auto_offload_device)
else:
if isinstance(component, torch.nn.Module):
component.to("cpu")
del component
import gc
gc.collect()
if torch.cuda.is_available():
torch.cuda.empty_cache()
if torch.xpu.is_available():
torch.xpu.empty_cache() | function_complex | 1 | {"cognitive_complexity": 12, "loc": 31, "code_loc": 20, "docstring_loc": 6, "function_name": "remove", "class_name": "ComponentsManager", "qualname": "ComponentsManager.remove", "file_path": "src/diffusers/modular_pipelines/components_manager.py", "repo_id": "huggingface/diffusers", "has_docstring": true, "runnable_level": "file_runnable"} |
apache/airflow:providers/google/src/airflow/providers/google/cloud/hooks/gen_ai.py:GenAIGenerativeModelHook.count_tokens | # Context:
from airflow.providers.google.common.hooks.base_google import (
PROVIDE_PROJECT_ID,
GoogleBaseAsyncHook,
GoogleBaseHook,
)
from google.genai.types import (
BatchJob,
ContentListUnion,
ContentListUnionDict,
CountTokensConfigOrDict,
CountTokensResponse,
CreateBatchJobConfig,
CreateCachedContentConfigOrDict,
CreateTuningJobConfigOrDict,
DeleteFileResponse,
DeleteResourceJob,
EmbedContentConfigOrDict,
EmbedContentResponse,
File,
GenerateContentConfig,
ListBatchJobsConfig,
TuningDatasetOrDict,
TuningJob,
)
class BatchJobStatus(enum.Enum): ...
class GenAIGeminiAPIHook(GoogleBaseHook): ...
class GenAIGeminiAPIAsyncHook(GoogleBaseAsyncHook): ...
class GenAIGenerativeModelHook(GoogleBaseHook):
def get_genai_client(self, project_id: str, location: str): ...
def embed_content(self, model: str, location: str, contents: ContentListUnion | ContentListUnionDict | list[str], config: EmbedContentConfigOrDict | None, project_id: str) -> EmbedContentResponse: ...
def generate_content(self, location: str, model: str, contents: ContentListUnionDict, generation_config: GenerateContentConfig | None, project_id: str) -> str: ...
def supervised_fine_tuning_train(self, source_model: str, location: str, training_dataset: TuningDatasetOrDict, tuning_job_config: CreateTuningJobConfigOrDict | dict[str, Any] | None, project_id: str) -> TuningJob: ...
def create_cached_content(self, model: str, location: str, cached_content_config: CreateCachedContentConfigOrDict | None, project_id: str) -> str: ...
# Task:
Write a Python method `count_tokens` for the class `GenAIGenerativeModelHook` to use Count Tokens API to calculate the number of input tokens before sending a request to Gemini API.
Parameters: location: str, model: str, contents: ContentListUnion | ContentListUnionDict, config: CountTokensConfigOrDict | None, project_id: str
Returns: CountTokensResponse | def count_tokens(
self,
location: str,
model: str,
contents: ContentListUnion | ContentListUnionDict,
config: CountTokensConfigOrDict | None = None,
project_id: str = PROVIDE_PROJECT_ID,
) -> CountTokensResponse:
"""
Use Count Tokens API to calculate the number of input tokens before sending a request to Gemini API.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param location: Required. The ID of the Google Cloud location that the service belongs to.
:param contents: Required. The multi-part content of a message that a user or a program
gives to the generative model, in order to elicit a specific response.
:param model: Required. Model,
supporting prompts with text-only input, including natural language
tasks, multi-turn text and code chat, and code generation. It can
output text and code.
:param config: Optional. Configuration for Count Tokens.
"""
client = self.get_genai_client(project_id=project_id, location=location)
response = client.models.count_tokens(
model=model,
contents=contents,
config=config,
)
return response | function_simple | 1 | {"cognitive_complexity": 0, "loc": 29, "code_loc": 7, "docstring_loc": 13, "function_name": "count_tokens", "class_name": "GenAIGenerativeModelHook", "qualname": "GenAIGenerativeModelHook.count_tokens", "file_path": "providers/google/src/airflow/providers/google/cloud/hooks/gen_ai.py", "repo_id": "apache/airflow", "has_docstring": true, "runnable_level": "project_runnable"} |
vllm-project/vllm:tests/reasoning/test_gptoss_reasoning_parser.py:test_gptoss_is_reasoning_end | # Context:
import pytest
from vllm.reasoning import ReasoningParser
from vllm.reasoning.gptoss_reasoning_parser import GptOssReasoningParser
def gpt_oss_tokenizer(): ...
# Task:
Write a Python test function `test_gptoss_is_reasoning_end` to verify the behavior of `gptoss_is_reasoning_end`.
Module under test: transformers, vllm.reasoning, vllm.reasoning.gptoss_reasoning_parser | def test_gptoss_is_reasoning_end(
output,
is_reasoning_end,
gpt_oss_tokenizer,
):
output = gpt_oss_tokenizer.tokenize(output)
parser: ReasoningParser = GptOssReasoningParser(gpt_oss_tokenizer)
# Test is_reasoning_end
output_ids = gpt_oss_tokenizer.convert_tokens_to_ids(output)
actual_is_reasoning_end = parser.is_reasoning_end(output_ids)
assert is_reasoning_end == actual_is_reasoning_end | test | 1 | {"function_name": "test_gptoss_is_reasoning_end", "class_name": null, "qualname": "test_gptoss_is_reasoning_end", "file_path": "tests/reasoning/test_gptoss_reasoning_parser.py", "repo_id": "vllm-project/vllm", "loc": 12, "tested_modules": ["transformers", "vllm.reasoning", "vllm.reasoning.gptoss_reasoning_parser"], "has_docstring": false, "runnable_level": "project_runnable"} |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/rag/loaders/postgres_loader.py:PostgresLoader.load | # Context:
from urllib.parse import urlparse
from psycopg2 import Error, connect
from psycopg2.extras import RealDictCursor
from crewai_tools.rag.base_loader import BaseLoader, LoaderResult
from crewai_tools.rag.source_content import SourceContent
class PostgresLoader(BaseLoader):
# Task:
Write a Python method `load` for the class `PostgresLoader` to load content from a PostgreSQL database table.
Parameters: source: SourceContent
Returns: LoaderResult | def load(self, source: SourceContent, **kwargs) -> LoaderResult: # type: ignore[override]
"""Load content from a PostgreSQL database table.
Args:
source: SQL query (e.g., "SELECT * FROM table_name")
**kwargs: Additional arguments including db_uri
Returns:
LoaderResult with database content
"""
metadata = kwargs.get("metadata", {})
db_uri = metadata.get("db_uri")
if not db_uri:
raise ValueError("Database URI is required for PostgreSQL loader")
query = source.source
parsed = urlparse(db_uri)
if parsed.scheme not in ["postgresql", "postgres", "postgresql+psycopg2"]:
raise ValueError(f"Invalid PostgreSQL URI scheme: {parsed.scheme}")
connection_params = {
"host": parsed.hostname or "localhost",
"port": parsed.port or 5432,
"user": parsed.username,
"password": parsed.password,
"database": parsed.path.lstrip("/") if parsed.path else None,
"cursor_factory": RealDictCursor,
}
if not connection_params["database"]:
raise ValueError("Database name is required in the URI")
try:
connection = connect(**connection_params)
try:
with connection.cursor() as cursor:
cursor.execute(query)
rows = cursor.fetchall()
if not rows:
content = "No data found in the table"
return LoaderResult(
content=content,
metadata={"source": query, "row_count": 0},
doc_id=self.generate_doc_id(
source_ref=query, content=content
),
)
text_parts = []
columns = list(rows[0].keys())
text_parts.append(f"Columns: {', '.join(columns)}")
text_parts.append(f"Total rows: {len(rows)}")
text_parts.append("")
for i, row in enumerate(rows, 1):
text_parts.append(f"Row {i}:")
for col, val in row.items():
if val is not None:
text_parts.append(f" {col}: {val}")
text_parts.append("")
content = "\n".join(text_parts)
if len(content) > 100000:
content = content[:100000] + "\n\n[Content truncated...]"
return LoaderResult(
content=content,
metadata={
"source": query,
"database": connection_params["database"],
"row_count": len(rows),
"columns": columns,
},
doc_id=self.generate_doc_id(source_ref=query, content=content),
)
finally:
connection.close()
except Error as e:
raise ValueError(f"PostgreSQL database error: {e}") from e
except Exception as e:
raise ValueError(f"Failed to load data from PostgreSQL: {e}") from e | function_complex | 0 | {"cognitive_complexity": 31, "loc": 86, "code_loc": 63, "docstring_loc": 9, "function_name": "load", "class_name": "PostgresLoader", "qualname": "PostgresLoader.load", "file_path": "lib/crewai-tools/src/crewai_tools/rag/loaders/postgres_loader.py", "repo_id": "crewAIInc/crewAI", "has_docstring": true, "runnable_level": "project_runnable"} |
browser-use/browser-use:browser_use/code_use/utils.py:truncate_message_content | Write a Python function `truncate_message_content` to truncate message content to max_length characters for history.
Parameters: content: str, max_length: int
Returns: str | def truncate_message_content(content: str, max_length: int = 10000) -> str:
"""Truncate message content to max_length characters for history."""
if len(content) <= max_length:
return content
# Truncate and add marker
return content[:max_length] + f'\n\n[... truncated {len(content) - max_length} characters for history]' | function_simple | 0 | {"cognitive_complexity": 1, "loc": 6, "code_loc": 3, "docstring_loc": 1, "function_name": "truncate_message_content", "class_name": null, "qualname": "truncate_message_content", "file_path": "browser_use/code_use/utils.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "self_contained"} |
ray-project/ray:python/ray/_common/tests/test_wait_for_condition.py:TestWaitForCondition.test_immediate_true_condition | # Context:
from ray._common.test_utils import async_wait_for_condition, wait_for_condition
class TestAsyncWaitForCondition: ...
class TestEdgeCases: ...
class TestWaitForCondition:
def test_condition_becomes_true(self): ...
def test_timeout_raises_runtime_error(self): ...
def test_condition_with_kwargs(self): ...
def test_exception_handling_default(self): ...
def test_exception_handling_raise_true(self): ...
def test_custom_retry_interval(self): ...
def test_condition_with_mixed_results(self): ...
# Task:
Write a Python test method `test_immediate_true_condition` in test class `TestWaitForCondition` to test that function returns immediately when condition is already true.
Module under test: ray._common.test_utils | def test_immediate_true_condition(self):
"""Test that function returns immediately when condition is already true."""
def always_true():
return True
wait_for_condition(always_true, timeout=5) | test | 0 | {"function_name": "test_immediate_true_condition", "class_name": "TestWaitForCondition", "qualname": "TestWaitForCondition.test_immediate_true_condition", "file_path": "python/ray/_common/tests/test_wait_for_condition.py", "repo_id": "ray-project/ray", "loc": 7, "tested_modules": ["ray._common.test_utils"], "has_docstring": true, "runnable_level": "plib_runnable"} |
ray-project/ray:python/ray/serve/tests/unit/test_grpc_replica_result.py:TestSeparateLoop.test_streaming_blocked | # Context:
import asyncio
import pytest
class FakegRPCUnaryCall: ...
class FakegRPCStreamCall: ...
def create_asyncio_event_loop_in_thread(): ...
class TestSameLoop: ...
class TestSeparateLoop:
async def make_fake_unary_request(self, data, loop: asyncio.AbstractEventLoop): ...
async def make_fake_streaming_request(self, data, loop: asyncio.AbstractEventLoop, on_separate_loop: bool, is_streaming: bool, event: threading.Event, error): ...
def test_unary_sync(self, create_asyncio_event_loop_in_thread): ...
async def test_unary_async(self, create_asyncio_event_loop_in_thread): ...
def test_streaming_sync(self, create_asyncio_event_loop_in_thread): ...
async def test_streaming_async(self, create_asyncio_event_loop_in_thread): ...
def test_unary_with_gen_sync(self, create_asyncio_event_loop_in_thread): ...
async def test_unary_with_gen_async(self, create_asyncio_event_loop_in_thread): ...
async def test_unary_with_gen_blocked(self, create_asyncio_event_loop_in_thread): ...
def test_unary_with_timeout(self, create_asyncio_event_loop_in_thread): ...
def test_unary_error_sync(self, create_asyncio_event_loop_in_thread): ...
async def test_unary_error_async(self, create_asyncio_event_loop_in_thread): ...
def test_streaming_error_sync(self, create_asyncio_event_loop_in_thread): ...
async def test_streaming_error_async(self, create_asyncio_event_loop_in_thread): ...
# Task:
Write a Python test method `test_streaming_blocked` in test class `TestSeparateLoop` to use threading event to block async generator, check everything works.
Module under test: ray, ray._common.test_utils, ray.serve._private.common | async def test_streaming_blocked(self, create_asyncio_event_loop_in_thread):
"""Use threading event to block async generator, check everything works"""
loop, event = create_asyncio_event_loop_in_thread
fut = asyncio.run_coroutine_threadsafe(
self.make_fake_streaming_request(
[1, 2, 3, 4], loop, on_separate_loop=True, event=event
),
loop=loop,
)
replica_result = fut.result()
async def fetch():
return [r async for r in replica_result]
t = asyncio.create_task(fetch())
with pytest.raises(asyncio.TimeoutError):
await asyncio.wait_for(asyncio.shield(t), 0.01)
event.set()
assert await t == [1, 2, 3, 4] | test | 0 | {"function_name": "test_streaming_blocked", "class_name": "TestSeparateLoop", "qualname": "TestSeparateLoop.test_streaming_blocked", "file_path": "python/ray/serve/tests/unit/test_grpc_replica_result.py", "repo_id": "ray-project/ray", "loc": 23, "tested_modules": ["ray", "ray._common.test_utils", "ray.serve._private.common", "ray.serve._private.replica_result", "ray.serve.generated"], "has_docstring": true, "runnable_level": "file_runnable"} |
ray-project/ray:python/ray/data/_internal/cluster_autoscaler/default_autoscaling_coordinator.py:handle_timeout_errors | # Context:
import functools
from typing import Callable, Dict, List, Optional
import ray
import inspect
class OngoingRequest: ...
class DefaultAutoscalingCoordinator(AutoscalingCoordinator): ...
class _AutoscalingCoordinatorActor: ...
def get_or_create_autoscaling_coordinator(): ...
# Task:
Write a Python function `handle_timeout_errors` to decorator to handle GetTimeoutError with consecutive failure tracking.
Parameters: failure_counter_attr: str, operation_name: str, requester_id_param: str, error_msg_suffix: Optional[str], on_error_return: Optional[Callable] | def handle_timeout_errors(
failure_counter_attr: str,
operation_name: str,
requester_id_param: str = "requester_id",
error_msg_suffix: Optional[str] = None,
on_error_return: Optional[Callable] = None,
):
"""Decorator to handle GetTimeoutError with consecutive failure tracking.
Args:
failure_counter_attr: Name of the instance attribute that tracks
consecutive failures.
operation_name: Name of the operation for error messages (e.g.,
"send resource request", "cancel resource request").
requester_id_param: Name of the parameter that contains the
requester_id.
error_msg_suffix: Optional suffix to append to the error message.
If None, uses a default message.
on_error_return: Optional callable that takes (self, requester_id)
and returns a value to return on error. If None, no value is
returned (method should return None).
Returns:
A decorator that wraps methods to handle timeout errors.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
# Extract requester_id from args or kwargs
requester_id = kwargs.get(requester_id_param)
if requester_id is None:
# Try to get from args by checking function signature
import inspect
sig = inspect.signature(func)
param_names = list(sig.parameters.keys())
if requester_id_param in param_names:
param_index = param_names.index(requester_id_param) - 1
if param_index < len(args):
requester_id = args[param_index]
failure_counter = getattr(self, failure_counter_attr)
try:
result = func(self, *args, **kwargs)
# Reset counter on success
setattr(self, failure_counter_attr, 0)
return result
except ray.exceptions.GetTimeoutError as exc:
failure_counter += 1
setattr(self, failure_counter_attr, failure_counter)
consecutive_msg = (
f" (consecutive failures: {failure_counter})"
if failure_counter > 1
else ""
)
# Build error message
base_msg = (
f"Failed to {operation_name} for {requester_id}.{consecutive_msg}"
)
if error_msg_suffix is not None:
msg = f"{base_msg} {error_msg_suffix}"
else:
msg = (
f"{base_msg}"
" If this only happens transiently during network"
" partition or CPU being overloaded, it's safe to"
" ignore this error."
" If this error persists, file a GitHub issue."
)
# Check max failures and raise if exceeded
if failure_counter >= self.MAX_CONSECUTIVE_FAILURES:
raise RuntimeError(
f"Failed to {operation_name} for {requester_id} "
f"after {failure_counter} consecutive failures."
) from exc
logger.warning(msg, exc_info=True)
# Return value on error if callback provided
if on_error_return is not None:
return on_error_return(self, requester_id)
return wrapper
return decorator | function_complex | 0 | {"cognitive_complexity": 23, "loc": 90, "code_loc": 48, "docstring_loc": 18, "function_name": "handle_timeout_errors", "class_name": null, "qualname": "handle_timeout_errors", "file_path": "python/ray/data/_internal/cluster_autoscaler/default_autoscaling_coordinator.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "file_runnable"} |
apache/airflow:providers/google/tests/unit/google/cloud/operators/test_cloud_logging_sink.py:TestCloudLoggingUpdateSinksOperator.test_update_sink_raises_not_found | # Context:
from unittest import mock
import pytest
from google.api_core.exceptions import AlreadyExists, GoogleAPICallError, InvalidArgument, NotFound
from airflow.providers.google.cloud.operators.cloud_logging_sink import (
CloudLoggingCreateSinkOperator,
CloudLoggingDeleteSinkOperator,
CloudLoggingListSinksOperator,
CloudLoggingUpdateSinkOperator,
)
def _assert_common_template_fields(template_fields): ...
class TestCloudLoggingCreateSinkOperator: ...
class TestCloudLoggingDeleteSinkOperator: ...
class TestCloudLoggingListSinksOperator: ...
class TestCloudLoggingUpdateSinksOperator:
def test_template_fields(self, sink_config, update_mask): ...
def test_missing_required_params(self): ...
def test_update_sink_success(self, hook_mock, sink_config, update_mask): ...
def test_update_sink_raises_generic_error(self, hook_mock, sink_config, update_mask): ...
def test_create_with_impersonation_chain(self, hook_mock, impersonation_chain): ...
def test_missing_rendered_field_raises(self): ...
def test_template_rendering(self, hook_mock, sink_config, update_mask): ...
def test_template_rendering_with_proto(self, hook_mock, sink_config, update_mask): ...
# Task:
Write a Python test method `test_update_sink_raises_not_found` in test class `TestCloudLoggingUpdateSinksOperator` to verify the behavior of `update_sink_raises_not_found`.
Module under test: __future__, datetime, google.api_core.exceptions | def test_update_sink_raises_not_found(self, hook_mock, sink_config, update_mask):
hook_instance = hook_mock.return_value
hook_instance.get_sink.side_effect = NotFound("not found")
operator = CloudLoggingUpdateSinkOperator(
task_id=TASK_ID,
sink_name=SINK_NAME,
sink_config=sink_config,
update_mask=update_mask,
project_id=PROJECT_ID,
)
with pytest.raises(NotFound, match="not found"):
operator.execute(context=mock.MagicMock())
hook_instance.get_sink.assert_called_once()
hook_instance.update_sink.assert_not_called() | test | 1 | {"function_name": "test_update_sink_raises_not_found", "class_name": "TestCloudLoggingUpdateSinksOperator", "qualname": "TestCloudLoggingUpdateSinksOperator.test_update_sink_raises_not_found", "file_path": "providers/google/tests/unit/google/cloud/operators/test_cloud_logging_sink.py", "repo_id": "apache/airflow", "loc": 18, "tested_modules": ["__future__", "datetime", "google.api_core.exceptions", "google.cloud.exceptions", "google.cloud.logging_v2.types"], "has_docstring": false, "runnable_level": "project_runnable"} |
huggingface/diffusers:tests/pipelines/qwenimage/test_qwenimage_img2img.py:QwenImageImg2ImgPipelineFastTests.test_inference | # Context:
class QwenImageImg2ImgPipelineFastTests(unittest.TestCase, PipelineTesterMixin):
pipeline_class = QwenImageImg2ImgPipeline
params = frozenset(["prompt", "image", "height", "width", "guidance_scale", "true_cfg_scale", "strength"])
batch_params = frozenset(["prompt", "image"])
image_params = frozenset(["image"])
image_latents_params = frozenset(["latents"])
required_optional_params = frozenset(
supports_dduf = False
test_xformers_attention = False
test_attention_slicing = True
test_layerwise_casting = True
test_group_offloading = True
def get_dummy_components(self): ...
def get_dummy_inputs(self, device, seed): ...
def test_inference_batch_single_identical(self): ...
def test_attention_slicing_forward_pass(self, test_max_difference, test_mean_pixel_difference, expected_max_diff): ...
def test_vae_tiling(self, expected_diff_max: float): ...
# Task:
Write a Python test method `test_inference` in test class `QwenImageImg2ImgPipelineFastTests` to verify the behavior of `inference`.
Module under test: transformers, diffusers, testing_utils | def test_inference(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe.to(device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
image = pipe(**inputs).images
generated_image = image[0]
self.assertEqual(generated_image.shape, (3, 32, 32)) | test | 1 | {"function_name": "test_inference", "class_name": "QwenImageImg2ImgPipelineFastTests", "qualname": "QwenImageImg2ImgPipelineFastTests.test_inference", "file_path": "tests/pipelines/qwenimage/test_qwenimage_img2img.py", "repo_id": "huggingface/diffusers", "loc": 12, "tested_modules": ["transformers", "diffusers", "testing_utils", "test_pipelines_common"], "has_docstring": false, "runnable_level": "class_runnable"} |
browser-use/browser-use:tests/ci/infrastructure/test_registry_validation.py:TestType1Pattern.test_type1_with_param_model | # Context:
from browser_use.agent.views import ActionResult
from browser_use.browser import BrowserSession
from browser_use.tools.registry.service import Registry
from browser_use.tools.registry.views import ActionModel as BaseActionModel
import inspect
from browser_use.tools.registry.views import ActionModel
class TestType2Pattern: ...
class TestValidationRules: ...
class TestDecoratedFunctionBehavior: ...
class TestParamsModelGeneration: ...
class TestParameterOrdering: ...
class TestParamsModelArgsAndKwargs: ...
class TestType1Pattern:
def test_type1_with_multiple_special_params(self): ...
# Task:
Write a Python test method `test_type1_with_param_model` in test class `TestType1Pattern` to type 1: action(params: Model, special_args...) should work.
Module under test: pydantic, browser_use.agent.views, browser_use.browser | def test_type1_with_param_model(self):
"""Type 1: action(params: Model, special_args...) should work"""
registry = Registry()
class ClickAction(BaseActionModel):
index: int
delay: float = 0.0
@registry.action('Click element', param_model=ClickAction)
async def click_element(params: ClickAction, browser_session: BrowserSession):
return ActionResult(extracted_content=f'Clicked {params.index}')
# Verify registration
assert 'click_element' in registry.registry.actions
action = registry.registry.actions['click_element']
assert action.param_model == ClickAction
# Verify decorated function signature (should be kwargs-only)
import inspect
sig = inspect.signature(click_element)
params = list(sig.parameters.values())
# Should have no positional-only or positional-or-keyword params
for param in params:
assert param.kind in (inspect.Parameter.KEYWORD_ONLY, inspect.Parameter.VAR_KEYWORD) | test | 0 | {"function_name": "test_type1_with_param_model", "class_name": "TestType1Pattern", "qualname": "TestType1Pattern.test_type1_with_param_model", "file_path": "tests/ci/infrastructure/test_registry_validation.py", "repo_id": "browser-use/browser-use", "loc": 26, "tested_modules": ["pydantic", "browser_use.agent.views", "browser_use.browser", "browser_use.tools.registry.service", "browser_use.tools.registry.views"], "has_docstring": true, "runnable_level": "project_runnable"} |
langflow-ai/langflow:src/backend/tests/unit/api/v2/test_workflow.py:TestWorkflowBackgroundQueueing.test_background_execution_queue_exception | # Context:
from unittest.mock import AsyncMock, MagicMock, patch
from uuid import UUID, uuid4
from httpx import AsyncClient
from langflow.services.database.models.flow.model import Flow
from lfx.services.deps import session_scope
class TestWorkflowDeveloperAPIProtection: ...
class TestWorkflowErrorHandling: ...
class TestWorkflowSyncExecution: ...
class TestWorkflowStatus: ...
class TestWorkflowStop: ...
class TestWorkflowBackgroundQueueing:
def mock_settings_dev_api_enabled(self): ...
async def test_background_execution_flow(self, client: AsyncClient, created_api_key, mock_settings_dev_api_enabled): ...
async def test_background_execution_invalid_flow(self, client: AsyncClient, created_api_key, mock_settings_dev_api_enabled): ...
async def test_sync_execution_error_handling(self, client: AsyncClient, created_api_key, mock_settings_dev_api_enabled): ...
# Task:
Write a Python test method `test_background_execution_queue_exception` in test class `TestWorkflowBackgroundQueueing` to test handling of exceptions during task queueing.
Module under test: datetime, uuid, httpx | async def test_background_execution_queue_exception(
self,
client: AsyncClient,
created_api_key,
mock_settings_dev_api_enabled, # noqa: ARG002
):
"""Test handling of exceptions during task queueing."""
flow_id = uuid4()
async with session_scope() as session:
flow = Flow(
id=flow_id,
name="Fail Flow",
data={"nodes": [], "edges": []},
user_id=created_api_key.user_id,
)
session.add(flow)
await session.flush()
try:
request_data = {"flow_id": str(flow_id), "background": True}
headers = {"x-api-key": created_api_key.api_key}
with patch("langflow.api.v2.workflow.get_task_service") as mock_get_task_service:
mock_task_service = MagicMock()
mock_task_service.fire_and_forget_task.side_effect = Exception("Queueing failed")
mock_get_task_service.return_value = mock_task_service
response = await client.post("api/v2/workflows", json=request_data, headers=headers)
assert response.status_code == 500
detail = response.json()["detail"]
message = detail["message"] if isinstance(detail, dict) else detail
assert "Queueing failed" in message
finally:
async with session_scope() as session:
flow = await session.get(Flow, flow_id)
if flow:
await session.delete(flow) | test | 1 | {"function_name": "test_background_execution_queue_exception", "class_name": "TestWorkflowBackgroundQueueing", "qualname": "TestWorkflowBackgroundQueueing.test_background_execution_queue_exception", "file_path": "src/backend/tests/unit/api/v2/test_workflow.py", "repo_id": "langflow-ai/langflow", "loc": 38, "tested_modules": ["datetime", "uuid", "httpx", "langflow.exceptions.api", "langflow.services.database.models.flow.model"], "has_docstring": true, "runnable_level": "project_runnable"} |
infiniflow/ragflow:tools/es-to-oceanbase-migration/src/es_ob_migration/migrator.py:ESToOceanBaseMigrator:class_doc | Write a class-level docstring for `ESToOceanBaseMigrator` which has methods: `__init__`, `migrate`, `_check_connections`, `_analyze_es_index`, `_migrate_data`. | RAGFlow-specific migration orchestrator.
This migrator is designed specifically for RAGFlow's data structure,
handling the fixed schema and vector embeddings correctly. | documentation | 1 | {"doc_type": "class", "class_name": "ESToOceanBaseMigrator", "file_path": "tools/es-to-oceanbase-migration/src/es_ob_migration/migrator.py", "repo_id": "infiniflow/ragflow", "char_length": 169, "methods": ["__init__", "migrate", "_check_connections", "_analyze_es_index", "_migrate_data", "get_schema_preview", "get_data_preview", "list_knowledge_bases"]} |
ray-project/ray:doc/source/serve/tutorials/model_composition_recsys/content/serve_recommendation_pipeline.py:ItemRankingModel.rank_items | # Context:
import asyncio
from typing import List, Dict
from ray import serve
class UserFeatureExtractor: ...
class RecommendationService: ...
class ItemRankingModel:
CANDIDATE_ITEMS = [f"item_{i}" for i in range(1000)]
def __init__(self):
# In production, this is your cloud storage path or model registry
# self.model = load_model("/models/ranking_model.pkl")
pass
def _score_items(self, user_features: Dict[str, float]) -> List[Dict[str, any]]: ...
# Task:
Write a Python async method `rank_items` for the class `ItemRankingModel` to rank candidate items for a batch of users.
Parameters: user_features_batch: List[Dict[str, float]]
Returns: List[List[Dict[str, any]]] | async def rank_items(
self,
user_features_batch: List[Dict[str, float]]
) -> List[List[Dict[str, any]]]:
"""Rank candidate items for a batch of users."""
# Simulate model inference time
await asyncio.sleep(0.05)
# In production, use vectorized batch inference:
# return self.model.batch_predict(user_features_batch, self.CANDIDATE_ITEMS)
return [self._score_items(features) for features in user_features_batch] | function_simple | 0 | {"cognitive_complexity": 0, "loc": 12, "code_loc": 2, "docstring_loc": 1, "function_name": "rank_items", "class_name": "ItemRankingModel", "qualname": "ItemRankingModel.rank_items", "file_path": "doc/source/serve/tutorials/model_composition_recsys/content/serve_recommendation_pipeline.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "class_runnable"} |
google/langextract:langextract/resolver.py:Resolver.extract_ordered_extractions | # Context:
from collections.abc import Iterator, Mapping, Sequence
import operator
from absl import logging
from langextract.core import data
from langextract.core import format_handler as fh
class AbstractResolver(abc.ABC): ...
class ResolverParsingError(exceptions.LangExtractError): ...
class WordAligner: ...
def _tokenize_with_lowercase(text: str, tokenizer_inst: tokenizer_lib.Tokenizer | None) -> Iterator[str]: ...
def _normalize_token(token: str) -> str: ...
class Resolver(AbstractResolver):
def __init__(
self,
format_handler: fh.FormatHandler | None = None,
extraction_index_suffix: str | None = None,
**kwargs, # Collect legacy parameters
):
"""Constructor.
Args:
format_handler: The format handler that knows how to parse output.
extraction_index_suffix: Suffix identifying index keys that determine the
ordering of extractions.
**kwargs: Legacy parameters (fence_output, format_type, etc.) for backward
compatibility. These will be used to create a FormatHandler if one is not
provided. Support for these parameters will be removed in v2.0.0.
"""
constraint = kwargs.pop("constraint", None)
extraction_attributes_suffix = kwargs.pop(
"extraction_attributes_suffix", None
)
if format_handler is None:
if kwargs or extraction_attributes_suffix is not None:
handler_kwargs = dict(kwargs)
if extraction_attributes_suffix is not None:
handler_kwargs["attribute_suffix"] = extraction_attributes_suffix
format_handler = fh.FormatHandler.from_kwargs(**handler_kwargs)
for param in [
"fence_output",
"format_type",
"strict_fences",
"require_extractions_key",
"attribute_suffix",
]:
kwargs.pop(param, None)
else:
format_handler = fh.FormatHandler()
if kwargs:
raise TypeError(
f"got an unexpected keyword argument '{list(kwargs.keys())[0]}'"
)
constraint = constraint or schema.Constraint()
super().__init__(
fence_output=format_handler.use_fences,
format_type=format_handler.format_type,
constraint=constraint,
)
self.format_handler = format_handler
self.extraction_index_suffix = extraction_index_suffix
self._constraint = constraint
def resolve(self, input_text: str, suppress_parse_errors: bool, **kwargs) -> Sequence[data.Extraction]: ...
def align(self, extractions: Sequence[data.Extraction], source_text: str, token_offset: int, char_offset: int | None, enable_fuzzy_alignment: bool, fuzzy_alignment_threshold: float, accept_match_lesser: bool, tokenizer_inst: tokenizer_lib.Tokenizer | None, **kwargs) -> Iterator[data.Extraction]: ...
def string_to_extraction_data(self, input_string: str) -> Sequence[Mapping[str, fh.ExtractionValueType]]: ...
# Task:
Write a Python method `extract_ordered_extractions` for the class `Resolver` to extracts and orders extraction data based on their associated indexes.
Parameters: extraction_data: Sequence[Mapping[str, fh.ExtractionValueType]]
Returns: Sequence[data.Extraction] | def extract_ordered_extractions(
self,
extraction_data: Sequence[Mapping[str, fh.ExtractionValueType]],
) -> Sequence[data.Extraction]:
"""Extracts and orders extraction data based on their associated indexes.
This function processes a list of dictionaries, each containing pairs of
extraction class keys and their corresponding values, along with optionally
associated index keys (identified by the index_suffix). It sorts these pairs
by their indices in ascending order and excludes pairs without an index key,
returning a list of lists of tuples (extraction_class: str, extraction_text:
str).
Args:
extraction_data: A list of dictionaries. Each dictionary contains pairs
of extraction class keys and their values, along with optional index
keys.
Returns:
Extractions sorted by the index attribute or by order of appearance. If
two
extractions have the same index, their group order dictates the sorting
order.
Raises:
ValueError: If the extraction text is not a string or integer, or if the
index is not an integer.
"""
logging.debug("Starting to extract and order extractions from data.")
if not extraction_data:
logging.debug("Received empty extraction data.")
processed_extractions = []
extraction_index = 0
index_suffix = self.extraction_index_suffix
attributes_suffix = self.format_handler.attribute_suffix
for group_index, group in enumerate(extraction_data):
for extraction_class, extraction_value in group.items():
if index_suffix and extraction_class.endswith(index_suffix):
if not isinstance(extraction_value, int):
logging.error(
"Index must be an integer. Found: %s",
type(extraction_value),
)
raise ValueError("Index must be an integer.")
continue
if attributes_suffix and extraction_class.endswith(attributes_suffix):
if not isinstance(extraction_value, (dict, type(None))):
logging.error(
"Attributes must be a dict or None. Found: %s",
type(extraction_value),
)
raise ValueError(
"Extraction value must be a dict or None for attributes."
)
continue
if not isinstance(extraction_value, (str, int, float)):
logging.error(
"Extraction text must be a string, integer, or float. Found: %s",
type(extraction_value),
)
raise ValueError(
"Extraction text must be a string, integer, or float."
)
if not isinstance(extraction_value, str):
extraction_value = str(extraction_value)
if index_suffix:
index_key = extraction_class + index_suffix
extraction_index = group.get(index_key, None)
if extraction_index is None:
logging.debug(
"No index value for %s. Skipping extraction.", extraction_class
)
continue
else:
extraction_index += 1
attributes = None
if attributes_suffix:
attributes_key = extraction_class + attributes_suffix
attributes = group.get(attributes_key, None)
processed_extractions.append(
data.Extraction(
extraction_class=extraction_class,
extraction_text=extraction_value,
extraction_index=extraction_index,
group_index=group_index,
attributes=attributes,
)
)
processed_extractions.sort(key=operator.attrgetter("extraction_index"))
logging.debug("Completed extraction and ordering of extractions.")
return processed_extractions | function_complex | 1 | {"cognitive_complexity": 37, "loc": 100, "code_loc": 63, "docstring_loc": 23, "function_name": "extract_ordered_extractions", "class_name": "Resolver", "qualname": "Resolver.extract_ordered_extractions", "file_path": "langextract/resolver.py", "repo_id": "google/langextract", "has_docstring": true, "runnable_level": "project_runnable"} |
crewAIInc/crewAI:lib/crewai/src/crewai/hooks/wrappers.py:AfterToolCallHookMethod:class_doc | Write a class-level docstring for `AfterToolCallHookMethod` which has methods: `__init__`, `__call__`, `__get__`. | Wrapper for methods marked as after_tool_call hooks within @CrewBase classes. | documentation | 0 | {"doc_type": "class", "class_name": "AfterToolCallHookMethod", "file_path": "lib/crewai/src/crewai/hooks/wrappers.py", "repo_id": "crewAIInc/crewAI", "char_length": 77, "methods": ["__init__", "__call__", "__get__"]} |
browser-use/browser-use:examples/features/add_image_context.py:module_doc | Write a module-level docstring for the Python module `add_image_context` which contains function `image_to_base64`, function `create_sample_images`. | Show how to use sample_images to add image context for your task | documentation | 0 | {"doc_type": "module", "module_name": "add_image_context", "file_path": "examples/features/add_image_context.py", "repo_id": "browser-use/browser-use", "char_length": 64} |
unclecode/crawl4ai:deploy/docker/tests/test_security_fixes.py:TestURLValidation.test_raw_url_allowed_when_enabled | # Context:
class TestHookBuiltins(unittest.TestCase): ...
class TestHooksEnabled(unittest.TestCase): ...
class TestURLValidation(unittest.TestCase):
def setUp(self): ...
def validate_url_scheme(self, url: str, allow_raw: bool) -> bool: ...
def test_file_url_blocked(self): ...
def test_file_url_blocked_windows(self): ...
def test_javascript_url_blocked(self): ...
def test_data_url_blocked(self): ...
def test_ftp_url_blocked(self): ...
def test_empty_url_blocked(self): ...
def test_relative_url_blocked(self): ...
def test_http_url_allowed(self): ...
def test_https_url_allowed(self): ...
def test_raw_url_blocked_when_disabled(self): ...
# Task:
Write a Python test method `test_raw_url_allowed_when_enabled` in test class `TestURLValidation` to raw: URLs must be allowed when allow_raw=True. | def test_raw_url_allowed_when_enabled(self):
"""raw: URLs must be allowed when allow_raw=True."""
self.assertTrue(self.validate_url_scheme("raw:<html></html>", allow_raw=True))
self.assertTrue(self.validate_url_scheme("raw://<html></html>", allow_raw=True)) | test | 1 | {"function_name": "test_raw_url_allowed_when_enabled", "class_name": "TestURLValidation", "qualname": "TestURLValidation.test_raw_url_allowed_when_enabled", "file_path": "deploy/docker/tests/test_security_fixes.py", "repo_id": "unclecode/crawl4ai", "loc": 4, "tested_modules": [], "has_docstring": true, "runnable_level": "class_runnable"} |
fastapi/fastapi:tests/test_request_params/test_header/test_optional_list.py:test_optional_list_alias_and_validation_alias_schema | # Context:
import pytest
from inline_snapshot import snapshot
async def read_optional_list_str(p: Annotated[list[str] | None, Header()]): ...
class HeaderModelOptionalListStr(BaseModel): ...
async def read_model_optional_list_str(p: Annotated[HeaderModelOptionalListStr, Header()]): ...
def test_optional_list_str_schema(path: str): ...
def test_optional_list_str_missing(path: str): ...
def test_optional_list_str(path: str): ...
async def read_optional_list_alias(p: Annotated[list[str] | None, Header(alias='p_alias')]): ...
class HeaderModelOptionalListAlias(BaseModel): ...
async def read_model_optional_list_alias(p: Annotated[HeaderModelOptionalListAlias, Header()]): ...
def test_optional_list_str_alias_schema(path: str): ...
def test_optional_list_alias_missing(path: str): ...
def test_optional_list_alias_by_name(path: str): ...
def test_optional_list_alias_by_alias(path: str): ...
def read_optional_list_validation_alias(p: Annotated[list[str] | None, Header(validation_alias='p_val_alias')]): ...
class HeaderModelOptionalListValidationAlias(BaseModel): ...
def read_model_optional_list_validation_alias(p: Annotated[HeaderModelOptionalListValidationAlias, Header()]): ...
def test_optional_list_validation_alias_schema(path: str): ...
def test_optional_list_validation_alias_missing(path: str): ...
def test_optional_list_validation_alias_by_name(path: str): ...
def test_optional_list_validation_alias_by_validation_alias(path: str): ...
def read_optional_list_alias_and_validation_alias(p: Annotated[list[str] | None, Header(alias='p_alias', validation_alias='p_val_alias')]): ...
class HeaderModelOptionalListAliasAndValidationAlias(BaseModel): ...
def read_model_optional_list_alias_and_validation_alias(p: Annotated[HeaderModelOptionalListAliasAndValidationAlias, Header()]): ...
def test_optional_list_alias_and_validation_alias_missing(path: str): ...
def test_optional_list_alias_and_validation_alias_by_name(path: str): ...
def test_optional_list_alias_and_validation_alias_by_alias(path: str): ...
def test_optional_list_alias_and_validation_alias_by_validation_alias(path: str): ...
# Task:
Write a Python test function `test_optional_list_alias_and_validation_alias_schema` to verify the behavior of `optional_list_alias_and_validation_alias_schema`.
Module under test: typing, fastapi, fastapi.testclient | def test_optional_list_alias_and_validation_alias_schema(path: str):
assert app.openapi()["paths"][path]["get"]["parameters"] == snapshot(
[
{
"required": False,
"schema": {
"anyOf": [
{"items": {"type": "string"}, "type": "array"},
{"type": "null"},
],
"title": "P Val Alias",
},
"name": "p_val_alias",
"in": "header",
}
]
) | test | 1 | {"function_name": "test_optional_list_alias_and_validation_alias_schema", "class_name": null, "qualname": "test_optional_list_alias_and_validation_alias_schema", "file_path": "tests/test_request_params/test_header/test_optional_list.py", "repo_id": "fastapi/fastapi", "loc": 17, "tested_modules": ["typing", "fastapi", "fastapi.testclient", "inline_snapshot", "pydantic"], "has_docstring": false, "runnable_level": "project_runnable"} |
binary-husky/gpt_academic:crazy_functions/review_fns/paper_processor/paper_llm_ranker.py:PaperLLMRanker.rank_papers | # Context:
from typing import List, Dict
from crazy_functions.review_fns.data_sources.base_source import PaperMetadata
from crazy_functions.review_fns.query_analyzer import SearchCriteria
class PaperLLMRanker:
def __init__(self, llm_kwargs: Dict = None):
self.ranker = BGELLMRanker(llm_kwargs=llm_kwargs)
self.journal_metrics = JournalMetrics()
def _update_paper_metrics(self, papers: List[PaperMetadata]) -> None: ...
def _get_year_as_int(self, paper) -> int: ...
def _build_enhanced_query(self, query: str, criteria: SearchCriteria) -> str: ...
def _build_enhanced_document(self, paper: PaperMetadata, criteria: SearchCriteria) -> str: ...
def _select_papers_strategically(self, papers: List[PaperMetadata], search_criteria: SearchCriteria, max_papers: int) -> List[PaperMetadata]: ...
# Task:
Write a Python method `rank_papers` for the class `PaperLLMRanker` to 对论文进行重排序.
Parameters: query: str, papers: List[PaperMetadata], search_criteria: SearchCriteria, top_k: int, use_rerank: bool, pre_filter_ratio: float, max_papers: int
Returns: List[PaperMetadata] | def rank_papers(
self,
query: str,
papers: List[PaperMetadata],
search_criteria: SearchCriteria = None,
top_k: int = 40,
use_rerank: bool = False,
pre_filter_ratio: float = 0.5,
max_papers: int = 150
) -> List[PaperMetadata]:
"""对论文进行重排序"""
initial_count = len(papers) if papers else 0
stats = {'initial': initial_count}
if not papers or not query:
return []
# 更新论文的期刊指标
self._update_paper_metrics(papers)
# 构建增强查询
# enhanced_query = self._build_enhanced_query(query, search_criteria) if search_criteria else query
enhanced_query = query
# 首先过滤不满足年份要求的论文
if search_criteria and search_criteria.start_year and search_criteria.end_year:
before_year_filter = len(papers)
filtered_papers = []
start_year = int(search_criteria.start_year)
end_year = int(search_criteria.end_year)
for paper in papers:
paper_year = self._get_year_as_int(paper)
if paper_year == 0 or start_year <= paper_year <= end_year:
filtered_papers.append(paper)
papers = filtered_papers
stats['after_year_filter'] = len(papers)
if not papers: # 如果过滤后没有论文,直接返回空列表
return []
# 新增:对少量论文的快速处理
SMALL_PAPER_THRESHOLD = 10 # 定义"少量"论文的阈值
if len(papers) <= SMALL_PAPER_THRESHOLD:
# 对于少量论文,直接根据查询类型进行简单排序
if search_criteria:
if search_criteria.query_type == "latest":
papers.sort(key=lambda x: getattr(x, 'year', 0) or 0, reverse=True)
elif search_criteria.query_type == "recommend":
papers.sort(key=lambda x: getattr(x, 'citations', 0) or 0, reverse=True)
elif search_criteria.query_type == "review":
papers.sort(key=lambda x:
1 if any(keyword in (getattr(x, 'title', '') or '').lower() or
keyword in (getattr(x, 'abstract', '') or '').lower()
for keyword in ['review', 'survey', 'overview'])
else 0,
reverse=True
)
return papers[:top_k]
# 1. 优先处理最新的论文
if search_criteria and search_criteria.query_type == "latest":
papers = sorted(papers, key=lambda x: self._get_year_as_int(x), reverse=True)
# 2. 如果是综述类查询,优先处理可能的综述论文
if search_criteria and search_criteria.query_type == "review":
papers = sorted(papers, key=lambda x:
1 if any(keyword in (getattr(x, 'title', '') or '').lower() or
keyword in (getattr(x, 'abstract', '') or '').lower()
for keyword in ['review', 'survey', 'overview'])
else 0,
reverse=True
)
# 3. 如果论文数量超过限制,采用分层采样而不是完全随机
if len(papers) > max_papers:
before_max_limit = len(papers)
papers = self._select_papers_strategically(papers, search_criteria, max_papers)
stats['after_max_limit'] = len(papers)
try:
paper_texts = []
valid_papers = [] # 4. 跟踪有效论文
for paper in papers:
if paper is None:
continue
# 5. 预先过滤明显不相关的论文
if search_criteria and search_criteria.start_year:
if getattr(paper, 'year', 0) and self._get_year_as_int(paper.year) < search_criteria.start_year:
continue
doc = self._build_enhanced_document(paper, search_criteria)
paper_texts.append(doc)
valid_papers.append(paper) # 记录对应的论文
stats['after_valid_check'] = len(valid_papers)
if not paper_texts:
return []
# 使用LLM判断相关性
relevance_results = self.ranker.batch_check_relevance(
query=enhanced_query, # 使用增强的查询
paper_texts=paper_texts,
show_progress=True
)
# 6. 优化相关论文的选择策略
relevant_papers = []
for paper, is_relevant in zip(valid_papers, relevance_results):
if is_relevant:
relevant_papers.append(paper)
stats['after_llm_filter'] = len(relevant_papers)
# 打印统计信息
print(f"论文筛选统计: 初始数量={stats['initial']}, " +
f"年份过滤后={stats.get('after_year_filter', stats['initial'])}, " +
f"数量限制后={stats.get('after_max_limit', stats.get('after_year_filter', stats['initial']))}, " +
f"有效性检查后={stats['after_valid_check']}, " +
f"LLM筛选后={stats['after_llm_filter']}")
# 7. 改进回退策略
if len(relevant_papers) < min(5, len(papers)):
# 如果相关论文太少,返回按引用量排序的论文
return sorted(
papers[:top_k],
key=lambda x: getattr(x, 'citations', 0) or 0,
reverse=True
)
# 8. 对最终结果进行排序
if search_criteria:
if search_criteria.query_type == "latest":
# 最新论文优先,但同年份按IF排序
relevant_papers.sort(key=lambda x: (
self._get_year_as_int(x),
getattr(x, 'if_factor', 0) or 0
), reverse=True)
elif search_criteria.query_type == "recommend":
# IF指数优先,其次是引用量
relevant_papers.sort(key=lambda x: (
getattr(x, 'if_factor', 0) or 0,
getattr(x, 'citations', 0) or 0
), reverse=True)
else:
# 默认按IF指数排序
relevant_papers.sort(key=lambda x: getattr(x, 'if_factor', 0) or 0, reverse=True)
return relevant_papers[:top_k]
except Exception as e:
print(f"论文排序时出错: {str(e)}")
# 9. 改进错误处理的回退策略
try:
return sorted(
papers[:top_k],
key=lambda x: getattr(x, 'citations', 0) or 0,
reverse=True
)
except:
return papers[:top_k] if papers else [] | function_complex | 1 | {"cognitive_complexity": 60, "loc": 163, "code_loc": 109, "docstring_loc": 1, "function_name": "rank_papers", "class_name": "PaperLLMRanker", "qualname": "PaperLLMRanker.rank_papers", "file_path": "crazy_functions/review_fns/paper_processor/paper_llm_ranker.py", "repo_id": "binary-husky/gpt_academic", "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/data/tests/datasource/test_turbopuffer_datasink.py:TestSerialization.test_preserves_namespace_column_configuration | # Context:
import pickle
def mock_turbopuffer_module(monkeypatch): ...
def sink(): ...
def mock_client(): ...
def sample_table(): ...
def make_sink(**kwargs) -> TurbopufferDatasink: ...
class TestConstructorValidation: ...
class TestClientInitialization: ...
class TestArrowTablePreparation: ...
class TestSingleNamespaceBatching: ...
class TestTransformToTurbopufferFormat: ...
class TestRetryLogic: ...
class TestWriteOrchestration: ...
class TestStreamingBehavior: ...
class TestMultiNamespaceWrites: ...
class TestSerialization:
def test_preserves_configuration(self, sink, mock_turbopuffer_module): ...
def test_preserves_base_url_configuration(self, mock_turbopuffer_module): ...
# Task:
Write a Python test method `test_preserves_namespace_column_configuration` in test class `TestSerialization` to namespace_column configuration survives pickle round-trip.
Module under test: typing, packaging.version, ray.data._internal.datasource.turbopuffer_datasink | def test_preserves_namespace_column_configuration(self, mock_turbopuffer_module):
"""namespace_column configuration survives pickle round-trip."""
sink = make_sink(namespace=None, namespace_column="tenant")
pickled = pickle.dumps(sink)
unpickled = pickle.loads(pickled)
assert unpickled.namespace is None
assert unpickled.namespace_column == "tenant"
assert unpickled._client is None | test | 0 | {"function_name": "test_preserves_namespace_column_configuration", "class_name": "TestSerialization", "qualname": "TestSerialization.test_preserves_namespace_column_configuration", "file_path": "python/ray/data/tests/datasource/test_turbopuffer_datasink.py", "repo_id": "ray-project/ray", "loc": 9, "tested_modules": ["typing", "packaging.version", "ray.data._internal.datasource.turbopuffer_datasink", "ray.data._internal.utils.arrow_utils"], "has_docstring": true, "runnable_level": "file_runnable"} |
langflow-ai/langflow:src/backend/tests/unit/test_build_component_index.py:TestBuildComponentIndexScript.test_build_script_creates_valid_structure | # Context:
from pathlib import Path
from unittest.mock import patch
import pytest
import sys
class TestBuildComponentIndexScript:
def test_build_script_minifies_json(self, tmp_path): ...
def test_build_script_sha256_integrity(self): ...
def test_build_script_handles_import_errors(self): ...
# Task:
Write a Python test method `test_build_script_creates_valid_structure` in test class `TestBuildComponentIndexScript` to test that the build script creates a valid index structure.
Module under test: pathlib | def test_build_script_creates_valid_structure(self):
"""Test that the build script creates a valid index structure."""
import importlib.util
import sys
# Get path to build script
script_path = Path(__file__).parent.parent.parent.parent / "scripts" / "build_component_index.py"
if not script_path.exists():
pytest.skip("build_component_index.py script not found")
# Load the module
spec = importlib.util.spec_from_file_location("build_component_index", script_path)
build_module = importlib.util.module_from_spec(spec)
sys.modules["build_component_index"] = build_module
with patch("asyncio.run") as mock_run:
# Mock component data
mock_run.return_value = {
"components": {
"TestCategory": {
"TestComponent": {
"display_name": "Test Component",
"description": "A test component",
"template": {"code": {"type": "code"}},
}
}
}
}
spec.loader.exec_module(build_module)
index = build_module.build_component_index()
assert index is not None
assert "version" in index
assert "entries" in index
assert "sha256" in index
assert isinstance(index["entries"], list) | test | 1 | {"function_name": "test_build_script_creates_valid_structure", "class_name": "TestBuildComponentIndexScript", "qualname": "TestBuildComponentIndexScript.test_build_script_creates_valid_structure", "file_path": "src/backend/tests/unit/test_build_component_index.py", "repo_id": "langflow-ai/langflow", "loc": 38, "tested_modules": ["pathlib"], "has_docstring": true, "runnable_level": "file_runnable"} |
ray-project/ray:python/ray/train/v2/tests/test_config.py:test_scaling_config_validation | # Context:
import pytest
from ray.train import RunConfig, ScalingConfig
def test_scaling_config_accelerator_type(): ...
def test_scaling_config_tpu_min_workers_multiple(): ...
def test_storage_filesystem_repr(): ...
def test_scaling_config_default_workers(): ...
# Task:
Write a Python test function `test_scaling_config_validation` to verify the behavior of `scaling_config_validation`.
Module under test: ray.train | def test_scaling_config_validation():
assert ScalingConfig(
num_workers=2, use_gpu=True, resources_per_worker={"CPU": 1}
).total_resources == {"CPU": 2, "GPU": 2}
with pytest.raises(ValueError, match="`use_gpu` is False but `GPU` was found in"):
ScalingConfig(num_workers=2, use_gpu=False, resources_per_worker={"GPU": 1})
with pytest.raises(ValueError, match="Cannot specify both"):
ScalingConfig(num_workers=2, use_gpu=True, use_tpu=True)
with pytest.raises(
ValueError,
match=(
"If `label_selector` is a list, it must be the same length as "
"`max_workers`"
),
):
ScalingConfig(num_workers=2, label_selector=[{"subcluster": "my_subcluster"}])
with pytest.raises(
ValueError,
match=(
"If `label_selector` is a list, it must be the same length as "
"`max_workers`"
),
):
ScalingConfig(
num_workers=(2, 3),
label_selector=[{"subcluster": "a"}, {"subcluster": "b"}],
) | test | 0 | {"function_name": "test_scaling_config_validation", "class_name": null, "qualname": "test_scaling_config_validation", "file_path": "python/ray/train/v2/tests/test_config.py", "repo_id": "ray-project/ray", "loc": 30, "tested_modules": ["ray.train"], "has_docstring": false, "runnable_level": "plib_runnable"} |
vllm-project/vllm:tests/distributed/test_weight_transfer.py:TestIPCWeightTransferUpdateInfoValidation.test_valid_update_info_from_pickled | # Context:
import base64
import pickle
import pytest
import torch
from torch.multiprocessing.reductions import reduce_tensor
from vllm.distributed.weight_transfer.ipc_engine import (
IPCWeightTransferEngine,
IPCWeightTransferInitInfo,
IPCWeightTransferUpdateInfo,
)
def create_mock_parallel_config(rank: int, world_size: int, dp_rank: int) -> ParallelConfig: ...
class TestNCCLWeightTransferUpdateInfoValidation: ...
class TestNCCLEngineParsing: ...
class TestEngineRegistry: ...
def test_nccl_receive_weights_without_init_raises(): ...
def trainer_broadcast_tensor(master_address: str, master_port: int, world_size: int, tensor_shape: list[int], tensor_dtype: str) -> bool: ...
def inference_receive_tensor(master_address: str, master_port: int, world_size: int, tensor_shape: list[int], tensor_dtype: str) -> dict: ...
def test_nccl_weight_transfer_between_processes(): ...
class TestIPCEngineParsing: ...
def get_physical_gpu_id(device_index: int) -> str: ...
class TrainerActor: ...
def inference_receive_ipc_tensor(ipc_handle_dict: dict, mode: str) -> dict: ...
def test_ipc_weight_transfer_between_processes(mode: str): ...
def test_ipc_receive_weights_missing_gpu_uuid_raises(): ...
class TestIPCWeightTransferUpdateInfoValidation:
def test_valid_update_info(self): ...
def test_mismatched_dtype_names_raises(self): ...
def test_mismatched_shapes_raises(self): ...
def test_mismatched_ipc_handles_raises(self): ...
def test_both_handles_and_pickled_raises(self): ...
def test_neither_handles_nor_pickled_raises(self): ...
def test_empty_lists_valid(self): ...
# Task:
Write a Python test method `test_valid_update_info_from_pickled` in test class `TestIPCWeightTransferUpdateInfoValidation` to test creating IPCWeightTransferUpdateInfo from pickled handles.
Module under test: torch.multiprocessing.reductions, vllm.config.parallel, vllm.config.weight_transfer | def test_valid_update_info_from_pickled(self):
"""Test creating IPCWeightTransferUpdateInfo from pickled handles."""
if torch.cuda.device_count() < 1:
pytest.skip("Need at least 1 GPU for this test")
dummy_tensor = torch.ones(10, 10, device="cuda:0")
ipc_handle = reduce_tensor(dummy_tensor)
gpu_uuid = str(torch.cuda.get_device_properties(0).uuid)
ipc_handles = [{gpu_uuid: ipc_handle}]
pickled = base64.b64encode(pickle.dumps(ipc_handles)).decode("utf-8")
info = IPCWeightTransferUpdateInfo(
names=["layer.weight"],
dtype_names=["float32"],
shapes=[[10, 10]],
ipc_handles_pickled=pickled,
)
assert info.ipc_handles == ipc_handles
assert info.ipc_handles_pickled is None | test | 1 | {"function_name": "test_valid_update_info_from_pickled", "class_name": "TestIPCWeightTransferUpdateInfoValidation", "qualname": "TestIPCWeightTransferUpdateInfoValidation.test_valid_update_info_from_pickled", "file_path": "tests/distributed/test_weight_transfer.py", "repo_id": "vllm-project/vllm", "loc": 20, "tested_modules": ["torch.multiprocessing.reductions", "vllm.config.parallel", "vllm.config.weight_transfer", "vllm.distributed.weight_transfer", "vllm.distributed.weight_transfer.ipc_engine"], "has_docstring": true, "runnable_level": "project_runnable"} |
Comfy-Org/ComfyUI:comfy_api/latest/_ui.py:ImageSaveHelper._create_animated_png_metadata | # Context:
import json
from PIL.PngImagePlugin import PngInfo
from comfy.cli_args import args
from ._io import ComfyNode, FolderType, Image, _UIOutput
class SavedResult(dict): ...
class SavedImages(_UIOutput): ...
class SavedAudios(_UIOutput): ...
def _get_directory_by_folder_type(folder_type: FolderType) -> str: ...
class AudioSaveHelper: ...
class PreviewImage(_UIOutput): ...
class PreviewMask(PreviewImage): ...
class PreviewAudio(_UIOutput): ...
class PreviewVideo(_UIOutput): ...
class PreviewUI3D(_UIOutput): ...
class PreviewText(_UIOutput): ...
class ImageSaveHelper:
def _convert_tensor_to_pil(image_tensor: torch.Tensor) -> PILImage.Image: ...
def _create_png_metadata(cls: type[ComfyNode] | None) -> PngInfo | None: ...
def _create_webp_metadata(pil_image: PILImage.Image, cls: type[ComfyNode] | None) -> PILImage.Exif: ...
def save_images(images, filename_prefix: str, folder_type: FolderType, cls: type[ComfyNode] | None, compress_level) -> list[SavedResult]: ...
def get_save_images_ui(images, filename_prefix: str, cls: type[ComfyNode] | None, compress_level) -> SavedImages: ...
def save_animated_png(images, filename_prefix: str, folder_type: FolderType, cls: type[ComfyNode] | None, fps: float, compress_level: int) -> SavedResult: ...
def get_save_animated_png_ui(images, filename_prefix: str, cls: type[ComfyNode] | None, fps: float, compress_level: int) -> SavedImages: ...
def save_animated_webp(images, filename_prefix: str, folder_type: FolderType, cls: type[ComfyNode] | None, fps: float, lossless: bool, quality: int, method: int) -> SavedResult: ...
def get_save_animated_webp_ui(images, filename_prefix: str, cls: type[ComfyNode] | None, fps: float, lossless: bool, quality: int, method: int) -> SavedImages: ...
# Task:
Write a Python method `_create_animated_png_metadata` for the class `ImageSaveHelper` to create a PngInfo object with prompt and extra_pnginfo for animated PNGs (APNG).
Returns: PngInfo | None | def _create_animated_png_metadata(cls: type[ComfyNode] | None) -> PngInfo | None:
"""Creates a PngInfo object with prompt and extra_pnginfo for animated PNGs (APNG)."""
if args.disable_metadata or cls is None or not cls.hidden:
return None
metadata = PngInfo()
if cls.hidden.prompt:
metadata.add(
b"comf",
"prompt".encode("latin-1", "strict")
+ b"\0"
+ json.dumps(cls.hidden.prompt).encode("latin-1", "strict"),
after_idat=True,
)
if cls.hidden.extra_pnginfo:
for x in cls.hidden.extra_pnginfo:
metadata.add(
b"comf",
x.encode("latin-1", "strict")
+ b"\0"
+ json.dumps(cls.hidden.extra_pnginfo[x]).encode("latin-1", "strict"),
after_idat=True,
)
return metadata | function_complex | 1 | {"cognitive_complexity": 6, "loc": 23, "code_loc": 21, "docstring_loc": 1, "function_name": "_create_animated_png_metadata", "class_name": "ImageSaveHelper", "qualname": "ImageSaveHelper._create_animated_png_metadata", "file_path": "comfy_api/latest/_ui.py", "repo_id": "Comfy-Org/ComfyUI", "has_docstring": true, "runnable_level": "project_runnable"} |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-azurepostgresql/llama_index/vector_stores/azure_postgres/common/_connection.py:check_connection | # Context:
import time
from psycopg import Connection, sql
from psycopg.rows import dict_row
from ._shared import (
TOKEN_CREDENTIAL_SCOPE,
BaseConnectionInfo,
BasicAuth,
Extension,
get_username_password,
)
class ConnectionInfo(BaseConnectionInfo): ...
def create_extensions(required_extensions: list[Extension]): ...
class AzurePGConnectionPool(ConnectionPool): ...
# Task:
Write a Python function `check_connection` to check if the connection to Azure Database for PostgreSQL is valid and required extensions are installed.
Parameters: required_extensions: list[Extension] | def check_connection(conn: Connection, /, required_extensions: list[Extension] = []):
"""Check if the connection to Azure Database for PostgreSQL is valid and required extensions are installed.
:param conn: Connection to the Azure Database for PostgreSQL.
:type conn: Connection
:param required_extensions: List of required extensions to check if they are installed.
:type required_extensions: list[Extension]
:raises RuntimeError: If the connection check fails or required extensions are not installed.
"""
with conn.cursor(row_factory=dict_row) as cursor:
_logger.debug("checking connection")
t_start = time.perf_counter()
cursor.execute("select 1")
result = cursor.fetchone()
t_elapsed = time.perf_counter() - t_start
assert result is not None, "Connection check failed: no result returned."
_logger.debug(
"connection check successful. elapsed time: %.3f ms", t_elapsed * 1000
)
for ext in required_extensions:
ext_name = ext.ext_name
ext_version = ext.ext_version
schema_name = ext.schema_name
cursor.execute(
sql.SQL(
"""
select extname as ext_name, extversion as ext_version,
n.nspname as schema_name
from pg_extension e
left join pg_namespace n on e.extnamespace = n.oid
where extname = %(ext_name)s
"""
),
{"ext_name": ext_name},
)
resultset = cursor.fetchone()
if resultset is None:
raise RuntimeError(f"Required extension '{ext_name}' is not installed.")
if ext_version is not None and resultset["ext_version"] != ext_version:
raise RuntimeError(
f"Required extension '{ext_name}' version mismatch: "
f"expected {ext_version}, got {resultset['ext_version']}."
)
if schema_name is not None and resultset["schema_name"] != schema_name:
raise RuntimeError(
f"Required extension '{ext_name}' is not installed in the expected schema: "
f"expected {schema_name}, got {resultset['schema_name']}."
)
_logger.debug(
"required extension '%s' is installed (version: %s, schema: %s)",
resultset["ext_name"],
resultset["ext_version"],
resultset["schema_name"],
) | function_complex | 1 | {"cognitive_complexity": 13, "loc": 55, "code_loc": 45, "docstring_loc": 8, "function_name": "check_connection", "class_name": null, "qualname": "check_connection", "file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-azurepostgresql/llama_index/vector_stores/azure_postgres/common/_connection.py", "repo_id": "run-llama/llama_index", "has_docstring": true, "runnable_level": "project_runnable"} |
github/spec-kit:src/specify_cli/extensions.py:version_satisfies | # Context:
from packaging import version as pkg_version
from packaging.specifiers import SpecifierSet, InvalidSpecifier
class ExtensionError(Exception): ...
class ValidationError(ExtensionError): ...
class CompatibilityError(ExtensionError): ...
class ExtensionManifest: ...
class ExtensionRegistry: ...
class ExtensionManager: ...
class CommandRegistrar: ...
class ExtensionCatalog: ...
class ConfigManager: ...
class HookExecutor: ...
# Task:
Write a Python function `version_satisfies` to check if current version satisfies required version specifier.
Parameters: current: str, required: str
Returns: bool | def version_satisfies(current: str, required: str) -> bool:
"""Check if current version satisfies required version specifier.
Args:
current: Current version (e.g., "0.1.5")
required: Required version specifier (e.g., ">=0.1.0,<2.0.0")
Returns:
True if version satisfies requirement
"""
try:
current_ver = pkg_version.Version(current)
specifier = SpecifierSet(required)
return current_ver in specifier
except (pkg_version.InvalidVersion, InvalidSpecifier):
return False | function_simple | 0 | {"cognitive_complexity": 1, "loc": 16, "code_loc": 6, "docstring_loc": 9, "function_name": "version_satisfies", "class_name": null, "qualname": "version_satisfies", "file_path": "src/specify_cli/extensions.py", "repo_id": "github/spec-kit", "has_docstring": true, "runnable_level": "project_runnable"} |
streamlit/streamlit:lib/tests/streamlit/web/server/starlette/starlette_websocket_test.py:TestStarletteSessionClientClientContext:class_doc | Write a class-level docstring for `TestStarletteSessionClientClientContext` which has methods: `test_client_context_returns_starlette_context`. | Tests for client_context property on StarletteSessionClient. | documentation | 1 | {"doc_type": "class", "class_name": "TestStarletteSessionClientClientContext", "file_path": "lib/tests/streamlit/web/server/starlette/starlette_websocket_test.py", "repo_id": "streamlit/streamlit", "char_length": 60, "methods": ["test_client_context_returns_starlette_context"]} |
ray-project/ray:python/ray/data/stats.py:DatasetSummary._extract_column_from_table | # Context:
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
import pyarrow as pa
class _DtypeAggregators: ...
def _numerical_aggregators(column: str) -> List[AggregateFnV2]: ...
def _temporal_aggregators(column: str) -> List[AggregateFnV2]: ...
def _basic_aggregators(column: str) -> List[AggregateFnV2]: ...
def _default_dtype_aggregators() -> Dict[Union['DataType', 'TypeCategory'], Callable[[str], List[AggregateFnV2]]]: ...
def _get_fallback_aggregators(column: str, dtype: 'DataType') -> List[AggregateFnV2]: ...
def _get_aggregators_for_dtype(column: str, dtype: 'DataType', dtype_agg_mapping: Dict[Union['DataType', 'TypeCategory'], Callable[[str], List[AggregateFnV2]]]) -> List[AggregateFnV2]: ...
def _dtype_aggregators_for_dataset(schema: Optional['Schema'], columns: Optional[List[str]], dtype_agg_mapping: Optional[Dict[Union['DataType', 'TypeCategory'], Callable[[str], List[AggregateFnV2]]]]) -> _DtypeAggregators: ...
def _format_stats(agg: AggregateFnV2, value: Any, agg_type: pa.DataType) -> Dict[str, Tuple[Any, pa.DataType]]: ...
def _parse_summary_stats(agg_result: Dict[str, any], original_schema: pa.Schema, agg_schema: pa.Schema, aggregators: List[AggregateFnV2]) -> tuple: ...
def _create_pyarrow_array(col_data: List, col_type: Optional[pa.DataType], col_name: str) -> pa.Array: ...
def _build_summary_table(stats_dict: Dict[str, Dict[str, tuple]], all_columns: set, original_schema: pa.Schema, preserve_types: bool) -> pa.Table: ...
class DatasetSummary:
STATISTIC_COLUMN = "statistic"
def _safe_convert_table(self, table: pa.Table): ...
def _set_statistic_index(self, df: pd.DataFrame) -> pd.DataFrame: ...
def to_pandas(self): ...
def get_column_stats(self, column: str): ...
# Task:
Write a Python method `_extract_column_from_table` for the class `DatasetSummary` to extract a column from a PyArrow table if it exists.
Parameters: table: pa.Table, column: str
Returns: Optional[dict] | def _extract_column_from_table(
self, table: pa.Table, column: str
) -> Optional[dict]:
"""Extract a column from a PyArrow table if it exists.
Args:
table: PyArrow table to extract from
column: Column name to extract
Returns:
DataFrame with 'statistic' and 'value' columns, or None if column doesn't exist
"""
if column not in table.schema.names:
return None
df = self._safe_convert_table(table)[[self.STATISTIC_COLUMN, column]]
return df.rename(columns={column: "value"}) | function_simple | 0 | {"cognitive_complexity": 1, "loc": 17, "code_loc": 4, "docstring_loc": 9, "function_name": "_extract_column_from_table", "class_name": "DatasetSummary", "qualname": "DatasetSummary._extract_column_from_table", "file_path": "python/ray/data/stats.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "project_runnable"} |
vllm-project/vllm:vllm/model_executor/layers/fused_moe/config.py:_get_config_dtype_str | # Context:
import torch
def _quant_flags_to_group_shape(quant_dtype: torch.dtype | str | None, per_act_token_quant: bool, per_out_ch_quant: bool, block_shape: list[int] | None) -> tuple[GroupShape | None, GroupShape | None]: ...
class RoutingMethodType(IntEnum): ...
def get_routing_method_type(scoring_func: str, top_k: int, renormalize: bool, num_expert_group: int | None, has_e_score_bias: bool) -> RoutingMethodType: ...
class FusedMoEQuantDesc: ...
class FusedMoEQuantConfig: ...
def fp8_w8a8_moe_quant_config(w1_scale: torch.Tensor, w2_scale: torch.Tensor, a1_scale: torch.Tensor | None, a2_scale: torch.Tensor | None, w1_bias: torch.Tensor | None, w2_bias: torch.Tensor | None, per_act_token_quant: bool, per_out_ch_quant: bool, block_shape: list[int] | None, a1_gscale: torch.Tensor | None, a2_gscale: torch.Tensor | None, g1_alphas: torch.Tensor | None, g2_alphas: torch.Tensor | None) -> FusedMoEQuantConfig: ...
def int8_w8a8_moe_quant_config(w1_scale: torch.Tensor, w2_scale: torch.Tensor, a1_scale: torch.Tensor | None, a2_scale: torch.Tensor | None, w1_bias: torch.Tensor | None, w2_bias: torch.Tensor | None, per_act_token_quant: bool) -> FusedMoEQuantConfig: ...
def gptq_marlin_moe_quant_config(w1_scale: torch.Tensor, w2_scale: torch.Tensor, weight_bits: int, group_size: int, w1_zp: torch.Tensor | None, w2_zp: torch.Tensor | None, w1_bias: torch.Tensor | None, w2_bias: torch.Tensor | None): ...
def mxfp4_w4a16_moe_quant_config(w1_scale: Union[torch.Tensor, 'PrecisionConfig'], w2_scale: Union[torch.Tensor, 'PrecisionConfig'], w1_bias: torch.Tensor | None, w2_bias: torch.Tensor | None) -> FusedMoEQuantConfig: ...
def mxfp4_mxfp8_moe_quant_config(w1_scale: Union[torch.Tensor, 'PrecisionConfig'], w2_scale: Union[torch.Tensor, 'PrecisionConfig'], a1_scale: torch.Tensor | None, a2_scale: torch.Tensor | None, w1_bias: torch.Tensor | None, w2_bias: torch.Tensor | None, block_shape: list[int] | None) -> FusedMoEQuantConfig: ...
def mxfp4_w4a8_moe_quant_config(w1_scale: Union[torch.Tensor, 'PrecisionConfig'], w2_scale: Union[torch.Tensor, 'PrecisionConfig'], a1_scale: torch.Tensor | None, a2_scale: torch.Tensor | None, w1_bias: torch.Tensor | None, w2_bias: torch.Tensor | None, block_shape: list[int] | None) -> FusedMoEQuantConfig: ...
def ocp_mx_moe_quant_config(quant_dtype: str, w1_scale: Union[torch.Tensor, 'PrecisionConfig'], w2_scale: Union[torch.Tensor, 'PrecisionConfig'], weight_dtype: str | None, a1_scale: torch.Tensor | None, a2_scale: torch.Tensor | None, w1_bias: torch.Tensor | None, w2_bias: torch.Tensor | None, block_shape: list[int] | None) -> FusedMoEQuantConfig: ...
def nvfp4_moe_quant_config(g1_alphas: torch.Tensor, g2_alphas: torch.Tensor, a1_gscale: torch.Tensor, a2_gscale: torch.Tensor, w1_scale: torch.Tensor, w2_scale: torch.Tensor, w1_bias: torch.Tensor | None, w2_bias: torch.Tensor | None) -> FusedMoEQuantConfig: ...
def nvfp4_w4a16_moe_quant_config(g1_alphas: torch.Tensor, g2_alphas: torch.Tensor, w1_scale: torch.Tensor, w2_scale: torch.Tensor) -> FusedMoEQuantConfig: ...
def int4_w4a16_moe_quant_config(w1_scale: torch.Tensor, w2_scale: torch.Tensor, w1_zp: torch.Tensor | None, w2_zp: torch.Tensor | None, block_shape: list[int] | None) -> FusedMoEQuantConfig: ...
def fp8_w8a16_moe_quant_config(w1_scale: torch.Tensor, w2_scale: torch.Tensor, block_shape: list[int] | None) -> FusedMoEQuantConfig: ...
def int8_w8a16_moe_quant_config(w1_scale: torch.Tensor, w2_scale: torch.Tensor, w1_zp: torch.Tensor | None, w2_zp: torch.Tensor | None, block_shape: list[int] | None) -> FusedMoEQuantConfig: ...
def int4_w4afp8_moe_quant_config(w1_scale: torch.Tensor, w2_scale: torch.Tensor, g1_alphas: torch.Tensor, g2_alphas: torch.Tensor, per_act_token_quant: bool, per_out_ch_quant: bool, block_shape: list[int] | None) -> FusedMoEQuantConfig: ...
def awq_marlin_moe_quant_config(w1_scale: torch.Tensor, w2_scale: torch.Tensor, w1_zp: torch.Tensor | None, w2_zp: torch.Tensor | None, weight_bits: int, group_size: int, w1_bias: torch.Tensor | None, w2_bias: torch.Tensor | None) -> FusedMoEQuantConfig: ...
def biased_moe_quant_config(w1_bias: torch.Tensor | None, w2_bias: torch.Tensor | None) -> FusedMoEQuantConfig: ...
class FusedMoEParallelConfig: ...
class FusedMoEConfig: ...
# Task:
Write a Python function `_get_config_dtype_str` to return a string used to construct the filename that contains the tuning info for a particular quantization scheme.
Parameters: dtype: torch.dtype, use_fp8_w8a8: bool, use_fp8_w8a16: bool, use_int8_w8a16: bool, use_int4_w4a16: bool, ocp_mx_scheme: str | None
Returns: str | None | def _get_config_dtype_str(
dtype: torch.dtype,
use_fp8_w8a8: bool = False,
use_fp8_w8a16: bool = False,
use_int8_w8a16: bool = False,
use_int4_w4a16: bool = False,
ocp_mx_scheme: str | None = None,
) -> str | None:
"""
Return a string used to construct the filename that contains the
tuning info for a particular quantization scheme. See
try_get_optimal_moe_config in fused_moe.py.
"""
if use_fp8_w8a8:
return "fp8_w8a8"
elif use_fp8_w8a16:
return "fp8_w8a16"
elif use_int8_w8a16:
return "int8_w8a16"
elif use_int4_w4a16:
return "int4_w4a16"
elif ocp_mx_scheme is not None:
# The output of this function is passed to `try_get_optimal_moe_config`,
# and as we only simulate OCP MX execution in fused_moe for now,
# we will NOT look for `*,dtype=w_mxfp4_a_mxfp4.json` for now.
return None
elif dtype == torch.float:
# avoiding cases where kernel fails when float32 MoE
# use fp16/bfloat16 configs
return "float32"
return None | function_complex | 1 | {"cognitive_complexity": 6, "loc": 31, "code_loc": 13, "docstring_loc": 5, "function_name": "_get_config_dtype_str", "class_name": null, "qualname": "_get_config_dtype_str", "file_path": "vllm/model_executor/layers/fused_moe/config.py", "repo_id": "vllm-project/vllm", "has_docstring": true, "runnable_level": "plib_runnable"} |
RVC-Boss/GPT-SoVITS:GPT_SoVITS/eres2net/kaldi.py:_get_waveform_and_window_properties | # Context:
from typing import Tuple
from torch import Tensor
def _get_epsilon(device, dtype): ...
def _next_power_of_2(x: int) -> int: ...
def _get_strided(waveform: Tensor, window_size: int, window_shift: int, snip_edges: bool) -> Tensor: ...
def _feature_window_function(window_type: str, window_size: int, blackman_coeff: float, device: torch.device, dtype: int) -> Tensor: ...
def _get_log_energy(strided_input: Tensor, epsilon: Tensor, energy_floor: float) -> Tensor: ...
def _get_window(waveform: Tensor, padded_window_size: int, window_size: int, window_shift: int, window_type: str, blackman_coeff: float, snip_edges: bool, raw_energy: bool, energy_floor: float, dither: float, remove_dc_offset: bool, preemphasis_coefficient: float) -> Tuple[Tensor, Tensor]: ...
def _subtract_column_mean(tensor: Tensor, subtract_mean: bool) -> Tensor: ...
def spectrogram(waveform: Tensor, blackman_coeff: float, channel: int, dither: float, energy_floor: float, frame_length: float, frame_shift: float, min_duration: float, preemphasis_coefficient: float, raw_energy: bool, remove_dc_offset: bool, round_to_power_of_two: bool, sample_frequency: float, snip_edges: bool, subtract_mean: bool, window_type: str) -> Tensor: ...
def inverse_mel_scale_scalar(mel_freq: float) -> float: ...
def inverse_mel_scale(mel_freq: Tensor) -> Tensor: ...
def mel_scale_scalar(freq: float) -> float: ...
def mel_scale(freq: Tensor) -> Tensor: ...
def vtln_warp_freq(vtln_low_cutoff: float, vtln_high_cutoff: float, low_freq: float, high_freq: float, vtln_warp_factor: float, freq: Tensor) -> Tensor: ...
def vtln_warp_mel_freq(vtln_low_cutoff: float, vtln_high_cutoff: float, low_freq, high_freq: float, vtln_warp_factor: float, mel_freq: Tensor) -> Tensor: ...
def get_mel_banks(num_bins: int, window_length_padded: int, sample_freq: float, low_freq: float, high_freq: float, vtln_low: float, vtln_high: float, vtln_warp_factor: float, device, dtype) -> Tuple[Tensor, Tensor]: ...
def fbank(waveform: Tensor, blackman_coeff: float, channel: int, dither: float, energy_floor: float, frame_length: float, frame_shift: float, high_freq: float, htk_compat: bool, low_freq: float, min_duration: float, num_mel_bins: int, preemphasis_coefficient: float, raw_energy: bool, remove_dc_offset: bool, round_to_power_of_two: bool, sample_frequency: float, snip_edges: bool, subtract_mean: bool, use_energy: bool, use_log_fbank: bool, use_power: bool, vtln_high: float, vtln_low: float, vtln_warp: float, window_type: str) -> Tensor: ...
def _get_dct_matrix(num_ceps: int, num_mel_bins: int) -> Tensor: ...
def _get_lifter_coeffs(num_ceps: int, cepstral_lifter: float) -> Tensor: ...
def mfcc(waveform: Tensor, blackman_coeff: float, cepstral_lifter: float, channel: int, dither: float, energy_floor: float, frame_length: float, frame_shift: float, high_freq: float, htk_compat: bool, low_freq: float, num_ceps: int, min_duration: float, num_mel_bins: int, preemphasis_coefficient: float, raw_energy: bool, remove_dc_offset: bool, round_to_power_of_two: bool, sample_frequency: float, snip_edges: bool, subtract_mean: bool, use_energy: bool, vtln_high: float, vtln_low: float, vtln_warp: float, window_type: str) -> Tensor: ...
# Task:
Write a Python function `_get_waveform_and_window_properties` to get the waveform and window properties.
Parameters: waveform: Tensor, channel: int, sample_frequency: float, frame_shift: float, frame_length: float, round_to_power_of_two: bool, preemphasis_coefficient: float
Returns: Tuple[Tensor, int, int, int] | def _get_waveform_and_window_properties(
waveform: Tensor,
channel: int,
sample_frequency: float,
frame_shift: float,
frame_length: float,
round_to_power_of_two: bool,
preemphasis_coefficient: float,
) -> Tuple[Tensor, int, int, int]:
r"""Gets the waveform and window properties"""
channel = max(channel, 0)
assert channel < waveform.size(0), "Invalid channel {} for size {}".format(channel, waveform.size(0))
waveform = waveform[channel, :] # size (n)
window_shift = int(sample_frequency * frame_shift * MILLISECONDS_TO_SECONDS)
window_size = int(sample_frequency * frame_length * MILLISECONDS_TO_SECONDS)
padded_window_size = _next_power_of_2(window_size) if round_to_power_of_two else window_size
assert 2 <= window_size <= len(waveform), "choose a window size {} that is [2, {}]".format(
window_size, len(waveform)
)
assert 0 < window_shift, "`window_shift` must be greater than 0"
assert padded_window_size % 2 == 0, (
"the padded `window_size` must be divisible by two. use `round_to_power_of_two` or change `frame_length`"
)
assert 0.0 <= preemphasis_coefficient <= 1.0, "`preemphasis_coefficient` must be between [0,1]"
assert sample_frequency > 0, "`sample_frequency` must be greater than zero"
return waveform, window_shift, window_size, padded_window_size | function_simple | 1 | {"cognitive_complexity": 1, "loc": 27, "code_loc": 16, "docstring_loc": 1, "function_name": "_get_waveform_and_window_properties", "class_name": null, "qualname": "_get_waveform_and_window_properties", "file_path": "GPT_SoVITS/eres2net/kaldi.py", "repo_id": "RVC-Boss/GPT-SoVITS", "has_docstring": true, "runnable_level": "file_runnable"} |
keras-team/keras:keras/src/layers/pooling/adaptive_pooling1d_test.py:AdaptivePooling1DLayerTest.test_average_pooling_get_config | # Context:
from keras.src import layers
class AdaptivePooling1DLayerTest(testing.TestCase):
def _run_layer_test(self, layer_class, x_np, output_size, data_format): ...
def test_average_pooling_basic_shapes(self): ...
def test_max_pooling_basic_shapes(self): ...
def test_average_pooling_channels_last(self): ...
def test_max_pooling_channels_last(self): ...
def test_average_pooling_compute_output_shape(self): ...
def test_max_pooling_compute_output_shape(self): ...
def test_max_pooling_get_config(self): ...
def test_average_pooling_numerical(self): ...
def test_max_pooling_numerical(self): ...
# Task:
Write a Python test method `test_average_pooling_get_config` in test class `AdaptivePooling1DLayerTest` to test get_config() serialization for AdaptiveAveragePooling1D.
Module under test: keras.src, keras.src, keras.src | def test_average_pooling_get_config(self):
"""Test get_config() serialization for AdaptiveAveragePooling1D."""
layer = layers.AdaptiveAveragePooling1D(
output_size=32, data_format="channels_first"
)
config = layer.get_config()
self.assertEqual(config["output_size"], (32,))
self.assertEqual(config["data_format"], "channels_first") | test | 1 | {"function_name": "test_average_pooling_get_config", "class_name": "AdaptivePooling1DLayerTest", "qualname": "AdaptivePooling1DLayerTest.test_average_pooling_get_config", "file_path": "keras/src/layers/pooling/adaptive_pooling1d_test.py", "repo_id": "keras-team/keras", "loc": 8, "tested_modules": ["keras.src", "keras.src", "keras.src"], "has_docstring": true, "runnable_level": "project_runnable"} |
langflow-ai/langflow:src/backend/tests/unit/core/test_celeryconfig.py:TestCeleryConfigStructure.test_result_backend_contains_host | # Context:
from langflow.core import celeryconfig
class TestCeleryConfigAcceptContent: ...
class TestCeleryConfigVariables: ...
class TestCeleryConfigStructure:
def test_broker_url_contains_protocol(self): ...
def test_result_backend_contains_protocol(self): ...
def test_broker_url_contains_host(self): ...
# Task:
Write a Python test method `test_result_backend_contains_host` in test class `TestCeleryConfigStructure` to test that result_backend contains a host component.
Module under test: langflow.core | def test_result_backend_contains_host(self):
"""Test that result_backend contains a host component."""
result_backend = celeryconfig.result_backend
# Remove protocol part
if "://" in result_backend:
host_part = result_backend.split("://")[1]
assert len(host_part) > 0 | test | 1 | {"function_name": "test_result_backend_contains_host", "class_name": "TestCeleryConfigStructure", "qualname": "TestCeleryConfigStructure.test_result_backend_contains_host", "file_path": "src/backend/tests/unit/core/test_celeryconfig.py", "repo_id": "langflow-ai/langflow", "loc": 7, "tested_modules": ["langflow.core"], "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/data/expressions.py:Expr.__or__ | # Context:
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Generic,
List,
Optional,
Tuple,
Type,
TypeVar,
Union,
)
class Operation(Enum): ...
class _ExprVisitor(ABC, Generic[T]): ...
class _PyArrowExpressionVisitor(_ExprVisitor['pyarrow.compute.Expression']): ...
class ColumnExpr(Expr): ...
class LiteralExpr(Expr): ...
class BinaryExpr(Expr): ...
class UnaryExpr(Expr): ...
class _CallableClassSpec: ...
class _CallableClassUDF: ...
class UDFExpr(Expr): ...
def _create_udf_callable(fn: Callable[..., BatchColumn], return_dtype: DataType) -> Callable[..., UDFExpr]: ...
def udf(return_dtype: DataType) -> Callable[..., UDFExpr]: ...
def _create_pyarrow_wrapper(fn: Callable[..., BatchColumn]) -> Callable[..., BatchColumn]: ...
def pyarrow_udf(return_dtype: DataType) -> Callable[..., UDFExpr]: ...
def _create_pyarrow_compute_udf(pc_func: Callable[..., pyarrow.Array], return_dtype: DataType | None) -> Callable[..., 'UDFExpr']: ...
class DownloadExpr(Expr): ...
class AliasExpr(Expr): ...
class StarExpr(Expr): ...
class MonotonicallyIncreasingIdExpr(Expr): ...
def col(name: str) -> ColumnExpr: ...
def lit(value: Any) -> LiteralExpr: ...
def star() -> StarExpr: ...
def download(uri_column_name: str, filesystem: Optional['pyarrow.fs.FileSystem']) -> DownloadExpr: ...
def monotonically_increasing_id() -> MonotonicallyIncreasingIdExpr: ...
def __getattr__(name: str): ...
class Expr(ABC):
def name(self) -> str | None: ...
def structurally_equals(self, other: Any) -> bool: ...
def to_pyarrow(self) -> 'pyarrow.compute.Expression': ...
def __repr__(self) -> str: ...
def _bin(self, other: Any, op: Operation) -> 'Expr': ...
def __add__(self, other: Any) -> 'Expr': ...
def __radd__(self, other: Any) -> 'Expr': ...
def __sub__(self, other: Any) -> 'Expr': ...
def __rsub__(self, other: Any) -> 'Expr': ...
def __mul__(self, other: Any) -> 'Expr': ...
def __rmul__(self, other: Any) -> 'Expr': ...
def __mod__(self, other: Any): ...
def __rmod__(self, other: Any): ...
def __truediv__(self, other: Any) -> 'Expr': ...
def __rtruediv__(self, other: Any) -> 'Expr': ...
def __floordiv__(self, other: Any) -> 'Expr': ...
def __rfloordiv__(self, other: Any) -> 'Expr': ...
def __gt__(self, other: Any) -> 'Expr': ...
def __lt__(self, other: Any) -> 'Expr': ...
def __ge__(self, other: Any) -> 'Expr': ...
def __le__(self, other: Any) -> 'Expr': ...
def __eq__(self, other: Any) -> 'Expr': ...
def __ne__(self, other: Any) -> 'Expr': ...
def __and__(self, other: Any) -> 'Expr': ...
def __invert__(self) -> 'Expr': ...
def is_null(self) -> 'Expr': ...
def is_not_null(self) -> 'Expr': ...
def is_in(self, values: Union[List[Any], 'Expr']) -> 'Expr': ...
def not_in(self, values: Union[List[Any], 'Expr']) -> 'Expr': ...
def alias(self, name: str) -> 'Expr': ...
def ceil(self) -> 'UDFExpr': ...
def floor(self) -> 'UDFExpr': ...
def round(self) -> 'UDFExpr': ...
def trunc(self) -> 'UDFExpr': ...
def ln(self) -> 'UDFExpr': ...
def log10(self) -> 'UDFExpr': ...
def log2(self) -> 'UDFExpr': ...
def exp(self) -> 'UDFExpr': ...
def sin(self) -> 'UDFExpr': ...
def cos(self) -> 'UDFExpr': ...
def tan(self) -> 'UDFExpr': ...
def asin(self) -> 'UDFExpr': ...
def acos(self) -> 'UDFExpr': ...
def atan(self) -> 'UDFExpr': ...
def negate(self) -> 'UDFExpr': ...
def sign(self) -> 'UDFExpr': ...
def power(self, exponent: Any) -> 'UDFExpr': ...
def abs(self) -> 'UDFExpr': ...
def cast(self, target_type: DataType, safe: bool) -> 'UDFExpr': ...
def arr(self) -> '_ArrayNamespace': ...
def list(self) -> '_ListNamespace': ...
def str(self) -> '_StringNamespace': ...
def struct(self) -> '_StructNamespace': ...
def dt(self) -> '_DatetimeNamespace': ...
def _unalias(self) -> 'Expr': ...
# Task:
Write a Python method `__or__` for the class `Expr` to logical OR operator (|).
Parameters: other: Any
Returns: 'Expr' | def __or__(self, other: Any) -> "Expr":
"""Logical OR operator (|)."""
return self._bin(other, Operation.OR) | function_simple | 0 | {"cognitive_complexity": 0, "loc": 3, "code_loc": 1, "docstring_loc": 1, "function_name": "__or__", "class_name": "Expr", "qualname": "Expr.__or__", "file_path": "python/ray/data/expressions.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "file_runnable"} |
langchain-ai/langchain:libs/langchain/langchain_classic/evaluation/embedding_distance/base.py:_EmbeddingDistanceChainMixin._chebyshev_distance | # Context:
from typing import Any
from scipy.spatial.distance import chebyshev
def _import_numpy() -> Any: ...
def _check_numpy() -> bool: ...
def _embedding_factory() -> Embeddings: ...
class EmbeddingDistance(str, Enum): ...
class EmbeddingDistanceEvalChain(_EmbeddingDistanceChainMixin, StringEvaluator): ...
class PairwiseEmbeddingDistanceEvalChain(_EmbeddingDistanceChainMixin, PairwiseStringEvaluator): ...
class _EmbeddingDistanceChainMixin(Chain):
def _validate_tiktoken_installed(cls, values: dict[str, Any]) -> dict[str, Any]: ...
model_config = ConfigDict(
def output_keys(self) -> list[str]: ...
def _prepare_output(self, result: dict) -> dict: ...
def _get_metric(self, metric: EmbeddingDistance) -> Any: ...
def _cosine_distance(a: Any, b: Any) -> Any: ...
def _euclidean_distance(a: Any, b: Any) -> Any: ...
def _manhattan_distance(a: Any, b: Any) -> Any: ...
def _hamming_distance(a: Any, b: Any) -> Any: ...
def _compute_score(self, vectors: Any) -> float: ...
# Task:
Write a Python method `_chebyshev_distance` for the class `_EmbeddingDistanceChainMixin` to compute the Chebyshev distance between two vectors.
Parameters: a: Any, b: Any
Returns: Any | def _chebyshev_distance(a: Any, b: Any) -> Any:
"""Compute the Chebyshev distance between two vectors.
Args:
a (np.ndarray): The first vector.
b (np.ndarray): The second vector.
Returns:
np.floating: The Chebyshev distance.
"""
try:
from scipy.spatial.distance import chebyshev
return chebyshev(a.flatten(), b.flatten())
except ImportError:
if _check_numpy():
np = _import_numpy()
return np.max(np.abs(a - b))
return max(abs(x - y) for x, y in zip(a, b, strict=False)) | function_simple | 1 | {"cognitive_complexity": 3, "loc": 20, "code_loc": 8, "docstring_loc": 9, "function_name": "_chebyshev_distance", "class_name": "_EmbeddingDistanceChainMixin", "qualname": "_EmbeddingDistanceChainMixin._chebyshev_distance", "file_path": "libs/langchain/langchain_classic/evaluation/embedding_distance/base.py", "repo_id": "langchain-ai/langchain", "has_docstring": true, "runnable_level": "file_runnable"} |
ray-project/ray:python/ray/tune/tests/test_env_callbacks.py:test_no_env_variable | # Context:
import os
from ray.tune.constants import RAY_TUNE_CALLBACKS_ENV_VAR
from ray.tune.utils.callback import Callback, _initialize_env_callbacks
class MockCallback(Callback): ...
def test_env_callbacks_loading(mock_import, env_value, expected_callback_count): ...
def test_callback_loading_errors(mock_import, env_value, original_error_type): ...
def test_import_error_handling(): ...
# Task:
Write a Python test function `test_no_env_variable` to test that no callbacks are loaded when environment variable is not set.
Module under test: ray.tune.constants, ray.tune.utils.callback | def test_no_env_variable():
"""Test that no callbacks are loaded when environment variable is not set."""
if RAY_TUNE_CALLBACKS_ENV_VAR in os.environ:
del os.environ[RAY_TUNE_CALLBACKS_ENV_VAR]
callbacks = _initialize_env_callbacks()
assert len(callbacks) == 0 | test | 0 | {"function_name": "test_no_env_variable", "class_name": null, "qualname": "test_no_env_variable", "file_path": "python/ray/tune/tests/test_env_callbacks.py", "repo_id": "ray-project/ray", "loc": 7, "tested_modules": ["ray.tune.constants", "ray.tune.utils.callback"], "has_docstring": true, "runnable_level": "plib_runnable"} |
crewAIInc/crewAI:lib/crewai/tests/rag/embeddings/test_backward_compatibility.py:TestLegacyConfigurationFormats.test_legacy_google_with_model_key | # Context:
from crewai.rag.embeddings.providers.google.generative_ai import GenerativeAiProvider
class TestGoogleProviderAlias: ...
class TestModelKeyBackwardCompatibility: ...
class TestTaskTypeConfiguration: ...
class TestFactoryBackwardCompatibility: ...
class TestDocumentationCodeSnippets: ...
class TestLegacyConfigurationFormats:
def test_legacy_openai_with_model_key(self): ...
def test_legacy_cohere_with_model_key(self): ...
def test_legacy_azure_with_model_key(self): ...
# Task:
Write a Python test method `test_legacy_google_with_model_key` in test class `TestLegacyConfigurationFormats` to test legacy Google config using 'model' instead of 'model_name'.
Module under test: crewai.rag.embeddings.factory, crewai.rag.embeddings.providers.openai.openai_provider, crewai.rag.embeddings.providers.cohere.cohere_provider | def test_legacy_google_with_model_key(self):
"""Test legacy Google config using 'model' instead of 'model_name'."""
provider = GenerativeAiProvider(
api_key="test-key",
model="text-embedding-005",
task_type="retrieval_document",
)
assert provider.model_name == "text-embedding-005"
assert provider.task_type == "retrieval_document" | test | 0 | {"function_name": "test_legacy_google_with_model_key", "class_name": "TestLegacyConfigurationFormats", "qualname": "TestLegacyConfigurationFormats.test_legacy_google_with_model_key", "file_path": "lib/crewai/tests/rag/embeddings/test_backward_compatibility.py", "repo_id": "crewAIInc/crewAI", "loc": 9, "tested_modules": ["crewai.rag.embeddings.factory", "crewai.rag.embeddings.providers.openai.openai_provider", "crewai.rag.embeddings.providers.cohere.cohere_provider", "crewai.rag.embeddings.providers.google.generative_ai", "crewai.rag.embeddings.providers.google.vertex"], "has_docstring": true, "runnable_level": "project_runnable"} |
binary-husky/gpt_academic:crazy_functions/review_fns/data_sources/pubmed_source.py:PubMedSource.get_latest_papers | # Context:
from typing import List, Optional, Dict, Union
from crazy_functions.review_fns.data_sources.base_source import DataSource, PaperMetadata
async def example_usage(): ...
class PubMedSource(DataSource):
API_KEYS = [
def __init__(self, api_key: str = None):
"""初始化
Args:
api_key: PubMed API密钥,如果不提供则从预定义列表中随机选择
"""
self.api_key = api_key or random.choice(self.API_KEYS) # 随机选择一个API密钥
self._initialize()
def _initialize(self) -> None: ...
async def _make_request(self, url: str) -> Optional[str]: ...
async def search(self, query: str, limit: int, sort_by: str, start_year: int) -> List[PaperMetadata]: ...
async def _fetch_papers_batch(self, pmids: List[str]) -> List[PaperMetadata]: ...
def _parse_article(self, article: ET.Element) -> PaperMetadata: ...
async def get_paper_details(self, pmid: str) -> Optional[PaperMetadata]: ...
async def get_related_papers(self, pmid: str, limit: int) -> List[PaperMetadata]: ...
async def search_by_author(self, author: str, limit: int, start_year: int) -> List[PaperMetadata]: ...
async def search_by_journal(self, journal: str, limit: int, start_year: int) -> List[PaperMetadata]: ...
async def get_citations(self, paper_id: str) -> List[PaperMetadata]: ...
async def get_references(self, paper_id: str) -> List[PaperMetadata]: ...
# Task:
Write a Python async method `get_latest_papers` for the class `PubMedSource` to 获取最新论文.
Parameters: days: int, limit: int
Returns: List[PaperMetadata] | async def get_latest_papers(
self,
days: int = 7,
limit: int = 100
) -> List[PaperMetadata]:
"""获取最新论文
Args:
days: 最近几天的论文
limit: 返回结果数量限制
Returns:
最新论文列表
"""
query = f"last {days} days[dp]"
return await self.search(query, limit=limit, sort_by="date") | function_simple | 1 | {"cognitive_complexity": 0, "loc": 16, "code_loc": 2, "docstring_loc": 9, "function_name": "get_latest_papers", "class_name": "PubMedSource", "qualname": "PubMedSource.get_latest_papers", "file_path": "crazy_functions/review_fns/data_sources/pubmed_source.py", "repo_id": "binary-husky/gpt_academic", "has_docstring": true, "runnable_level": "project_runnable"} |
infiniflow/ragflow:test/testcases/test_web_api/test_kb_app/test_kb_tags_meta.py:TestKbTagsMeta.test_list_tags | # Context:
import pytest
from common import (
delete_knowledge_graph,
kb_basic_info,
kb_get_meta,
kb_update_metadata_setting,
knowledge_graph,
list_tags,
list_tags_from_kbs,
rename_tags,
rm_tags,
update_chunk,
)
def _wait_for_tag(auth, kb_id, tag, timeout): ...
def _seed_tag(auth, kb_id, document_id, chunk_id): ...
class TestAuthorization: ...
class TestKbTagsMetaNegative: ...
class TestKbTagsMeta:
def test_list_tags_from_kbs(self, WebApiAuth, add_dataset): ...
def test_rm_tags(self, WebApiAuth, add_chunks): ...
def test_rename_tag(self, WebApiAuth, add_chunks): ...
def test_get_meta(self, WebApiAuth, add_dataset): ...
def test_basic_info(self, WebApiAuth, add_dataset): ...
def test_update_metadata_setting(self, WebApiAuth, add_dataset): ...
def test_knowledge_graph(self, WebApiAuth, add_dataset): ...
def test_delete_knowledge_graph(self, WebApiAuth, add_dataset): ...
# Task:
Write a Python test method `test_list_tags` in test class `TestKbTagsMeta` to verify the behavior of `list_tags`.
Module under test: common, configs, libs.auth | def test_list_tags(self, WebApiAuth, add_dataset):
kb_id = add_dataset
res = list_tags(WebApiAuth, kb_id)
assert res["code"] == 0, res
assert isinstance(res["data"], list), res | test | 1 | {"function_name": "test_list_tags", "class_name": "TestKbTagsMeta", "qualname": "TestKbTagsMeta.test_list_tags", "file_path": "test/testcases/test_web_api/test_kb_app/test_kb_tags_meta.py", "repo_id": "infiniflow/ragflow", "loc": 5, "tested_modules": ["common", "configs", "libs.auth", "utils"], "has_docstring": false, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/data/tests/test_map_batches.py:test_map_batches_combine_empty_blocks | # Context:
import ray
def process_timestamp_data(row): ...
def process_timestamp_data_batch_arrow(batch: pa.Table) -> pa.Table: ...
def process_timestamp_data_batch_pandas(batch: pd.DataFrame) -> pd.DataFrame: ...
def test_map_batches_basic(ray_start_regular_shared, tmp_path, restore_data_context, target_max_block_size_infinite_or_default): ...
def test_map_batches_extra_args(shutdown_only, tmp_path, target_max_block_size_infinite_or_default): ...
def test_map_with_memory_resources(method, shutdown_only, target_max_block_size_infinite_or_default): ...
def test_map_batches_generator(ray_start_regular_shared, tmp_path, target_max_block_size_infinite_or_default): ...
def test_map_batches_actors_preserves_order(shutdown_only, target_max_block_size_infinite_or_default): ...
def test_map_batches_batch_mutation(ray_start_regular_shared, num_rows, num_blocks, batch_size, restore_data_context, target_max_block_size_infinite_or_default): ...
def test_map_batches_batch_zero_copy(ray_start_regular_shared, num_rows, num_blocks, batch_size, target_max_block_size_infinite_or_default): ...
def test_map_batches_block_bundling_auto(ray_start_regular_shared, block_size, batch_size, target_max_block_size_infinite_or_default): ...
def test_map_batches_block_bundling_skewed_manual(ray_start_regular_shared, block_sizes, batch_size, expected_num_blocks, target_max_block_size_infinite_or_default): ...
def test_map_batches_block_bundling_skewed_auto(ray_start_regular_shared, block_sizes, batch_size, target_max_block_size_infinite_or_default): ...
def test_map_batches_preserve_empty_blocks(ray_start_regular_shared, target_max_block_size_infinite_or_default): ...
def test_map_batches_timestamp_nanosecs(df, expected_df, ray_start_regular_shared, target_max_block_size_infinite_or_default): ...
def test_map_batches_async_exception_propagation(shutdown_only): ...
def test_map_batches_async_generator_fast_yield(shutdown_only, target_max_block_size_infinite_or_default): ...
def test_map_batches_struct_field_type_divergence(shutdown_only): ...
# Task:
Write a Python test function `test_map_batches_combine_empty_blocks` to verify the behavior of `map_batches_combine_empty_blocks`.
Module under test: typing, ray.data._internal.arrow_ops.transform_pyarrow, ray.data._internal.utils.arrow_utils | def test_map_batches_combine_empty_blocks(
ray_start_regular_shared, target_max_block_size_infinite_or_default
):
xs = [x % 3 for x in list(range(100))]
# ds1 has 1 block which contains 100 rows.
ds1 = ray.data.from_items(xs).repartition(1).sort("item").map_batches(lambda x: x)
assert ds1._block_num_rows() == [100]
# ds2 has 30 blocks, but only 3 of them are non-empty
ds2 = (
ray.data.from_items(xs)
.repartition(30)
.sort("item")
.map_batches(lambda x: x, batch_size=1)
)
assert len(ds2._block_num_rows()) == 3
count = sum(1 for x in ds2._block_num_rows() if x > 0)
assert count == 3
# The number of partitions should not affect the map_batches() result.
assert ds1.take_all() == ds2.take_all() | test | 0 | {"function_name": "test_map_batches_combine_empty_blocks", "class_name": null, "qualname": "test_map_batches_combine_empty_blocks", "file_path": "python/ray/data/tests/test_map_batches.py", "repo_id": "ray-project/ray", "loc": 22, "tested_modules": ["typing", "ray.data._internal.arrow_ops.transform_pyarrow", "ray.data._internal.utils.arrow_utils", "ray.data.context", "ray.data.dataset"], "has_docstring": false, "runnable_level": "project_runnable"} |
jax-ml/jax:tests/pallas/mgpu_examples_test.py:module_doc | Write a module-level docstring for the Python module `mgpu_examples_test` which contains class `TuningConfig`, function `matmul0`, function `matmul1`, function `matmul2`, function `matmul3`. | Tests for examples from Pallas:MGPU documentation. | documentation | 1 | {"doc_type": "module", "module_name": "mgpu_examples_test", "file_path": "tests/pallas/mgpu_examples_test.py", "repo_id": "jax-ml/jax", "char_length": 50} |
langflow-ai/langflow:src/backend/tests/unit/components/processing/test_text_operations_component.py:TestTextOperationsExtract.test_extract_invalid_regex | # Context:
import pytest
from lfx.components.processing.text_operations import TextOperations
class TestTextOperationsComponent(ComponentTestBaseWithoutClient): ...
class TestTextOperationsWordCount: ...
class TestTextOperationsCaseConversion: ...
class TestTextOperationsReplace: ...
class TestTextOperationsHead: ...
class TestTextOperationsTail: ...
class TestTextOperationsStrip: ...
class TestTextOperationsJoin: ...
class TestTextOperationsClean: ...
class TestTextOperationsToDataFrame: ...
class TestTextOperationsUpdateBuildConfig: ...
class TestTextOperationsUpdateOutputs: ...
class TestTextOperationsOutputMethods: ...
class TestBugFixWordCountEmptyText: ...
class TestBugFixTextJoinEmptyFirst: ...
class TestBugFixTextStripTabs: ...
class TestBugFixDataFrameHeaderValidation: ...
class TestBugFixInputValidation: ...
class TestTextOperationsExtract:
def test_extract_numbers(self): ...
def test_extract_with_limit(self): ...
def test_extract_no_matches(self): ...
def test_extract_empty_pattern(self): ...
# Task:
Write a Python test method `test_extract_invalid_regex` in test class `TestTextOperationsExtract` to test extraction with invalid regex raises ValueError (Bug #3 fix).
Module under test: lfx.components.processing.text_operations, lfx.schema.data, lfx.schema.dataframe | def test_extract_invalid_regex(self):
"""Test extraction with invalid regex raises ValueError (Bug #3 fix)."""
component = TextOperations()
component.extract_pattern = "[invalid"
component.max_matches = 10
with pytest.raises(ValueError, match="Invalid regex pattern"):
component._text_extract("hello") | test | 1 | {"function_name": "test_extract_invalid_regex", "class_name": "TestTextOperationsExtract", "qualname": "TestTextOperationsExtract.test_extract_invalid_regex", "file_path": "src/backend/tests/unit/components/processing/test_text_operations_component.py", "repo_id": "langflow-ai/langflow", "loc": 8, "tested_modules": ["lfx.components.processing.text_operations", "lfx.schema.data", "lfx.schema.dataframe", "lfx.schema.message", "tests.base"], "has_docstring": true, "runnable_level": "project_runnable"} |
666ghj/BettaFish:ReportEngine/renderers/pdf_layout_optimizer.py:PDFLayoutOptimizer.__init__ | # Context:
from typing import Any, Dict, List, Optional, Tuple
class KPICardLayout: ...
class CalloutLayout: ...
class TableLayout: ...
class ChartLayout: ...
class GridLayout: ...
class DataBlockLayout: ...
class PageLayout: ...
class PDFLayoutConfig: ...
class PDFLayoutOptimizer:
CHAR_WIDTH_FACTOR = {
def _create_default_config() -> PDFLayoutConfig: ...
def optimize_for_document(self, document_ir: Dict[str, Any]) -> PDFLayoutConfig: ...
def _analyze_document(self, document_ir: Dict[str, Any]) -> Dict[str, Any]: ...
def _analyze_chapter(self, chapter: Dict[str, Any], stats: Dict[str, Any]): ...
def _analyze_block(self, block: Dict[str, Any], stats: Dict[str, Any]): ...
def _extract_text_from_paragraph(self, paragraph: Dict[str, Any]) -> str: ...
def _analyze_section(self, section: Dict[str, Any], stats: Dict[str, Any]): ...
def _estimate_text_width(self, text: str, font_size: int) -> float: ...
def _check_text_overflow(self, text: str, font_size: int, max_width: int) -> bool: ...
def _calculate_safe_font_size(self, text: str, max_width: int, min_font_size: int, max_font_size: int) -> Tuple[int, bool]: ...
def _detect_kpi_overflow_issues(self, stats: Dict[str, Any]) -> List[str]: ...
def _adjust_config_based_on_stats(self, stats: Dict[str, Any]) -> PDFLayoutConfig: ...
def _log_optimization(self, stats: Dict[str, Any], config: PDFLayoutConfig): ...
def save_config(self, path: str | Path, log_entry: Optional[Dict]): ...
def load_config(cls, path: str | Path) -> PDFLayoutOptimizer: ...
def generate_pdf_css(self) -> str: ...
# Task:
Write a Python method `__init__` for the class `PDFLayoutOptimizer` to 初始化优化器.
Parameters: config: Optional[PDFLayoutConfig] | def __init__(self, config: Optional[PDFLayoutConfig] = None):
"""
初始化优化器
参数:
config: 布局配置,如果为None则使用默认配置
"""
self.config = config or self._create_default_config()
self.optimization_log = [] | function_simple | 1 | {"cognitive_complexity": 1, "loc": 9, "code_loc": 2, "docstring_loc": 6, "function_name": "__init__", "class_name": "PDFLayoutOptimizer", "qualname": "PDFLayoutOptimizer.__init__", "file_path": "ReportEngine/renderers/pdf_layout_optimizer.py", "repo_id": "666ghj/BettaFish", "has_docstring": true, "runnable_level": "file_runnable"} |
crewAIInc/crewAI:lib/crewai/tests/llms/openai/test_openai.py:test_openai_get_client_params_with_env_var | # Context:
import os
from unittest.mock import patch, MagicMock
from crewai.llms.providers.openai.completion import OpenAICompletion, ResponsesAPIResult
from crewai.llms.providers.openai.completion import OpenAICompletion
def test_openai_completion_is_used_when_openai_provider(): ...
def test_openai_completion_is_used_when_no_provider_prefix(): ...
def test_openai_is_default_provider_without_explicit_llm_set_on_agent(): ...
def test_openai_completion_module_is_imported(): ...
def test_native_openai_raises_error_when_initialization_fails(): ...
def test_openai_completion_initialization_parameters(): ...
def test_openai_completion_call(): ...
def test_openai_completion_called_during_crew_execution(): ...
def test_openai_completion_call_arguments(): ...
def test_multiple_openai_calls_in_crew(): ...
def test_openai_completion_with_tools(): ...
def test_openai_completion_call_returns_usage_metrics(): ...
def test_openai_raises_error_when_model_not_supported(): ...
def test_openai_client_setup_with_extra_arguments(): ...
def test_extra_arguments_are_passed_to_openai_completion(): ...
def test_openai_get_client_params_with_api_base(): ...
def test_openai_get_client_params_with_base_url_priority(): ...
def test_openai_get_client_params_priority_order(): ...
def test_openai_get_client_params_no_base_url(monkeypatch): ...
def test_openai_streaming_with_response_model(): ...
def test_openai_response_format_with_pydantic_model(): ...
def test_openai_response_format_with_dict(): ...
def test_openai_response_format_none(): ...
def test_openai_streaming_returns_usage_metrics(): ...
def test_openai_responses_api_initialization(): ...
def test_openai_responses_api_default_is_completions(): ...
def test_openai_responses_api_prepare_params(): ...
def test_openai_responses_api_tool_format(): ...
def test_openai_completions_api_tool_format(): ...
def test_openai_responses_api_structured_output_format(): ...
def test_openai_responses_api_with_previous_response_id(): ...
def test_openai_responses_api_call_routing(): ...
def test_openai_responses_api_basic_call(): ...
def test_openai_responses_api_with_structured_output(): ...
def test_openai_responses_api_with_system_message_extraction(): ...
def test_openai_responses_api_streaming(): ...
def test_openai_responses_api_returns_usage_metrics(): ...
def test_openai_responses_api_builtin_tools_param(): ...
def test_openai_responses_api_builtin_tools_with_custom_tools(): ...
def test_openai_responses_api_with_web_search(): ...
def test_responses_api_result_dataclass(): ...
def test_responses_api_result_has_tool_outputs(): ...
def test_responses_api_result_has_reasoning(): ...
def test_openai_responses_api_parse_tool_outputs_param(): ...
def test_openai_responses_api_parse_tool_outputs_default_false(): ...
def test_openai_responses_api_with_parse_tool_outputs(): ...
def test_openai_responses_api_parse_tool_outputs_basic_call(): ...
def test_openai_responses_api_auto_chain_param(): ...
def test_openai_responses_api_auto_chain_default_false(): ...
def test_openai_responses_api_last_response_id_property(): ...
def test_openai_responses_api_reset_chain(): ...
def test_openai_responses_api_auto_chain_prepare_params(): ...
def test_openai_responses_api_explicit_previous_response_id_takes_precedence(): ...
def test_openai_responses_api_auto_chain_disabled_no_tracking(): ...
def test_openai_responses_api_auto_chain_integration(): ...
def test_openai_responses_api_auto_chain_with_reset(): ...
def test_openai_responses_api_auto_chain_reasoning_param(): ...
def test_openai_responses_api_auto_chain_reasoning_default_false(): ...
def test_openai_responses_api_last_reasoning_items_property(): ...
def test_openai_responses_api_reset_reasoning_chain(): ...
def test_openai_responses_api_auto_chain_reasoning_adds_include(): ...
def test_openai_responses_api_auto_chain_reasoning_preserves_existing_include(): ...
def test_openai_responses_api_auto_chain_reasoning_no_duplicate_include(): ...
def test_openai_responses_api_auto_chain_reasoning_prepends_to_input(): ...
def test_openai_responses_api_auto_chain_reasoning_disabled_no_include(): ...
def test_openai_responses_api_auto_chain_reasoning_disabled_no_prepend(): ...
def test_openai_responses_api_both_auto_chains_work_together(): ...
def test_openai_agent_kickoff_structured_output_without_tools(): ...
def test_openai_agent_kickoff_structured_output_with_tools(): ...
def test_openai_stop_words_not_applied_to_structured_output(): ...
def test_openai_stop_words_still_applied_to_regular_responses(): ...
def test_openai_structured_output_preserves_json_with_stop_word_patterns(): ...
def test_openai_completions_cached_prompt_tokens(): ...
def test_openai_responses_api_cached_prompt_tokens(): ...
def test_openai_streaming_cached_prompt_tokens(): ...
def test_openai_completions_cached_prompt_tokens_with_tools(): ...
def test_openai_responses_api_cached_prompt_tokens_with_tools(): ...
def test_openai_streaming_returns_tool_calls_without_available_functions(): ...
async def test_openai_async_streaming_returns_tool_calls_without_available_functions(): ...
# Task:
Write a Python test function `test_openai_get_client_params_with_env_var` to test that _get_client_params uses OPENAI_BASE_URL environment variable as fallback.
Module under test: typing, crewai.llm, crewai.llms.providers.openai.completion | def test_openai_get_client_params_with_env_var():
"""
Test that _get_client_params uses OPENAI_BASE_URL environment variable as fallback
"""
with patch.dict(os.environ, {
"OPENAI_BASE_URL": "https://env.openai.com/v1",
}):
llm = OpenAICompletion(model="gpt-4o")
client_params = llm._get_client_params()
assert client_params["base_url"] == "https://env.openai.com/v1" | test | 0 | {"function_name": "test_openai_get_client_params_with_env_var", "class_name": null, "qualname": "test_openai_get_client_params_with_env_var", "file_path": "lib/crewai/tests/llms/openai/test_openai.py", "repo_id": "crewAIInc/crewAI", "loc": 10, "tested_modules": ["typing", "crewai.llm", "crewai.llms.providers.openai.completion", "crewai.crew", "crewai.agent"], "has_docstring": true, "runnable_level": "project_runnable"} |
vllm-project/vllm:tests/entrypoints/openai/test_completion_with_function_calling.py:test_named_tool_use | # Context:
import json
import jsonschema
import openai # use the official client for correctness check
import pytest
def server(): ...
async def client(server): ...
async def test_function_tool_use(client: openai.AsyncOpenAI, model_name: str, stream: bool, tool_choice: str | dict, enable_thinking: bool): ...
def k2_server(): ...
async def k2_client(k2_server): ...
async def test_tool_id_kimi_k2(k2_client: openai.AsyncOpenAI, model_name: str, stream: bool, tool_choice: str): ...
async def test_no_args_tool_call(client: openai.AsyncOpenAI, model_name: str, arguments: str): ...
async def test_inconsistent_tool_choice_and_tools(client: openai.AsyncOpenAI, sample_json_schema): ...
# Task:
Write a Python test function `test_named_tool_use` to verify the behavior of `named_tool_use`.
Module under test: utils | async def test_named_tool_use(
client: openai.AsyncOpenAI,
sample_json_schema,
):
messages = [
{"role": "system", "content": "you are a helpful assistant"},
{
"role": "user",
"content": (
"Give an example JSON for an employee profile using the specified tool."
),
},
]
tools = [
{
"type": "function",
"function": {
"name": "dummy_function_name",
"description": "This is a dummy function",
"parameters": sample_json_schema,
},
}
]
tool_choice = {"type": "function", "function": {"name": "dummy_function_name"}}
# non-streaming
chat_completion = await client.chat.completions.create(
model=MODEL_NAME,
messages=messages,
max_completion_tokens=1000,
tools=tools,
temperature=0.0,
tool_choice=tool_choice,
)
message = chat_completion.choices[0].message
assert len(message.content) == 0
json_string = message.tool_calls[0].function.arguments
json1 = json.loads(json_string)
jsonschema.validate(instance=json1, schema=sample_json_schema)
messages.append({"role": "assistant", "content": json_string})
messages.append(
{"role": "user", "content": "Give me another one with a different name and age"}
)
# streaming
stream = await client.chat.completions.create(
model=MODEL_NAME,
messages=messages,
max_completion_tokens=1000,
tools=tools,
tool_choice=tool_choice,
temperature=0.0,
stream=True,
)
output = []
finish_reason_count = 0
async for chunk in stream:
delta = chunk.choices[0].delta
if delta.role:
assert delta.role == "assistant"
assert delta.content is None or len(delta.content) == 0
if delta.tool_calls:
output.append(delta.tool_calls[0].function.arguments)
if chunk.choices[0].finish_reason is not None:
finish_reason_count += 1
# finish reason should only return in last block
assert finish_reason_count == 1
json2 = json.loads("".join(output))
jsonschema.validate(instance=json2, schema=sample_json_schema)
assert json1["name"] != json2["name"]
assert json1["age"] != json2["age"] | test | 1 | {"function_name": "test_named_tool_use", "class_name": null, "qualname": "test_named_tool_use", "file_path": "tests/entrypoints/openai/test_completion_with_function_calling.py", "repo_id": "vllm-project/vllm", "loc": 75, "tested_modules": ["utils"], "has_docstring": false, "runnable_level": "project_runnable"} |
exo-explore/exo:src/exo/utils/info_gatherer/info_gatherer.py:_get_bridge_members | # Context:
import anyio
from loguru import logger
async def _get_thunderbolt_devices() -> set[str] | None: ...
async def _get_bridge_services() -> dict[str, str] | None: ...
async def _find_thunderbolt_bridge(bridge_services: dict[str, str], thunderbolt_devices: set[str]) -> str | None: ...
async def _is_service_enabled(service_name: str) -> bool | None: ...
class StaticNodeInformation(TaggedModel): ...
class NodeNetworkInterfaces(TaggedModel): ...
class MacThunderboltIdentifiers(TaggedModel): ...
class MacThunderboltConnections(TaggedModel): ...
class RdmaCtlStatus(TaggedModel): ...
class ThunderboltBridgeInfo(TaggedModel): ...
class NodeConfig(TaggedModel): ...
class MiscData(TaggedModel): ...
class NodeDiskUsage(TaggedModel): ...
async def _gather_iface_map() -> dict[str, str] | None: ...
class InfoGatherer: ...
# Task:
Write a Python async function `_get_bridge_members` to get member interfaces of a bridge device via ifconfig.
Parameters: bridge_device: str
Returns: set[str] | async def _get_bridge_members(bridge_device: str) -> set[str]:
"""Get member interfaces of a bridge device via ifconfig."""
result = await anyio.run_process(
["ifconfig", bridge_device],
check=False,
)
if result.returncode != 0:
logger.debug(f"ifconfig {bridge_device} failed with code {result.returncode}")
return set()
members: set[str] = set()
ifconfig_output = result.stdout.decode()
for line in ifconfig_output.splitlines():
line = line.strip()
if line.startswith("member:"):
parts = line.split()
if len(parts) > 1:
members.add(parts[1])
return members | function_complex | 0 | {"cognitive_complexity": 7, "loc": 20, "code_loc": 16, "docstring_loc": 1, "function_name": "_get_bridge_members", "class_name": null, "qualname": "_get_bridge_members", "file_path": "src/exo/utils/info_gatherer/info_gatherer.py", "repo_id": "exo-explore/exo", "has_docstring": true, "runnable_level": "project_runnable"} |
apache/airflow:task-sdk/tests/task_sdk/test_crypto.py:TestRealFernet.test_rotate_reencrypt_with_primary_key | # Context:
from cryptography.fernet import Fernet
from airflow.sdk.crypto import _NullFernet, _RealFernet, get_fernet
from cryptography.fernet import MultiFernet
class TestNullFernet: ...
class TestGetFernet: ...
class TestRealFernet:
def test_encryption(self): ...
# Task:
Write a Python test method `test_rotate_reencrypt_with_primary_key` in test class `TestRealFernet` to rotate() should re-encrypt data with the primary key.
Module under test: __future__, cryptography.fernet, airflow.sdk.crypto | def test_rotate_reencrypt_with_primary_key(self):
"""rotate() should re-encrypt data with the primary key."""
from cryptography.fernet import MultiFernet
key1 = Fernet.generate_key()
key2 = Fernet.generate_key()
# encrypt with key1 only
encrypted_with_key1 = Fernet(key1).encrypt(b"rotate test")
# MultiFernet with key2 as primary, key1 as fallback
multi = MultiFernet([Fernet(key2), Fernet(key1)])
real_fernet = _RealFernet(multi)
# rotate should re-encrypt with key2
rotated = real_fernet.rotate(encrypted_with_key1)
# key2 should be able to decrypt
assert Fernet(key2).decrypt(rotated) == b"rotate test"
assert rotated != encrypted_with_key1 | test | 1 | {"function_name": "test_rotate_reencrypt_with_primary_key", "class_name": "TestRealFernet", "qualname": "TestRealFernet.test_rotate_reencrypt_with_primary_key", "file_path": "task-sdk/tests/task_sdk/test_crypto.py", "repo_id": "apache/airflow", "loc": 20, "tested_modules": ["__future__", "cryptography.fernet", "airflow.sdk.crypto", "airflow.sdk.exceptions", "tests_common.test_utils.config"], "has_docstring": true, "runnable_level": "project_runnable"} |
Comfy-Org/ComfyUI:tests-unit/comfy_api_test/input_impl_test.py:test_container_to_output_format_empty_string | # Context:
from comfy_api.input_impl.video_types import (
container_to_output_format,
get_open_write_kwargs,
)
def test_container_to_output_format_none(): ...
def test_container_to_output_format_comma_separated(): ...
def test_container_to_output_format_single(): ...
def test_get_open_write_kwargs_filepath_no_format(): ...
def test_get_open_write_kwargs_base_options_mode(): ...
def test_get_open_write_kwargs_bytesio_auto_format(): ...
def test_get_open_write_kwargs_bytesio_specific_format(): ...
def test_get_open_write_kwargs_bytesio_specific_format_list(): ...
# Task:
Write a Python test function `test_container_to_output_format_empty_string` to test that an empty string input returns None. `None` arg allows default auto-detection.
Module under test: comfy_api.input_impl.video_types, comfy_api.util | def test_container_to_output_format_empty_string():
"""Test that an empty string input returns None. `None` arg allows default auto-detection."""
assert container_to_output_format("") is None | test | 1 | {"function_name": "test_container_to_output_format_empty_string", "class_name": null, "qualname": "test_container_to_output_format_empty_string", "file_path": "tests-unit/comfy_api_test/input_impl_test.py", "repo_id": "Comfy-Org/ComfyUI", "loc": 3, "tested_modules": ["comfy_api.input_impl.video_types", "comfy_api.util"], "has_docstring": true, "runnable_level": "project_runnable"} |
google/langextract:tests/provider_schema_test.py:ProviderSchemaDiscoveryTest:class_doc | Write a class-level docstring for `ProviderSchemaDiscoveryTest` (inherits from absltest.TestCase) which has methods: `test_gemini_returns_gemini_schema`, `test_ollama_returns_format_mode_schema`, `test_openai_returns_none`. | Tests for provider schema discovery via get_schema_class(). | documentation | 1 | {"doc_type": "class", "class_name": "ProviderSchemaDiscoveryTest", "file_path": "tests/provider_schema_test.py", "repo_id": "google/langextract", "char_length": 59, "methods": ["test_gemini_returns_gemini_schema", "test_ollama_returns_format_mode_schema", "test_openai_returns_none"]} |
apache/airflow:providers/openlineage/tests/unit/openlineage/utils/test_sql_hook_lineage.py:TestGetHookConnId.test_returns_none_when_nothing_available | # Context:
from unittest import mock
from airflow.providers.openlineage.utils.sql_hook_lineage import (
_create_ol_event_pair,
_get_hook_conn_id,
_resolve_namespace,
emit_lineage_from_sql_extras,
)
def _make_extra(sql, job_id, hook, default_db): ...
class TestResolveNamespace: ...
class TestCreateOlEventPair: ...
class TestEmitLineageFromSqlExtras: ...
class TestGetHookConnId:
def test_get_conn_id_from_method(self): ...
def test_get_conn_id_from_attribute(self): ...
# Task:
Write a Python test method `test_returns_none_when_nothing_available` in test class `TestGetHookConnId` to verify the behavior of `returns_none_when_nothing_available`.
Module under test: __future__, openlineage.client.event_v2, openlineage.client.facet_v2 | def test_returns_none_when_nothing_available(self):
hook = mock.MagicMock(spec=[])
assert _get_hook_conn_id(hook) is None | test | 1 | {"function_name": "test_returns_none_when_nothing_available", "class_name": "TestGetHookConnId", "qualname": "TestGetHookConnId.test_returns_none_when_nothing_available", "file_path": "providers/openlineage/tests/unit/openlineage/utils/test_sql_hook_lineage.py", "repo_id": "apache/airflow", "loc": 3, "tested_modules": ["__future__", "openlineage.client.event_v2", "openlineage.client.facet_v2", "airflow.providers.common.sql.hooks.lineage", "airflow.providers.openlineage.extractors.base"], "has_docstring": false, "runnable_level": "project_runnable"} |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/tavily_extractor_tool/tavily_extractor_tool.py:TavilyExtractorTool._arun | # Context:
import json
class TavilyExtractorToolSchema(BaseModel): ...
class TavilyExtractorTool(BaseTool):
model_config = ConfigDict(arbitrary_types_allowed=True)
def __init__(self, **kwargs: Any):
"""Initializes the TavilyExtractorTool.
Args:
**kwargs: Additional keyword arguments.
"""
super().__init__(**kwargs)
if TAVILY_AVAILABLE:
self.client = TavilyClient(api_key=self.api_key, proxies=self.proxies)
self.async_client = AsyncTavilyClient(
api_key=self.api_key, proxies=self.proxies
)
else:
try:
import subprocess
import click
except ImportError:
raise ImportError(
"The 'tavily-python' package is required. 'click' and 'subprocess' are also needed to assist with installation if the package is missing. "
"Please install 'tavily-python' manually (e.g., 'uv add tavily-python') and ensure 'click' and 'subprocess' are available."
) from None
if click.confirm(
"You are missing the 'tavily-python' package, which is required for TavilyExtractorTool. Would you like to install it?"
):
try:
subprocess.run(["uv pip", "install", "tavily-python"], check=True) # noqa: S607
raise ImportError(
"'tavily-python' has been installed. Please restart your Python application to use the TavilyExtractorTool."
)
except subprocess.CalledProcessError as e:
raise ImportError(
f"Attempted to install 'tavily-python' but failed: {e}. "
f"Please install it manually to use the TavilyExtractorTool."
) from e
else:
raise ImportError(
"The 'tavily-python' package is required to use the TavilyExtractorTool. "
"Please install it with: uv add tavily-python"
)
def _run(self, urls: list[str] | str) -> str: ...
# Task:
Write a Python async method `_arun` for the class `TavilyExtractorTool` to asynchronously extracts content from the given URL(s).
Parameters: urls: list[str] | str
Returns: str | async def _arun(
self,
urls: list[str] | str,
) -> str:
"""Asynchronously extracts content from the given URL(s).
Args:
urls: The URL(s) to extract data from.
Returns:
A JSON string containing the extracted data.
"""
if not self.async_client:
raise ValueError(
"Tavily async client is not initialized. Ensure 'tavily-python' is installed and API key is set."
)
results = await self.async_client.extract(
urls=urls,
extract_depth=self.extract_depth,
include_images=self.include_images,
timeout=self.timeout,
)
return json.dumps(results, indent=2) | function_simple | 0 | {"cognitive_complexity": 1, "loc": 24, "code_loc": 11, "docstring_loc": 8, "function_name": "_arun", "class_name": "TavilyExtractorTool", "qualname": "TavilyExtractorTool._arun", "file_path": "lib/crewai-tools/src/crewai_tools/tools/tavily_extractor_tool/tavily_extractor_tool.py", "repo_id": "crewAIInc/crewAI", "has_docstring": true, "runnable_level": "class_runnable"} |
ray-project/ray:python/ray/data/_internal/datasource/databricks_credentials.py:module_doc | Write a module-level docstring for the Python module `databricks_credentials` which contains class `DatabricksCredentialProvider`, class `StaticCredentialProvider`, class `EnvironmentCredentialProvider`, function `resolve_credential_provider`, function `build_headers`. | Databricks credential providers for Ray Data.
This module provides credential abstraction for Databricks authentication,
supporting static tokens with extensibility for future credential sources. | documentation | 0 | {"doc_type": "module", "module_name": "databricks_credentials", "file_path": "python/ray/data/_internal/datasource/databricks_credentials.py", "repo_id": "ray-project/ray", "char_length": 196} |
huggingface/transformers:src/transformers/models/qwen3_vl/modular_qwen3_vl.py:Qwen3VLForConditionalGeneration.forward | # Context:
import torch
from ...cache_utils import Cache, DynamicCache
from ...processing_utils import ProcessingKwargs, Unpack
from ..qwen2_vl.modeling_qwen2_vl import (
PatchEmbed,
Qwen2VLModel,
Qwen2VLModelOutputWithPast,
Qwen2VLPreTrainedModel,
TransformersKwargs,
VisionAttention,
VisionRotaryEmbedding,
)
class BaseModelOutputWithDeepstackFeatures(BaseModelOutputWithPooling): ...
class Qwen3VLVisionConfig(PreTrainedConfig): ...
class Qwen3VLTextConfig(PreTrainedConfig): ...
class Qwen3VLConfig(PreTrainedConfig): ...
class Qwen3VLVisionMLP(nn.Module): ...
class Qwen3VLVisionPatchEmbed(PatchEmbed): ...
class Qwen3VLVisionRotaryEmbedding(VisionRotaryEmbedding): ...
class Qwen3VLVisionPatchMerger(nn.Module): ...
class Qwen3VLVisionAttention(VisionAttention): ...
class Qwen3VLVisionBlock(Qwen2_5_VLVisionBlock): ...
class Qwen3VLTextRotaryEmbedding(LlamaRotaryEmbedding): ...
class Qwen3VLTextAttention(Qwen3Attention): ...
class Qwen3VLTextDecoderLayer(Qwen3DecoderLayer): ...
class Qwen3VLModelOutputWithPast(Qwen2VLModelOutputWithPast): ...
class Qwen3VLPreTrainedModel(Qwen2VLPreTrainedModel): ...
class Qwen3VLVisionModel(Qwen3VLPreTrainedModel): ...
class Qwen3VLTextModel(Qwen3VLPreTrainedModel, Qwen3Model): ...
class Qwen3VLModel(Qwen2VLModel): ...
class Qwen3VLCausalLMOutputWithPast(Qwen2_5_VLCausalLMOutputWithPast): ...
class Qwen3VLProcessorKwargs(ProcessingKwargs): ...
class Qwen3VLProcessor(Qwen2VLProcessor): ...
class Qwen3VLForConditionalGeneration(Qwen2_5_VLForConditionalGeneration):
_checkpoint_conversion_mapping = {}
def get_image_features(self, **super_kwargs) -> tuple | BaseModelOutputWithDeepstackFeatures: ...
def get_video_features(self, **super_kwargs) -> tuple | BaseModelOutputWithDeepstackFeatures: ...
def prepare_inputs_for_generation(self, input_ids, past_key_values, attention_mask, inputs_embeds, cache_position, position_ids, use_cache, pixel_values, pixel_values_videos, image_grid_thw, video_grid_thw, is_first_iteration, **kwargs): ...
def _expand_inputs_for_generation(self, expand_size: int, is_encoder_decoder: bool, input_ids: torch.LongTensor | None, **model_kwargs) -> tuple[torch.LongTensor, dict[str, Any]]: ...
# Task:
Write a Python method `forward` for the class `Qwen3VLForConditionalGeneration` to labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):.
Parameters: input_ids: torch.LongTensor, attention_mask: torch.Tensor | None, position_ids: torch.LongTensor | None, past_key_values: Cache | None, inputs_embeds: torch.FloatTensor | None, labels: torch.LongTensor | None, pixel_values: torch.Tensor | None, pixel_values_videos: torch.FloatTensor | None, image_grid_thw: torch.LongTensor | None, video_grid_thw: torch.LongTensor | None, mm_token_type_ids: torch.IntTensor | None, cache_position: torch.LongTensor | None, logits_to_keep: int | torch.Tensor
Returns: tuple | Qwen3VLCausalLMOutputWithPast | def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: torch.Tensor | None = None,
position_ids: torch.LongTensor | None = None,
past_key_values: Cache | None = None,
inputs_embeds: torch.FloatTensor | None = None,
labels: torch.LongTensor | None = None,
pixel_values: torch.Tensor | None = None,
pixel_values_videos: torch.FloatTensor | None = None,
image_grid_thw: torch.LongTensor | None = None,
video_grid_thw: torch.LongTensor | None = None,
mm_token_type_ids: torch.IntTensor | None = None,
cache_position: torch.LongTensor | None = None,
logits_to_keep: int | torch.Tensor = 0,
**kwargs: Unpack[TransformersKwargs],
) -> tuple | Qwen3VLCausalLMOutputWithPast:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
The temporal, height and width of feature shape of each image in LLM.
video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
The temporal, height and width of feature shape of each video in LLM.
Example:
```python
>>> from transformers import AutoProcessor, Qwen3VLForConditionalGeneration
>>> model = Qwen3VLForConditionalGeneration.from_pretrained("Qwen/Qwen3-VL-8B-Instruct")
>>> processor = AutoProcessor.from_pretrained("Qwen/Qwen3-VL-8B-Instruct")
>>> messages = [
{
"role": "user",
"content": [
{
"type": "image",
"image": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg",
},
{"type": "text", "text": "Describe the image."},
],
}
]
>>> inputs = processor.apply_chat_template(
messages,
tokenize=True,
add_generation_prompt=True,
return_dict=True,
return_tensors="pt"
)
>>> # Generate
>>> generated_ids = model.generate(**inputs, max_new_tokens=1024)
>>> generated_ids_trimmed = [out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)]
>>> output_text = processor.batch_decode(generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
>>> print(output_text)
```
"""
outputs = self.model(
input_ids=input_ids,
pixel_values=pixel_values,
pixel_values_videos=pixel_values_videos,
image_grid_thw=image_grid_thw,
video_grid_thw=video_grid_thw,
position_ids=position_ids,
attention_mask=attention_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
cache_position=cache_position,
mm_token_type_ids=mm_token_type_ids,
**kwargs,
)
hidden_states = outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size)
return Qwen3VLCausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
rope_deltas=outputs.rope_deltas,
) | function_simple | 0 | {"cognitive_complexity": 2, "loc": 97, "code_loc": 28, "docstring_loc": 46, "function_name": "forward", "class_name": "Qwen3VLForConditionalGeneration", "qualname": "Qwen3VLForConditionalGeneration.forward", "file_path": "src/transformers/models/qwen3_vl/modular_qwen3_vl.py", "repo_id": "huggingface/transformers", "has_docstring": true, "runnable_level": "project_runnable"} |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-azurepostgresql/llama_index/vector_stores/azure_postgres/common/aio/_connection.py:AsyncConnectionInfo:class_doc | Write a class-level docstring for `AsyncConnectionInfo` (inherits from BaseConnectionInfo) which has methods: various methods. | Base connection information for Azure Database for PostgreSQL connections.
:param host: Hostname of the Azure Database for PostgreSQL server.
:type host: str | None
:param dbname: Name of the database to connect to.
:type dbname: str
:param port: Port number for the connection.
:type port: int
:param credentials: Credentials for authentication.
:type credentials: BasicAuth | AsyncTokenCredential
:param sslmode: SSL mode for the connection.
:type sslmode: SSLMode | documentation | 1 | {"doc_type": "class", "class_name": "AsyncConnectionInfo", "file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-azurepostgresql/llama_index/vector_stores/azure_postgres/common/aio/_connection.py", "repo_id": "run-llama/llama_index", "char_length": 467, "methods": []} |
browser-use/browser-use:tests/ci/test_agent_planning.py:test_flash_mode_schema_excludes_plan_fields | # Context:
from browser_use.agent.views import (
AgentOutput,
PlanItem,
)
from browser_use.tools.service import Tools
def _make_agent_output(**overrides) -> AgentOutput: ...
def _make_agent(browser_session, mock_llm, **kwargs): ...
async def test_plan_generation_from_plan_update(browser_session, mock_llm): ...
async def test_plan_step_advancement(browser_session, mock_llm): ...
async def test_replanning_replaces_old_plan(browser_session, mock_llm): ...
async def test_render_plan_description(browser_session, mock_llm): ...
async def test_planning_disabled_returns_none(browser_session, mock_llm): ...
async def test_replan_nudge_injected_at_threshold(browser_session, mock_llm): ...
async def test_no_replan_nudge_below_threshold(browser_session, mock_llm): ...
async def test_full_mode_schema_includes_plan_fields_optional(): ...
async def test_out_of_bounds_plan_step_clamped(browser_session, mock_llm): ...
async def test_no_plan_render_returns_none(browser_session, mock_llm): ...
async def test_replan_nudge_disabled_when_zero(browser_session, mock_llm): ...
async def test_no_replan_nudge_without_plan(browser_session, mock_llm): ...
async def test_exploration_nudge_fires_after_limit(browser_session, mock_llm): ...
async def test_no_exploration_nudge_when_plan_exists(browser_session, mock_llm): ...
async def test_no_exploration_nudge_below_limit(browser_session, mock_llm): ...
async def test_exploration_nudge_disabled_when_zero(browser_session, mock_llm): ...
async def test_exploration_nudge_disabled_when_planning_off(browser_session, mock_llm): ...
async def test_flash_mode_disables_planning(browser_session, mock_llm): ...
# Task:
Write a Python test function `test_flash_mode_schema_excludes_plan_fields` to verify the behavior of `flash_mode_schema_excludes_plan_fields`.
Module under test: browser_use.agent.views, browser_use.tools.service, browser_use | async def test_flash_mode_schema_excludes_plan_fields():
tools = Tools()
ActionModel = tools.registry.create_action_model()
FlashOutput = AgentOutput.type_with_custom_actions_flash_mode(ActionModel)
schema = FlashOutput.model_json_schema()
assert 'current_plan_item' not in schema['properties']
assert 'plan_update' not in schema['properties']
assert 'thinking' not in schema['properties'] | test | 0 | {"function_name": "test_flash_mode_schema_excludes_plan_fields", "class_name": null, "qualname": "test_flash_mode_schema_excludes_plan_fields", "file_path": "tests/ci/test_agent_planning.py", "repo_id": "browser-use/browser-use", "loc": 9, "tested_modules": ["browser_use.agent.views", "browser_use.tools.service", "browser_use"], "has_docstring": false, "runnable_level": "project_runnable"} |
vllm-project/vllm:vllm/model_executor/offloader/prefetch.py:module_doc | Write a module-level docstring for the Python module `prefetch` which contains class `ParamInfo`, class `StaticBufferPool`, class `PrefetchOffloader`, class `_ModuleOffloader`, class `_BaseParamOffloader`. | Prefetch-based CPU offloading with async prefetching.
Uses static buffers and event-based stream forking for torch.compile +
CUDA graph compatibility. Events allow the copy stream to join CUDA
graph captures, ensuring H2D copies are properly captured. | documentation | 1 | {"doc_type": "module", "module_name": "prefetch", "file_path": "vllm/model_executor/offloader/prefetch.py", "repo_id": "vllm-project/vllm", "char_length": 252} |
google/langextract:langextract/data_lib.py:enum_asdict_factory | # Context:
import dataclasses
import enum
import numbers
from typing import Any, Iterable, Mapping
def annotated_document_to_dict(adoc: data.AnnotatedDocument | None) -> dict[str, Any]: ...
def dict_to_annotated_document(adoc_dic: Mapping[str, Any]) -> data.AnnotatedDocument: ...
# Task:
Write a Python function `enum_asdict_factory` to custom dict_factory for dataclasses.asdict.
Parameters: items: Iterable[tuple[str, Any]]
Returns: dict[str, Any] | def enum_asdict_factory(items: Iterable[tuple[str, Any]]) -> dict[str, Any]:
"""Custom dict_factory for dataclasses.asdict.
Recursively converts dataclass instances, converts enum values to their
underlying values, converts integral numeric types to int, and skips any
field whose name starts with an underscore.
Args:
items: An iterable of (key, value) pairs from fields of a dataclass.
Returns:
A mapping of field names to their values, with special handling for
dataclasses, enums, and numeric types.
"""
result: dict[str, Any] = {}
for key, value in items:
# Skip internal fields.
if key.startswith("_"):
continue
if dataclasses.is_dataclass(value):
result[key] = dataclasses.asdict(value, dict_factory=enum_asdict_factory)
elif isinstance(value, enum.Enum):
result[key] = value.value
elif isinstance(value, numbers.Integral) and not isinstance(value, bool):
result[key] = int(value)
else:
result[key] = value
return result | function_complex | 1 | {"cognitive_complexity": 9, "loc": 28, "code_loc": 13, "docstring_loc": 13, "function_name": "enum_asdict_factory", "class_name": null, "qualname": "enum_asdict_factory", "file_path": "langextract/data_lib.py", "repo_id": "google/langextract", "has_docstring": true, "runnable_level": "slib_runnable"} |
apache/airflow:airflow-core/src/airflow/executors/workloads/callback.py:module_doc | Write a module-level docstring for the Python module `callback` which contains class `CallbackFetchMethod`, class `CallbackDTO`, class `ExecuteCallback`, function `execute_callback_workload`. | Callback workload schemas for executor communication. | documentation | 1 | {"doc_type": "module", "module_name": "callback", "file_path": "airflow-core/src/airflow/executors/workloads/callback.py", "repo_id": "apache/airflow", "char_length": 53} |
vllm-project/vllm:tests/v1/kv_connector/unit/test_decode_bench_connector.py:test_decode_bench_connector_partial_block | # Context:
class DecodeBenchTestRunner: ...
def test_decode_bench_connector_basic(): ...
def test_decode_bench_connector_no_refill(): ...
def test_decode_bench_connector_single_token(): ...
def test_decode_bench_connector_two_tokens(): ...
def test_decode_bench_connector_large_context(): ...
def test_decode_bench_connector_multiple_requests(): ...
def test_decode_bench_connector_concurrent_requests(): ...
# Task:
Write a Python test function `test_decode_bench_connector_partial_block` to test DecodeBenchConnector with partial block filling.
Module under test: vllm, vllm.config, vllm.distributed.kv_transfer.kv_connector.v1 | def test_decode_bench_connector_partial_block():
"""Test DecodeBenchConnector with partial block filling."""
block_size = 16
num_gpu_blocks = 100
runner = DecodeBenchTestRunner(block_size=block_size, num_gpu_blocks=num_gpu_blocks)
# Create a request that doesn't align to block boundaries
# e.g., 2.5 blocks worth of tokens
num_tokens = block_size * 2 + block_size // 2
token_ids = [1] * num_tokens
req = runner.new_request(token_ids)
# Run step
_, metadata = runner.run_single_step()
assert len(metadata.reqs_to_fill) == 1
assert req.request_id in metadata.reqs_to_fill
block_ids_per_group, num_tokens_to_fill = metadata.reqs_to_fill[req.request_id]
# Should fill all tokens except the last one
expected_fill_tokens = num_tokens - 1
assert num_tokens_to_fill == expected_fill_tokens
# For standard attention, there's only one group
assert len(block_ids_per_group) == 1
block_ids = block_ids_per_group[0]
# Should allocate 3 blocks to hold the partial data
expected_num_blocks = 3
assert len(block_ids) == expected_num_blocks | test | 1 | {"function_name": "test_decode_bench_connector_partial_block", "class_name": null, "qualname": "test_decode_bench_connector_partial_block", "file_path": "tests/v1/kv_connector/unit/test_decode_bench_connector.py", "repo_id": "vllm-project/vllm", "loc": 33, "tested_modules": ["vllm", "vllm.config", "vllm.distributed.kv_transfer.kv_connector.v1", "vllm.distributed.kv_transfer.kv_connector.v1.decode_bench_connector", "vllm.forward_context"], "has_docstring": true, "runnable_level": "file_runnable"} |
github/spec-kit:tests/test_ai_skills.py:TestNewProjectCommandSkip.test_new_project_commands_removed_after_skills_succeed | # Context:
from unittest.mock import patch
from specify_cli import (
_get_skills_dir,
install_ai_skills,
AGENT_SKILLS_DIR_OVERRIDES,
DEFAULT_SKILLS_DIR,
SKILL_DESCRIPTIONS,
AGENT_CONFIG,
app,
)
from typer.testing import CliRunner
def temp_dir(): ...
def project_dir(temp_dir): ...
def templates_dir(project_dir): ...
def commands_dir_claude(project_dir): ...
def commands_dir_gemini(project_dir): ...
class TestGetSkillsDir: ...
class TestInstallAiSkills: ...
class TestCommandCoexistence: ...
class TestSkipIfExists: ...
class TestSkillDescriptions: ...
class TestCliValidation: ...
class TestParameterOrderingIssue: ...
class TestNewProjectCommandSkip:
def _fake_extract(self, agent, project_path, **_kwargs): ...
def test_commands_preserved_when_skills_fail(self, tmp_path): ...
def test_here_mode_commands_preserved(self, tmp_path, monkeypatch): ...
# Task:
Write a Python test method `test_new_project_commands_removed_after_skills_succeed` in test class `TestNewProjectCommandSkip` to for new projects, commands should be removed when skills succeed.
Module under test: pathlib, specify_cli, typer.testing | def test_new_project_commands_removed_after_skills_succeed(self, tmp_path):
"""For new projects, commands should be removed when skills succeed."""
from typer.testing import CliRunner
runner = CliRunner()
target = tmp_path / "new-proj"
def fake_download(project_path, *args, **kwargs):
self._fake_extract("claude", project_path)
with patch("specify_cli.download_and_extract_template", side_effect=fake_download), \
patch("specify_cli.ensure_executable_scripts"), \
patch("specify_cli.ensure_constitution_from_template"), \
patch("specify_cli.install_ai_skills", return_value=True) as mock_skills, \
patch("specify_cli.is_git_repo", return_value=False), \
patch("specify_cli.shutil.which", return_value="/usr/bin/git"):
result = runner.invoke(app, ["init", str(target), "--ai", "claude", "--ai-skills", "--script", "sh", "--no-git"])
# Skills should have been called
mock_skills.assert_called_once()
# Commands dir should have been removed after skills succeeded
cmds_dir = target / ".claude" / "commands"
assert not cmds_dir.exists() | test | 0 | {"function_name": "test_new_project_commands_removed_after_skills_succeed", "class_name": "TestNewProjectCommandSkip", "qualname": "TestNewProjectCommandSkip.test_new_project_commands_removed_after_skills_succeed", "file_path": "tests/test_ai_skills.py", "repo_id": "github/spec-kit", "loc": 24, "tested_modules": ["pathlib", "specify_cli", "typer.testing", "typer.testing", "typer.testing"], "has_docstring": true, "runnable_level": "project_runnable"} |
infiniflow/ragflow:test/unit_test/common/test_string_utils.py:TestRemoveRedundantSpaces.test_currency_symbols | # Context:
import pytest
from common.string_utils import remove_redundant_spaces, clean_markdown_block
class TestCleanMarkdownBlock: ...
class TestRemoveRedundantSpaces:
def test_remove_spaces_before_commas(self): ...
def test_remove_spaces_before_periods(self): ...
def test_remove_spaces_before_exclamation(self): ...
def test_remove_spaces_after_opening_parenthesis(self): ...
def test_remove_spaces_before_closing_parenthesis(self): ...
def test_keep_spaces_between_words(self): ...
def test_mixed_punctuation(self): ...
def test_with_numbers(self): ...
def test_decimal_numbers(self): ...
def test_time_format(self): ...
def test_empty_string(self): ...
def test_only_spaces(self): ...
def test_no_redundant_spaces(self): ...
def test_multiple_spaces(self): ...
def test_angle_brackets(self): ...
def test_case_insensitive(self): ...
def test_semicolon_and_colon(self): ...
def test_quotation_marks(self): ...
def test_abbreviations(self): ...
def test_multiple_punctuation(self): ...
def test_email_addresses(self): ...
def test_urls(self): ...
def test_hashtags_and_mentions(self): ...
def test_nested_parentheses(self): ...
def test_math_expressions(self): ...
def test_html_tags(self): ...
def test_programming_code(self): ...
def test_unicode_and_special_symbols(self): ...
def test_mixed_chinese_english(self): ...
def test_special_characters_in_pattern(self): ...
def test_tabs_and_newlines(self): ...
# Task:
Write a Python test method `test_currency_symbols` in test class `TestRemoveRedundantSpaces` to test currency symbols.
Module under test: common.string_utils | def test_currency_symbols(self):
"""Test currency symbols"""
input_text = "Price : € 100 , £ 50 , ¥ 1000 ."
expected = "Price: €100, £50, ¥1000."
assert remove_redundant_spaces(input_text) == expected | test | 1 | {"function_name": "test_currency_symbols", "class_name": "TestRemoveRedundantSpaces", "qualname": "TestRemoveRedundantSpaces.test_currency_symbols", "file_path": "test/unit_test/common/test_string_utils.py", "repo_id": "infiniflow/ragflow", "loc": 5, "tested_modules": ["common.string_utils"], "has_docstring": true, "runnable_level": "project_runnable"} |
langchain-ai/langchain:libs/partners/openrouter/langchain_openrouter/chat_models.py:ChatOpenRouter.lc_secrets | Write a Python method `lc_secrets` for the class `ChatOpenRouter` to a map of constructor argument names to secret ids.
Returns: dict[str, str] | def lc_secrets(self) -> dict[str, str]:
"""A map of constructor argument names to secret ids."""
return {"openrouter_api_key": "OPENROUTER_API_KEY"} | function_simple | 1 | {"cognitive_complexity": 0, "loc": 3, "code_loc": 1, "docstring_loc": 1, "function_name": "lc_secrets", "class_name": "ChatOpenRouter", "qualname": "ChatOpenRouter.lc_secrets", "file_path": "libs/partners/openrouter/langchain_openrouter/chat_models.py", "repo_id": "langchain-ai/langchain", "has_docstring": true, "runnable_level": "self_contained"} |
langflow-ai/langflow:src/backend/tests/unit/components/bundles/cometapi/test_cometapi_component.py:TestCometAPIComponent.test_build_model_integration | # Context:
import os
import pytest
from langchain_openai import ChatOpenAI
from lfx.components.cometapi.cometapi import CometAPIComponent
from pydantic.v1 import SecretStr
class TestCometAPIComponent(ComponentTestBaseWithoutClient):
def component_class(self): ...
def default_kwargs(self): ...
def file_names_mapping(self): ...
def test_basic_setup(self, component_class, default_kwargs): ...
def test_build_model_success(self, mock_chat_openai, component_class, default_kwargs): ...
def test_build_model_with_json_mode(self, mock_chat_openai, component_class, default_kwargs): ...
def test_build_model_with_streaming(self, mock_chat_openai, component_class, default_kwargs): ...
def test_build_model_invalid_model_selection(self, component_class, default_kwargs): ...
def test_build_model_exception_handling(self, mock_chat_openai, component_class, default_kwargs): ...
def test_get_models_success(self, mock_get, component_class, default_kwargs): ...
def test_get_models_json_decode_error(self, mock_get, component_class, default_kwargs): ...
def test_get_models_integration(self): ...
def test_component_inputs_structure(self, component_class): ...
def test_component_input_requirements(self, component_class): ...
def test_component_input_types(self, component_class): ...
# Task:
Write a Python test method `test_build_model_integration` in test class `TestCometAPIComponent` to integration test with real API key (if available).
Module under test: langchain_openai, lfx.components.cometapi.cometapi, pydantic.v1 | def test_build_model_integration(self):
"""Integration test with real API key (if available)."""
component = CometAPIComponent()
component.api_key = SecretStr(os.getenv("COMETAPI_KEY"))
component.model_name = "gpt-4o-mini"
component.temperature = 0.2
component.max_tokens = 100
component.seed = 42
model = component.build_model()
assert isinstance(model, ChatOpenAI)
assert model.model_name == "gpt-4o-mini"
assert model.openai_api_base == "https://api.cometapi.com/v1" | test | 1 | {"function_name": "test_build_model_integration", "class_name": "TestCometAPIComponent", "qualname": "TestCometAPIComponent.test_build_model_integration", "file_path": "src/backend/tests/unit/components/bundles/cometapi/test_cometapi_component.py", "repo_id": "langflow-ai/langflow", "loc": 13, "tested_modules": ["langchain_openai", "lfx.components.cometapi.cometapi", "pydantic.v1", "tests.base", "lfx.base.models.cometapi_constants"], "has_docstring": true, "runnable_level": "project_runnable"} |
sansan0/TrendRadar:trendradar/crawler/fetcher.py:DataFetcher.fetch_data | # Context:
import json
import random
import time
from typing import Dict, List, Tuple, Optional, Union
import requests
class DataFetcher:
DEFAULT_API_URL = "https://newsnow.busiyi.world/api/s"
DEFAULT_HEADERS = {
def __init__(
self,
proxy_url: Optional[str] = None,
api_url: Optional[str] = None,
):
"""
初始化数据获取器
Args:
proxy_url: 代理服务器 URL(可选)
api_url: API 基础 URL(可选,默认使用 DEFAULT_API_URL)
"""
self.proxy_url = proxy_url
self.api_url = api_url or self.DEFAULT_API_URL
def crawl_websites(self, ids_list: List[Union[str, Tuple[str, str]]], request_interval: int) -> Tuple[Dict, Dict, List]: ...
# Task:
Write a Python method `fetch_data` for the class `DataFetcher` to 获取指定ID数据,支持重试.
Parameters: id_info: Union[str, Tuple[str, str]], max_retries: int, min_retry_wait: int, max_retry_wait: int
Returns: Tuple[Optional[str], str, str] | def fetch_data(
self,
id_info: Union[str, Tuple[str, str]],
max_retries: int = 2,
min_retry_wait: int = 3,
max_retry_wait: int = 5,
) -> Tuple[Optional[str], str, str]:
"""
获取指定ID数据,支持重试
Args:
id_info: 平台ID 或 (平台ID, 别名) 元组
max_retries: 最大重试次数
min_retry_wait: 最小重试等待时间(秒)
max_retry_wait: 最大重试等待时间(秒)
Returns:
(响应文本, 平台ID, 别名) 元组,失败时响应文本为 None
"""
if isinstance(id_info, tuple):
id_value, alias = id_info
else:
id_value = id_info
alias = id_value
url = f"{self.api_url}?id={id_value}&latest"
proxies = None
if self.proxy_url:
proxies = {"http": self.proxy_url, "https": self.proxy_url}
retries = 0
while retries <= max_retries:
try:
response = requests.get(
url,
proxies=proxies,
headers=self.DEFAULT_HEADERS,
timeout=10,
)
response.raise_for_status()
data_text = response.text
data_json = json.loads(data_text)
status = data_json.get("status", "未知")
if status not in ["success", "cache"]:
raise ValueError(f"响应状态异常: {status}")
status_info = "最新数据" if status == "success" else "缓存数据"
print(f"获取 {id_value} 成功({status_info})")
return data_text, id_value, alias
except Exception as e:
retries += 1
if retries <= max_retries:
base_wait = random.uniform(min_retry_wait, max_retry_wait)
additional_wait = (retries - 1) * random.uniform(1, 2)
wait_time = base_wait + additional_wait
print(f"请求 {id_value} 失败: {e}. {wait_time:.2f}秒后重试...")
time.sleep(wait_time)
else:
print(f"请求 {id_value} 失败: {e}")
return None, id_value, alias
return None, id_value, alias | function_complex | 1 | {"cognitive_complexity": 15, "loc": 66, "code_loc": 39, "docstring_loc": 12, "function_name": "fetch_data", "class_name": "DataFetcher", "qualname": "DataFetcher.fetch_data", "file_path": "trendradar/crawler/fetcher.py", "repo_id": "sansan0/TrendRadar", "has_docstring": true, "runnable_level": "class_runnable"} |
exo-explore/exo:packaging/dmg/generate-background.py:draw_arrow | # Context:
import math
from PIL import Image, ImageDraw
def generate_background(output_path: str) -> None: ...
# Task:
Write a Python function `draw_arrow` to draw a hand-drawn-style curved arrow from app icon toward Applications.
Parameters: draw: ImageDraw.ImageDraw
Returns: None | def draw_arrow(draw: ImageDraw.ImageDraw) -> None:
"""Draw a hand-drawn-style curved arrow from app icon toward Applications."""
color = (30, 30, 30)
line_width = 8
# Compute bezier curve points for a gentle upward arc
points: list[tuple[float, float]] = []
steps = 80
for i in range(steps + 1):
t = i / steps
# Quadratic bezier: start → control → end
cx = (ARROW_START_X + ARROW_END_X) / 2
cy = ARROW_Y - ARROW_RISE
x = (1 - t) ** 2 * ARROW_START_X + 2 * (1 - t) * t * cx + t**2 * ARROW_END_X
y = (1 - t) ** 2 * ARROW_Y + 2 * (1 - t) * t * cy + t**2 * ARROW_Y
points.append((x, y))
# Draw the curve as connected line segments
for i in range(len(points) - 1):
draw.line([points[i], points[i + 1]], fill=color, width=line_width)
# Arrowhead at the end
end_x, end_y = points[-1]
# Direction from second-to-last to last point
prev_x, prev_y = points[-3]
angle = math.atan2(end_y - prev_y, end_x - prev_x)
head_len = 36
head_angle = math.radians(25)
left_x = end_x - head_len * math.cos(angle - head_angle)
left_y = end_y - head_len * math.sin(angle - head_angle)
right_x = end_x - head_len * math.cos(angle + head_angle)
right_y = end_y - head_len * math.sin(angle + head_angle)
draw.polygon(
[(end_x, end_y), (left_x, left_y), (right_x, right_y)],
fill=color,
) | function_simple | 0 | {"cognitive_complexity": 2, "loc": 38, "code_loc": 26, "docstring_loc": 1, "function_name": "draw_arrow", "class_name": null, "qualname": "draw_arrow", "file_path": "packaging/dmg/generate-background.py", "repo_id": "exo-explore/exo", "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:doc/source/llm/doc_code/serve/qwen/qwen_example.py:module_doc | Write a module-level docstring for the Python module `qwen_example` which contains function `_non_blocking_serve_run`, function `_testing_build_openai_app`. | This file serves as a documentation example and CI test.
Structure:
1. Monkeypatch setup: Ensures serve.run is non-blocking and removes accelerator requirements for CI testing.
2. Docs example (between __qwen_example_start/end__): Embedded in Sphinx docs via literalinclude.
3. Test validation (deployment status polling + cleanup) | documentation | 0 | {"doc_type": "module", "module_name": "qwen_example", "file_path": "doc/source/llm/doc_code/serve/qwen/qwen_example.py", "repo_id": "ray-project/ray", "char_length": 332} |
langflow-ai/langflow:src/backend/tests/unit/api/v1/test_mcp_projects.py:test_update_project_mcp_settings_empty_settings | # Context:
from httpx import AsyncClient
def test_args_reference_urls_filters_strings_only(args, urls, expected): ...
def test_args_reference_urls_matches_non_last_string_argument(): ...
def mock_project(active_user): ...
def mock_flow(active_user, mock_project): ...
def mock_project_mcp_server(): ...
class AsyncContextManagerMock: ...
def mock_sse_transport(): ...
def mock_streamable_http_manager(): ...
def mock_current_user_ctx(active_user): ...
def mock_current_project_ctx(mock_project): ...
async def other_test_user(): ...
async def other_test_project(other_test_user): ...
def disable_mcp_composer_by_default(): ...
def enable_mcp_composer(): ...
async def test_handle_project_streamable_messages_success(client: AsyncClient, user_test_project, mock_streamable_http_manager, logged_in_headers): ...
async def test_handle_project_messages_success(client: AsyncClient, user_test_project, mock_sse_transport, logged_in_headers): ...
async def test_update_project_mcp_settings_invalid_json(client: AsyncClient, user_test_project, logged_in_headers): ...
async def test_flow_for_update(active_user, user_test_project): ...
async def test_update_project_mcp_settings_success(client: AsyncClient, user_test_project, test_flow_for_update, logged_in_headers): ...
async def test_update_project_mcp_settings_invalid_project(client: AsyncClient, logged_in_headers): ...
async def test_update_project_mcp_settings_other_user_project(client: AsyncClient, other_test_project, logged_in_headers): ...
async def test_update_project_mcp_settings_other_user_project_with_composer(client: AsyncClient, other_test_project, logged_in_headers, enable_mcp_composer): ...
async def test_user_can_only_access_own_projects(client: AsyncClient, other_test_project, logged_in_headers): ...
async def test_user_data_isolation_with_real_db(client: AsyncClient, logged_in_headers, other_test_user, other_test_project): ...
async def user_test_project(active_user): ...
async def user_test_flow(active_user, user_test_project): ...
async def test_user_can_update_own_flow_mcp_settings(client: AsyncClient, logged_in_headers, user_test_project, user_test_flow): ...
async def test_update_project_auth_settings_encryption(client: AsyncClient, user_test_project, test_flow_for_update, logged_in_headers): ...
async def test_project_sse_creation(user_test_project): ...
async def test_project_session_manager_lifespan_handles_cleanup(user_test_project, monkeypatch): ...
def _prepare_install_test_env(monkeypatch, tmp_path, filename): ...
async def test_install_mcp_config_defaults_to_sse_transport(client: AsyncClient, user_test_project, logged_in_headers, tmp_path, monkeypatch): ...
async def test_install_mcp_config_streamable_transport(client: AsyncClient, user_test_project, logged_in_headers, tmp_path, monkeypatch): ...
async def test_init_mcp_servers(user_test_project, other_test_project): ...
async def test_init_mcp_servers_error_handling(): ...
async def test_init_mcp_servers_error_handling_streamable(): ...
async def test_list_project_tools_with_mcp_enabled_filter(client: AsyncClient, user_test_project, active_user, logged_in_headers): ...
async def test_list_project_tools_response_structure(client: AsyncClient, user_test_project, logged_in_headers): ...
async def test_mcp_longterm_token_fails_without_superuser(): ...
# Task:
Write a Python test function `test_update_project_mcp_settings_empty_settings` to test updating MCP settings with empty settings list.
Module under test: contextlib, types, uuid | async def test_update_project_mcp_settings_empty_settings(client: AsyncClient, user_test_project, logged_in_headers):
"""Test updating MCP settings with empty settings list."""
# Use real database objects instead of mocks to avoid the coroutine issue
# Empty settings list
json_payload = {
"settings": [],
"auth_settings": {
"auth_type": "none",
"api_key": None,
"iam_endpoint": None,
"username": None,
"password": None,
"bearer_token": None,
},
}
# Make the request to the actual endpoint
response = await client.patch(
f"api/v1/mcp/project/{user_test_project.id}", headers=logged_in_headers, json=json_payload
)
# Verify response - the real endpoint should handle empty settings correctly
assert response.status_code == 200
assert "Updated MCP settings for 0 flows" in response.json()["message"] | test | 1 | {"function_name": "test_update_project_mcp_settings_empty_settings", "class_name": null, "qualname": "test_update_project_mcp_settings_empty_settings", "file_path": "src/backend/tests/unit/api/v1/test_mcp_projects.py", "repo_id": "langflow-ai/langflow", "loc": 25, "tested_modules": ["contextlib", "types", "uuid", "fastapi", "httpx"], "has_docstring": true, "runnable_level": "project_runnable"} |
run-llama/llama_index:llama-index-core/llama_index/core/memory/memory.py:Memory.aput_messages | # Context:
from typing import (
Any,
Callable,
Dict,
List,
Optional,
Tuple,
Union,
TypeVar,
Generic,
cast,
)
from llama_index.core.base.llms.types import (
ChatMessage,
ContentBlock,
TextBlock,
AudioBlock,
ImageBlock,
VideoBlock,
DocumentBlock,
CachePoint,
CitableBlock,
CitationBlock,
ThinkingBlock,
ToolCallBlock,
)
from llama_index.core.storage.chat_store.sql import SQLAlchemyChatStore, MessageStatus
class InsertMethod(Enum): ...
def generate_chat_store_key() -> str: ...
def get_default_chat_store() -> SQLAlchemyChatStore: ...
class BaseMemoryBlock(BaseModel, Generic[T]): ...
class Memory(BaseMemory):
model_config = ConfigDict(arbitrary_types_allowed=True)
def class_name(cls) -> str: ...
def validate_memory(cls, values: dict) -> dict: ...
def from_defaults(cls, session_id: Optional[str], chat_history: Optional[List[ChatMessage]], token_limit: int, memory_blocks: Optional[List[BaseMemoryBlock[Any]]], tokenizer_fn: Optional[Callable[[str], List]], chat_history_token_ratio: float, token_flush_size: int, memory_blocks_template: RichPromptTemplate, insert_method: InsertMethod, image_token_size_estimate: int, audio_token_size_estimate: int, video_token_size_estimate: int, table_name: str, async_database_uri: Optional[str], async_engine: Optional[AsyncEngine], db_schema: Optional[str]) -> 'Memory': ...
def _estimate_token_count(self, message_or_blocks: Union[str, ChatMessage, List[ChatMessage], List[ContentBlock]]) -> int: ...
async def _get_memory_blocks_content(self, chat_history: List[ChatMessage], input: Optional[Union[str, ChatMessage]], **block_kwargs) -> Dict[str, Any]: ...
async def _truncate_memory_blocks(self, content_per_memory_block: Dict[str, Any], memory_blocks_tokens: int, chat_history_tokens: int) -> Dict[str, Any]: ...
async def _format_memory_blocks(self, content_per_memory_block: Dict[str, Any]) -> Tuple[List[Tuple[str, List[ContentBlock]]], List[ChatMessage]]: ...
def _insert_memory_content(self, chat_history: List[ChatMessage], memory_content: List[ContentBlock], chat_message_data: List[ChatMessage]) -> List[ChatMessage]: ...
async def aget(self, input: Optional[Union[str, ChatMessage]], **block_kwargs) -> List[ChatMessage]: ...
async def _manage_queue(self) -> None: ...
async def aput(self, message: ChatMessage) -> None: ...
async def aset(self, messages: List[ChatMessage]) -> None: ...
async def aget_all(self, status: Optional[MessageStatus]) -> List[ChatMessage]: ...
async def areset(self, status: Optional[MessageStatus]) -> None: ...
def get(self, input: Optional[Union[str, ChatMessage]], **block_kwargs) -> List[ChatMessage]: ...
def get_all(self, status: Optional[MessageStatus]) -> List[ChatMessage]: ...
def put(self, message: ChatMessage) -> None: ...
def put_messages(self, messages: List[ChatMessage]) -> None: ...
def set(self, messages: List[ChatMessage]) -> None: ...
def reset(self) -> None: ...
# Task:
Write a Python async method `aput_messages` for the class `Memory` to add a list of messages to the chat store and process waterfall logic if needed.
Parameters: messages: List[ChatMessage]
Returns: None | async def aput_messages(self, messages: List[ChatMessage]) -> None:
"""Add a list of messages to the chat store and process waterfall logic if needed."""
# Add the messages to the chat store
await self.sql_store.add_messages(
self.session_id, messages, status=MessageStatus.ACTIVE
)
# Ensure the active queue is managed
await self._manage_queue() | function_simple | 1 | {"cognitive_complexity": 0, "loc": 9, "code_loc": 4, "docstring_loc": 1, "function_name": "aput_messages", "class_name": "Memory", "qualname": "Memory.aput_messages", "file_path": "llama-index-core/llama_index/core/memory/memory.py", "repo_id": "run-llama/llama_index", "has_docstring": true, "runnable_level": "project_runnable"} |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/mrkl/base.py:ChainConfig:class_doc | Write a class-level docstring for `ChainConfig` (inherits from NamedTuple) which has methods: various methods. | Configuration for a chain to use in MRKL system.
Args:
action_name: Name of the action.
action: Action function to call.
action_description: Description of the action. | documentation | 1 | {"doc_type": "class", "class_name": "ChainConfig", "file_path": "libs/langchain/langchain_classic/agents/mrkl/base.py", "repo_id": "langchain-ai/langchain", "char_length": 180, "methods": []} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.