| sample_id (string) | instruction (string) | completion (string) | category (string) | label (int64) | metadata (string) |
|---|---|---|---|---|---|
vllm-project/vllm:tests/entrypoints/test_responses_utils.py:TestShouldContinueFinalMessage.test_dict_without_status_returns_false | # Context:
from vllm.entrypoints.openai.responses.utils import (
_construct_single_message_from_response_item,
_maybe_combine_reasoning_and_tool_call,
construct_chat_messages_with_tool_call,
convert_tool_responses_to_completions_format,
should_continue_final_message,
)
class TestResponsesUtils: ...... | def test_dict_without_status_returns_false(self):
"""Dict without status field should not be continued."""
dict_item = {
"id": "msg_123",
"type": "message",
"role": "assistant",
"content": [{"type": "output_text", "text": "Some text"}],
}
a... | test | 1 | {"function_name": "test_dict_without_status_returns_false", "class_name": "TestShouldContinueFinalMessage", "qualname": "TestShouldContinueFinalMessage.test_dict_without_status_returns_false", "file_path": "tests/entrypoints/test_responses_utils.py", "repo_id": "vllm-project/vllm", "loc": 9, "tested_modules": ["openai.... |
ray-project/ray:release/train_tests/pytorch_lightning/test_lightning.py:test_lightning_train_run | # Context:
import os
from ray.train.torch import TorchTrainer
class ImageClassifier(pl.LightningModule): ...
def train_func(): ...
# Task:
Write a Python test function `test_lightning_train_run` to verify the behavior of `lightning_train_run`.
Module under test: torch.utils.data, torchvision.models, torchvision.data... | def test_lightning_train_run():
# [2] Configure scaling and resource requirements.
scaling_config = ray.train.ScalingConfig(num_workers=4, use_gpu=True)
# [3] Launch distributed training job.
trainer = TorchTrainer(
train_func,
scaling_config=scaling_config,
# [3a] If running in... | test | 0 | {"function_name": "test_lightning_train_run", "class_name": null, "qualname": "test_lightning_train_run", "file_path": "release/train_tests/pytorch_lightning/test_lightning.py", "repo_id": "ray-project/ray", "loc": 25, "tested_modules": ["torch.utils.data", "torchvision.models", "torchvision.datasets", "torchvision.tra... |
apache/airflow:providers/teradata/src/airflow/providers/teradata/utils/tpt_util.py:decrypt_remote_file | # Context:
import logging
from paramiko import SSHClient
class TPTConfig: ...
def execute_remote_command(ssh_client: SSHClient, command: str) -> tuple[int, str, str]: ...
def write_file(path: str, content: str) -> None: ...
def secure_delete(file_path: str, logger: logging.Logger | None) -> None: ...
def remote_secure... | def decrypt_remote_file(
ssh_client: SSHClient,
remote_enc_file: str,
remote_dec_file: str,
password: str,
logger: logging.Logger | None = None,
) -> int:
"""
Decrypt a remote file using OpenSSL.
:param ssh_client: SSH client connection
:param remote_enc_file: Path to the encrypted ... | function_complex | 1 | {"cognitive_complexity": 6, "loc": 48, "code_loc": 22, "docstring_loc": 11, "function_name": "decrypt_remote_file", "class_name": null, "qualname": "decrypt_remote_file", "file_path": "providers/teradata/src/airflow/providers/teradata/utils/tpt_util.py", "repo_id": "apache/airflow", "has_docstring": true, "runnable_lev... |
ray-project/ray:doc/source/serve/tutorials/video-analysis/deployments/decoder.py:MultiDecoder._load_embeddings | # Context:
import io
import aioboto3
import numpy as np
from utils.s3 import get_s3_region
class MultiDecoder:
async def __init__(self, bucket: str, s3_prefix: str = S3_EMBEDDINGS_PREFIX):
"""Initialize decoder with text embeddings from S3."""
self.bucket = bucket
self.ema_alpha = EMA_ALPHA... | async def _load_embeddings(self):
"""Load precomputed text embeddings from S3."""
session = aioboto3.Session(region_name=get_s3_region(self.bucket))
async with session.client("s3") as s3:
# Load tag embeddings
tag_key = f"{self.s3_prefix}tag_embeddings.npz"
... | function_simple | 0 | {"cognitive_complexity": 0, "loc": 20, "code_loc": 14, "docstring_loc": 1, "function_name": "_load_embeddings", "class_name": "MultiDecoder", "qualname": "MultiDecoder._load_embeddings", "file_path": "doc/source/serve/tutorials/video-analysis/deployments/decoder.py", "repo_id": "ray-project/ray", "has_docstring": true,... |
crewAIInc/crewAI:lib/crewai/tests/utilities/test_agent_utils.py:TestSplitMessagesIntoChunks.test_single_chunk_when_under_limit | # Context:
from typing import Any, Literal, Optional
from crewai.utilities.agent_utils import (
_asummarize_chunks,
_estimate_token_count,
_extract_summary_tags,
_format_messages_for_summary,
_split_messages_into_chunks,
convert_tools_to_openai_schema,
parse_tool_call_args,
summarize_mes... | def test_single_chunk_when_under_limit(self) -> None:
messages: list[dict[str, Any]] = [
{"role": "user", "content": "Hello"},
{"role": "assistant", "content": "Hi"},
]
chunks = _split_messages_into_chunks(messages, max_tokens=1000)
assert len(chunks) == 1
... | test | 0 | {"function_name": "test_single_chunk_when_under_limit", "class_name": "TestSplitMessagesIntoChunks", "qualname": "TestSplitMessagesIntoChunks.test_single_chunk_when_under_limit", "file_path": "lib/crewai/tests/utilities/test_agent_utils.py", "repo_id": "crewAIInc/crewAI", "loc": 8, "tested_modules": ["__future__", "typ... |
sansan0/TrendRadar:trendradar/core/scheduler.py:Scheduler._in_range | Write a Python method `_in_range` for the class `Scheduler` to 检查时间是否在范围内(支持跨日).
Parameters: now_hhmm: str, start: str, end: str
Returns: bool | def _in_range(now_hhmm: str, start: str, end: str) -> bool:
"""
检查时间是否在范围内(支持跨日)
Args:
now_hhmm: 当前时间 HH:MM
start: 开始时间 HH:MM
end: 结束时间 HH:MM
Returns:
是否在范围内
"""
if start <= end:
# 正常范围,如 08:00-09:00
... | function_simple | 1 | {"cognitive_complexity": 3, "loc": 18, "code_loc": 4, "docstring_loc": 11, "function_name": "_in_range", "class_name": "Scheduler", "qualname": "Scheduler._in_range", "file_path": "trendradar/core/scheduler.py", "repo_id": "sansan0/TrendRadar", "has_docstring": true, "runnable_level": "self_contained"} |
mem0ai/mem0:mem0/vector_stores/neptune_analytics.py:NeptuneAnalyticsVector.list | # Context:
from typing import Dict, List, Optional
class OutputData(BaseModel): ...
class NeptuneAnalyticsVector(VectorStoreBase):
_COLLECTION_PREFIX = "MEM0_VECTOR_"
_FIELD_N = 'n'
_FIELD_ID = '~id'
_FIELD_PROP = '~properties'
_FIELD_SCORE = 'score'
_FIELD_LABEL = 'label'
_TIMEZONE = "UT... | def list(self, filters: Optional[Dict] = None, limit: int = 100) -> List[OutputData]:
"""
List all vectors in the collection with optional filtering.
Retrieves vectors from the collection, optionally filtered by metadata properties.
Args:
filters (Optional[D... | function_simple | 1 | {"cognitive_complexity": 2, "loc": 30, "code_loc": 14, "docstring_loc": 12, "function_name": "list", "class_name": "NeptuneAnalyticsVector", "qualname": "NeptuneAnalyticsVector.list", "file_path": "mem0/vector_stores/neptune_analytics.py", "repo_id": "mem0ai/mem0", "has_docstring": true, "runnable_level": "file_runnabl... |
fastapi/fastapi:tests/test_sse.py:test_post_method_sse | # Context:
from fastapi.testclient import TestClient
class Item(BaseModel): ...
async def sse_items() -> AsyncIterable[Item]: ...
def sse_items_sync() -> Iterable[Item]: ...
async def sse_items_no_annotation(): ...
def sse_items_sync_no_annotation(): ...
async def sse_items_dict(): ...
async def sse_items_event(): ...... | def test_post_method_sse(client: TestClient):
"""SSE should work with POST (needed for MCP compatibility)."""
response = client.post("/items/stream-post")
assert response.status_code == 200
assert response.headers["content-type"] == "text/event-stream; charset=utf-8"
data_lines = [
line for ... | test | 1 | {"function_name": "test_post_method_sse", "class_name": null, "qualname": "test_post_method_sse", "file_path": "tests/test_sse.py", "repo_id": "fastapi/fastapi", "loc": 9, "tested_modules": ["collections.abc", "fastapi", "fastapi.responses", "fastapi.sse", "fastapi.testclient"], "has_docstring": true, "runnable_level":... |
langflow-ai/langflow:src/backend/tests/integration/test_openai_responses_extended.py:test_openai_responses_long_input | # Context:
import pytest
from httpx import AsyncClient
def load_env_vars(): ...
async def create_global_variable(client: AsyncClient, headers, name, value, variable_type): ...
async def load_and_prepare_flow(client: AsyncClient, created_api_key): ...
async def load_and_prepare_agent_flow(client: AsyncClient, created_a... | async def test_openai_responses_long_input(client: AsyncClient, created_api_key):
"""Test the OpenAI responses endpoint with very long input."""
flow, headers = await load_and_prepare_flow(client, created_api_key)
# Create a very long input
long_input = "Hello " * 1000 # ~6000 characters
payload =... | test | 1 | {"function_name": "test_openai_responses_long_input", "class_name": null, "qualname": "test_openai_responses_long_input", "file_path": "src/backend/tests/integration/test_openai_responses_extended.py", "repo_id": "langflow-ai/langflow", "loc": 17, "tested_modules": ["dotenv", "httpx", "lfx.log.logger", "tests.api_keys"... |
langflow-ai/langflow:src/lfx/src/lfx/services/mcp_composer/service.py:MCPComposerService._wait_for_process_exit | # Context:
import asyncio
class MCPComposerError(Exception): ...
class MCPComposerPortError(MCPComposerError): ...
class MCPComposerConfigError(MCPComposerError): ...
class MCPComposerDisabledError(MCPComposerError): ...
class MCPComposerStartupError(MCPComposerError): ...
def require_composer_enabled(func: Callable) ... | async def _wait_for_process_exit(self, process):
"""Wait for a process to exit."""
await asyncio.to_thread(process.wait) | function_simple | 1 | {"cognitive_complexity": 0, "loc": 3, "code_loc": 1, "docstring_loc": 1, "function_name": "_wait_for_process_exit", "class_name": "MCPComposerService", "qualname": "MCPComposerService._wait_for_process_exit", "file_path": "src/lfx/src/lfx/services/mcp_composer/service.py", "repo_id": "langflow-ai/langflow", "has_docstr... |
browser-use/browser-use:browser_use/dom/serializer/paint_order.py:RectUnionPure._split_diff | # Context:
class Rect: ...
class PaintOrderRemover: ...
class RectUnionPure:
__slots__ = ('_rects',)
def __init__(self):
self._rects: list[Rect] = []
def contains(self, r: Rect) -> bool: ...
def add(self, r: Rect) -> bool: ...
# Task:
Write a Python method `_split_diff` for the class `RectUnionPure` t... | def _split_diff(self, a: Rect, b: Rect) -> list[Rect]:
r"""
Return list of up to 4 rectangles = a \ b.
Assumes a intersects b.
"""
parts = []
# Bottom slice
if a.y1 < b.y1:
parts.append(Rect(a.x1, a.y1, a.x2, b.y1))
# Top slice
if b.y2 < a.y2:
parts.append(Rect(a.x1, b.y2, a.x2, a.y2))
# Mid... | function_simple | 0 | {"cognitive_complexity": 4, "loc": 26, "code_loc": 12, "docstring_loc": 4, "function_name": "_split_diff", "class_name": "RectUnionPure", "qualname": "RectUnionPure._split_diff", "file_path": "browser_use/dom/serializer/paint_order.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "fil... |
crewAIInc/crewAI:lib/crewai/tests/utilities/test_pydantic_schema_utils.py:TestStripUnsupportedFormats.test_keeps_date | # Context:
from copy import deepcopy
from crewai.utilities.pydantic_schema_utils import (
build_rich_field_description,
convert_oneof_to_anyof,
create_model_from_schema,
ensure_all_properties_required,
ensure_type_in_schemas,
force_additional_properties_false,
resolve_refs,
strip_null_fr... | def test_keeps_date(self) -> None:
schema = {"type": "string", "format": "date"}
result = strip_unsupported_formats(deepcopy(schema))
assert result["format"] == "date" | test | 0 | {"function_name": "test_keeps_date", "class_name": "TestStripUnsupportedFormats", "qualname": "TestStripUnsupportedFormats.test_keeps_date", "file_path": "lib/crewai/tests/utilities/test_pydantic_schema_utils.py", "repo_id": "crewAIInc/crewAI", "loc": 4, "tested_modules": ["__future__", "copy", "typing", "pydantic", "c... |
huggingface/transformers:src/transformers/models/ministral3/convert_ministral3_weights_to_hf.py:convert_and_write_model | # Context:
import os
import torch
from safetensors.torch import load_file
from transformers import (
GenerationConfig,
Ministral3Config,
Ministral3ForCausalLM,
Mistral3Config,
Mistral3ForConditionalGeneration,
PixtralImageProcessorFast,
PixtralProcessor,
PixtralVisionConfig,
)
from trans... | def convert_and_write_model(input_dir: str, output_dir: str, max_position_embeddings: int):
"""Convert the model and save it (this implicitly save the config as well)."""
params = read_json(os.path.join(input_dir, "params.json"))
is_vision = params.get("vision_encoder") is not None
config = convert_con... | function_complex | 0 | {"cognitive_complexity": 11, "loc": 38, "code_loc": 27, "docstring_loc": 1, "function_name": "convert_and_write_model", "class_name": null, "qualname": "convert_and_write_model", "file_path": "src/transformers/models/ministral3/convert_ministral3_weights_to_hf.py", "repo_id": "huggingface/transformers", "has_docstring"... |
streamlit/streamlit:lib/tests/streamlit/web/server/starlette/starlette_static_routes_test.py:TestReservedPaths.test_reserved_path_returns_404 | # Context:
from starlette.testclient import TestClient
def static_app(tmp_path: Path) -> Iterator[TestClient]: ...
class TestStreamlitStaticFiles: ...
class TestWithBaseUrl: ...
class TestDoubleSlashProtection: ...
class TestTrailingSlashRedirect: ...
class TestCacheHeadersOnRedirects: ...
class TestReservedPaths:
... | def test_reserved_path_returns_404(self, static_app: TestClient) -> None:
"""Test that reserved paths return 404 instead of SPA fallback."""
response = static_app.get("/_stcore/health")
assert response.status_code == 404 | test | 1 | {"function_name": "test_reserved_path_returns_404", "class_name": "TestReservedPaths", "qualname": "TestReservedPaths.test_reserved_path_returns_404", "file_path": "lib/tests/streamlit/web/server/starlette/starlette_static_routes_test.py", "repo_id": "streamlit/streamlit", "loc": 5, "tested_modules": ["__future__", "ty... |
ray-project/ray:python/ray/data/tests/unit/test_average_calculator.py:test_calcuate_time_window_average | # Context:
from ray.data._internal.average_calculator import TimeWindowAverageCalculator
def current_time(): ...
# Task:
Write a Python test function `test_calcuate_time_window_average` to test TimeWindowAverageCalculator.
Module under test: ray.data._internal.average_calculator | def test_calcuate_time_window_average(current_time):
"""Test TimeWindowAverageCalculator."""
window_s = 10
values_to_report = [i + 1 for i in range(20)]
calculator = TimeWindowAverageCalculator(window_s)
assert calculator.get_average() is None
for value in values_to_report:
# Report va... | test | 0 | {"function_name": "test_calcuate_time_window_average", "class_name": null, "qualname": "test_calcuate_time_window_average", "file_path": "python/ray/data/tests/unit/test_average_calculator.py", "repo_id": "ray-project/ray", "loc": 30, "tested_modules": ["ray.data._internal.average_calculator"], "has_docstring": true, "... |
browser-use/browser-use:tests/ci/test_structured_extraction.py:TestExtractStructured.test_structured_extraction_returns_json | # Context:
import asyncio
import json
import tempfile
from browser_use.agent.views import ActionResult
from browser_use.filesystem.file_system import FileSystem
from browser_use.tools.service import Tools
class TestSchemaDictToPydanticModel: ...
class TestExtractionResult: ...
def _make_extraction_llm(structured_respo... | async def test_structured_extraction_returns_json(self, browser_session, base_url):
"""When output_schema is provided, extract returns structured JSON in <structured_result> tags."""
tools = Tools()
await tools.navigate(url=f'{base_url}/products', new_tab=False, browser_session=browser_session)
await asyncio.sl... | test | 0 | {"function_name": "test_structured_extraction_returns_json", "class_name": "TestExtractStructured", "qualname": "TestExtractStructured.test_structured_extraction_returns_json", "file_path": "tests/ci/test_structured_extraction.py", "repo_id": "browser-use/browser-use", "loc": 54, "tested_modules": ["pydantic", "browser... |
apache/airflow:airflow-core/tests/unit/models/test_log.py:TestLogTaskInstanceReproduction.test_log_task_instance_join_correctness | # Context:
from sqlalchemy import select
from sqlalchemy.orm import joinedload
from airflow.models.log import Log
from airflow.operators.empty import EmptyOperator
from airflow.utils.state import TaskInstanceState
class TestLogTaskInstanceReproduction:
# Task:
Write a Python test method `test_log_task_instance_join_c... | def test_log_task_instance_join_correctness(self, dag_maker, session):
# Create dag_1 with a task
with dag_maker("dag_1", session=session):
EmptyOperator(task_id="common_task_id")
dr1 = dag_maker.create_dagrun()
ti1 = dr1.get_task_instance("common_task_id")
ti1.state... | test | 1 | {"function_name": "test_log_task_instance_join_correctness", "class_name": "TestLogTaskInstanceReproduction", "qualname": "TestLogTaskInstanceReproduction.test_log_task_instance_join_correctness", "file_path": "airflow-core/tests/unit/models/test_log.py", "repo_id": "apache/airflow", "loc": 53, "tested_modules": ["__fu... |
crewAIInc/crewAI:lib/crewai/src/crewai/memory/encoding_flow.py:EncodingFlow.batch_embed | # Context:
from crewai.flow.flow import Flow, listen, start
from crewai.memory.types import MemoryConfig, MemoryRecord, embed_texts
class ItemState(BaseModel): ...
class EncodingState(BaseModel): ...
class EncodingFlow(Flow[EncodingState]):
initial_state = EncodingState
def __init__(
self,
sto... | def batch_embed(self) -> None:
"""Embed all items in a single embedder call."""
items = list(self.state.items)
texts = [item.content for item in items]
embeddings = embed_texts(self._embedder, texts)
for item, emb in zip(items, embeddings, strict=False):
item.embeddin... | function_simple | 0 | {"cognitive_complexity": 1, "loc": 7, "code_loc": 5, "docstring_loc": 1, "function_name": "batch_embed", "class_name": "EncodingFlow", "qualname": "EncodingFlow.batch_embed", "file_path": "lib/crewai/src/crewai/memory/encoding_flow.py", "repo_id": "crewAIInc/crewAI", "has_docstring": true, "runnable_level": "project_ru... |
huggingface/transformers:src/transformers/models/ovis2/image_processing_ovis2.py:get_all_supported_aspect_ratios | # Context:
from functools import lru_cache
class Ovis2ImageProcessorKwargs(ImagesKwargs): ...
def get_optimal_tiled_canvas(original_image_size: tuple[int, int], target_tile_size: tuple[int, int], min_image_tiles: int, max_image_tiles: int) -> tuple[int, int]: ...
def compute_patch_covering_area(left: int, upper: int, ... | def get_all_supported_aspect_ratios(min_image_tiles: int, max_image_tiles: int) -> list[tuple[int, int]]:
"""
Computes all allowed aspect ratios for a given minimum and maximum number of input tiles.
This function calculates all possible arrangements of tiles that can be formed
within the constraint of... | function_complex | 0 | {"cognitive_complexity": 7, "loc": 32, "code_loc": 7, "docstring_loc": 22, "function_name": "get_all_supported_aspect_ratios", "class_name": null, "qualname": "get_all_supported_aspect_ratios", "file_path": "src/transformers/models/ovis2/image_processing_ovis2.py", "repo_id": "huggingface/transformers", "has_docstring"... |
crewAIInc/crewAI:lib/crewai-tools/tests/tools/test_code_interpreter_tool.py:test_unsafe_mode_running_unsafe_code | # Context:
from crewai_tools.tools.code_interpreter_tool.code_interpreter_tool import (
CodeInterpreterTool,
SandboxPython,
)
def printer_mock(): ...
def docker_unavailable_mock(): ...
def test_run_code_in_docker(docker_mock, printer_mock): ...
def test_run_code_in_docker_with_error(docker_mock, printer_mock):... | def test_unsafe_mode_running_unsafe_code(printer_mock, docker_unavailable_mock):
"""Test behavior when no result variable is set."""
tool = CodeInterpreterTool(unsafe_mode=True)
code = """
import os
os.system("ls -la")
result = eval("5/1")
"""
result = tool.run(code=code, libraries_used=[])
printer_... | test | 0 | {"function_name": "test_unsafe_mode_running_unsafe_code", "class_name": null, "qualname": "test_unsafe_mode_running_unsafe_code", "file_path": "lib/crewai-tools/tests/tools/test_code_interpreter_tool.py", "repo_id": "crewAIInc/crewAI", "loc": 13, "tested_modules": ["crewai_tools.tools.code_interpreter_tool.code_interpr... |
apache/airflow:providers/standard/tests/unit/standard/operators/test_hitl.py:TestHITLOperator.test_validate_params_input_with_invalid_input | # Context:
import pytest
from typing import TYPE_CHECKING, Any
from airflow.providers.common.compat.sdk import AirflowException, DownstreamTasksSkipped, ParamValidationError
from airflow.providers.standard.operators.hitl import (
ApprovalOperator,
HITLBranchOperator,
HITLEntryOperator,
HITLOperator,
)
f... | def test_validate_params_input_with_invalid_input(
self,
params: ParamsDict,
params_input: dict[str, Any],
exc: type[ValueError | ParamValidationError],
error_msg: str,
) -> None:
hitl_op = HITLOperator(
task_id="hitl_test",
subject="This is su... | test | 1 | {"function_name": "test_validate_params_input_with_invalid_input", "class_name": "TestHITLOperator", "qualname": "TestHITLOperator.test_validate_params_input_with_invalid_input", "file_path": "providers/standard/tests/unit/standard/operators/test_hitl.py", "repo_id": "apache/airflow", "loc": 24, "tested_modules": ["__f... |
apache/airflow:airflow-core/tests/unit/api_fastapi/execution_api/versions/v2025_09_23/test_asset_events.py:test_asset_events | # Context:
from datetime import datetime
import pytest
from airflow._shared.timezones import timezone
from airflow.models.asset import AssetActive, AssetAliasModel, AssetEvent, AssetModel
def ver_client(client): ...
def test_asset(session): ...
def test_asset_alias(session, test_asset_events, test_asset): ...
class Te... | def test_asset_events(session):
def make_timestamp(day):
return datetime(2021, 1, day, tzinfo=timezone.utc)
common = {
"asset_id": 1,
"extra": {"foo": "bar"},
"source_dag_id": "foo",
"source_task_id": "bar",
"source_run_id": "custom",
"source_map_index": ... | test | 1 | {"function_name": "test_asset_events", "class_name": null, "qualname": "test_asset_events", "file_path": "airflow-core/tests/unit/api_fastapi/execution_api/versions/v2025_09_23/test_asset_events.py", "repo_id": "apache/airflow", "loc": 22, "tested_modules": ["__future__", "datetime", "airflow._shared.timezones", "airfl... |
streamlit/streamlit:lib/streamlit/components/v2/manifest_scanner.py:ComponentConfig.from_dict | # Context:
from typing import Any, Final
def _normalize_package_name(dist_name: str) -> str: ...
class ComponentManifest: ...
def _is_likely_streamlit_component_package(dist: importlib.metadata.Distribution) -> bool: ...
def _find_package_pyproject_toml(dist: importlib.metadata.Distribution) -> Path | None: ...
def _p... | def from_dict(config: dict[str, Any]) -> ComponentConfig:
"""Create a ComponentConfig from a raw dict.
Parameters
----------
config
Raw component dictionary parsed from TOML.
Returns
-------
ComponentConfig
Parsed and validated component ... | function_simple | 1 | {"cognitive_complexity": 4, "loc": 27, "code_loc": 10, "docstring_loc": 12, "function_name": "from_dict", "class_name": "ComponentConfig", "qualname": "ComponentConfig.from_dict", "file_path": "lib/streamlit/components/v2/manifest_scanner.py", "repo_id": "streamlit/streamlit", "has_docstring": true, "runnable_level": "... |
langflow-ai/langflow:src/lfx/src/lfx/interface/initialize/loading.py:instantiate_class | # Context:
from typing import TYPE_CHECKING, Any
from lfx.custom.eval import eval_custom_component_code
from lfx.log.logger import logger
from lfx.custom.custom_component.component import Component
from lfx.custom.custom_component.custom_component import CustomComponent
from lfx.graph.vertex.base import Vertex
async d... | def instantiate_class(
vertex: Vertex,
user_id=None,
event_manager: EventManager | None = None,
) -> Any:
"""Instantiate class from module type and key, and params."""
vertex_type = vertex.vertex_type
base_type = vertex.base_type
logger.debug(f"Instantiating {vertex_type} of type {base_type}... | function_simple | 1 | {"cognitive_complexity": 2, "loc": 27, "code_loc": 19, "docstring_loc": 1, "function_name": "instantiate_class", "class_name": null, "qualname": "instantiate_class", "file_path": "src/lfx/src/lfx/interface/initialize/loading.py", "repo_id": "langflow-ai/langflow", "has_docstring": true, "runnable_level": "project_runna... |
paperless-ngx/paperless-ngx:src/paperless_remote/tests/test_parser.py:TestParser.test_get_text_with_azure_error_logged_and_returns_none | # Context:
import uuid
from unittest import mock
from django.test import override_settings
from paperless_remote.signals import get_parser
class TestParser(DirectoriesMixin, FileSystemAssertsMixin, TestCase):
SAMPLE_FILES = Path(__file__).resolve().parent / "samples"
def assertContainsStrings(self, content: st... | def test_get_text_with_azure_error_logged_and_returns_none(
self,
mock_client_cls,
) -> None:
mock_client = mock.Mock()
mock_client.begin_analyze_document.side_effect = RuntimeError("fail")
mock_client_cls.return_value = mock_client
with override_settings(
... | test | 1 | {"function_name": "test_get_text_with_azure_error_logged_and_returns_none", "class_name": "TestParser", "qualname": "TestParser.test_get_text_with_azure_error_logged_and_returns_none", "file_path": "src/paperless_remote/tests/test_parser.py", "repo_id": "paperless-ngx/paperless-ngx", "loc": 28, "tested_modules": ["path... |
run-llama/llama_index:llama-index-integrations/memory/llama-index-memory-bedrock-agentcore/tests/test_agentcore_memory.py:TestAgentCoreMemoryContext.test_context_creation | # Context:
from llama_index.memory.bedrock_agentcore.base import (
AgentCoreMemory,
AgentCoreMemoryContext,
)
def mock_client(): ...
def memory_context(): ...
def memory(mock_client, memory_context): ...
class TestBaseAgentCoreMemoryMethods: ...
class TestAgentCoreMemory: ...
class TestIntegration: ...
class T... | def test_context_creation(self):
"""Test creating a memory context."""
context = AgentCoreMemoryContext(
actor_id="test-actor",
memory_id="test-memory",
session_id="test-session",
)
assert context.actor_id == "test-actor"
assert context.memory_... | test | 1 | {"function_name": "test_context_creation", "class_name": "TestAgentCoreMemoryContext", "qualname": "TestAgentCoreMemoryContext.test_context_creation", "file_path": "llama-index-integrations/memory/llama-index-memory-bedrock-agentcore/tests/test_agentcore_memory.py", "repo_id": "run-llama/llama_index", "loc": 12, "teste... |
deepfakes/faceswap:tests/plugins/train/trainer/test_distributed.py:test_WrappedModel | # Context:
import numpy as np
import pytest
import torch
from plugins.train.trainer import distributed as mod_distributed
def _trainer_mocked(mocker: pytest_mock.MockFixture): ...
def test_Trainer(gpu_count, batch_size, _trainer_mocked): ...
def test_Trainer_forward(gpu_count, batch_size, outputs, _trainer_mocked, moc... | def test_WrappedModel(batch_size, outputs, mocker):
""" Test that the wrapped model calls preds and loss """
model = mocker.MagicMock()
instance = mod_distributed.WrappedModel(model)
assert instance._keras_model is model
loss_return = [torch.from_numpy((np.random.random((1, )))) for _ in range(outp... | test | 1 | {"function_name": "test_WrappedModel", "class_name": null, "qualname": "test_WrappedModel", "file_path": "tests/plugins/train/trainer/test_distributed.py", "repo_id": "deepfakes/faceswap", "loc": 48, "tested_modules": ["plugins.train.trainer", "plugins.train.trainer", "plugins.train.trainer"], "has_docstring": true, "r... |
sansan0/TrendRadar:trendradar/storage/local.py:module_doc | Write a module-level docstring for the Python module `local` which contains class `LocalStorageBackend`. | 本地存储后端 - SQLite + TXT/HTML
使用 SQLite 作为主存储,支持可选的 TXT 快照和 HTML 报告 | documentation | 1 | {"doc_type": "module", "module_name": "local", "file_path": "trendradar/storage/local.py", "repo_id": "sansan0/TrendRadar", "char_length": 65} |
hiyouga/LlamaFactory:src/llamafactory/v1/accelerator/interface.py:DistributedStrategy.data_mesh_shape | # Context:
class Dim(StrEnum): ...
class DistributedInterface: ...
class DistributedStrategy:
def __post_init__(self) -> None: ...
def model_mesh_shape(self) -> tuple[int, int]: ...
def model_mesh_dim_names(self) -> tuple[str, str]: ...
def data_mesh_dim_names(self) -> tuple[str, str]: ...
# Task:
Wr... | def data_mesh_shape(self) -> tuple[int, int]:
"""Data parallel mesh shape."""
return (self.dp_size, self.cp_size) | function_simple | 1 | {"cognitive_complexity": 0, "loc": 3, "code_loc": 1, "docstring_loc": 1, "function_name": "data_mesh_shape", "class_name": "DistributedStrategy", "qualname": "DistributedStrategy.data_mesh_shape", "file_path": "src/llamafactory/v1/accelerator/interface.py", "repo_id": "hiyouga/LlamaFactory", "has_docstring": true, "run... |
crewAIInc/crewAI:lib/crewai/tests/test_flow_visualization.py:test_build_flow_structure_with_router | # Context:
from crewai.flow.visualization import (
build_flow_structure,
visualize_flow_structure,
)
class SimpleFlow(Flow): ...
class RouterFlow(Flow): ...
class ComplexFlow(Flow): ...
def test_build_flow_structure_simple(): ...
def test_build_flow_structure_with_and_or_conditions(): ...
def test_visualize_fl... | def test_build_flow_structure_with_router():
"""Test building structure for a flow with router."""
flow = RouterFlow()
structure = build_flow_structure(flow)
assert structure is not None
assert len(structure["nodes"]) == 4
assert len(structure["router_methods"]) == 1
assert "decide" in str... | test | 0 | {"function_name": "test_build_flow_structure_with_router", "class_name": null, "qualname": "test_build_flow_structure_with_router", "file_path": "lib/crewai/tests/test_flow_visualization.py", "repo_id": "crewAIInc/crewAI", "loc": 20, "tested_modules": ["pathlib", "crewai.flow.flow", "crewai.flow.visualization", "typing... |
run-llama/llama_index:llama-index-integrations/indices/llama-index-indices-managed-lancedb/llama_index/indices/managed/lancedb/retriever.py:LanceDBRetriever.aretrieve | # Context:
import os
from PIL import Image
from llama_index.core.llms import ImageBlock
from llama_index.core.schema import ImageDocument
from llama_index.core.schema import QueryBundle, NodeWithScore
from typing import Union, Optional, List, Any
class ExtendedQueryBundle(QueryBundle): ...
class LanceDBRetriever(Base... | async def aretrieve(
self,
query_str: Optional[str] = None,
query_image: Optional[
Union[Image.Image, ImageBlock, ImageDocument, str]
] = None,
query_image_path: Optional[os.PathLike[str]] = None,
) -> List[NodeWithScore]:
"""
Asynchronously retrie... | function_complex | 1 | {"cognitive_complexity": 11, "loc": 48, "code_loc": 23, "docstring_loc": 17, "function_name": "aretrieve", "class_name": "LanceDBRetriever", "qualname": "LanceDBRetriever.aretrieve", "file_path": "llama-index-integrations/indices/llama-index-indices-managed-lancedb/llama_index/indices/managed/lancedb/retriever.py", "re... |
ray-project/ray:doc/source/ray-overview/examples/e2e-rag/notebooks/rag_utils.py:ChromaQuerier._reformat | Write a Python method `_reformat` for the class `ChromaQuerier` to reformat Chroma DB results into a flat list of dictionaries.
Parameters: chroma_results: dict
Returns: list | def _reformat(self, chroma_results: dict) -> list:
"""
Reformat Chroma DB results into a flat list of dictionaries.
"""
reformatted = []
metadatas = chroma_results.get("metadatas", [])
documents = chroma_results.get("documents", [])
distances = chroma_results.get(... | function_simple | 0 | {"cognitive_complexity": 3, "loc": 28, "code_loc": 22, "docstring_loc": 3, "function_name": "_reformat", "class_name": "ChromaQuerier", "qualname": "ChromaQuerier._reformat", "file_path": "doc/source/ray-overview/examples/e2e-rag/notebooks/rag_utils.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_le... |
ray-project/ray:python/ray/train/v2/tests/test_validation_manager.py:test_checkpoint_validation_management_slow_validation_fn | # Context:
import time
from unittest.mock import create_autospec
import pytest
import ray
from ray.train.v2._internal.execution.checkpoint import validation_manager
from ray.train.v2._internal.execution.checkpoint.checkpoint_manager import (
CheckpointManager,
)
from ray.train.v2._internal.execution.storage import ... | def test_checkpoint_validation_management_slow_validation_fn(tmp_path):
checkpoint_manager = create_autospec(CheckpointManager, instance=True)
def infinite_waiting_validation_fn(checkpoint):
while True:
time.sleep(1)
vm = validation_manager.ValidationManager(
checkpoint_manager... | test | 0 | {"function_name": "test_checkpoint_validation_management_slow_validation_fn", "class_name": null, "qualname": "test_checkpoint_validation_management_slow_validation_fn", "file_path": "python/ray/train/v2/tests/test_validation_manager.py", "repo_id": "ray-project/ray", "loc": 44, "tested_modules": ["ray.train._checkpoin... |
apache/airflow:providers/fab/tests/unit/fab/auth_manager/cli_commands/test_permissions_command.py:TestDagPermissions.test_cleanup_dag_permissions_removes_specific_dag_resources | # Context:
from sqlalchemy import select
from airflow.providers.fab.auth_manager.cli_commands.permissions_command import (
cleanup_dag_permissions,
)
from airflow.providers.fab.auth_manager.models import Action, Permission, Resource
from airflow.providers.fab.www.security.permissions import RESOURCE... | def test_cleanup_dag_permissions_removes_specific_dag_resources(self):
"""Test that cleanup_dag_permissions removes only the specified DAG resources."""
from sqlalchemy import select
from airflow.providers.fab.auth_manager.cli_commands.permissions_command import (
cleanup_dag_permis... | test | 1 | {"function_name": "test_cleanup_dag_permissions_removes_specific_dag_resources", "class_name": "TestDagPermissions", "qualname": "TestDagPermissions.test_cleanup_dag_permissions_removes_specific_dag_resources", "file_path": "providers/fab/tests/unit/fab/auth_manager/cli_commands/test_permissions_command.py", "repo_id":... |
vllm-project/vllm:tests/test_envs.py:TestEnvWithChoices.test_invalid_value_raises_error_case_sensitive | # Context:
import os
from unittest.mock import patch
import pytest
from vllm.envs import (
disable_envs_cache,
enable_envs_cache,
env_list_with_choices,
env_set_with_choices,
env_with_choices,
environment_variables,
)
def test_getattr_without_cache(monkeypatch: pytest.MonkeyPatch): ...
def test... | def test_invalid_value_raises_error_case_sensitive(self):
"""Test that invalid value raises ValueError in case sensitive mode."""
with patch.dict(os.environ, {"TEST_ENV": "invalid"}):
env_func = env_with_choices(
"TEST_ENV", "default", ["option1", "option2"], case_sensitive=T... | test | 1 | {"function_name": "test_invalid_value_raises_error_case_sensitive", "class_name": "TestEnvWithChoices", "qualname": "TestEnvWithChoices.test_invalid_value_raises_error_case_sensitive", "file_path": "tests/test_envs.py", "repo_id": "vllm-project/vllm", "loc": 10, "tested_modules": ["vllm.envs"], "has_docstring": true, "... |
huggingface/diffusers:src/diffusers/pipelines/kandinsky5/pipeline_kandinsky.py:Kandinsky5T2VPipeline.check_inputs | # Context:
def basic_clean(text): ...
def whitespace_clean(text): ...
def prompt_clean(text): ...
class Kandinsky5T2VPipeline(DiffusionPipeline, KandinskyLoraLoaderMixin):
model_cpu_offload_seq = "text_encoder->text_encoder_2->transformer->vae"
_callback_tensor_inputs = [
def __init__(
self,
... | def check_inputs(
self,
prompt,
negative_prompt,
height,
width,
prompt_embeds_qwen=None,
prompt_embeds_clip=None,
negative_prompt_embeds_qwen=None,
negative_prompt_embeds_clip=None,
prompt_cu_seqlens=None,
negative_prompt_cu_seqlens... | function_complex | 1 | {"cognitive_complexity": 22, "loc": 85, "code_loc": 40, "docstring_loc": 19, "function_name": "check_inputs", "class_name": "Kandinsky5T2VPipeline", "qualname": "Kandinsky5T2VPipeline.check_inputs", "file_path": "src/diffusers/pipelines/kandinsky5/pipeline_kandinsky.py", "repo_id": "huggingface/diffusers", "has_docstri... |
vllm-project/vllm:examples/online_serving/openai_responses_client_with_mcp_tools.py:module_doc | Write a module-level docstring for the Python module `openai_responses_client_with_mcp_tools` which contains function `example_no_filter`, function `example_wildcard`, function `example_specific_tools`, function `example_object_format`, function `main`. | Example demonstrating MCP (Model Context Protocol) tools with the Responses API.
This example shows how to use MCP tools with different allowed_tools configurations:
1. No filter (allows all tools from the MCP server)
2. Wildcard "*" (explicitly allows all tools)
3. Specific tool names (filters to only those tools)
S... | documentation | 1 | {"doc_type": "module", "module_name": "openai_responses_client_with_mcp_tools", "file_path": "examples/online_serving/openai_responses_client_with_mcp_tools.py", "repo_id": "vllm-project/vllm", "char_length": 654} |
browser-use/browser-use:browser_use/skill_cli/commands/profile.py:_handle_sync | # Context:
import argparse
import json
import sys
import tempfile
from pathlib import Path
from browser_use.skill_cli.commands.utils import get_sdk_client
from browser_use.skill_cli.api_key import APIKeyRequired
import asyncio
from browser_use.skill_cli.sessions import create_browser_session
class ProfileModeError(Exc... | def _handle_sync(args: argparse.Namespace) -> int:
"""Handle 'profile sync' command - sync local profile to cloud."""
import asyncio
from browser_use.skill_cli.api_key import APIKeyRequired
from browser_use.skill_cli.sessions import create_browser_session
# Get SDK client (validates API key)
try:
client = get... | function_complex | 0 | {"cognitive_complexity": 44, "loc": 191, "code_loc": 146, "docstring_loc": 1, "function_name": "_handle_sync", "class_name": null, "qualname": "_handle_sync", "file_path": "browser_use/skill_cli/commands/profile.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "project_runnable"} |
crewAIInc/crewAI:lib/crewai/tests/llms/azure/test_azure.py:test_azure_completion_is_used_when_azure_provider | # Context:
import pytest
from crewai.llm import LLM
def mock_azure_credentials(): ...
def test_azure_completion_is_used_when_azure_openai_provider(): ...
def test_azure_tool_use_conversation_flow(): ...
def test_azure_completion_module_is_imported(): ...
def test_native_azure_raises_error_when_initialization_fails(): ... | def test_azure_completion_is_used_when_azure_provider():
"""
Test that AzureCompletion from completion.py is used when LLM uses provider 'azure'
"""
llm = LLM(model="azure/gpt-4")
assert llm.__class__.__name__ == "AzureCompletion"
assert llm.provider == "azure"
assert llm.model == "gpt-4" | test | 0 | {"function_name": "test_azure_completion_is_used_when_azure_provider", "class_name": null, "qualname": "test_azure_completion_is_used_when_azure_provider", "file_path": "lib/crewai/tests/llms/azure/test_azure.py", "repo_id": "crewAIInc/crewAI", "loc": 9, "tested_modules": ["crewai.llm", "crewai.crew", "crewai.agent", "... |
crewAIInc/crewAI:lib/crewai/tests/cli/authentication/providers/test_keycloak.py:TestKeycloakProvider.test_get_issuer | # Context:
class TestKeycloakProvider:
def setup_method(self): ...
def test_initialization_with_valid_settings(self): ...
def test_get_authorize_url(self): ...
def test_get_authorize_url_with_different_domain(self): ...
def test_get_token_url(self): ...
def test_get_token_url_with_different_dom... | def test_get_issuer(self):
expected_issuer = "https://keycloak.example.com/realms/test-realm"
assert self.provider.get_issuer() == expected_issuer | test | 0 | {"function_name": "test_get_issuer", "class_name": "TestKeycloakProvider", "qualname": "TestKeycloakProvider.test_get_issuer", "file_path": "lib/crewai/tests/cli/authentication/providers/test_keycloak.py", "repo_id": "crewAIInc/crewAI", "loc": 3, "tested_modules": ["crewai.cli.authentication.main", "crewai.cli.authenti... |
crewAIInc/crewAI:lib/crewai/src/crewai/llms/providers/gemini/completion.py:GeminiCompletion._handle_structured_output_tool_call | # Context:
import logging
from typing import TYPE_CHECKING, Any, Literal, cast
from pydantic import BaseModel
from crewai.events.types.llm_events import LLMCallType
from google.genai import types
class GeminiCompletion(BaseLLM):
def __init__(
self,
model: str = "gemini-2.0-flash-001",
api_k... | def _handle_structured_output_tool_call(
self,
structured_data: dict[str, Any],
response_model: type[BaseModel],
contents: list[types.Content],
from_task: Any | None = None,
from_agent: Any | None = None,
) -> BaseModel:
"""Validate and emit event for structur... | function_simple | 0 | {"cognitive_complexity": 1, "loc": 40, "code_loc": 17, "docstring_loc": 15, "function_name": "_handle_structured_output_tool_call", "class_name": "GeminiCompletion", "qualname": "GeminiCompletion._handle_structured_output_tool_call", "file_path": "lib/crewai/src/crewai/llms/providers/gemini/completion.py", "repo_id": "... |
crewAIInc/crewAI:lib/crewai/tests/test_streaming.py:TestStreamingEdgeCases.test_streaming_with_empty_content_chunks | # Context:
from collections.abc import AsyncIterator, Generator
from unittest.mock import MagicMock, patch
from crewai.types.streaming import (
CrewStreamingOutput,
FlowStreamingOutput,
StreamChunk,
StreamChunkType,
ToolCallChunk,
)
from crewai.types.streaming import (
CrewStreamingOutpu... | def test_streaming_with_empty_content_chunks(self) -> None:
"""Test streaming when LLM chunks have empty content."""
mock_output = MagicMock()
mock_output.raw = "No streaming"
def gen() -> Generator[StreamChunk, None, None]:
yield StreamChunk(content="")
streaming =... | test | 0 | {"function_name": "test_streaming_with_empty_content_chunks", "class_name": "TestStreamingEdgeCases", "qualname": "TestStreamingEdgeCases.test_streaming_with_empty_content_chunks", "file_path": "lib/crewai/tests/test_streaming.py", "repo_id": "crewAIInc/crewAI", "loc": 20, "tested_modules": ["collections.abc", "typing"... |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/agents/test_response_format.py:TestResponseFormatAsToolStrategy.test_typed_dict | # Context:
from langchain_core.messages import HumanMessage
from langchain.agents import create_agent
from langchain.agents.structured_output import (
MultipleStructuredOutputsError,
ProviderStrategy,
StructuredOutputValidationError,
ToolStrategy,
)
from tests.unit_tests.agents.model import FakeToolCall... | def test_typed_dict(self) -> None:
"""Test response_format as ToolStrategy with TypedDict."""
tool_calls = [
[{"args": {}, "id": "1", "name": "get_weather"}],
[
{
"name": "WeatherTypedDict",
"id": "2",
"a... | test | 1 | {"function_name": "test_typed_dict", "class_name": "TestResponseFormatAsToolStrategy", "qualname": "TestResponseFormatAsToolStrategy.test_typed_dict", "file_path": "libs/langchain_v1/tests/unit_tests/agents/test_response_format.py", "repo_id": "langchain-ai/langchain", "loc": 20, "tested_modules": ["collections.abc", "... |
crewAIInc/crewAI:lib/crewai/tests/utilities/events/test_rw_lock.py:test_manual_acquire_release | # Context:
from crewai.utilities.rw_lock import RWLock
def test_multiple_readers_concurrent(): ...
def test_writer_blocks_readers(): ...
def test_writer_blocks_other_writers(): ...
def test_readers_block_writers(): ...
def test_alternating_readers_and_writers(): ...
def test_context_manager_releases_on_exception(): ..... | def test_manual_acquire_release():
lock = RWLock()
lock.r_acquire()
lock.r_release()
lock.w_acquire()
lock.w_release()
with lock.r_locked():
pass | test | 0 | {"function_name": "test_manual_acquire_release", "class_name": null, "qualname": "test_manual_acquire_release", "file_path": "lib/crewai/tests/utilities/events/test_rw_lock.py", "repo_id": "crewAIInc/crewAI", "loc": 11, "tested_modules": ["crewai.utilities.rw_lock"], "has_docstring": false, "runnable_level": "project_r... |
langflow-ai/langflow:src/lfx/tests/unit/services/test_service_manager.py:TestServiceRegistration.test_register_storage_service | # Context:
from lfx.services.schema import ServiceType
from lfx.services.storage.local import LocalStorageService
def service_manager(): ...
def temp_config_dir(tmp_path): ...
class TestPluginDiscovery: ...
class TestServiceCreation: ...
class TestConflictResolution: ...
class TestTeardown: ...
class TestConfigDirecto... | def test_register_storage_service(self, service_manager):
"""Test registering the real LocalStorageService."""
service_manager.register_service_class(ServiceType.STORAGE_SERVICE, LocalStorageService, override=True)
assert ServiceType.STORAGE_SERVICE in service_manager.service_classes
as... | test | 1 | {"function_name": "test_register_storage_service", "class_name": "TestServiceRegistration", "qualname": "TestServiceRegistration.test_register_storage_service", "file_path": "src/lfx/tests/unit/services/test_service_manager.py", "repo_id": "langflow-ai/langflow", "loc": 6, "tested_modules": ["pathlib", "lfx.services.ba... |
apache/airflow:providers/microsoft/azure/tests/unit/microsoft/azure/fs/test_msgraph.py:TestMSGraphFS.test_get_fs_no_connection | # Context:
from unittest.mock import MagicMock, patch
from airflow.providers.microsoft.azure.fs.msgraph import get_fs
def mock_connection(): ...
def mock_connection_minimal(): ...
class TestMSGraphFS:
def test_get_fs_with_drive_id(self, mock_msgdrivefs, mock_get_connection, mock_connection): ...
def test_get_... | def test_get_fs_no_connection(self, mock_msgdrivefs):
mock_fs_instance = MagicMock()
mock_msgdrivefs.return_value = mock_fs_instance
result = get_fs(None)
mock_msgdrivefs.assert_called_once_with({})
assert result == mock_fs_instance | test | 1 | {"function_name": "test_get_fs_no_connection", "class_name": "TestMSGraphFS", "qualname": "TestMSGraphFS.test_get_fs_no_connection", "file_path": "providers/microsoft/azure/tests/unit/microsoft/azure/fs/test_msgraph.py", "repo_id": "apache/airflow", "loc": 8, "tested_modules": ["__future__", "airflow.models.connection"... |
browser-use/browser-use:tests/ci/browser/test_tabs.py:TestMultiTabOperations.test_create_and_switch_three_tabs | # Context:
import asyncio
import time
import pytest
from browser_use.agent.service import Agent
from tests.ci.conftest import create_mock_llm
def http_server(): ...
def base_url(http_server): ...
async def browser_session(): ...
class TestMultiTabOperations:
async def test_close_tab_with_vision(self, browser_sess... | async def test_create_and_switch_three_tabs(self, browser_session, base_url):
"""Test that agent can create 3 tabs, switch between them, and call done().
This test verifies that browser state is retrieved between each step.
"""
start_time = time.time()
actions = [
# Action 1: Navigate to home page
f""... | test | 0 | {"function_name": "test_create_and_switch_three_tabs", "class_name": "TestMultiTabOperations", "qualname": "TestMultiTabOperations.test_create_and_switch_three_tabs", "file_path": "tests/ci/browser/test_tabs.py", "repo_id": "browser-use/browser-use", "loc": 131, "tested_modules": ["browser_use.agent.service", "browser_... |
huggingface/transformers:tests/quantization/metal/test_metal.py:ReplaceWithMetalLinearTest.test_all_linears_replaced | # Context:
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, MetalConfig, OPTForCausalLM
import torch.nn as nn
from transformers.integrations.metal_quantization import MetalLinear
from transformers.integrations.metal_quantization import MetalLinear, replace_with_metal_linear
def _patch_mps_avai... | def test_all_linears_replaced(self):
from transformers.integrations.metal_quantization import MetalLinear, replace_with_metal_linear
model = self._make_small_model()
nb_linears = sum(1 for m in model.modules() if isinstance(m, nn.Linear))
self.assertGreater(nb_linears, 0)
confi... | test | 0 | {"function_name": "test_all_linears_replaced", "class_name": "ReplaceWithMetalLinearTest", "qualname": "ReplaceWithMetalLinearTest.test_all_linears_replaced", "file_path": "tests/quantization/metal/test_metal.py", "repo_id": "huggingface/transformers", "loc": 12, "tested_modules": ["contextlib", "transformers", "transf... |
crewAIInc/crewAI:lib/crewai-files/src/crewai_files/uploaders/anthropic.py:AnthropicFileUploader.__init__ | # Context:
import os
from typing import Any
class AnthropicFileUploader(FileUploader):
def provider_name(self) -> str: ...
def _get_client(self) -> Any: ...
def _get_async_client(self) -> Any: ...
def upload(self, file: FileInput, purpose: str | None) -> UploadResult: ...
def delete(self, file_id: ... | def __init__(
self,
api_key: str | None = None,
client: Any = None,
async_client: Any = None,
) -> None:
"""Initialize the Anthropic uploader.
Args:
api_key: Optional Anthropic API key. If not provided, uses
ANTHROPIC_API_KEY environment v... | function_simple | 0 | {"cognitive_complexity": 1, "loc": 17, "code_loc": 3, "docstring_loc": 8, "function_name": "__init__", "class_name": "AnthropicFileUploader", "qualname": "AnthropicFileUploader.__init__", "file_path": "lib/crewai-files/src/crewai_files/uploaders/anthropic.py", "repo_id": "crewAIInc/crewAI", "has_docstring": true, "runn... |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/openai_sdk_crash_course/1_starter_agent/1_personal_assistant_agent/agent.py:sync_example | # Context:
from agents import Agent, Runner
async def async_example(): ...
async def streaming_example(): ...
# Task:
Write a Python function `sync_example` to synchronous execution example. | def sync_example():
"""Synchronous execution example"""
result = Runner.run_sync(root_agent, "Hello, how does sync execution work?")
return result.final_output | function_simple | 0 | {"cognitive_complexity": 0, "loc": 4, "code_loc": 2, "docstring_loc": 1, "function_name": "sync_example", "class_name": null, "qualname": "sync_example", "file_path": "ai_agent_framework_crash_course/openai_sdk_crash_course/1_starter_agent/1_personal_assistant_agent/agent.py", "repo_id": "Shubhamsaboo/awesome-llm-apps"... |
apache/airflow:providers/databricks/src/airflow/providers/databricks/utils/mixins.py:DatabricksSQLStatementsMixin._handle_deferrable_execution | # Context:
import time
from airflow.providers.common.compat.sdk import AirflowException
from airflow.providers.databricks.hooks.databricks import DatabricksHook, SQLStatementState
from airflow.providers.databricks.triggers.databricks import DatabricksSQLStatementExecutionTrigger
class GetHookHasFields(Protocol): ...
c... | def _handle_deferrable_execution(
self: HandleDeferrableExecutionHasFields, defer_method_name: str = "execute_complete"
) -> None:
"""Execute a SQL statement in deferrable mode."""
statement_state: SQLStatementState = self._hook.get_sql_statement_state(self.statement_id)
end_time: fl... | function_complex | 1 | {"cognitive_complexity": 7, "loc": 36, "code_loc": 27, "docstring_loc": 1, "function_name": "_handle_deferrable_execution", "class_name": "DatabricksSQLStatementsMixin", "qualname": "DatabricksSQLStatementsMixin._handle_deferrable_execution", "file_path": "providers/databricks/src/airflow/providers/databricks/utils/mix... |
langflow-ai/langflow:src/backend/tests/locust/diagnose_remote.py:module_doc | Write a module-level docstring for the Python module `diagnose_remote` which contains function `test_connectivity`, function `test_flow_endpoint`, function `run_load_simulation`, function `main`. | Diagnostic tool for remote Langflow instances.
Helps debug connection issues and performance problems. | documentation | 1 | {"doc_type": "module", "module_name": "diagnose_remote", "file_path": "src/backend/tests/locust/diagnose_remote.py", "repo_id": "langflow-ai/langflow", "char_length": 103} |
zhayujie/chatgpt-on-wechat:models/doubao/doubao_bot.py:DoubaoBot._convert_messages_to_openai_format | # Context:
import json
class DoubaoBot(Bot):
def __init__(self):
super().__init__()
self.sessions = SessionManager(DoubaoSession, model=conf().get("model") or "doubao-seed-2-0-pro-260215")
model = conf().get("model") or "doubao-seed-2-0-pro-260215"
self.args = {
"model":... | def _convert_messages_to_openai_format(self, messages):
"""
Convert messages from Claude format to OpenAI format.
Claude format uses content blocks: tool_use / tool_result / text
OpenAI format uses tool_calls in assistant, role=tool for results
"""
if not messages:
... | function_complex | 1 | {"cognitive_complexity": 57, "loc": 87, "code_loc": 64, "docstring_loc": 6, "function_name": "_convert_messages_to_openai_format", "class_name": "DoubaoBot", "qualname": "DoubaoBot._convert_messages_to_openai_format", "file_path": "models/doubao/doubao_bot.py", "repo_id": "zhayujie/chatgpt-on-wechat", "has_docstring": ... |
ray-project/ray:python/ray/experimental/gpu_object_manager/cuda_ipc_transport.py:CudaIpcTransportMetadata:class_doc | Write a class-level docstring for `CudaIpcTransportMetadata` (inherits from TensorTransportMetadata) which has methods: various methods. | Metadata for tensors stored in the GPU object store for CUDA IPC transport. | documentation | 0 | {"doc_type": "class", "class_name": "CudaIpcTransportMetadata", "file_path": "python/ray/experimental/gpu_object_manager/cuda_ipc_transport.py", "repo_id": "ray-project/ray", "char_length": 75, "methods": []} |
crewAIInc/crewAI:lib/crewai-tools/tests/rag/test_docx_loader.py:TestDOCXLoader.test_load_docx_parsing_error | # Context:
import tempfile
from unittest.mock import Mock, patch
from crewai_tools.rag.loaders.docx_loader import DOCXLoader
from crewai_tools.rag.source_content import SourceContent
import pytest
class TestDOCXLoader:
def test_load_docx_from_file(self, mock_docx_class): ...
def test_load_docx_with_tables(self... | def test_load_docx_parsing_error(self, mock_docx_class):
mock_docx_class.side_effect = Exception("Invalid DOCX file")
with tempfile.NamedTemporaryFile(suffix=".docx") as f:
loader = DOCXLoader()
with pytest.raises(ValueError, match="Error loading DOCX file"):
loa... | test | 0 | {"function_name": "test_load_docx_parsing_error", "class_name": "TestDOCXLoader", "qualname": "TestDOCXLoader.test_load_docx_parsing_error", "file_path": "lib/crewai-tools/tests/rag/test_docx_loader.py", "repo_id": "crewAIInc/crewAI", "loc": 7, "tested_modules": ["crewai_tools.rag.base_loader", "crewai_tools.rag.loader... |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/devpulse_ai/agents/relevance_agent.py:module_doc | Write a module-level docstring for the Python module `relevance_agent` which contains class `RelevanceAgent`. | Relevance Agent — Scores signals by developer relevance (0–100).
This agent uses LLM reasoning to evaluate each signal's importance to
AI/ML developers. It's a legitimate agent because relevance scoring
requires judgment, context understanding, and nuanced assessment that
pure heuristics cannot capture.
Model Selecti... | documentation | 0 | {"doc_type": "module", "module_name": "relevance_agent", "file_path": "advanced_ai_agents/multi_agent_apps/devpulse_ai/agents/relevance_agent.py", "repo_id": "Shubhamsaboo/awesome-llm-apps", "char_length": 523} |
vllm-project/vllm:vllm/distributed/eplb/rebalance_execute.py:RecvMetadata:class_doc | Write a class-level docstring for `RecvMetadata` which has methods: various methods. | Metadata describing remote receives during EPLB rebalancing. | documentation | 1 | {"doc_type": "class", "class_name": "RecvMetadata", "file_path": "vllm/distributed/eplb/rebalance_execute.py", "repo_id": "vllm-project/vllm", "char_length": 60, "methods": []} |
Zie619/n8n-workflows:src/user_management.py:UserManager.get_all_users | # Context:
from typing import List, Optional
import sqlite3
class User(BaseModel): ...
class UserCreate(BaseModel): ...
class UserLogin(BaseModel): ...
class UserUpdate(BaseModel): ...
class Token(BaseModel): ...
def get_current_user(credentials: HTTPAuthorizationCredentials) -> User: ...
def require_admin(current_use... | def get_all_users(self) -> List[User]:
"""Get all users."""
conn = sqlite3.connect(self.db_path)
cursor = conn.cursor()
cursor.execute("""
SELECT id, username, email, full_name, role, active, created_at
FROM users ORDER BY created_at DESC
""")
us... | function_simple | 0 | {"cognitive_complexity": 1, "loc": 26, "code_loc": 21, "docstring_loc": 1, "function_name": "get_all_users", "class_name": "UserManager", "qualname": "UserManager.get_all_users", "file_path": "src/user_management.py", "repo_id": "Zie619/n8n-workflows", "has_docstring": true, "runnable_level": "file_runnable"} |
huggingface/transformers:src/transformers/models/sam2_video/modular_sam2_video.py:Sam2VideoInferenceSession.remove_mask_inputs | # Context:
class Sam2VideoPromptEncoderConfig(Sam2PromptEncoderConfig): ...
class Sam2VideoMaskDecoderConfig(Sam2MaskDecoderConfig): ...
class Sam2VideoConfig(PreTrainedConfig): ...
class Sam2VideoInferenceCache: ...
class Sam2VideoProcessor(Sam2Processor): ...
class Sam2VideoLayerNorm(Sam2LayerNorm): ...
class Sam2Vi... | def remove_mask_inputs(self, obj_idx: int, frame_idx: int):
"""Remove mask inputs."""
self.mask_inputs_per_obj[obj_idx].pop(frame_idx, None) | function_simple | 0 | {"cognitive_complexity": 0, "loc": 3, "code_loc": 1, "docstring_loc": 1, "function_name": "remove_mask_inputs", "class_name": "Sam2VideoInferenceSession", "qualname": "Sam2VideoInferenceSession.remove_mask_inputs", "file_path": "src/transformers/models/sam2_video/modular_sam2_video.py", "repo_id": "huggingface/transfor... |
crewAIInc/crewAI:lib/crewai/tests/llms/google/test_google.py:test_gemini_completion_call_arguments | # Context:
from unittest.mock import patch, MagicMock
from crewai.llm import LLM
from crewai.crew import Crew
from crewai.agent import Agent
from crewai.task import Task
def mock_google_api_key(): ...
def test_gemini_completion_is_used_when_google_provider(): ...
def test_gemini_completion_is_used_when_gemini_provider... | def test_gemini_completion_call_arguments():
"""
Test that GeminiCompletion.call is invoked with correct arguments
"""
# Create LLM instance first
gemini_llm = LLM(model="google/gemini-2.0-flash-001")
# Mock the instance method
with patch.object(gemini_llm, 'call') as mock_call:
moc... | test | 0 | {"function_name": "test_gemini_completion_call_arguments", "class_name": null, "qualname": "test_gemini_completion_call_arguments", "file_path": "lib/crewai/tests/llms/google/test_google.py", "repo_id": "crewAIInc/crewAI", "loc": 44, "tested_modules": ["crewai.llm", "crewai.crew", "crewai.agent", "crewai.task", "crewai... |
vllm-project/vllm:vllm/model_executor/layers/quantization/qutlass_utils.py:triton_scale_swizzle | # Context:
import torch
from vllm.triton_utils import tl, triton
def triton_mx_block_rearrange(scale_tensor: torch.Tensor) -> torch.Tensor: ...
def to_blocked(input_matrix: torch.Tensor, backend: Literal['torch', 'triton']) -> torch.Tensor: ...
# Task:
Write a Python function `triton_scale_swizzle` to rearranges tens... | def triton_scale_swizzle(
scale_ptr: torch.Tensor,
scale_rows: int,
scale_cols: int,
output_ptr: torch.Tensor,
input_row_stride: int,
output_block_stride: int,
BLOCK_ROWS: tl.constexpr,
BLOCK_COLS: tl.constexpr,
):
"""
Rearranges tensor data from row-major to block-scaled swizzle... | function_simple | 1 | {"cognitive_complexity": 0, "loc": 61, "code_loc": 25, "docstring_loc": 13, "function_name": "triton_scale_swizzle", "class_name": null, "qualname": "triton_scale_swizzle", "file_path": "vllm/model_executor/layers/quantization/qutlass_utils.py", "repo_id": "vllm-project/vllm", "has_docstring": true, "runnable_level": "... |
run-llama/llama_index:llama-index-core/tests/vector_stores/test_utils.py:test_multimedia_node_serdes | # Context:
from typing import Any
from llama_index.core.schema import (
BaseNode,
Document,
MediaResource,
Node,
NodeRelationship,
TextNode,
ImageNode,
IndexNode,
)
from llama_index.core.vector_stores.utils import (
metadata_dict_to_node,
node_to_metadata_dict,
)
def source_node... | def test_multimedia_node_serdes(multimedia_node: Node):
serialized_node: dict[str, Any] = node_to_metadata_dict(multimedia_node)
assert "multimedia_node" in serialized_node["_node_content"]
assert serialized_node["_node_type"] == multimedia_node.class_name()
deserialized_node: BaseNode = metadata_dict_t... | test | 1 | {"function_name": "test_multimedia_node_serdes", "class_name": null, "qualname": "test_multimedia_node_serdes", "file_path": "llama-index-core/tests/vector_stores/test_utils.py", "repo_id": "run-llama/llama_index", "loc": 11, "tested_modules": ["typing", "llama_index.core.schema", "llama_index.core.vector_stores.utils"... |
ray-project/ray:python/ray/tests/gpu_objects/test_gpu_objects_custom.py:test_register_and_use_custom_transport | # Context:
import sys
import numpy
import ray
from ray.experimental import (
CommunicatorMetadata,
TensorTransportManager,
TensorTransportMetadata,
register_tensor_transport,
)
from ray import cloudpickle
class ShmTransportMetadata(TensorTransportMetadata): ...
class ShmCommunicatorMetadata(Communicato... | def test_register_and_use_custom_transport(ray_start_regular):
register_tensor_transport(
"shared_memory", ["cpu"], SharedMemoryTransport, numpy.ndarray
)
@ray.remote
class Actor:
@ray.method(tensor_transport="shared_memory")
def echo(self, data):
return data
... | test | 0 | {"function_name": "test_register_and_use_custom_transport", "class_name": null, "qualname": "test_register_and_use_custom_transport", "file_path": "python/ray/tests/gpu_objects/test_gpu_objects_custom.py", "repo_id": "ray-project/ray", "loc": 34, "tested_modules": ["dataclasses", "typing", "ray.experimental", "ray"], "... |
browser-use/browser-use:tests/ci/test_markdown_chunking.py:TestChunkMarkdownTable.test_table_header_in_overlap_for_continuation | # Context:
from browser_use.dom.markdown_extractor import chunk_markdown_by_structure
class TestChunkMarkdownBasic: ...
class TestChunkMarkdownHeaders: ...
class TestChunkMarkdownHeaderPreferred: ...
class TestChunkMarkdownCodeFence: ...
class TestChunkMarkdownListItems: ...
class TestChunkMarkdownStartFromChar: ...
c... | def test_table_header_in_overlap_for_continuation(self):
"""When a table spans multiple chunks, the header should be in the overlap prefix."""
header = '| Col1 | Col2 |'
separator = '| --- | --- |'
rows = [f'| r{i} | d{i} |' for i in range(100)]
table = '\n'.join([header, separator] + rows)
content = table
... | test | 0 | {"function_name": "test_table_header_in_overlap_for_continuation", "class_name": "TestChunkMarkdownTable", "qualname": "TestChunkMarkdownTable.test_table_header_in_overlap_for_continuation", "file_path": "tests/ci/test_markdown_chunking.py", "repo_id": "browser-use/browser-use", "loc": 15, "tested_modules": ["markdowni... |
langchain-ai/langchain:libs/core/tests/unit_tests/test_ssrf_protection.py:TestIPValidation.test_is_localhost_hostnames | # Context:
from langchain_core._security._ssrf_protection import (
SSRFProtectedUrl,
SSRFProtectedUrlRelaxed,
is_cloud_metadata,
is_localhost,
is_private_ip,
is_safe_url,
validate_safe_url,
)
class TestValidateSafeUrl: ...
class TestIsSafeUrl: ...
class TestSSRFProtectedUrlType: ...
class T... | def test_is_localhost_hostnames(self) -> None:
"""Test localhost hostname detection."""
assert is_localhost("localhost") is True
assert is_localhost("LOCALHOST") is True
assert is_localhost("localhost.localdomain") is True | test | 1 | {"function_name": "test_is_localhost_hostnames", "class_name": "TestIPValidation", "qualname": "TestIPValidation.test_is_localhost_hostnames", "file_path": "libs/core/tests/unit_tests/test_ssrf_protection.py", "repo_id": "langchain-ai/langchain", "loc": 5, "tested_modules": ["typing", "pydantic", "langchain_core._secur... |
vllm-project/vllm:vllm/model_executor/models/gemma3n_mm.py:Gemma3nMultimodalEmbedder:class_doc | Write a class-level docstring for `Gemma3nMultimodalEmbedder` (inherits from nn.Module) which has methods: `__init__`, `forward`. | Embeds token ids or soft tokens for multimodal content into language
model space. | documentation | 1 | {"doc_type": "class", "class_name": "Gemma3nMultimodalEmbedder", "file_path": "vllm/model_executor/models/gemma3n_mm.py", "repo_id": "vllm-project/vllm", "char_length": 81, "methods": ["__init__", "forward"]} |
huggingface/transformers:src/transformers/models/ernie4_5_vl_moe/processing_ernie4_5_vl_moe.py:Ernie4_5_VLMoeProcessor.save_pretrained | # Context:
from pathlib import Path
from shutil import SameFileError, copyfile
class Ernie4_5_VLMoeProcessorKwargs(ProcessingKwargs): ...
class Ernie4_5_VLMoeProcessor(ProcessorMixin):
def __init__(self, image_processor=None, tokenizer=None, video_processor=None, chat_template=None, **kwargs):
self.image_... | def save_pretrained(self, save_directory, push_to_hub: bool = False, **kwargs):
"""We additionally save a copy of the font to the `save_directory` (if we found a file there)"""
os.makedirs(save_directory, exist_ok=True)
if os.path.isfile(self.video_processor.font):
try:
... | function_simple | 0 | {"cognitive_complexity": 2, "loc": 11, "code_loc": 7, "docstring_loc": 1, "function_name": "save_pretrained", "class_name": "Ernie4_5_VLMoeProcessor", "qualname": "Ernie4_5_VLMoeProcessor.save_pretrained", "file_path": "src/transformers/models/ernie4_5_vl_moe/processing_ernie4_5_vl_moe.py", "repo_id": "huggingface/tran... |
mem0ai/mem0:mem0/graphs/neptune/base.py:NeptuneBase._establish_nodes_relations_from_data | # Context:
from mem0.graphs.tools import (
DELETE_MEMORY_STRUCT_TOOL_GRAPH,
DELETE_MEMORY_TOOL_GRAPH,
EXTRACT_ENTITIES_STRUCT_TOOL,
EXTRACT_ENTITIES_TOOL,
RELATIONS_STRUCT_TOOL,
RELATIONS_TOOL,
)
from mem0.graphs.utils import EXTRACT_RELATIONS_PROMPT, get_delete_messages
class NeptuneBase(ABC):... | def _establish_nodes_relations_from_data(self, data, filters, entity_type_map):
"""
Establish relations among the extracted nodes.
"""
if self.config.graph_store.custom_prompt:
messages = [
{
"role": "system",
"content":... | function_simple | 1 | {"cognitive_complexity": 4, "loc": 42, "code_loc": 34, "docstring_loc": 3, "function_name": "_establish_nodes_relations_from_data", "class_name": "NeptuneBase", "qualname": "NeptuneBase._establish_nodes_relations_from_data", "file_path": "mem0/graphs/neptune/base.py", "repo_id": "mem0ai/mem0", "has_docstring": true, "r... |
vllm-project/vllm:tests/quantization/test_mixed_precision.py:test_mixed_precision_model_accuracies | # Context:
import lm_eval
import pytest
class ModelCase: ...
class EvaluationConfig: ...
# Task:
Write a Python test function `test_mixed_precision_model_accuracies` to verify the behavior of `mixed_precision_model_accuracies`.
Module under test: dataclasses, packaging | def test_mixed_precision_model_accuracies(model_name: str, accuracy_numbers: dict):
results = lm_eval.simple_evaluate(
model="vllm",
model_args=EvaluationConfig(model_name).get_model_args(),
tasks=list(accuracy_numbers.keys()),
batch_size=8,
)
rtol = 0.05
for task, expe... | test | 1 | {"function_name": "test_mixed_precision_model_accuracies", "class_name": null, "qualname": "test_mixed_precision_model_accuracies", "file_path": "tests/quantization/test_mixed_precision.py", "repo_id": "vllm-project/vllm", "loc": 16, "tested_modules": ["dataclasses", "packaging"], "has_docstring": false, "runnable_leve... |
langflow-ai/langflow:src/backend/tests/unit/components/files_and_knowledge/test_file_component_image_processing.py:module_doc | Write a module-level docstring for the Python module `test_file_component_image_processing` which contains class `TestDoclingEmptyTextExtraction`, class `TestDoclingSubprocessErrors`, class `TestStoragePathResolution`, class `TestFileNotFoundHandling`, class `TestDataFrameEmptyHandling`. | Tests for FileComponent image processing with Docling.
These tests cover scenarios where:
- Images are processed but contain no extractable text (e.g., profile pictures)
- Docling returns empty doc_rows
- Storage path resolution for uploaded files
- Edge cases in error handling | documentation | 1 | {"doc_type": "module", "module_name": "test_file_component_image_processing", "file_path": "src/backend/tests/unit/components/files_and_knowledge/test_file_component_image_processing.py", "repo_id": "langflow-ai/langflow", "char_length": 279} |
langflow-ai/langflow:src/backend/tests/unit/services/telemetry/test_component_inputs_splitting.py:test_split_truncates_oversized_single_field | # Context:
from langflow.services.telemetry.schema import MAX_TELEMETRY_URL_SIZE, ComponentInputsPayload
def test_chunk_fields_exist(): ...
def test_chunk_fields_serialize_with_aliases(): ...
def test_chunk_fields_optional_default_none(): ...
def test_calculate_url_size_returns_integer(): ...
def test_calculate_url_si... | def test_split_truncates_oversized_single_field():
"""Test that single field exceeding max size gets truncated."""
# Create input with single field that's too large
oversized_value = "x" * 3000
inputs = {"large_field": oversized_value}
payload = ComponentInputsPayload(
component_run_id="tes... | test | 1 | {"function_name": "test_split_truncates_oversized_single_field", "class_name": null, "qualname": "test_split_truncates_oversized_single_field", "file_path": "src/backend/tests/unit/services/telemetry/test_component_inputs_splitting.py", "repo_id": "langflow-ai/langflow", "loc": 25, "tested_modules": ["hypothesis", "hyp... |
infiniflow/ragflow:api/db/services/evaluation_service.py:EvaluationService.get_recommendations | # Context:
import logging
from typing import List, Dict, Any, Optional, Tuple
from api.db.db_models import EvaluationDataset, EvaluationCase, EvaluationRun, EvaluationResult
class EvaluationService(CommonService):
model = EvaluationDataset
def create_dataset(cls, name: str, description: str, kb_ids: List[str],... | def get_recommendations(cls, run_id: str) -> List[Dict[str, Any]]:
"""
Analyze evaluation results and provide configuration recommendations.
Args:
run_id: Evaluation run ID
Returns:
List of recommendation dictionaries
"""
try:
run = E... | function_complex | 1 | {"cognitive_complexity": 10, "loc": 62, "code_loc": 44, "docstring_loc": 9, "function_name": "get_recommendations", "class_name": "EvaluationService", "qualname": "EvaluationService.get_recommendations", "file_path": "api/db/services/evaluation_service.py", "repo_id": "infiniflow/ragflow", "has_docstring": true, "runna... |
ray-project/ray:python/ray/serve/tests/test_https_proxy.py:TestSSLConfiguration.test_ssl_config_with_ca_certs | # Context:
from ray.serve.config import HTTPOptions
def ssl_cert_and_key(): ...
def https_serve_instance(ssl_cert_and_key): ...
class TestHTTPSProxy: ...
class TestHTTPSErrorHandling: ...
class TestHTTPSIntegration: ...
class TestSSLConfiguration:
def test_ssl_config_validation_success(self, ssl_cert_and_key): ..... | def test_ssl_config_with_ca_certs(self, ssl_cert_and_key):
"""Test SSL configuration with CA certificates."""
key_path = ssl_cert_and_key["key_path"]
cert_path = ssl_cert_and_key["cert_path"]
# Use cert as CA for testing purposes
ca_path = cert_path
options = HTTPOptions... | test | 0 | {"function_name": "test_ssl_config_with_ca_certs", "class_name": "TestSSLConfiguration", "qualname": "TestSSLConfiguration.test_ssl_config_with_ca_certs", "file_path": "python/ray/serve/tests/test_https_proxy.py", "repo_id": "ray-project/ray", "loc": 11, "tested_modules": ["ray", "ray._private.tls_utils", "ray.serve.co... |
apache/airflow:airflow-core/src/airflow/serialization/definitions/notset.py:ArgNotSet:class_doc | Write a class-level docstring for `ArgNotSet` which has methods: various methods. | Sentinel type for annotations, useful when None is not viable. | documentation | 1 | {"doc_type": "class", "class_name": "ArgNotSet", "file_path": "airflow-core/src/airflow/serialization/definitions/notset.py", "repo_id": "apache/airflow", "char_length": 62, "methods": []} |
huggingface/transformers:tests/models/falcon_h1/test_modeling_falcon_h1.py:FalconH1ModelTest.test_batching_equivalence | # Context:
class FalconH1ModelTester: ...
class FalconH1ModelIntegrationTest(unittest.TestCase): ...
class FalconH1ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (FalconH1Model, FalconH1ForCausalLM) if is_torch_available() else ()
model_split_pe... | def test_batching_equivalence(self):
# need to disable the tril input mask
orig = self.model_tester.use_input_mask
self.model_tester.use_input_mask = False
super().test_batching_equivalence()
self.model_tester.use_input_mask = orig | test | 0 | {"function_name": "test_batching_equivalence", "class_name": "FalconH1ModelTest", "qualname": "FalconH1ModelTest.test_batching_equivalence", "file_path": "tests/models/falcon_h1/test_modeling_falcon_h1.py", "repo_id": "huggingface/transformers", "loc": 6, "tested_modules": ["transformers", "transformers.testing_utils",... |
browser-use/browser-use:tests/ci/test_multi_act_guards.py:TestRuntimeGuard.test_click_link_aborts_remaining | # Context:
import asyncio
from browser_use.agent.service import Agent
from tests.ci.conftest import create_mock_llm
def http_server(): ...
def base_url(http_server): ...
async def browser_session(): ...
def tools(): ...
class TestTerminatesSequenceMetadata: ...
class TestStaticGuard: ...
class TestSafeChain: ...
clas... | async def test_click_link_aborts_remaining(self, browser_session, base_url, tools):
"""Click a link that navigates to another page — remaining actions skipped."""
await tools.navigate(url=f'{base_url}/page_a', new_tab=False, browser_session=browser_session)
await asyncio.sleep(0.5)
# Get the selector map to fi... | test | 0 | {"function_name": "test_click_link_aborts_remaining", "class_name": "TestRuntimeGuard", "qualname": "TestRuntimeGuard.test_click_link_aborts_remaining", "file_path": "tests/ci/test_multi_act_guards.py", "repo_id": "browser-use/browser-use", "loc": 37, "tested_modules": ["browser_use.agent.service", "browser_use.browser... |
vllm-project/vllm:vllm/model_executor/models/voyage.py:VoyageQwen3BidirectionalEmbedModel._fuse_gate_up_proj | # Context:
from collections import defaultdict
from collections.abc import Iterable
import torch
class VoyageQwen3BidirectionalEmbedModel(Qwen3Model):
hf_to_vllm_mapper = WeightsMapper(orig_to_new_prefix={"model.": ""})
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# E... | def _fuse_gate_up_proj(self, weights: Iterable[WeightItem]) -> Iterable[WeightItem]:
"""Fuse gate_proj and up_proj into gate_up_proj."""
mlp_buf: dict[int, dict[str, torch.Tensor]] = defaultdict(dict)
mlp_suffixes = {
"mlp.gate_proj.weight": "gate",
"mlp.up_proj.weight": ... | function_complex | 1 | {"cognitive_complexity": 9, "loc": 25, "code_loc": 20, "docstring_loc": 1, "function_name": "_fuse_gate_up_proj", "class_name": "VoyageQwen3BidirectionalEmbedModel", "qualname": "VoyageQwen3BidirectionalEmbedModel._fuse_gate_up_proj", "file_path": "vllm/model_executor/models/voyage.py", "repo_id": "vllm-project/vllm", ... |
langflow-ai/langflow:src/backend/tests/unit/components/processing/test_text_operations_component.py:TestBugFixTextStripTabs.test_strip_removes_tabs | # Context:
from lfx.components.processing.text_operations import TextOperations
class TestTextOperationsComponent(ComponentTestBaseWithoutClient): ...
class TestTextOperationsWordCount: ...
class TestTextOperationsCaseConversion: ...
class TestTextOperationsReplace: ...
class TestTextOperationsExtract: ...
class TestT... | def test_strip_removes_tabs(self):
"""Strip should remove tabs when using default whitespace stripping."""
component = TextOperations()
component.strip_mode = "both"
component.strip_characters = ""
result = component._text_strip("\t\thello world\t\t")
assert result == "... | test | 1 | {"function_name": "test_strip_removes_tabs", "class_name": "TestBugFixTextStripTabs", "qualname": "TestBugFixTextStripTabs.test_strip_removes_tabs", "file_path": "src/backend/tests/unit/components/processing/test_text_operations_component.py", "repo_id": "langflow-ai/langflow", "loc": 9, "tested_modules": ["lfx.compone... |
ray-project/ray:release/nightly_tests/dataset/training_ingest_benchmark.py:run_benchmark | # Context:
import itertools
from typing import Dict, List, Optional
import ray
class BenchmarkConfig: ...
class BaseDataLoader(ABC): ...
class S3ParquetDataLoader(BaseDataLoader): ...
class S3UrlImageDataLoader(BaseDataLoader): ...
class S3ReadImagesDataLoader(BaseDataLoader): ...
def create_data_loader(data_loader: s... | def run_benchmark(config: BenchmarkConfig) -> List[Dict]:
"""Run benchmarks with all hyperparameter combinations.
Args:
config: Benchmark configuration
Returns:
List of benchmark results
"""
config.validate()
results = []
# Create data loader for the specified format
d... | function_simple | 0 | {"cognitive_complexity": 3, "loc": 77, "code_loc": 54, "docstring_loc": 8, "function_name": "run_benchmark", "class_name": null, "qualname": "run_benchmark", "file_path": "release/nightly_tests/dataset/training_ingest_benchmark.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "file_runnable"} |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-moss/tests/test_base.py:test_list_indexes | # Context:
import pytest
from llama_index.tools.moss.base import MossToolSpec, QueryOptions
class MockBaseToolSpec: ...
def _make_mock_index(name: str, doc_count: int, status: str) -> MagicMock: ...
def mock_client(): ...
async def test_index_docs(mock_client): ...
async def test_query(mock_client): ...
async def test... | async def test_list_indexes(mock_client):
spec = MossToolSpec(client=mock_client, index_name="test")
output = await spec.list_indexes()
mock_client.list_indexes.assert_awaited_once()
# Verify all indexes are in output
assert "index_a" in output
assert "index_b" in output
assert "5" in outp... | test | 1 | {"function_name": "test_list_indexes", "class_name": null, "qualname": "test_list_indexes", "file_path": "llama-index-integrations/tools/llama-index-tools-moss/tests/test_base.py", "repo_id": "run-llama/llama_index", "loc": 14, "tested_modules": ["llama_index.tools.moss.base"], "has_docstring": false, "runnable_level":... |
infiniflow/ragflow:test/unit_test/utils/test_raptor_utils.py:TestIntegrationScenarios.test_financial_excel_report | # Context:
from rag.utils.raptor_utils import (
is_structured_file_type,
is_tabular_pdf,
should_skip_raptor,
get_skip_reason,
EXCEL_EXTENSIONS,
CSV_EXTENSIONS,
STRUCTURED_EXTENSIONS
)
class TestIsStructuredFileType: ...
class TestIsTabularPDF: ...
class TestShouldSkipRaptor: ...
class TestG... | def test_financial_excel_report(self):
"""Test scenario: Financial quarterly Excel report"""
file_type = ".xlsx"
parser_id = "naive"
parser_config = {}
raptor_config = {"use_raptor": True}
# Should skip Raptor
assert should_skip_raptor(file_type, parser_i... | test | 1 | {"function_name": "test_financial_excel_report", "class_name": "TestIntegrationScenarios", "qualname": "TestIntegrationScenarios.test_financial_excel_report", "file_path": "test/unit_test/utils/test_raptor_utils.py", "repo_id": "infiniflow/ragflow", "loc": 11, "tested_modules": ["rag.utils.raptor_utils"], "has_docstrin... |
vllm-project/vllm:tools/pre_commit/generate_attention_backend_docs.py:is_relevant_file | # Context:
import fnmatch
from pathlib import Path
def find_class_in_ast(tree: ast.AST, class_name: str) -> ast.ClassDef | None: ...
def find_method(node: ast.ClassDef, method_name: str) -> ast.FunctionDef | None: ...
def method_returns_true(method: ast.FunctionDef | None) -> bool: ...
def check_method_overrides(node:... | def is_relevant_file(filepath: str) -> bool:
"""Check if a file matches any of the relevant patterns."""
path = Path(filepath)
if path.is_absolute():
try:
path = path.relative_to(REPO_ROOT)
except ValueError:
return False
path_str = str(path)
return any(fnmat... | function_simple | 1 | {"cognitive_complexity": 2, "loc": 11, "code_loc": 8, "docstring_loc": 1, "function_name": "is_relevant_file", "class_name": null, "qualname": "is_relevant_file", "file_path": "tools/pre_commit/generate_attention_backend_docs.py", "repo_id": "vllm-project/vllm", "has_docstring": true, "runnable_level": "file_runnable"} |
browser-use/browser-use:tests/ci/browser/test_cdp_headers.py:module_doc | Write a module-level docstring for the Python module `test_cdp_headers` which contains function `test_browser_profile_headers_attribute`, function `test_browser_profile_headers_inherited`. | Test that headers are properly passed to CDPClient for authenticated remote browser connections.
This tests the fix for: When using browser-use with remote browser services that require
authentication headers, these headers need to be included in the WebSocket handshake. | documentation | 0 | {"doc_type": "module", "module_name": "test_cdp_headers", "file_path": "tests/ci/browser/test_cdp_headers.py", "repo_id": "browser-use/browser-use", "char_length": 272} |
ray-project/ray:python/ray/data/tests/datasource/test_turbopuffer_datasink.py:TestMultiNamespaceWrites.test_drops_namespace_column_before_writing | # Context:
from unittest.mock import MagicMock, patch
import pyarrow as pa
def mock_turbopuffer_module(monkeypatch): ...
def sink(): ...
def mock_client(): ...
def sample_table(): ...
def make_sink(**kwargs) -> TurbopufferDatasink: ...
class TestConstructorValidation: ...
class TestClientInitialization: ...
class Test... | def test_drops_namespace_column_before_writing(self):
"""The namespace column is not included in the written data."""
sink = make_sink(namespace=None, namespace_column="tenant")
table = pa.table(
{
"tenant": ["ns_a"],
"id": [1],
"vector... | test | 0 | {"function_name": "test_drops_namespace_column_before_writing", "class_name": "TestMultiNamespaceWrites", "qualname": "TestMultiNamespaceWrites.test_drops_namespace_column_before_writing", "file_path": "python/ray/data/tests/datasource/test_turbopuffer_datasink.py", "repo_id": "ray-project/ray", "loc": 27, "tested_modu... |
apache/airflow:helm-tests/tests/helm_tests/airflow_core/test_worker_sets.py:TestWorkerSets.test_overwrite_hpa_disable | # Context:
from chart_utils.helm_template_generator import render_chart
class TestWorkerSets:
def test_enable_default_worker_set_default(self): ...
def test_enable_default_worker_set(self, enable_default, objects_number): ...
def test_create_multiple_worker_sets(self, enable_default, expected): ...
def... | def test_overwrite_hpa_disable(self):
docs = render_chart(
values={
"workers": {
"hpa": {"enabled": True},
"celery": {"enableDefault": False, "sets": [{"name": "test", "hpa": {"enabled": False}}]},
}
},
s... | test | 1 | {"function_name": "test_overwrite_hpa_disable", "class_name": "TestWorkerSets", "qualname": "TestWorkerSets.test_overwrite_hpa_disable", "file_path": "helm-tests/tests/helm_tests/airflow_core/test_worker_sets.py", "repo_id": "apache/airflow", "loc": 12, "tested_modules": ["__future__", "chart_utils.helm_template_genera... |
huggingface/transformers:benchmark_v2/framework/hardware_metrics.py:get_intel_xpu_stats | # Context:
import subprocess
def get_device_name_and_memory_total() -> tuple[str, float]: ...
class HardwareInfo: ...
def get_amd_gpu_stats(device_handle) -> tuple[int, float]: ...
def get_nvidia_gpu_stats(device_handle) -> tuple[int, float]: ...
class GPUMonitoringStatus(Enum): ...
class GPURawMetrics: ...
class GPUM... | def get_intel_xpu_stats() -> tuple[int, float]:
"""Returns the utilization and memory used of an Intel XPU"""
# xpu-smi outputs CSV format: Timestamp, DeviceId, GPU Memory Utilization (%), GPU Memory Used (MiB)
xpu_smi_output = subprocess.check_output(["xpu-smi", "dump", "-m", "5,18", "-n", "1"])
lines ... | function_complex | 0 | {"cognitive_complexity": 7, "loc": 28, "code_loc": 20, "docstring_loc": 1, "function_name": "get_intel_xpu_stats", "class_name": null, "qualname": "get_intel_xpu_stats", "file_path": "benchmark_v2/framework/hardware_metrics.py", "repo_id": "huggingface/transformers", "has_docstring": true, "runnable_level": "file_runna... |
jax-ml/jax:jax/experimental/mosaic/gpu/mma.py:mma | # Context:
from jax.experimental.mosaic.gpu import fragmented_array as fa
from jaxlib.mlir import ir
class MMALayouts: ...
def _ptx_dtype_str(dtype: ir.Type, is_signed: bool | None) -> str: ...
def _mma_single_tile(acc: fa.FragmentedArray, a: fa.FragmentedArray, b: fa.FragmentedArray) -> fa.FragmentedArray: ...
# Tas... | def mma(
acc: fa.FragmentedArray,
a: fa.FragmentedArray,
b: fa.FragmentedArray,
) -> fa.FragmentedArray:
"""Computes `acc + a @ b.T` using synchronouse MMA instructions.
All operands must have `TiledLayout`s. The layouts must be generated
by the `MMALayouts` class, which ensures that the tiles are ma... | function_complex | 1 | {"cognitive_complexity": 20, "loc": 91, "code_loc": 56, "docstring_loc": 17, "function_name": "mma", "class_name": null, "qualname": "mma", "file_path": "jax/experimental/mosaic/gpu/mma.py", "repo_id": "jax-ml/jax", "has_docstring": true, "runnable_level": "project_runnable"} |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-solr/tests/test_solr_vector_store_query_utils.py:test_recursively_unpack_filters_valid_inputs | # Context:
import pytest
from llama_index.core.vector_stores.types import (
FilterCondition,
FilterOperator,
MetadataFilter,
MetadataFilters,
)
from llama_index.vector_stores.solr.query_utils import (
recursively_unpack_filters,
)
def test_recursively_unpack_filters_invalid_operators(input_operator... | def test_recursively_unpack_filters_valid_inputs(
input_filters: MetadataFilters,
expected_output: list[str],
) -> None:
actual_output = recursively_unpack_filters(input_filters)
assert actual_output == expected_output | test | 1 | {"function_name": "test_recursively_unpack_filters_valid_inputs", "class_name": null, "qualname": "test_recursively_unpack_filters_valid_inputs", "file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-solr/tests/test_solr_vector_store_query_utils.py", "repo_id": "run-llama/llama_index", "loc": 7... |
streamlit/streamlit:lib/streamlit/components/v2/component_registry.py:BidiComponentRegistry.__init__ | # Context:
import threading
from collections.abc import MutableMapping
class BidiComponentDefinition: ...
class BidiComponentRegistry:
def register_components_from_definitions(self, component_definitions: dict[str, dict[str, Any]]) -> None: ...
def register(self, definition: BidiComponentDefinition) -> None: ... | def __init__(self) -> None:
"""Initialize the component registry with an empty, thread-safe store."""
self._components: MutableMapping[str, BidiComponentDefinition] = {}
self._lock = threading.Lock() | function_simple | 1 | {"cognitive_complexity": 0, "loc": 4, "code_loc": 2, "docstring_loc": 1, "function_name": "__init__", "class_name": "BidiComponentRegistry", "qualname": "BidiComponentRegistry.__init__", "file_path": "lib/streamlit/components/v2/component_registry.py", "repo_id": "streamlit/streamlit", "has_docstring": true, "runnable_... |
infiniflow/ragflow:test/testcases/test_web_api/test_canvas_app/test_canvas_routes_unit.py:test_test_db_connect_dialect_matrix_unit | # Context:
import inspect
import sys
from types import ModuleType, SimpleNamespace
import pytest
class _DummyManager: ...
class _AwaitableValue: ...
class _Args(dict): ...
class _StubHeaders: ...
class _StubResponse: ...
class _DummyRequest: ...
class _DummyRetCode: ...
class _DummyCanvasCategory: ...
class _TaskField... | def test_test_db_connect_dialect_matrix_unit(monkeypatch):
module = _load_canvas_module(monkeypatch)
class _FakeDB:
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.connected = 0
self.closed = 0
def connect(self):
... | test | 1 | {"function_name": "test_test_db_connect_dialect_matrix_unit", "class_name": null, "qualname": "test_test_db_connect_dialect_matrix_unit", "file_path": "test/testcases/test_web_api/test_canvas_app/test_canvas_routes_unit.py", "repo_id": "infiniflow/ragflow", "loc": 174, "tested_modules": ["copy", "functools", "pathlib",... |
ray-project/ray:python/ray/data/tests/datasource/test_uc_datasource.py:TestReadUnityCatalogAPI.test_raises_with_incomplete_credentials | # Context:
import pytest
def static_credential_provider(): ...
def refreshable_credential_provider(): ...
def requests_mocker(): ...
class TestBuildHeaders: ...
class TestRequestWith401Retry: ...
class TestUnityCatalogConnectorInit: ...
class TestUnityCatalogConnector401Retry: ...
class TestReadUnityCatalogAPI:
d... | def test_raises_with_incomplete_credentials(self, url, token):
"""Test that read_unity_catalog raises when credentials are incomplete."""
import ray.data
with pytest.raises(ValueError, match="Either 'credential_provider' or both"):
ray.data.read_unity_catalog(
table=... | test | 0 | {"function_name": "test_raises_with_incomplete_credentials", "class_name": "TestReadUnityCatalogAPI", "qualname": "TestReadUnityCatalogAPI.test_raises_with_incomplete_credentials", "file_path": "python/ray/data/tests/datasource/test_uc_datasource.py", "repo_id": "ray-project/ray", "loc": 10, "tested_modules": ["ray.dat... |
infiniflow/ragflow:test/unit_test/common/test_string_utils.py:TestRemoveRedundantSpaces.test_multiple_punctuation | # Context:
import pytest
from common.string_utils import remove_redundant_spaces, clean_markdown_block
class TestCleanMarkdownBlock: ...
class TestRemoveRedundantSpaces:
def test_remove_spaces_before_commas(self): ...
def test_remove_spaces_before_periods(self): ...
def test_remove_spaces_before_exclamati... | def test_multiple_punctuation(self):
"""Test multiple consecutive punctuation marks"""
input_text = "Wow !! ... Really ??"
expected = "Wow!! ... Really??"
assert remove_redundant_spaces(input_text) == expected | test | 1 | {"function_name": "test_multiple_punctuation", "class_name": "TestRemoveRedundantSpaces", "qualname": "TestRemoveRedundantSpaces.test_multiple_punctuation", "file_path": "test/unit_test/common/test_string_utils.py", "repo_id": "infiniflow/ragflow", "loc": 5, "tested_modules": ["common.string_utils"], "has_docstring": t... |
langflow-ai/langflow:src/lfx/src/lfx/inputs/inputs.py:MultilineInput:class_doc | Write a class-level docstring for `MultilineInput` (inherits from MessageTextInput, AIMixin, MultilineMixin, InputTraceMixin, ToolModeMixin) which has methods: various methods. | Represents a multiline input field.
Attributes:
field_type (SerializableFieldTypes): The type of the field. Defaults to FieldTypes.TEXT.
multiline (CoalesceBool): Indicates whether the input field should support multiple lines. Defaults to True.
password (CoalesceBool): Whether to mask the input as a passw... | documentation | 1 | {"doc_type": "class", "class_name": "MultilineInput", "file_path": "src/lfx/src/lfx/inputs/inputs.py", "repo_id": "langflow-ai/langflow", "char_length": 349, "methods": []} |
exo-explore/exo:bench/eval_tool_calls.py:run_scenario | # Context:
import json
import sys
import httpx
class Scenario: ...
def load_scenarios(path: Path) -> list[Scenario]: ...
class ParsedResponse: ...
class ScenarioResult: ...
def validate_args(args_str: str, required_keys: list[str]) -> tuple[bool, str | None]: ...
def validate_nested_args(args_str: str, array_key: str,... | def run_scenario(
client: httpx.Client,
host: str,
port: int,
model: str,
scenario: Scenario,
api_name: ApiName,
timeout: float,
verbose: bool,
) -> list[ScenarioResult]:
"""Run a single scenario against one API adapter. Returns 1-2 results."""
adapter = ADAPTERS[api_name]
bu... | function_complex | 0 | {"cognitive_complexity": 47, "loc": 181, "code_loc": 152, "docstring_loc": 1, "function_name": "run_scenario", "class_name": null, "qualname": "run_scenario", "file_path": "bench/eval_tool_calls.py", "repo_id": "exo-explore/exo", "has_docstring": true, "runnable_level": "file_runnable"} |
apache/airflow:airflow-core/src/airflow/api_fastapi/core_api/services/public/task_instances.py:BulkTaskInstanceService.handle_bulk_delete | # Context:
from fastapi import HTTPException, Query, status
from sqlalchemy import select, tuple_
from airflow.api_fastapi.core_api.datamodels.common import (
BulkActionNotOnExistence,
BulkActionResponse,
BulkBody,
BulkCreateAction,
BulkDeleteAction,
BulkUpdateAction,
)
from airflow.api_fastapi.... | def handle_bulk_delete(
self, action: BulkDeleteAction[BulkTaskInstanceBody], results: BulkActionResponse
) -> None:
"""Bulk delete task instances."""
# Validate and categorize entities into specific and all map index delete sets
delete_specific_map_index_task_keys, delete_all_map_in... | function_complex | 1 | {"cognitive_complexity": 31, "loc": 82, "code_loc": 63, "docstring_loc": 1, "function_name": "handle_bulk_delete", "class_name": "BulkTaskInstanceService", "qualname": "BulkTaskInstanceService.handle_bulk_delete", "file_path": "airflow-core/src/airflow/api_fastapi/core_api/services/public/task_instances.py", "repo_id":... |
apache/airflow:providers/edge3/tests/unit/edge3/cli/test_definition.py:TestEdgeCliDefinition.test_maintenance_command_args_on | # Context:
class TestEdgeCliDefinition:
def setup_parser(self): ...
def test_edge_cli_commands_count(self): ...
def test_edge_commands_count(self): ...
def test_edge_subcommands_defined(self, command): ...
def test_worker_command_args(self): ...
def test_status_command_args(self): ...
def t... | def test_maintenance_command_args_on(self):
"""Test maintenance command to enable maintenance mode."""
params = [
"edge",
"maintenance",
"on",
"--comments",
"Scheduled maintenance",
"--wait",
]
args = self.arg_parser... | test | 1 | {"function_name": "test_maintenance_command_args_on", "class_name": "TestEdgeCliDefinition", "qualname": "TestEdgeCliDefinition.test_maintenance_command_args_on", "file_path": "providers/edge3/tests/unit/edge3/cli/test_definition.py", "repo_id": "apache/airflow", "loc": 14, "tested_modules": ["__future__", "airflow.cli... |
unclecode/crawl4ai:crawl4ai/table_extraction.py:LLMTableExtraction._merge_chunk_results | # Context:
from typing import Dict, List, Optional, Any, Union, Tuple
class TableExtractionStrategy(ABC): ...
class DefaultTableExtraction(TableExtractionStrategy): ...
class NoTableExtraction(TableExtractionStrategy): ...
class LLMTableExtraction(TableExtractionStrategy):
TABLE_EXTRACTION_PROMPT = """You are a s... | def _merge_chunk_results(self, chunk_results: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""
Merge results from multiple chunks into a single table.
"""
# Sort by chunk index to maintain order
chunk_results.sort(key=lambda x: x.get('chunk_index', 0))
# Filter... | function_simple | 1 | {"cognitive_complexity": 3, "loc": 35, "code_loc": 17, "docstring_loc": 3, "function_name": "_merge_chunk_results", "class_name": "LLMTableExtraction", "qualname": "LLMTableExtraction._merge_chunk_results", "file_path": "crawl4ai/table_extraction.py", "repo_id": "unclecode/crawl4ai", "has_docstring": true, "runnable_le... |
huggingface/transformers:tests/models/lighton_ocr/test_modeling_lighton_ocr.py:LightOnOcrForConditionalGenerationModelTest.test_forward_pass_with_image_sizes | # Context:
from transformers.testing_utils import (
cleanup,
require_torch,
slow,
torch_device,
)
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
import torch
class LightOnOcrVisionText2TextModelTester: ...
class LightOnOcrForConditionalGenerationIntegrationTest(unittest... | def test_forward_pass_with_image_sizes(self):
"""
Test that the model correctly handles variable image sizes.
"""
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config).to(torch_devi... | test | 0 | {"function_name": "test_forward_pass_with_image_sizes", "class_name": "LightOnOcrForConditionalGenerationModelTest", "qualname": "LightOnOcrForConditionalGenerationModelTest.test_forward_pass_with_image_sizes", "file_path": "tests/models/lighton_ocr/test_modeling_lighton_ocr.py", "repo_id": "huggingface/transformers", ... |
commaai/openpilot:selfdrive/ui/widgets/prime.py:PrimeWidget._render_for_non_prime_users | # Context:
import pyray as rl
from openpilot.system.ui.lib.application import gui_app, FontWeight
from openpilot.system.ui.lib.multilang import tr
from openpilot.system.ui.lib.text_measure import measure_text_cached
from openpilot.system.ui.lib.wrap_text import wrap_text
from openpilot.system.ui.widgets.label import gu... | def _render_for_non_prime_users(self, rect: rl.Rectangle):
"""Renders the advertisement for non-Prime users."""
rl.draw_rectangle_rounded(rect, 0.025, 10, self.PRIME_BG_COLOR)
# Layout
x, y = rect.x + 80, rect.y + 90
w = rect.width - 160
# Title
gui_label(rl.Rectangle(x, y, w, 90), tr("Up... | function_simple | 0 | {"cognitive_complexity": 1, "loc": 29, "code_loc": 16, "docstring_loc": 1, "function_name": "_render_for_non_prime_users", "class_name": "PrimeWidget", "qualname": "PrimeWidget._render_for_non_prime_users", "file_path": "selfdrive/ui/widgets/prime.py", "repo_id": "commaai/openpilot", "has_docstring": true, "runnable_le... |
langflow-ai/langflow:src/backend/tests/integration/test_image_providers.py:test_anthropic_vision_api_with_jpeg | # Context:
import os
import pytest
from langflow.utils.image import create_image_content_dict
from tests.api_keys import has_api_key
import anthropic
def sample_image(tmp_path): ...
def sample_jpeg_image(tmp_path): ...
def test_openai_vision_api_real_call(sample_image): ...
def test_openai_vision_api_with_jpeg(sample_... | def test_anthropic_vision_api_with_jpeg(sample_jpeg_image):
"""Test Anthropic Claude API with JPEG image format."""
try:
import anthropic
except ImportError:
pytest.skip("Anthropic package not installed")
client = anthropic.Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY"))
content_d... | test | 1 | {"function_name": "test_anthropic_vision_api_with_jpeg", "class_name": null, "qualname": "test_anthropic_vision_api_with_jpeg", "file_path": "src/backend/tests/integration/test_image_providers.py", "repo_id": "langflow-ai/langflow", "loc": 33, "tested_modules": ["langflow.utils.image", "tests.api_keys", "tests.api_keys... |
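To make the rows above easier to work with, here is a minimal, hypothetical Python sketch of how entries shaped like these (a category string, a 0/1 label, and a metadata JSON string) might be grouped and filtered. The field names and example values are copied from the rows shown in this table; the dict layout and the grouping logic are assumptions for illustration, not an official loader for this data.

```python
# Hypothetical sketch, not an official loader: the dicts below mimic the
# category / label / metadata fields visible in the rows above.
import json
from collections import Counter

rows = [
    {
        "category": "test",
        "label": 1,
        # metadata appears in the table as a JSON string
        "metadata": json.dumps({
            "function_name": "test_multiple_punctuation",
            "class_name": "TestRemoveRedundantSpaces",
            "repo_id": "infiniflow/ragflow",
        }),
    },
    {
        "category": "documentation",
        "label": 0,
        "metadata": json.dumps({
            "doc_type": "class",
            "class_name": "CudaIpcTransportMetadata",
            "repo_id": "ray-project/ray",
        }),
    },
]

positives_per_category = Counter()  # how many label == 1 samples each category has
repos = set()                       # which repositories the samples come from
for row in rows:
    meta = json.loads(row["metadata"])
    repos.add(meta["repo_id"])
    if row["label"] == 1:
        positives_per_category[row["category"]] += 1

print(positives_per_category)  # Counter({'test': 1})
print(sorted(repos))           # ['infiniflow/ragflow', 'ray-project/ray']
```

The same pattern extends to the other categories that appear above (function_simple, function_complex) and to metadata keys such as file_path or cognitive_complexity, since each row carries its metadata as one JSON object.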