sample_id string | instruction string | completion string | category string | label int64 | metadata string |
|---|---|---|---|---|---|
ray-project/ray:python/ray/data/tests/test_dataset_limits.py:test_per_task_row_limit_with_custom_readtask | # Context:
import pandas as pd
import pyarrow as pa
from ray.data.block import BlockMetadata
from ray.data.datasource.datasource import Datasource, ReadTask
def test_limit_execution(ray_start_regular): ...
def test_limit(ray_start_regular_shared, lazy): ...
def test_limit_no_redundant_read(ray_start_regular_shared, limit): ...
def test_limit_no_num_row_info(ray_start_regular_shared): ...
def test_per_task_row_limit_basic(ray_start_regular_shared, restore_data_context): ...
def test_per_task_row_limit_multiple_blocks_per_task(ray_start_regular_shared): ...
def test_per_task_row_limit_larger_than_data(ray_start_regular_shared, restore_data_context): ...
def test_per_task_row_limit_exact_block_boundary(ray_start_regular_shared, restore_data_context): ...
def test_per_task_row_limit_various_sizes(ray_start_regular_shared, limit, restore_data_context): ...
def test_per_task_row_limit_with_transformations(ray_start_regular_shared, restore_data_context): ...
def test_per_task_row_limit_with_filter(ray_start_regular_shared, restore_data_context): ...
def test_per_task_row_limit_readtask_properties(ray_start_regular_shared): ...
def test_per_task_row_limit_edge_cases(ray_start_regular_shared, restore_data_context): ...
# Task:
Write a Python test function `test_per_task_row_limit_with_custom_readtask` to test per-block limiting directly with ReadTask implementation.
Module under test: ray.data.block, ray.data.context, ray.data.datasource.datasource | def test_per_task_row_limit_with_custom_readtask(ray_start_regular_shared):
"""Test per-block limiting directly with ReadTask implementation."""
def read_data_with_limit():
# This simulates a ReadTask that reads 200 rows
return [pd.DataFrame({"id": range(200)})]
# Create ReadTask with per-block limit
task_with_limit = ReadTask(
read_fn=read_data_with_limit,
metadata=BlockMetadata(
num_rows=200, size_bytes=1600, input_files=None, exec_stats=None
),
schema=pa.lib.Schema.from_pandas(pd.DataFrame({"id": []})),
per_task_row_limit=50,
)
# Execute the ReadTask
result_blocks = list(task_with_limit())
# Should get only 50 rows due to per-block limiting
assert len(result_blocks) == 1
assert len(result_blocks[0]) == 50
assert result_blocks[0]["id"].tolist() == list(range(50)) | test | 0 | {"function_name": "test_per_task_row_limit_with_custom_readtask", "class_name": null, "qualname": "test_per_task_row_limit_with_custom_readtask", "file_path": "python/ray/data/tests/test_dataset_limits.py", "repo_id": "ray-project/ray", "loc": 24, "tested_modules": ["ray.data.block", "ray.data.context", "ray.data.datasource.datasource", "ray.data.tests.conftest", "ray.data.tests.conftest"], "has_docstring": true, "runnable_level": "project_runnable"} |
crewAIInc/crewAI:lib/crewai/tests/llms/openai/test_openai.py:test_extra_arguments_are_passed_to_openai_completion | # Context:
from unittest.mock import patch, MagicMock
from crewai.llm import LLM
def test_openai_completion_is_used_when_openai_provider(): ...
def test_openai_completion_is_used_when_no_provider_prefix(): ...
def test_openai_is_default_provider_without_explicit_llm_set_on_agent(): ...
def test_openai_completion_module_is_imported(): ...
def test_native_openai_raises_error_when_initialization_fails(): ...
def test_openai_completion_initialization_parameters(): ...
def test_openai_completion_call(): ...
def test_openai_completion_called_during_crew_execution(): ...
def test_openai_completion_call_arguments(): ...
def test_multiple_openai_calls_in_crew(): ...
def test_openai_completion_with_tools(): ...
def test_openai_completion_call_returns_usage_metrics(): ...
def test_openai_raises_error_when_model_not_supported(): ...
def test_openai_client_setup_with_extra_arguments(): ...
def test_openai_get_client_params_with_api_base(): ...
def test_openai_get_client_params_with_base_url_priority(): ...
def test_openai_get_client_params_with_env_var(): ...
def test_openai_get_client_params_priority_order(): ...
def test_openai_get_client_params_no_base_url(monkeypatch): ...
def test_openai_streaming_with_response_model(): ...
def test_openai_response_format_with_pydantic_model(): ...
def test_openai_response_format_with_dict(): ...
def test_openai_response_format_none(): ...
def test_openai_streaming_returns_usage_metrics(): ...
def test_openai_responses_api_initialization(): ...
def test_openai_responses_api_default_is_completions(): ...
def test_openai_responses_api_prepare_params(): ...
def test_openai_responses_api_tool_format(): ...
def test_openai_completions_api_tool_format(): ...
def test_openai_responses_api_structured_output_format(): ...
def test_openai_responses_api_with_previous_response_id(): ...
def test_openai_responses_api_call_routing(): ...
def test_openai_responses_api_basic_call(): ...
def test_openai_responses_api_with_structured_output(): ...
def test_openai_responses_api_with_system_message_extraction(): ...
def test_openai_responses_api_streaming(): ...
def test_openai_responses_api_returns_usage_metrics(): ...
def test_openai_responses_api_builtin_tools_param(): ...
def test_openai_responses_api_builtin_tools_with_custom_tools(): ...
def test_openai_responses_api_with_web_search(): ...
def test_responses_api_result_dataclass(): ...
def test_responses_api_result_has_tool_outputs(): ...
def test_responses_api_result_has_reasoning(): ...
def test_openai_responses_api_parse_tool_outputs_param(): ...
def test_openai_responses_api_parse_tool_outputs_default_false(): ...
def test_openai_responses_api_with_parse_tool_outputs(): ...
def test_openai_responses_api_parse_tool_outputs_basic_call(): ...
def test_openai_responses_api_auto_chain_param(): ...
def test_openai_responses_api_auto_chain_default_false(): ...
def test_openai_responses_api_last_response_id_property(): ...
def test_openai_responses_api_reset_chain(): ...
def test_openai_responses_api_auto_chain_prepare_params(): ...
def test_openai_responses_api_explicit_previous_response_id_takes_precedence(): ...
def test_openai_responses_api_auto_chain_disabled_no_tracking(): ...
def test_openai_responses_api_auto_chain_integration(): ...
def test_openai_responses_api_auto_chain_with_reset(): ...
def test_openai_responses_api_auto_chain_reasoning_param(): ...
def test_openai_responses_api_auto_chain_reasoning_default_false(): ...
def test_openai_responses_api_last_reasoning_items_property(): ...
def test_openai_responses_api_reset_reasoning_chain(): ...
def test_openai_responses_api_auto_chain_reasoning_adds_include(): ...
def test_openai_responses_api_auto_chain_reasoning_preserves_existing_include(): ...
def test_openai_responses_api_auto_chain_reasoning_no_duplicate_include(): ...
def test_openai_responses_api_auto_chain_reasoning_prepends_to_input(): ...
def test_openai_responses_api_auto_chain_reasoning_disabled_no_include(): ...
def test_openai_responses_api_auto_chain_reasoning_disabled_no_prepend(): ...
def test_openai_responses_api_both_auto_chains_work_together(): ...
def test_openai_agent_kickoff_structured_output_without_tools(): ...
def test_openai_agent_kickoff_structured_output_with_tools(): ...
def test_openai_stop_words_not_applied_to_structured_output(): ...
def test_openai_stop_words_still_applied_to_regular_responses(): ...
def test_openai_structured_output_preserves_json_with_stop_word_patterns(): ...
def test_openai_completions_cached_prompt_tokens(): ...
def test_openai_responses_api_cached_prompt_tokens(): ...
def test_openai_streaming_cached_prompt_tokens(): ...
def test_openai_completions_cached_prompt_tokens_with_tools(): ...
def test_openai_responses_api_cached_prompt_tokens_with_tools(): ...
def test_openai_streaming_returns_tool_calls_without_available_functions(): ...
async def test_openai_async_streaming_returns_tool_calls_without_available_functions(): ...
# Task:
Write a Python test function `test_extra_arguments_are_passed_to_openai_completion` to test that extra arguments are passed to OpenAICompletion.
Module under test: typing, crewai.llm, crewai.llms.providers.openai.completion | def test_extra_arguments_are_passed_to_openai_completion():
"""
Test that extra arguments are passed to OpenAICompletion
"""
llm = LLM(model="gpt-4o", temperature=0.7, max_tokens=1000, top_p=0.5, max_retries=3)
with patch.object(llm.client.chat.completions, 'create') as mock_create:
mock_create.return_value = MagicMock(
choices=[MagicMock(message=MagicMock(content="test response", tool_calls=None))],
usage=MagicMock(prompt_tokens=10, completion_tokens=20, total_tokens=30)
)
llm.call("Hello, how are you?")
assert mock_create.called
call_kwargs = mock_create.call_args[1]
assert call_kwargs['temperature'] == 0.7
assert call_kwargs['max_tokens'] == 1000
assert call_kwargs['top_p'] == 0.5
assert call_kwargs['model'] == 'gpt-4o' | test | 0 | {"function_name": "test_extra_arguments_are_passed_to_openai_completion", "class_name": null, "qualname": "test_extra_arguments_are_passed_to_openai_completion", "file_path": "lib/crewai/tests/llms/openai/test_openai.py", "repo_id": "crewAIInc/crewAI", "loc": 21, "tested_modules": ["typing", "crewai.llm", "crewai.llms.providers.openai.completion", "crewai.crew", "crewai.agent"], "has_docstring": true, "runnable_level": "project_runnable"} |
Zie619/n8n-workflows:src/user_management.py:update_user | # Context:
from fastapi import FastAPI, HTTPException, Depends, status
class User(BaseModel): ...
class UserCreate(BaseModel): ...
class UserLogin(BaseModel): ...
class UserUpdate(BaseModel): ...
class Token(BaseModel): ...
class UserManager: ...
def get_current_user(credentials: HTTPAuthorizationCredentials) -> User: ...
def require_admin(current_user: User) -> User: ...
async def register_user(user_data: UserCreate): ...
async def login_user(login_data: UserLogin): ...
async def get_current_user_info(current_user: User): ...
async def get_all_users(admin: User): ...
async def get_user(user_id: int, current_user: User): ...
async def delete_user(user_id: int, admin: User): ...
async def get_auth_dashboard(): ...
# Task:
Write a Python async function `update_user` to update user data.
Parameters: user_id: int, update_data: UserUpdate, current_user: User | async def update_user(
user_id: int,
update_data: UserUpdate,
current_user: User = Depends(get_current_user),
):
"""Update user data."""
# Users can only update their own profile unless they're admin
if current_user.id != user_id and current_user.role != "admin":
raise HTTPException(status_code=403, detail="Access denied")
# Non-admin users cannot change roles
if current_user.role != "admin" and update_data.role is not None:
raise HTTPException(status_code=403, detail="Cannot change role")
user = user_manager.update_user(user_id, update_data)
if user is None:
raise HTTPException(status_code=404, detail="User not found")
return user | function_simple | 0 | {"cognitive_complexity": 5, "loc": 19, "code_loc": 8, "docstring_loc": 1, "function_name": "update_user", "class_name": null, "qualname": "update_user", "file_path": "src/user_management.py", "repo_id": "Zie619/n8n-workflows", "has_docstring": true, "runnable_level": "file_runnable"} |
huggingface/transformers:tests/models/dia/test_modeling_dia.py:DiaModelTest.test_sdpa_can_dispatch_composite_models | # Context:
import tempfile
from transformers import (
DiaForConditionalGeneration,
DiaModel,
DiaProcessor,
PreTrainedConfig,
PreTrainedModel,
)
class DiaModelTester: ...
class DiaForConditionalGenerationIntegrationTest(unittest.TestCase): ...
class DiaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (DiaModel, DiaForConditionalGeneration) if is_torch_available() else ()
all_generative_model_classes = (DiaForConditionalGeneration,)
pipeline_model_mapping = {}
test_resize_embeddings = False
is_encoder_decoder = True
_is_composite = True
def setUp(self): ...
def prepare_config_and_inputs_for_generate(self, batch_size): ...
def skip_non_greedy_generate(self): ...
def _prepare_for_class(self, inputs_dict, model_class, return_labels): ...
def test_config(self): ...
def test_model_forward(self): ...
def test_encoder_decoder_model_standalone(self): ...
def _check_logits(self, batch_size, logits, config): ...
def _check_attentions_for_generate(self, batch_size, attentions, prompt_length, output_length, config, decoder_past_key_values): ...
def _check_encoder_attention_for_generate(self, attentions, batch_size, config, prompt_length): ...
def _check_hidden_states_for_generate(self, batch_size, hidden_states, prompt_length, output_length, config, use_cache): ...
def _check_encoder_hidden_states_for_generate(self, hidden_states, batch_size, config, prompt_length): ...
def _check_scores(self, batch_size, scores, generated_length, config): ...
def test_generate_continue_from_past_key_values(self): ...
def test_prepare_inputs_for_generation_kwargs_forwards(self): ...
def test_hidden_states_output(self): ...
def test_model_get_set_embeddings(self): ...
def test_multi_gpu_data_parallel_forward(self): ...
# Task:
Write a Python test method `test_sdpa_can_dispatch_composite_models` in test class `DiaModelTest` to overwritten as it relies on hardcoded namings atm - checking for our case here specifically.
Module under test: transformers.models.dia, transformers.testing_utils, transformers.utils | def test_sdpa_can_dispatch_composite_models(self):
"""
Overwritten as it relies on hardcoded namings atm - checking for our case here specifically
"""
for model_class in self.all_model_classes:
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model = model_class.from_pretrained(tmpdirname)
sub_models_supporting_sdpa = [
(module._supports_sdpa or module._supports_attention_backend)
for name, module in model.named_modules()
if isinstance(module, PreTrainedModel) and name != ""
]
supports_sdpa_all_modules = (
all(sub_models_supporting_sdpa)
if len(sub_models_supporting_sdpa) > 0
else (model._supports_sdpa or model._supports_attention_backend)
)
if not supports_sdpa_all_modules:
with self.assertRaises(ValueError):
model_sdpa = model_class.from_pretrained(tmpdirname, attn_implementation="sdpa")
else:
model_sdpa = model_class.from_pretrained(tmpdirname, attn_implementation="sdpa")
for key in model_sdpa.config:
if isinstance(getattr(model_sdpa.config, key), PreTrainedConfig):
sub_config = getattr(model_sdpa.config, key)
self.assertTrue(sub_config._attn_implementation == "sdpa") | test | 0 | {"function_name": "test_sdpa_can_dispatch_composite_models", "class_name": "DiaModelTest", "qualname": "DiaModelTest.test_sdpa_can_dispatch_composite_models", "file_path": "tests/models/dia/test_modeling_dia.py", "repo_id": "huggingface/transformers", "loc": 32, "tested_modules": ["transformers.models.dia", "transformers.testing_utils", "transformers.utils", "transformers.utils.import_utils", "generation.test_utils"], "has_docstring": true, "runnable_level": "file_runnable"} |
Zie619/n8n-workflows:src/performance_monitor.py:PerformanceMonitor._check_alerts | # Context:
class PerformanceMetrics(BaseModel): ...
class Alert(BaseModel): ...
async def get_current_metrics(): ...
async def get_historical_metrics(hours: int): ...
async def get_alerts(): ...
async def resolve_alert(alert_id: str): ...
async def websocket_endpoint(websocket: WebSocket): ...
async def get_monitoring_dashboard(): ...
class PerformanceMonitor:
def __init__(self, db_path: str = "workflows.db"):
self.db_path = db_path
self.metrics_history = []
self.alerts = []
self.websocket_connections = []
self.monitoring_active = False
self.metrics_queue = queue.Queue()
def start_monitoring(self): ...
def _monitor_loop(self): ...
def _collect_metrics(self) -> PerformanceMetrics: ...
def _measure_api_time(self, endpoint: str) -> float: ...
def _get_workflow_executions(self) -> int: ...
def _calculate_error_rate(self) -> float: ...
def _create_alert(self, alert_type: str, severity: str, message: str): ...
def _broadcast_metrics(self, metrics: PerformanceMetrics): ...
def _broadcast_alert(self, alert: Alert): ...
def _broadcast_to_websockets(self, message: dict): ...
def get_metrics_summary(self) -> Dict[str, Any]: ...
def get_historical_metrics(self, hours: int) -> List[Dict]: ...
def resolve_alert(self, alert_id: str) -> bool: ...
# Task:
Write a Python method `_check_alerts` for the class `PerformanceMonitor` to check metrics against alert thresholds.
Parameters: metrics: PerformanceMetrics | def _check_alerts(self, metrics: PerformanceMetrics):
"""Check metrics against alert thresholds."""
# CPU alert
if metrics.cpu_usage > 80:
self._create_alert(
"high_cpu", "warning", f"High CPU usage: {metrics.cpu_usage}%"
)
# Memory alert
if metrics.memory_usage > 85:
self._create_alert(
"high_memory", "warning", f"High memory usage: {metrics.memory_usage}%"
)
# Disk alert
if metrics.disk_usage > 90:
self._create_alert(
"high_disk", "critical", f"High disk usage: {metrics.disk_usage}%"
)
# API response time alert
for endpoint, response_time in metrics.api_response_times.items():
if response_time > 1000: # 1 second
self._create_alert(
"slow_api",
"warning",
f"Slow API response: {endpoint} ({response_time}ms)",
)
# Error rate alert
if metrics.error_rate > 10:
self._create_alert(
"high_error_rate", "critical", f"High error rate: {metrics.error_rate}%"
) | function_complex | 0 | {"cognitive_complexity": 7, "loc": 34, "code_loc": 23, "docstring_loc": 1, "function_name": "_check_alerts", "class_name": "PerformanceMonitor", "qualname": "PerformanceMonitor._check_alerts", "file_path": "src/performance_monitor.py", "repo_id": "Zie619/n8n-workflows", "has_docstring": true, "runnable_level": "file_runnable"} |
vllm-project/vllm:tests/lora/test_gptoss_tp.py:test_gpt_oss_lora | # Context:
import pytest
import vllm
def generate_and_test(llm: vllm.LLM, lora_path: str, lora_id: int) -> None: ...
def test_gpt_oss_lora_tp2(monkeypatch: pytest.MonkeyPatch, gptoss20b_lora_files, fully_sharded_loras, mxfp4_use_marlin): ...
# Task:
Write a Python test function `test_gpt_oss_lora` to verify the behavior of `gpt_oss_lora`.
Module under test: vllm.lora.request, utils | def test_gpt_oss_lora(
monkeypatch: pytest.MonkeyPatch, gptoss20b_lora_files, mxfp4_use_marlin
):
with monkeypatch.context() as m:
m.setenv("VLLM_MXFP4_USE_MARLIN", "1" if mxfp4_use_marlin else "0")
llm = vllm.LLM(
MODEL_PATH,
max_model_len=1024,
enable_lora=True,
max_loras=4,
max_lora_rank=8,
max_num_seqs=2,
max_num_batched_tokens=2048,
compilation_config=vllm.config.CompilationConfig( # Avoid OOM
cudagraph_specialize_lora=False,
),
)
generate_and_test(llm, gptoss20b_lora_files, lora_id=1)
generate_and_test(llm, gptoss20b_lora_files, lora_id=2) | test | 1 | {"function_name": "test_gpt_oss_lora", "class_name": null, "qualname": "test_gpt_oss_lora", "file_path": "tests/lora/test_gptoss_tp.py", "repo_id": "vllm-project/vllm", "loc": 20, "tested_modules": ["vllm.lora.request", "utils"], "has_docstring": false, "runnable_level": "project_runnable"} |
vllm-project/vllm:vllm/utils/torch_utils.py:guard_cuda_initialization | # Context:
import contextlib
import os
from vllm.platforms import current_platform
def is_strictly_contiguous(t: torch.Tensor) -> bool: ...
def set_default_torch_dtype(dtype: torch.dtype): ...
def set_default_torch_num_threads(num_threads: int | None): ...
def get_dtype_size(dtype: torch.dtype) -> int: ...
def _get_precision_level(dtype: torch.dtype) -> int: ...
def is_lossless_cast(src_dtype: torch.dtype, tgt_dtype: torch.dtype): ...
def common_broadcastable_dtype(dtypes: Collection[torch.dtype]): ...
def _generate_random_fp8(tensor: torch.Tensor, low: float, high: float) -> None: ...
def get_kv_cache_torch_dtype(cache_dtype: str | torch.dtype | None, model_dtype: str | torch.dtype | None) -> torch.dtype: ...
def get_kv_cache_quant_algo_string(quant_cfg: dict[str, Any]) -> str | None: ...
def get_kv_cache_quant_algo_dtype(quant_cfg: dict[str, Any]) -> torch.dtype | None: ...
def resolve_kv_cache_dtype_string(kv_cache_dtype: str, model_config: ModelConfig) -> str: ...
def kv_cache_dtype_str_to_dtype(kv_cache_dtype: str, model_config: ModelConfig) -> torch.dtype: ...
def set_random_seed(seed: int | None) -> None: ...
def create_kv_caches_with_random_flash(num_blocks: int, block_size: int, num_layers: int, num_heads: int, head_size: int, cache_dtype: str | torch.dtype | None, model_dtype: str | torch.dtype | None, seed: int | None, device: str | None, cache_layout: str | None) -> tuple[list[torch.Tensor], list[torch.Tensor]]: ...
def create_kv_caches_with_random(num_blocks: int, block_size: int, num_layers: int, num_heads: int, head_size: int, cache_dtype: str | torch.dtype | None, model_dtype: str | torch.dtype | None, seed: int | None, device: str | None) -> tuple[list[torch.Tensor], list[torch.Tensor]]: ...
def async_tensor_h2d(data: list, dtype: torch.dtype, target_device: str | torch.device, pin_memory: bool) -> torch.Tensor: ...
def make_ndarray_with_pad(x: list[list[T]], pad: T, dtype: npt.DTypeLike, max_len: int | None) -> npt.NDArray: ...
def make_tensor_with_pad(x: list[list[T]], pad: T, dtype: torch.dtype, max_len: int | None, device: str | torch.device | None, pin_memory: bool) -> torch.Tensor: ...
def _patched_set_stream(stream: torch.cuda.Stream) -> None: ...
class _StreamPlaceholder: ...
def current_stream() -> torch.cuda.Stream: ...
def aux_stream() -> torch.cuda.Stream | None: ...
def _cuda_device_count_stateless(cuda_visible_devices: str | None) -> int: ...
def cuda_device_count_stateless() -> int: ...
def weak_ref_tensor(tensor: Any) -> Any: ...
def weak_ref_tensors(tensors: torch.Tensor | list[torch.Tensor] | tuple[torch.Tensor] | IntermediateTensors) -> torch.Tensor | list[Any] | tuple[Any] | Any: ...
def get_accelerator_view_from_cpu_tensor(cpu_tensor: torch.Tensor) -> torch.Tensor: ...
def _is_torch_equal_or_newer(torch_version: str, target: str) -> bool: ...
def is_torch_equal_or_newer(target: str) -> bool: ...
def _is_torch_equal(target: str) -> bool: ...
def is_torch_equal(target: str) -> bool: ...
class ModuleName(OpaqueBase): ...
def supports_xccl() -> bool: ...
def supports_xpu_graph() -> bool: ...
def direct_register_custom_op(op_name: str, op_func: Callable, mutates_args: list[str] | None, fake_impl: Callable | None, target_lib: Library | None, dispatch_key: str | None, tags: tuple[torch.Tag, ...]): ...
# Task:
Write a Python function `guard_cuda_initialization` to avoid unexpected CUDA initialization. | def guard_cuda_initialization():
"""Avoid unexpected CUDA initialization."""
from vllm.platforms import current_platform
if not current_platform.is_cuda():
yield
return
old_value = os.environ.get("CUDA_VISIBLE_DEVICES")
os.environ["CUDA_VISIBLE_DEVICES"] = ""
try:
yield
except Exception as e:
if "No CUDA GPUs are available" in str(e):
err_msg = "CUDA initialization is blocked."
else:
err_msg = str(e)
raise RuntimeError(err_msg) from e
finally:
if old_value is None:
del os.environ["CUDA_VISIBLE_DEVICES"]
else:
os.environ["CUDA_VISIBLE_DEVICES"] = old_value | function_complex | 1 | {"cognitive_complexity": 8, "loc": 23, "code_loc": 19, "docstring_loc": 1, "function_name": "guard_cuda_initialization", "class_name": null, "qualname": "guard_cuda_initialization", "file_path": "vllm/utils/torch_utils.py", "repo_id": "vllm-project/vllm", "has_docstring": true, "runnable_level": "project_runnable"} |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/agents/middleware/implementations/test_model_call_limit.py:test_thread_limit_with_create_agent | # Context:
from langchain_core.messages import AIMessage, HumanMessage, ToolMessage
from langgraph.checkpoint.memory import InMemorySaver
from langchain.agents.factory import create_agent
from langchain.agents.middleware.model_call_limit import (
ModelCallLimitExceededError,
ModelCallLimitMiddleware,
ModelCallLimitState,
)
from tests.unit_tests.agents.model import FakeToolCallingModel
def simple_tool(value: str) -> str: ...
def test_middleware_unit_functionality() -> None: ...
def test_run_limit_with_create_agent() -> None: ...
def test_middleware_initialization_validation() -> None: ...
def test_exception_error_message() -> None: ...
def test_run_limit_resets_between_invocations() -> None: ...
# Task:
Write a Python test function `test_thread_limit_with_create_agent` to test that thread limits work correctly with create_agent.
Module under test: langchain_core.messages, langchain_core.tools, langgraph.checkpoint.memory | def test_thread_limit_with_create_agent() -> None:
"""Test that thread limits work correctly with create_agent."""
model = FakeToolCallingModel()
# Set thread limit to 1 (should be exceeded after 1 call)
agent = create_agent(
model=model,
tools=[simple_tool],
middleware=[ModelCallLimitMiddleware(thread_limit=1)],
checkpointer=InMemorySaver(),
)
# First invocation should work - 1 model call, within thread limit
result = agent.invoke(
{"messages": [HumanMessage("Hello")]}, {"configurable": {"thread_id": "thread1"}}
)
# Should complete successfully with 1 model call
assert "messages" in result
assert len(result["messages"]) == 2 # Human + AI messages
# Second invocation in same thread should hit thread limit
# The agent should jump to end after detecting the limit
result2 = agent.invoke(
{"messages": [HumanMessage("Hello again")]}, {"configurable": {"thread_id": "thread1"}}
)
assert "messages" in result2
# The agent should have detected the limit and jumped to end with a limit exceeded message
# So we should have: previous messages + new human message + limit exceeded AI message
assert len(result2["messages"]) == 4 # Previous Human + AI + New Human + Limit AI
assert isinstance(result2["messages"][0], HumanMessage) # First human
assert isinstance(result2["messages"][1], AIMessage) # First AI response
assert isinstance(result2["messages"][2], HumanMessage) # Second human
assert isinstance(result2["messages"][3], AIMessage) # Limit exceeded message
assert "thread limit" in result2["messages"][3].content | test | 1 | {"function_name": "test_thread_limit_with_create_agent", "class_name": null, "qualname": "test_thread_limit_with_create_agent", "file_path": "libs/langchain_v1/tests/unit_tests/agents/middleware/implementations/test_model_call_limit.py", "repo_id": "langchain-ai/langchain", "loc": 36, "tested_modules": ["langchain_core.messages", "langchain_core.tools", "langgraph.checkpoint.memory", "langgraph.runtime", "langchain.agents.factory"], "has_docstring": true, "runnable_level": "project_runnable"} |
apache/airflow:providers/alibaba/src/airflow/providers/alibaba/cloud/hooks/base_alibaba.py:AlibabaBaseHook._get_field | # Context:
from typing import Any, NamedTuple
class AccessKeyCredentials(NamedTuple): ...
class AlibabaBaseHook(BaseHook):
conn_name_attr = "alibabacloud_conn_id"
default_conn_name = "alibabacloud_default"
conn_type = "alibaba_cloud"
hook_name = "Alibaba Cloud"
def __init__(
self,
alibabacloud_conn_id: str = "alibabacloud_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.alibaba_cloud_conn_id = alibabacloud_conn_id
self.extras: dict = self.get_connection(self.alibaba_cloud_conn_id).extra_dejson
def get_connection_form_widgets(cls) -> dict[str, Any]: ...
def get_ui_field_behaviour(cls) -> dict[str, Any]: ...
def get_access_key_credential(self) -> AccessKeyCredentials: ...
# Task:
Write a Python method `_get_field` for the class `AlibabaBaseHook` to fetch a field from extras, and returns it.
Parameters: field_name: str, default: Any
Returns: Any | def _get_field(self, field_name: str, default: Any = None) -> Any:
"""Fetch a field from extras, and returns it."""
value = self.extras.get(field_name)
return value if value is not None else default | function_simple | 1 | {"cognitive_complexity": 1, "loc": 4, "code_loc": 2, "docstring_loc": 1, "function_name": "_get_field", "class_name": "AlibabaBaseHook", "qualname": "AlibabaBaseHook._get_field", "file_path": "providers/alibaba/src/airflow/providers/alibaba/cloud/hooks/base_alibaba.py", "repo_id": "apache/airflow", "has_docstring": true, "runnable_level": "class_runnable"} |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-solr/llama_index/readers/solr/base.py:SolrReader.load_data | # Context:
from typing import Any, Optional
from llama_index.core.schema import Document
class SolrReader(BasePydanticReader):
def __init__(
self,
endpoint: str,
):
"""Initialize with parameters."""
super().__init__(endpoint=endpoint)
self._client = pysolr.Solr(endpoint)
# Task:
Write a Python method `load_data` for the class `SolrReader` to read data from the Solr index. At least one field argument must be specified.
Parameters: query: dict[str, Any], field: str, id_field: str, metadata_fields: Optional[list[str]], embedding: Optional[str]
Returns: list[Document] | def load_data(
self,
query: dict[str, Any],
field: str,
id_field: str = "id",
metadata_fields: Optional[list[str]] = None,
embedding: Optional[str] = None,
) -> list[Document]:
r"""
Read data from the Solr index. At least one field argument must be specified.
Args:
query (dict): The Solr query parameters.
- "q" is required.
- "rows" should be specified or will default to 10 by Solr.
- If "fl" is provided, it is respected exactly as given.
If "fl" is NOT provided, a default `fl` is constructed from
{id_field, field, embedding?, metadata_fields?}.
field (str): Field in Solr to retrieve as document text.
id_field (str): Field in Solr to retrieve as the document identifier. Defaults to "id".
metadata_fields (list[str], optional): Fields to include as metadata. Defaults to None.
embedding (str, optional): Field to use for embeddings. Defaults to None.
Raises:
ValueError: If the HTTP call to Solr fails.
Returns:
list[Document]: A list of retrieved documents where field is populated.
"""
if "q" not in query:
raise ValueError("Query parameters must include a 'q' field for the query.")
fl_default = {}
if "fl" not in query:
fields = [id_field, field]
if embedding:
fields.append(embedding)
if metadata_fields:
fields.extend(metadata_fields)
fl_default = {"fl": ",".join(fields)}
try:
query_params = {
**query,
**fl_default,
}
results = self._client.search(**query_params)
except Exception as e: # pragma: no cover
raise ValueError(f"Failed to query Solr endpoint: {e!s}") from e
documents: list[Document] = []
for doc in results.docs:
if field not in doc:
continue
doc_kwargs: dict[str, Any] = {
"id_": str(doc[id_field]),
"text": doc[field],
**({"embedding": doc.get(embedding)} if embedding else {}),
"metadata": {
metadata_field: doc[metadata_field]
for metadata_field in (metadata_fields or [])
if metadata_field in doc
},
}
documents.append(Document(**doc_kwargs))
return documents | function_complex | 1 | {"cognitive_complexity": 12, "loc": 68, "code_loc": 34, "docstring_loc": 22, "function_name": "load_data", "class_name": "SolrReader", "qualname": "SolrReader.load_data", "file_path": "llama-index-integrations/readers/llama-index-readers-solr/llama_index/readers/solr/base.py", "repo_id": "run-llama/llama_index", "has_docstring": true, "runnable_level": "project_runnable"} |
crewAIInc/crewAI:lib/crewai/tests/hooks/test_human_approval.py:TestToolHookHumanInput.test_request_human_input_returns_user_response | # Context:
from unittest.mock import Mock, patch
from crewai.hooks.tool_hooks import ToolCallHookContext
def mock_executor(): ...
def mock_tool(): ...
def mock_agent(): ...
def mock_task(): ...
class TestLLMHookHumanInput: ...
class TestApprovalHookIntegration: ...
class TestCostControlApproval: ...
class TestToolHookHumanInput:
def test_request_human_input_handles_empty_input(self, mock_event_listener, mock_input, mock_tool): ...
def test_request_human_input_pauses_and_resumes(self, mock_event_listener, mock_input, mock_tool): ...
def test_request_human_input_resumes_on_keyboard_interrupt(self, mock_event_listener, mock_input, mock_tool): ...
# Task:
Write a Python test method `test_request_human_input_returns_user_response` in test class `TestToolHookHumanInput` to test that request_human_input returns the user's input.
Module under test: __future__, crewai.hooks.llm_hooks, crewai.hooks.tool_hooks | def test_request_human_input_returns_user_response(
self, mock_event_listener, mock_input, mock_tool, mock_agent, mock_task
):
"""Test that request_human_input returns the user's input."""
mock_formatter = Mock()
mock_event_listener.formatter = mock_formatter
context = ToolCallHookContext(
tool_name="test_tool",
tool_input={"arg": "value"},
tool=mock_tool,
agent=mock_agent,
task=mock_task,
)
response = context.request_human_input(
prompt="Approve this tool?", default_message="Type 'approve':"
)
assert response == "approve"
mock_input.assert_called_once() | test | 0 | {"function_name": "test_request_human_input_returns_user_response", "class_name": "TestToolHookHumanInput", "qualname": "TestToolHookHumanInput.test_request_human_input_returns_user_response", "file_path": "lib/crewai/tests/hooks/test_human_approval.py", "repo_id": "crewAIInc/crewAI", "loc": 21, "tested_modules": ["__future__", "crewai.hooks.llm_hooks", "crewai.hooks.tool_hooks"], "has_docstring": true, "runnable_level": "project_runnable"} |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-anthropic/tests/test_anthropic_utils.py:TestAnthropicPromptCachingSupport.test_claude_3_5_sonnet_supported | # Context:
from llama_index.llms.anthropic.utils import (
is_anthropic_prompt_caching_supported_model,
ANTHROPIC_PROMPT_CACHING_SUPPORTED_MODELS,
update_tool_calls,
is_anthropic_structured_output_supported,
STRUCTURED_OUTPUT_SUPPORT,
messages_to_anthropic_beta_messages,
)
def test_update_tool_calls() -> None: ...
def test_messages_to_anthropic_beta_messages() -> None: ...
def test_is_anthropic_structured_output_supported() -> None: ...
class TestAnthropicPromptCachingSupport:
def test_claude_4_5_opus_supported(self): ...
def test_claude_4_1_opus_supported(self): ...
def test_claude_4_opus_supported(self): ...
def test_claude_4_5_sonnet_supported(self): ...
def test_claude_4_sonnet_supported(self): ...
def test_claude_3_7_sonnet_supported(self): ...
def test_claude_4_5_haiku_supported(self): ...
def test_claude_3_5_haiku_supported(self): ...
def test_claude_3_haiku_supported(self): ...
def test_claude_3_opus_deprecated_but_supported(self): ...
def test_claude_2_not_supported(self): ...
def test_claude_instant_not_supported(self): ...
def test_invalid_model_not_supported(self): ...
def test_constant_contains_all_supported_models(self): ...
def test_case_sensitivity(self): ...
# Task:
Write a Python test method `test_claude_3_5_sonnet_supported` in test class `TestAnthropicPromptCachingSupport` to test Claude 3.5 Sonnet models support prompt caching.
Module under test: llama_index.llms.anthropic.utils, llama_index.core.base.llms.types, anthropic.types.beta | def test_claude_3_5_sonnet_supported(self):
"""Test Claude 3.5 Sonnet models support prompt caching."""
assert is_anthropic_prompt_caching_supported_model("claude-3-5-sonnet-20241022")
assert is_anthropic_prompt_caching_supported_model("claude-3-5-sonnet-20240620")
assert is_anthropic_prompt_caching_supported_model("claude-3-5-sonnet-latest") | test | 1 | {"function_name": "test_claude_3_5_sonnet_supported", "class_name": "TestAnthropicPromptCachingSupport", "qualname": "TestAnthropicPromptCachingSupport.test_claude_3_5_sonnet_supported", "file_path": "llama-index-integrations/llms/llama-index-llms-anthropic/tests/test_anthropic_utils.py", "repo_id": "run-llama/llama_index", "loc": 5, "tested_modules": ["llama_index.llms.anthropic.utils", "llama_index.core.base.llms.types", "anthropic.types.beta"], "has_docstring": true, "runnable_level": "project_runnable"} |
crewAIInc/crewAI:lib/crewai/tests/test_human_feedback_decorator.py:TestHumanFeedbackResult.test_result_creation | # Context:
from datetime import datetime
from crewai.flow.human_feedback import (
HumanFeedbackConfig,
HumanFeedbackResult,
)
class TestHumanFeedbackValidation: ...
class TestHumanFeedbackConfig: ...
class TestDecoratorAttributePreservation: ...
class TestAsyncSupport: ...
class TestHumanFeedbackExecution: ...
class TestHumanFeedbackHistory: ...
class TestCollapseToOutcome: ...
class TestHumanFeedbackLearn: ...
class TestHumanFeedbackResult:
def test_result_with_metadata(self): ...
# Task:
Write a Python test method `test_result_creation` in test class `TestHumanFeedbackResult` to test HumanFeedbackResult can be created with all fields.
Module under test: __future__, datetime, typing | def test_result_creation(self):
"""Test HumanFeedbackResult can be created with all fields."""
result = HumanFeedbackResult(
output={"title": "Test"},
feedback="Looks good",
outcome="approved",
method_name="test_method",
)
assert result.output == {"title": "Test"}
assert result.feedback == "Looks good"
assert result.outcome == "approved"
assert result.method_name == "test_method"
assert isinstance(result.timestamp, datetime)
assert result.metadata == {} | test | 0 | {"function_name": "test_result_creation", "class_name": "TestHumanFeedbackResult", "qualname": "TestHumanFeedbackResult.test_result_creation", "file_path": "lib/crewai/tests/test_human_feedback_decorator.py", "repo_id": "crewAIInc/crewAI", "loc": 15, "tested_modules": ["__future__", "datetime", "typing", "crewai.flow", "crewai.flow.human_feedback"], "has_docstring": true, "runnable_level": "project_runnable"} |
crewAIInc/crewAI:lib/crewai/src/crewai/llms/base_llm.py:BaseLLM._apply_stop_words | # Context:
import logging
def llm_call_context() -> Generator[str, None, None]: ...
def get_current_call_id() -> str: ...
class BaseLLM(ABC):
def __init__(
self,
model: str,
temperature: float | None = None,
api_key: str | None = None,
base_url: str | None = None,
provider: str | None = None,
prefer_upload: bool = False,
**kwargs: Any,
) -> None:
"""Initialize the BaseLLM with default attributes.
Args:
model: The model identifier/name.
temperature: Optional temperature setting for response generation.
stop: Optional list of stop sequences for generation.
prefer_upload: Whether to prefer file upload over inline base64.
**kwargs: Additional provider-specific parameters.
"""
if not model:
raise ValueError("Model name is required and cannot be empty")
self.model = model
self.temperature = temperature
self.api_key = api_key
self.base_url = base_url
self.prefer_upload = prefer_upload
# Store additional parameters for provider-specific use
self.additional_params = kwargs
self._provider = provider or "openai"
stop = kwargs.pop("stop", None)
if stop is None:
self.stop: list[str] = []
elif isinstance(stop, str):
self.stop = [stop]
elif isinstance(stop, list):
self.stop = stop
else:
self.stop = []
self._token_usage = {
"total_tokens": 0,
"prompt_tokens": 0,
"completion_tokens": 0,
"successful_requests": 0,
"cached_prompt_tokens": 0,
}
def provider(self) -> str: ...
def provider(self, value: str) -> None: ...
def call(self, messages: str | list[LLMMessage], tools: list[dict[str, BaseTool]] | None, callbacks: list[Any] | None, available_functions: dict[str, Any] | None, from_task: Task | None, from_agent: Agent | None, response_model: type[BaseModel] | None) -> str | Any: ...
async def acall(self, messages: str | list[LLMMessage], tools: list[dict[str, BaseTool]] | None, callbacks: list[Any] | None, available_functions: dict[str, Any] | None, from_task: Task | None, from_agent: Agent | None, response_model: type[BaseModel] | None) -> str | Any: ...
def _convert_tools_for_interference(self, tools: list[dict[str, BaseTool]]) -> list[dict[str, BaseTool]]: ...
def supports_stop_words(self) -> bool: ...
def _supports_stop_words_implementation(self) -> bool: ...
def get_context_window_size(self) -> int: ...
def supports_multimodal(self) -> bool: ...
def format_text_content(self, text: str) -> dict[str, Any]: ...
def get_file_uploader(self) -> Any: ...
def _emit_call_started_event(self, messages: str | list[LLMMessage], tools: list[dict[str, BaseTool]] | None, callbacks: list[Any] | None, available_functions: dict[str, Any] | None, from_task: Task | None, from_agent: Agent | None) -> None: ...
def _emit_call_completed_event(self, response: Any, call_type: LLMCallType, from_task: Task | None, from_agent: Agent | None, messages: str | list[LLMMessage] | None) -> None: ...
def _emit_call_failed_event(self, error: str, from_task: Task | None, from_agent: Agent | None) -> None: ...
def _emit_stream_chunk_event(self, chunk: str, from_task: Task | None, from_agent: Agent | None, tool_call: dict[str, Any] | None, call_type: LLMCallType | None, response_id: str | None) -> None: ...
def _handle_tool_execution(self, function_name: str, function_args: dict[str, Any], available_functions: dict[str, Any], from_task: Task | None, from_agent: Agent | None) -> str | None: ...
def _format_messages(self, messages: str | list[LLMMessage]) -> list[LLMMessage]: ...
def _process_message_files(self, messages: list[LLMMessage]) -> list[LLMMessage]: ...
def _validate_structured_output(response: str, response_format: type[BaseModel] | None) -> str | BaseModel: ...
def _extract_provider(model: str) -> str: ...
def _track_token_usage_internal(self, usage_data: dict[str, Any]) -> None: ...
def get_token_usage_summary(self) -> UsageMetrics: ...
def _invoke_before_llm_call_hooks(self, messages: list[LLMMessage], from_agent: Agent | None) -> bool: ...
def _invoke_after_llm_call_hooks(self, messages: list[LLMMessage], response: str, from_agent: Agent | None) -> str: ...
# Task:
Write a Python method `_apply_stop_words` for the class `BaseLLM` to apply stop words to truncate response content.
Parameters: content: str
Returns: str | def _apply_stop_words(self, content: str) -> str:
"""Apply stop words to truncate response content.
This method provides consistent stop word behavior across all native SDK providers.
Native providers should call this method to post-process their responses.
Args:
content: The raw response content from the LLM
Returns:
Content truncated at the first occurrence of any stop word
Example:
>>> llm = MyNativeLLM(stop=["Observation:", "Final Answer:"])
>>> response = (
... "I need to search.\\n\\nAction: search\\nObservation: Found results"
... )
>>> llm._apply_stop_words(response)
"I need to search.\\n\\nAction: search"
"""
if not self.stop or not content:
return content
# Find the earliest occurrence of any stop word
earliest_stop_pos = len(content)
found_stop_word = None
for stop_word in self.stop:
stop_pos = content.find(stop_word)
if stop_pos != -1 and stop_pos < earliest_stop_pos:
earliest_stop_pos = stop_pos
found_stop_word = stop_word
# Truncate at the stop word if found
if found_stop_word is not None:
truncated = content[:earliest_stop_pos].strip()
logging.debug(
f"Applied stop word '{found_stop_word}' at position {earliest_stop_pos}"
)
return truncated
return content | function_complex | 0 | {"cognitive_complexity": 7, "loc": 42, "code_loc": 16, "docstring_loc": 19, "function_name": "_apply_stop_words", "class_name": "BaseLLM", "qualname": "BaseLLM._apply_stop_words", "file_path": "lib/crewai/src/crewai/llms/base_llm.py", "repo_id": "crewAIInc/crewAI", "has_docstring": true, "runnable_level": "class_runnable"} |
exo-explore/exo:src/exo/utils/keyed_backoff.py:KeyedBackoff.reset | # Context:
class KeyedBackoff(Generic[K]):
def __init__(self, base: float = 0.5, cap: float = 10.0):
self._base = base
self._cap = cap
self._attempts: dict[K, int] = {}
self._last_time: dict[K, float] = {}
def should_proceed(self, key: K) -> bool: ...
def record_attempt(self, key: K) -> None: ...
# Task:
Write a Python method `reset` for the class `KeyedBackoff` to reset backoff state for a key (e.g., on success).
Parameters: key: K
Returns: None | def reset(self, key: K) -> None:
"""Reset backoff state for a key (e.g., on success)."""
self._attempts.pop(key, None)
self._last_time.pop(key, None) | function_simple | 0 | {"cognitive_complexity": 0, "loc": 4, "code_loc": 2, "docstring_loc": 1, "function_name": "reset", "class_name": "KeyedBackoff", "qualname": "KeyedBackoff.reset", "file_path": "src/exo/utils/keyed_backoff.py", "repo_id": "exo-explore/exo", "has_docstring": true, "runnable_level": "file_runnable"} |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/ai_news_and_podcast_agents/beifong/routers/social_media_router.py:read_top_users | # Context:
from fastapi import APIRouter, Query
from typing import List, Optional, Dict, Any
from services.social_media_service import social_media_service
async def read_posts(page: int, per_page: int, platform: Optional[str], user_handle: Optional[str], sentiment: Optional[str], category: Optional[str], date_from: Optional[str], date_to: Optional[str], search: Optional[str]): ...
async def read_post(post_id: str): ...
async def read_platforms(): ...
async def read_sentiments(date_from: Optional[str], date_to: Optional[str]): ...
async def read_categories(date_from: Optional[str], date_to: Optional[str]): ...
async def read_user_sentiment(limit: int, platform: Optional[str], date_from: Optional[str], date_to: Optional[str]): ...
async def read_category_sentiment(date_from: Optional[str], date_to: Optional[str]): ...
async def read_trending_topics(limit: int, date_from: Optional[str], date_to: Optional[str]): ...
async def read_sentiment_over_time(platform: Optional[str], date_from: Optional[str], date_to: Optional[str]): ...
async def read_influential_posts(sentiment: Optional[str], limit: int, date_from: Optional[str], date_to: Optional[str]): ...
async def read_engagement_stats(date_from: Optional[str], date_to: Optional[str]): ...
def _run_browser_setup_background(sites: Optional[List[str]]): ...
async def setup_browser_session(sites: Optional[List[str]]): ...
# Task:
Write a Python async function `read_top_users` to get top users by post count.
Parameters: platform: Optional[str], limit: int, date_from: Optional[str], date_to: Optional[str] | async def read_top_users(
platform: Optional[str] = Query(None, description="Filter by platform"),
limit: int = Query(10, ge=1, le=50, description="Number of top users to return"),
date_from: Optional[str] = Query(None, description="Filter by start date (format: YYYY-MM-DD)"),
date_to: Optional[str] = Query(None, description="Filter by end date (format: YYYY-MM-DD)"),
):
"""Get top users by post count."""
return await social_media_service.get_top_users(platform=platform, limit=limit, date_from=date_from, date_to=date_to) | function_simple | 0 | {"cognitive_complexity": 0, "loc": 8, "code_loc": 1, "docstring_loc": 1, "function_name": "read_top_users", "class_name": null, "qualname": "read_top_users", "file_path": "advanced_ai_agents/multi_agent_apps/ai_news_and_podcast_agents/beifong/routers/social_media_router.py", "repo_id": "Shubhamsaboo/awesome-llm-apps", "has_docstring": true, "runnable_level": "project_runnable"} |
langchain-ai/langchain:libs/langchain_v1/langchain/agents/middleware/model_retry.py:ModelRetryMiddleware._handle_failure | # Context:
from langchain_core.messages import AIMessage
from langchain.agents.middleware.types import (
AgentMiddleware,
AgentState,
ContextT,
ModelRequest,
ModelResponse,
ResponseT,
)
class ModelRetryMiddleware(AgentMiddleware[AgentState[ResponseT], ContextT, ResponseT]):
def __init__(
self,
*,
max_retries: int = 2,
retry_on: RetryOn = (Exception,),
on_failure: OnFailure = "continue",
backoff_factor: float = 2.0,
initial_delay: float = 1.0,
max_delay: float = 60.0,
jitter: bool = True,
) -> None:
"""Initialize `ModelRetryMiddleware`.
Args:
max_retries: Maximum number of retry attempts after the initial call.
Must be `>= 0`.
retry_on: Either a tuple of exception types to retry on, or a callable
that takes an exception and returns `True` if it should be retried.
Default is to retry on all exceptions.
on_failure: Behavior when all retries are exhausted.
Options:
- `'continue'`: Return an `AIMessage` with error details,
allowing the agent to continue with an error response.
- `'error'`: Re-raise the exception, stopping agent execution.
- **Custom callable:** Function that takes the exception and returns a
string for the `AIMessage` content, allowing custom error
formatting.
backoff_factor: Multiplier for exponential backoff.
Each retry waits `initial_delay * (backoff_factor ** retry_number)`
seconds.
Set to `0.0` for constant delay.
initial_delay: Initial delay in seconds before first retry.
max_delay: Maximum delay in seconds between retries.
Caps exponential backoff growth.
jitter: Whether to add random jitter (`±25%`) to delay to avoid thundering herd.
Raises:
ValueError: If `max_retries < 0` or delays are negative.
"""
super().__init__()
# Validate parameters
validate_retry_params(max_retries, initial_delay, max_delay, backoff_factor)
self.max_retries = max_retries
self.tools = [] # No additional tools registered by this middleware
self.retry_on = retry_on
self.on_failure = on_failure
self.backoff_factor = backoff_factor
self.initial_delay = initial_delay
self.max_delay = max_delay
self.jitter = jitter
def _format_failure_message(exc: Exception, attempts_made: int) -> AIMessage: ...
def wrap_model_call(self, request: ModelRequest[ContextT], handler: Callable[[ModelRequest[ContextT]], ModelResponse[ResponseT]]) -> ModelResponse[ResponseT] | AIMessage: ...
async def awrap_model_call(self, request: ModelRequest[ContextT], handler: Callable[[ModelRequest[ContextT]], Awaitable[ModelResponse[ResponseT]]]) -> ModelResponse[ResponseT] | AIMessage: ...
# Task:
Write a Python method `_handle_failure` for the class `ModelRetryMiddleware` to handle failure when all retries are exhausted.
Parameters: exc: Exception, attempts_made: int
Returns: ModelResponse[ResponseT] | def _handle_failure(self, exc: Exception, attempts_made: int) -> ModelResponse[ResponseT]:
"""Handle failure when all retries are exhausted.
Args:
exc: The exception that caused the failure.
attempts_made: Number of attempts actually made.
Returns:
`ModelResponse` with error details.
Raises:
Exception: If `on_failure` is `'error'`, re-raises the exception.
"""
if self.on_failure == "error":
raise exc
if callable(self.on_failure):
content = self.on_failure(exc)
ai_msg = AIMessage(content=content)
else:
ai_msg = self._format_failure_message(exc, attempts_made)
return ModelResponse(result=[ai_msg]) | function_simple | 1 | {"cognitive_complexity": 3, "loc": 23, "code_loc": 8, "docstring_loc": 12, "function_name": "_handle_failure", "class_name": "ModelRetryMiddleware", "qualname": "ModelRetryMiddleware._handle_failure", "file_path": "libs/langchain_v1/langchain/agents/middleware/model_retry.py", "repo_id": "langchain-ai/langchain", "has_docstring": true, "runnable_level": "project_runnable"} |
crewAIInc/crewAI:lib/crewai/tests/utilities/events/test_async_event_bus.py:test_aemit_with_async_handlers | # Context:
import asyncio
import pytest
from crewai.events.base_events import BaseEvent
from crewai.events.event_bus import crewai_event_bus
class AsyncTestEvent(BaseEvent): ...
async def test_async_handler_execution(): ...
async def test_multiple_async_handlers(): ...
async def test_mixed_sync_and_async_handlers(): ...
async def test_async_handler_error_handling(): ...
async def test_aemit_with_no_handlers(): ...
async def test_async_handler_registration_via_register_handler(): ...
async def test_emit_async_handlers_fire_and_forget(): ...
async def test_scoped_handlers_with_async(): ...
# Task:
Write a Python test function `test_aemit_with_async_handlers` to verify the behavior of `aemit_with_async_handlers`.
Module under test: crewai.events.base_events, crewai.events.event_bus | async def test_aemit_with_async_handlers():
received_events = []
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(AsyncTestEvent)
async def async_handler(source: object, event: BaseEvent) -> None:
await asyncio.sleep(0.01)
received_events.append(event)
event = AsyncTestEvent(type="async_test")
await crewai_event_bus.aemit("test_source", event)
assert len(received_events) == 1
assert received_events[0] == event | test | 0 | {"function_name": "test_aemit_with_async_handlers", "class_name": null, "qualname": "test_aemit_with_async_handlers", "file_path": "lib/crewai/tests/utilities/events/test_async_event_bus.py", "repo_id": "crewAIInc/crewAI", "loc": 15, "tested_modules": ["crewai.events.base_events", "crewai.events.event_bus"], "has_docstring": false, "runnable_level": "project_runnable"} |
Zie619/n8n-workflows:src/analytics_engine.py:WorkflowAnalytics.get_workflow_analytics | # Context:
from typing import List, Dict, Any
import json
from datetime import datetime
from collections import Counter, defaultdict
class AnalyticsResponse(BaseModel): ...
async def get_analytics_overview(): ...
async def get_trend_analysis(days: int): ...
async def get_usage_insights(): ...
async def get_analytics_dashboard(): ...
class WorkflowAnalytics:
def __init__(self, db_path: str = "workflows.db"):
self.db_path = db_path
def get_db_connection(self): ...
def analyze_workflow_patterns(self, conn) -> Dict[str, Any]: ...
def categorize_service(self, service: str) -> str: ...
def generate_recommendations(self, total: int, active: int, triggers: Dict, complexity: Dict, integrations: Dict) -> List[str]: ...
def get_trend_analysis(self, days: int) -> Dict[str, Any]: ...
def get_usage_insights(self) -> Dict[str, Any]: ...
# Task:
Write a Python method `get_workflow_analytics` for the class `WorkflowAnalytics` to get comprehensive workflow analytics.
Returns: Dict[str, Any] | def get_workflow_analytics(self) -> Dict[str, Any]:
"""Get comprehensive workflow analytics."""
conn = self.get_db_connection()
# Basic statistics
cursor = conn.execute("SELECT COUNT(*) as total FROM workflows")
total_workflows = cursor.fetchone()["total"]
cursor = conn.execute(
"SELECT COUNT(*) as active FROM workflows WHERE active = 1"
)
active_workflows = cursor.fetchone()["active"]
# Trigger type distribution
cursor = conn.execute("""
SELECT trigger_type, COUNT(*) as count
FROM workflows
GROUP BY trigger_type
ORDER BY count DESC
""")
trigger_distribution = {
row["trigger_type"]: row["count"] for row in cursor.fetchall()
}
# Complexity distribution
cursor = conn.execute("""
SELECT complexity, COUNT(*) as count
FROM workflows
GROUP BY complexity
ORDER BY count DESC
""")
complexity_distribution = {
row["complexity"]: row["count"] for row in cursor.fetchall()
}
# Node count statistics
cursor = conn.execute("""
SELECT
AVG(node_count) as avg_nodes,
MIN(node_count) as min_nodes,
MAX(node_count) as max_nodes,
COUNT(*) as total
FROM workflows
""")
node_stats = dict(cursor.fetchone())
# Integration analysis
cursor = conn.execute(
"SELECT integrations FROM workflows WHERE integrations IS NOT NULL"
)
all_integrations = []
for row in cursor.fetchall():
integrations = json.loads(row["integrations"] or "[]")
all_integrations.extend(integrations)
integration_counts = Counter(all_integrations)
top_integrations = dict(integration_counts.most_common(10))
# Workflow patterns
patterns = self.analyze_workflow_patterns(conn)
# Recommendations
recommendations = self.generate_recommendations(
total_workflows,
active_workflows,
trigger_distribution,
complexity_distribution,
top_integrations,
)
conn.close()
return {
"overview": {
"total_workflows": total_workflows,
"active_workflows": active_workflows,
"activation_rate": round((active_workflows / total_workflows) * 100, 2)
if total_workflows > 0
else 0,
"unique_integrations": len(integration_counts),
"avg_nodes_per_workflow": round(node_stats["avg_nodes"], 2),
"most_complex_workflow": node_stats["max_nodes"],
},
"distributions": {
"trigger_types": trigger_distribution,
"complexity_levels": complexity_distribution,
"top_integrations": top_integrations,
},
"patterns": patterns,
"recommendations": recommendations,
"generated_at": datetime.now().isoformat(),
} | function_simple | 0 | {"cognitive_complexity": 3, "loc": 92, "code_loc": 72, "docstring_loc": 1, "function_name": "get_workflow_analytics", "class_name": "WorkflowAnalytics", "qualname": "WorkflowAnalytics.get_workflow_analytics", "file_path": "src/analytics_engine.py", "repo_id": "Zie619/n8n-workflows", "has_docstring": true, "runnable_level": "class_runnable"} |
browser-use/browser-use:examples/use-cases/onepassword.py:fill_field | # Context:
from browser_use import ActionResult, Agent, Browser, ChatOpenAI, Tools
from browser_use.browser.session import BrowserSession
async def main(): ...
# Task:
Write a Python async function `fill_field` to fill in a specific field for a website using the value from 1Password.
Parameters: vault_name: str, item_name: str, field_name: str, browser_session: BrowserSession | async def fill_field(vault_name: str, item_name: str, field_name: str, browser_session: BrowserSession):
"""
Fills in a specific field for a website using the value from 1Password.
Note: Use blur_page before calling this if you want visual security.
"""
try:
# Resolve field value from 1Password
field_value = await op_client.secrets.resolve(f'op://{vault_name}/{item_name}/{field_name}')
# Get current page
page = await browser_session.must_get_current_page()
# Find and fill the element
target_field = await page.must_get_element_by_prompt(f'{field_name} input field', llm)
await target_field.fill(field_value)
return ActionResult(
extracted_content=f'Successfully filled {field_name} field for {vault_name}/{item_name}', include_in_memory=True
)
except Exception as e:
return ActionResult(error=f'Failed to fill {field_name} field: {str(e)}', include_in_memory=True) | function_simple | 0 | {"cognitive_complexity": 1, "loc": 21, "code_loc": 10, "docstring_loc": 4, "function_name": "fill_field", "class_name": null, "qualname": "fill_field", "file_path": "examples/use-cases/onepassword.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "project_runnable"} |
vllm-project/vllm:vllm/tracing/otel.py:instrument_otel | # Context:
import functools
import inspect
from vllm.tracing.utils import TRACE_HEADERS, LoadingSpanAttributes
from opentelemetry import trace
def is_otel_available() -> bool: ...
def init_otel_tracer(instrumenting_module_name: str, otlp_traces_endpoint: str, extra_attributes: dict[str, str] | None) -> Tracer: ...
def get_span_exporter(endpoint): ...
def init_otel_worker_tracer(instrumenting_module_name: str, process_kind: str, process_name: str) -> Tracer: ...
def extract_trace_context(headers: Mapping[str, str] | None) -> Context | None: ...
def manual_instrument_otel(span_name: str, start_time: int, end_time: int | None, attributes: dict[str, Any] | None, context: Context | None, kind: Any): ...
def _get_smart_context() -> Context | None: ...
def propagate_trace_to_env(): ...
# Task:
Write a Python function `instrument_otel` to implement internal wrapper logic for sync and async functions.
Parameters: func, span_name, attributes, record_exception | def instrument_otel(func, span_name, attributes, record_exception):
"""Internal wrapper logic for sync and async functions."""
# Pre-calculate static code attributes once (these don't change)
code_attrs = {
LoadingSpanAttributes.CODE_FUNCTION: func.__qualname__,
LoadingSpanAttributes.CODE_NAMESPACE: func.__module__,
LoadingSpanAttributes.CODE_FILEPATH: func.__code__.co_filename,
LoadingSpanAttributes.CODE_LINENO: str(func.__code__.co_firstlineno),
}
if attributes:
code_attrs.update(attributes)
final_span_name = span_name or func.__qualname__
module_name = func.__module__
@functools.wraps(func)
async def async_wrapper(*args, **kwargs):
tracer = trace.get_tracer(module_name)
ctx = _get_smart_context()
with (
tracer.start_as_current_span(
final_span_name,
context=ctx,
attributes=code_attrs,
record_exception=record_exception,
),
propagate_trace_to_env(),
):
return await func(*args, **kwargs)
@functools.wraps(func)
def sync_wrapper(*args, **kwargs):
tracer = trace.get_tracer(module_name)
ctx = _get_smart_context()
with (
tracer.start_as_current_span(
final_span_name,
context=ctx,
attributes=code_attrs,
record_exception=record_exception,
),
propagate_trace_to_env(),
):
return func(*args, **kwargs)
return async_wrapper if inspect.iscoroutinefunction(func) else sync_wrapper | function_simple | 1 | {"cognitive_complexity": 3, "loc": 47, "code_loc": 39, "docstring_loc": 1, "function_name": "instrument_otel", "class_name": null, "qualname": "instrument_otel", "file_path": "vllm/tracing/otel.py", "repo_id": "vllm-project/vllm", "has_docstring": true, "runnable_level": "project_runnable"} |
langchain-ai/langchain:libs/langchain_v1/langchain/agents/middleware/tool_selection.py:LLMToolSelectorMiddleware:class_doc | Write a class-level docstring for `LLMToolSelectorMiddleware` (inherits from AgentMiddleware[AgentState[ResponseT], ContextT, ResponseT]) which has methods: `__init__`, `_prepare_selection_request`, `_process_selection_response`, `wrap_model_call`, `awrap_model_call`. | Uses an LLM to select relevant tools before calling the main model.
When an agent has many tools available, this middleware filters them down
to only the most relevant ones for the user's query. This reduces token usage
and helps the main model focus on the right tools.
Examples:
!!! example "Limit to 3 tools"
```python
from langchain.agents.middleware import LLMToolSelectorMiddleware
middleware = LLMToolSelectorMiddleware(max_tools=3)
agent = create_agent(
model="openai:gpt-4o",
tools=[tool1, tool2, tool3, tool4, tool5],
middleware=[middleware],
)
```
!!! example "Use a smaller model for selection"
```python
middleware = LLMToolSelectorMiddleware(model="openai:gpt-4o-mini", max_tools=2)
``` | documentation | 1 | {"doc_type": "class", "class_name": "LLMToolSelectorMiddleware", "file_path": "libs/langchain_v1/langchain/agents/middleware/tool_selection.py", "repo_id": "langchain-ai/langchain", "char_length": 823, "methods": ["__init__", "_prepare_selection_request", "_process_selection_response", "wrap_model_call", "awrap_model_call"]} |
ray-project/ray:python/ray/data/tests/unit/expressions/test_predicate.py:TestIsIn.test_is_in_structural_equality | # Context:
from ray.data.expressions import BinaryExpr, Operation, UnaryExpr, col, lit
class TestIsNull: ...
class TestIsNotNull: ...
class TestNullPredicateCombinations: ...
class TestNotIn: ...
class TestMembershipWithNulls: ...
class TestMembershipCombinations: ...
class TestIsIn:
def sample_data(self): ...
def test_is_in_string_list(self, sample_data): ...
def test_is_in_single_value_list(self, sample_data): ...
def test_is_in_numeric_list(self, sample_data): ...
def test_is_in_empty_list(self, sample_data): ...
def test_is_in_with_literal_expr(self, sample_data): ...
# Task:
Write a Python test method `test_is_in_structural_equality` in test class `TestIsIn` to test structural equality for is_in expressions.
Module under test: ray.data._internal.planner.plan_expression.expression_evaluator, ray.data.expressions | def test_is_in_structural_equality(self):
"""Test structural equality for is_in expressions."""
expr1 = col("status").is_in(["active", "pending"])
expr2 = col("status").is_in(["active", "pending"])
expr3 = col("status").is_in(["active"])
assert expr1.structurally_equals(expr2)
assert not expr1.structurally_equals(expr3) | test | 0 | {"function_name": "test_is_in_structural_equality", "class_name": "TestIsIn", "qualname": "TestIsIn.test_is_in_structural_equality", "file_path": "python/ray/data/tests/unit/expressions/test_predicate.py", "repo_id": "ray-project/ray", "loc": 8, "tested_modules": ["ray.data._internal.planner.plan_expression.expression_evaluator", "ray.data.expressions"], "has_docstring": true, "runnable_level": "plib_runnable"} |
huggingface/transformers:tests/models/ministral/test_modeling_ministral.py:MinistralIntegrationTest.test_model_8b_logits | # Context:
import gc
from transformers.testing_utils import (
backend_empty_cache,
cleanup,
require_bitsandbytes,
require_flash_attn,
require_torch,
require_torch_accelerator,
slow,
torch_device,
)
import torch
from transformers import (
AutoModelForCausalLM,
MinistralForCausalLM,
MinistralModel,
)
class MinistralModelTester(CausalLMModelTester): ...
class MinistralModelTest(CausalLMModelTest, unittest.TestCase): ...
class MinistralIntegrationTest(unittest.TestCase):
def tearDown(self): ...
def test_model_8b_generation(self): ...
def test_model_8b_long_prompt(self): ...
def test_export_text_with_hybrid_cache(self): ...
def test_past_sliding_window_generation(self): ...
# Task:
Write a Python test method `test_model_8b_logits` in test class `MinistralIntegrationTest` to verify the behavior of `model_8b_logits`.
Module under test: transformers, transformers.testing_utils, causal_lm_tester | def test_model_8b_logits(self):
input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
model = AutoModelForCausalLM.from_pretrained("mistralai/Ministral-8B-Instruct-2410", device_map="auto")
assert isinstance(model, MinistralForCausalLM)
input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device)
with torch.no_grad():
out = model(input_ids).logits.float().cpu()
# Expected mean on dim = -1
EXPECTED_MEAN = torch.tensor([[-1.5029, -7.2815, 4.5190, 0.5930, -5.2526, 3.0765, -0.6314, 1.8068]])
torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, rtol=1e-2, atol=1e-2)
# slicing logits[0, 0, 0:30]
EXPECTED_SLICE = torch.tensor([-3.9446, -3.9466, 0.6383, -3.9466, -3.9468, -3.9448, -3.9462, -3.9455,
-3.9451, -0.8244, -3.9472, -3.9458, -3.9460, -3.9406, -3.9462, -3.9462,
-3.9458, -3.9462, -3.9463, -3.9461, -3.9448, -3.9451, -3.9462, -3.9458,
-3.9455, -3.9452, -3.9458, -3.9469, -3.9460, -3.9464]) # fmt: skip
torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, rtol=1e-4, atol=1e-4)
del model
backend_empty_cache(torch_device)
gc.collect() | test | 0 | {"function_name": "test_model_8b_logits", "class_name": "MinistralIntegrationTest", "qualname": "MinistralIntegrationTest.test_model_8b_logits", "file_path": "tests/models/ministral/test_modeling_ministral.py", "repo_id": "huggingface/transformers", "loc": 20, "tested_modules": ["transformers", "transformers.testing_utils", "causal_lm_tester", "transformers", "transformers.testing_utils"], "has_docstring": false, "runnable_level": "plib_runnable"} |
666ghj/BettaFish:MindSpider/DeepSentimentCrawling/keyword_manager.py:KeywordManager.__init__ | # Context:
from sqlalchemy.engine import Engine
class KeywordManager:
def connect(self): ...
def get_latest_keywords(self, target_date: date, max_keywords: int) -> List[str]: ...
def get_daily_topics(self, extract_date: date) -> Optional[Dict]: ...
def get_recent_topics(self, days: int) -> List[Dict]: ...
def _get_default_keywords(self) -> List[str]: ...
def get_all_keywords_for_platforms(self, platforms: List[str], target_date: date, max_keywords: int) -> List[str]: ...
def get_keywords_for_platform(self, platform: str, target_date: date, max_keywords: int) -> List[str]: ...
def _filter_keywords_by_platform(self, keywords: List[str], platform: str) -> List[str]: ...
def get_crawling_summary(self, target_date: date) -> Dict: ...
def close(self): ...
def __enter__(self): ...
def __exit__(self, exc_type, exc_val, exc_tb): ...
# Task:
Write a Python method `__init__` for the class `KeywordManager` to 初始化关键词管理器. | def __init__(self):
"""初始化关键词管理器"""
self.engine: Engine = None
self.connect() | function_simple | 1 | {"cognitive_complexity": 0, "loc": 4, "code_loc": 2, "docstring_loc": 1, "function_name": "__init__", "class_name": "KeywordManager", "qualname": "KeywordManager.__init__", "file_path": "MindSpider/DeepSentimentCrawling/keyword_manager.py", "repo_id": "666ghj/BettaFish", "has_docstring": true, "runnable_level": "class_runnable"} |
crewAIInc/crewAI:lib/devtools/src/crewai_devtools/cli.py:translate_release_notes | # Context:
from openai import OpenAI
from crewai_devtools.prompts import RELEASE_NOTES_PROMPT, TRANSLATE_RELEASE_NOTES_PROMPT
def run_command(cmd: list[str], cwd: Path | None) -> str: ...
def check_gh_installed() -> None: ...
def check_git_clean() -> None: ...
def update_version_in_file(file_path: Path, new_version: str) -> bool: ...
def update_pyproject_dependencies(file_path: Path, new_version: str) -> bool: ...
def add_docs_version(docs_json_path: Path, version: str) -> bool: ...
def _format_changelog_date(lang: str) -> str: ...
def update_changelog(changelog_path: Path, version: str, release_notes: str, lang: str) -> bool: ...
def update_template_dependencies(templates_dir: Path, new_version: str) -> list[Path]: ...
def find_version_files(base_path: Path) -> list[Path]: ...
def get_packages(lib_dir: Path) -> list[Path]: ...
def get_commits_from_last_tag(tag_name: str, version: str) -> tuple[str, str]: ...
def get_github_contributors(commit_range: str) -> list[str]: ...
def cli() -> None: ...
def bump(version: str, dry_run: bool, no_push: bool, no_commit: bool) -> None: ...
def tag(dry_run: bool, no_edit: bool) -> None: ...
def main() -> None: ...
# Task:
Write a Python function `translate_release_notes` to translate release notes into the target language using OpenAI.
Parameters: release_notes: str, lang: str, client: OpenAI
Returns: str | def translate_release_notes(
release_notes: str,
lang: str,
client: OpenAI,
) -> str:
"""Translate release notes into the target language using OpenAI.
Args:
release_notes: English release notes markdown.
lang: Language code (e.g., "pt-BR", "ko").
client: OpenAI client instance.
Returns:
Translated release notes, or original on failure.
"""
locale_cfg = _CHANGELOG_LOCALES.get(lang)
if not locale_cfg:
return release_notes
language_name = locale_cfg["language_name"]
prompt = TRANSLATE_RELEASE_NOTES_PROMPT.substitute(
language=language_name,
release_notes=release_notes,
)
try:
response = client.chat.completions.create(
model="gpt-4o-mini",
messages=[
{
"role": "system",
"content": f"You are a professional translator. Translate technical documentation into {language_name}.",
},
{"role": "user", "content": prompt},
],
temperature=0.3,
)
return response.choices[0].message.content or release_notes
except Exception as e:
console.print(
f"[yellow]Warning:[/yellow] Could not translate to {language_name}: {e}"
)
return release_notes | function_simple | 0 | {"cognitive_complexity": 3, "loc": 43, "code_loc": 26, "docstring_loc": 10, "function_name": "translate_release_notes", "class_name": null, "qualname": "translate_release_notes", "file_path": "lib/devtools/src/crewai_devtools/cli.py", "repo_id": "crewAIInc/crewAI", "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/data/tests/test_repartition_e2e.py:test_key_based_repartition_shuffle | # Context:
import numpy as np
import ray
from ray.data.context import DataContext, ShuffleStrategy
def test_repartition_shuffle(ray_start_regular_shared_2_cpus, disable_fallback_to_object_extension): ...
def test_repartition_noshuffle(ray_start_regular_shared_2_cpus, disable_fallback_to_object_extension): ...
def test_repartition_shuffle_arrow(ray_start_regular_shared_2_cpus, disable_fallback_to_object_extension): ...
def test_repartition_target_num_rows_per_block(ray_start_regular_shared_2_cpus, total_rows, target_num_rows_per_block, expected_num_blocks, disable_fallback_to_object_extension): ...
def test_repartition_invalid_inputs(ray_start_regular_shared_2_cpus, num_blocks, target_num_rows_per_block, shuffle, expected_exception_msg, disable_fallback_to_object_extension): ...
def test_repartition_empty_datasets(ray_start_regular_shared_2_cpus, shuffle): ...
def test_streaming_repartition_write_with_operator_fusion(ray_start_regular_shared_2_cpus, tmp_path, disable_fallback_to_object_extension, streaming_repartition_first, n_target_num_rows): ...
def test_streaming_repartition_fusion_output_shape(ray_start_regular_shared_2_cpus, tmp_path, disable_fallback_to_object_extension): ...
def test_repartition_guarantee_row_num_to_be_exact(ray_start_regular_shared_2_cpus, num_rows, override_num_blocks_list, target_num_rows_per_block, disable_fallback_to_object_extension): ...
def test_streaming_repartition_with_partial_last_block(ray_start_regular_shared_2_cpus, disable_fallback_to_object_extension): ...
def test_streaming_repartition_non_strict_mode(ray_start_regular_shared_2_cpus, disable_fallback_to_object_extension): ...
def test_streaming_repartition_fusion_non_strict(ray_start_regular_shared_2_cpus, disable_fallback_to_object_extension, batch_size): ...
def test_streaming_repartition_empty_dataset(ray_start_regular_shared_2_cpus, disable_fallback_to_object_extension): ...
# Task:
Write a Python test function `test_key_based_repartition_shuffle` to verify the behavior of `key_based_repartition_shuffle`.
Module under test: ray.data._internal.logical.optimizers, ray.data._internal.planner, ray.data.block | def test_key_based_repartition_shuffle(
ray_start_regular_shared_2_cpus,
restore_data_context,
disable_fallback_to_object_extension,
):
context = DataContext.get_current()
context.shuffle_strategy = ShuffleStrategy.HASH_SHUFFLE
context.hash_shuffle_operator_actor_num_cpus_override = 0.001
ds = ray.data.range(20, override_num_blocks=10)
assert ds._plan.initial_num_blocks() == 10
assert ds.sum() == 190
assert ds._block_num_rows() == [2] * 10
ds2 = ds.repartition(3, keys=["id"])
assert ds2._plan.initial_num_blocks() == 3
assert ds2.sum() == 190
ds3 = ds.repartition(5, keys=["id"])
assert ds3._plan.initial_num_blocks() == 5
assert ds3.sum() == 190
large = ray.data.range(10000, override_num_blocks=100)
large = large.repartition(20, keys=["id"])
assert large._plan.initial_num_blocks() == 20
# Assert block sizes distribution
assert sum(large._block_num_rows()) == 10000
assert 495 < np.mean(large._block_num_rows()) < 505
assert large.sum() == 49995000 | test | 0 | {"function_name": "test_key_based_repartition_shuffle", "class_name": null, "qualname": "test_key_based_repartition_shuffle", "file_path": "python/ray/data/tests/test_repartition_e2e.py", "repo_id": "ray-project/ray", "loc": 32, "tested_modules": ["ray.data._internal.logical.optimizers", "ray.data._internal.planner", "ray.data.block", "ray.data.context", "ray.data.tests.conftest"], "has_docstring": false, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/train/v2/_internal/execution/controller/placement_group_cleaner.py:PlacementGroupCleaner.start_monitoring | # Context:
import threading
class PlacementGroupCleaner:
def __init__(self, check_interval_s: float = 1.0):
self._check_interval_s = check_interval_s
self._pg_queue: queue.Queue = queue.Queue()
self._stop_event = threading.Event()
self._controller_actor_id: Optional[str] = None
self._monitor_thread: Optional[threading.Thread] = None
self._get_actor_timeout_s = GET_ACTOR_TIMEOUT_S
self._exiting: bool = False
def register_controller_and_placement_group(self, controller_actor_id: str, placement_group: PlacementGroup): ...
def _monitor_loop(self): ...
def _cleanup_placement_group(self, placement_group: PlacementGroup): ...
def _stop_monitor_thread(self): ...
def stop(self): ...
def _is_placement_group_removed(self, placement_group: PlacementGroup) -> bool: ...
def _exit(self): ...
# Task:
Write a Python method `start_monitoring` for the class `PlacementGroupCleaner` to start monitoring the controller and placement group. | def start_monitoring(self):
"""Start monitoring the controller and placement group."""
if self._monitor_thread is not None and self._monitor_thread.is_alive():
# Thread already running, just return True
logger.debug("Monitor thread already running")
return True
self._monitor_thread = threading.Thread(
target=self._monitor_loop,
name="PlacementGroupCleanerMonitor",
daemon=True,
)
self._monitor_thread.start()
logger.debug("PlacementGroupCleaner started monitoring in background thread")
return True | function_simple | 0 | {"cognitive_complexity": 2, "loc": 15, "code_loc": 11, "docstring_loc": 1, "function_name": "start_monitoring", "class_name": "PlacementGroupCleaner", "qualname": "PlacementGroupCleaner.start_monitoring", "file_path": "python/ray/train/v2/_internal/execution/controller/placement_group_cleaner.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "file_runnable"} |
ansible/ansible:test/units/_internal/_yaml/test_dumper.py:test_yaml_dump_iterables | # Context:
import pytest
from ansible.template import Templar, trust_as_template, is_trusted_as_template
def test_yaml_dump(filter_name: str, _vault_secrets_context: VaultTestHelper) -> None: ...
def test_yaml_dump_undefined() -> None: ...
# Task:
Write a Python test function `test_yaml_dump_iterables` to verify the behavior of `yaml_dump_iterables`.
Module under test: __future__, ansible.errors, ansible.parsing.utils.yaml | def test_yaml_dump_iterables(value: object, expected: object) -> None:
result = Templar(variables=dict(value=value)).template(trust_as_template("{{ value | to_yaml }}"))
assert result == expected | test | 1 | {"function_name": "test_yaml_dump_iterables", "class_name": null, "qualname": "test_yaml_dump_iterables", "file_path": "test/units/_internal/_yaml/test_dumper.py", "repo_id": "ansible/ansible", "loc": 4, "tested_modules": ["__future__", "ansible.errors", "ansible.parsing.utils.yaml", "ansible.parsing.vault", "ansible.template"], "has_docstring": false, "runnable_level": "project_runnable"} |
vllm-project/vllm:tests/v1/e2e/test_pooling_chunked_prefill.py:test_pooling_prefix_cache | # Context:
import pytest
from vllm.platforms import current_platform
class WrapperPooler(nn.Module): ...
def inject_pooler(self): ...
def retrieve_chunks(self): ...
def test_pooling_chunked_prefill(vllm_runner, monkeypatch): ...
# Task:
Write a Python test function `test_pooling_prefix_cache` to test chunked prefill for pooling models with LastPool.
Module under test: vllm.platforms | def test_pooling_prefix_cache(vllm_runner, monkeypatch):
"""Test chunked prefill for pooling models with LastPool."""
verses = prompt.split("\n\n")
with monkeypatch.context() as m:
m.setenv("VLLM_ALLOW_INSECURE_SERIALIZATION", "1")
model_id = "Qwen/Qwen3-Embedding-0.6B"
with vllm_runner(
model_id,
runner="pooling",
enable_prefix_caching=True,
tensor_parallel_size=1,
enforce_eager=True,
) as llm:
llm.get_llm().llm_engine.collective_rpc(inject_pooler)
tokenizer = llm.get_llm().get_tokenizer()
prompt1 = "\n\n".join([verses[0], verses[1]])
prompt2 = "\n\n".join([verses[0], verses[2]])
tokens1 = tokenizer(prompt1)["input_ids"]
tokens2 = tokenizer(prompt2)["input_ids"]
prompt1_len = len(tokens1)
prompt2_len = len(tokens2)
llm.embed([prompt1])
chunks = llm.get_llm().llm_engine.collective_rpc(retrieve_chunks)[0]
assert len(chunks) == 1
assert chunks[0] == prompt1_len
llm.embed([prompt2])
chunks = llm.get_llm().llm_engine.collective_rpc(retrieve_chunks)[0]
assert len(chunks) == 1
assert chunks[0] <= prompt1_len
assert chunks[0] < prompt2_len
vllm_config = llm.get_llm().llm_engine.vllm_config
cache_config = vllm_config.cache_config
print(f"{cache_config=}")
# Prefixes are cached in blocks
assert (prompt2_len - chunks[0]) % cache_config.block_size == 0 | test | 1 | {"function_name": "test_pooling_prefix_cache", "class_name": null, "qualname": "test_pooling_prefix_cache", "file_path": "tests/v1/e2e/test_pooling_chunked_prefill.py", "repo_id": "vllm-project/vllm", "loc": 44, "tested_modules": ["vllm.platforms"], "has_docstring": true, "runnable_level": "project_runnable"} |
exo-explore/exo:src/exo/master/api.py:API.bench_image_edits | # Context:
import contextlib
from typing import Annotated, Literal, cast
from fastapi import FastAPI, File, Form, HTTPException, Query, Request, UploadFile
from exo.shared.models.model_cards import (
ModelCard,
ModelId,
delete_custom_card,
get_model_cards,
is_custom_card,
)
from exo.shared.types.api import (
AddCustomModelParams,
AdvancedImageParams,
BenchChatCompletionRequest,
BenchChatCompletionResponse,
BenchImageGenerationResponse,
BenchImageGenerationTaskParams,
ChatCompletionChoice,
ChatCompletionMessage,
ChatCompletionRequest,
ChatCompletionResponse,
CreateInstanceParams,
CreateInstanceResponse,
DeleteDownloadResponse,
DeleteInstanceResponse,
DeleteTracesRequest,
DeleteTracesResponse,
ErrorInfo,
ErrorResponse,
FinishReason,
GenerationStats,
HuggingFaceSearchResult,
ImageData,
ImageEditsTaskParams,
ImageGenerationResponse,
ImageGenerationStats,
ImageGenerationTaskParams,
ImageListItem,
ImageListResponse,
ImageSize,
ModelList,
ModelListModel,
PlaceInstanceParams,
PlacementPreview,
PlacementPreviewResponse,
StartDownloadParams,
StartDownloadResponse,
ToolCall,
TraceCategoryStats,
TraceEventResponse,
TraceListItem,
TraceListResponse,
TraceRankStats,
TraceResponse,
TraceStatsResponse,
normalize_image_size,
)
def _format_to_content_type(image_format: Literal['png', 'jpeg', 'webp'] | None) -> str: ...
def _ensure_seed(params: AdvancedImageParams | None) -> AdvancedImageParams: ...
class API:
def __init__(
self,
node_id: NodeId,
*,
port: int,
event_receiver: Receiver[IndexedEvent],
command_sender: Sender[ForwarderCommand],
download_command_sender: Sender[ForwarderDownloadCommand],
# This lets us pause the API if an election is running
election_receiver: Receiver[ElectionMessage],
) -> None:
self.state = State()
self._event_log = DiskEventLog(_API_EVENT_LOG_DIR)
self._system_id = SystemId()
self.command_sender = command_sender
self.download_command_sender = download_command_sender
self.event_receiver = event_receiver
self.election_receiver = election_receiver
self.node_id: NodeId = node_id
self.last_completed_election: int = 0
self.port = port
self.paused: bool = False
self.paused_ev: anyio.Event = anyio.Event()
self.app = FastAPI()
@self.app.middleware("http")
async def _log_requests( # pyright: ignore[reportUnusedFunction]
request: Request,
call_next: Callable[[Request], Awaitable[StreamingResponse]],
) -> StreamingResponse:
logger.debug(f"API request: {request.method} {request.url.path}")
return await call_next(request)
self._setup_exception_handlers()
self._setup_cors()
self._setup_routes()
self.app.mount(
"/",
StaticFiles(
directory=DASHBOARD_DIR,
html=True,
),
name="dashboard",
)
self._text_generation_queues: dict[
CommandId,
Sender[TokenChunk | ErrorChunk | ToolCallChunk | PrefillProgressChunk],
] = {}
self._image_generation_queues: dict[
CommandId, Sender[ImageChunk | ErrorChunk]
] = {}
self._image_store = ImageStore(EXO_IMAGE_CACHE_DIR)
self._tg: TaskGroup = TaskGroup()
def reset(self, result_clock: int, event_receiver: Receiver[IndexedEvent]): ...
def unpause(self, result_clock: int): ...
def _setup_exception_handlers(self) -> None: ...
async def http_exception_handler(self, _: Request, exc: HTTPException) -> JSONResponse: ...
def _setup_cors(self) -> None: ...
def _setup_routes(self) -> None: ...
async def place_instance(self, payload: PlaceInstanceParams): ...
async def create_instance(self, payload: CreateInstanceParams) -> CreateInstanceResponse: ...
async def get_placement(self, model_id: ModelId, sharding: Sharding, instance_meta: InstanceMeta, min_nodes: int) -> Instance: ...
async def get_placement_previews(self, model_id: ModelId, node_ids: Annotated[list[NodeId] | None, Query()]) -> PlacementPreviewResponse: ...
def get_instance(self, instance_id: InstanceId) -> Instance: ...
async def delete_instance(self, instance_id: InstanceId) -> DeleteInstanceResponse: ...
async def _token_chunk_stream(self, command_id: CommandId) -> AsyncGenerator[TokenChunk | ErrorChunk | ToolCallChunk | PrefillProgressChunk, None]: ...
async def _collect_text_generation_with_stats(self, command_id: CommandId) -> BenchChatCompletionResponse: ...
async def _trigger_notify_user_to_download_model(self, model_id: ModelId) -> None: ...
async def chat_completions(self, payload: ChatCompletionRequest) -> ChatCompletionResponse | StreamingResponse: ...
async def bench_chat_completions(self, payload: BenchChatCompletionRequest) -> BenchChatCompletionResponse: ...
async def _resolve_and_validate_text_model(self, model_id: ModelId) -> ModelId: ...
async def _validate_image_model(self, model: ModelId) -> ModelId: ...
def stream_events(self) -> StreamingResponse: ...
async def get_image(self, image_id: str) -> FileResponse: ...
async def list_images(self, request: Request) -> ImageListResponse: ...
def _build_image_url(self, request: Request, image_id: Id) -> str: ...
async def image_generations(self, request: Request, payload: ImageGenerationTaskParams) -> ImageGenerationResponse | StreamingResponse: ...
async def _generate_image_stream(self, request: Request, command_id: CommandId, num_images: int, response_format: str) -> AsyncGenerator[str, None]: ...
async def _collect_image_chunks(self, request: Request | None, command_id: CommandId, num_images: int, response_format: str, capture_stats: bool) -> tuple[list[ImageData], ImageGenerationStats | None]: ...
async def _collect_image_generation(self, request: Request, command_id: CommandId, num_images: int, response_format: str) -> ImageGenerationResponse: ...
async def _collect_image_generation_with_stats(self, request: Request | None, command_id: CommandId, num_images: int, response_format: str) -> BenchImageGenerationResponse: ...
async def bench_image_generations(self, request: Request, payload: BenchImageGenerationTaskParams) -> BenchImageGenerationResponse: ...
async def _send_image_edits_command(self, image: UploadFile, prompt: str, model: ModelId, n: int, size: ImageSize, response_format: Literal['url', 'b64_json'], input_fidelity: Literal['low', 'high'], stream: bool, partial_images: int, bench: bool, quality: Literal['high', 'medium', 'low'], output_format: Literal['png', 'jpeg', 'webp'], advanced_params: AdvancedImageParams | None) -> ImageEdits: ...
async def image_edits(self, request: Request, image: UploadFile, prompt: str, model: str, n: int, size: str | None, response_format: Literal['url', 'b64_json'], input_fidelity: Literal['low', 'high'], stream: str, partial_images: str, quality: Literal['high', 'medium', 'low'], output_format: Literal['png', 'jpeg', 'webp'], advanced_params: str | None) -> ImageGenerationResponse | StreamingResponse: ...
async def claude_messages(self, payload: ClaudeMessagesRequest) -> ClaudeMessagesResponse | StreamingResponse: ...
async def openai_responses(self, payload: ResponsesRequest) -> ResponsesResponse | StreamingResponse: ...
async def _ollama_root(self) -> JSONResponse: ...
async def ollama_chat(self, request: Request) -> OllamaChatResponse | StreamingResponse: ...
async def ollama_generate(self, request: Request) -> OllamaGenerateResponse | StreamingResponse: ...
async def ollama_tags(self) -> OllamaTagsResponse: ...
async def ollama_show(self, request: Request) -> OllamaShowResponse: ...
async def ollama_ps(self) -> OllamaPsResponse: ...
async def ollama_version(self) -> dict[str, str]: ...
def _calculate_total_available_memory(self) -> Memory: ...
async def get_models(self, status: str | None) -> ModelList: ...
async def add_custom_model(self, payload: AddCustomModelParams) -> ModelListModel: ...
async def delete_custom_model(self, model_id: ModelId) -> JSONResponse: ...
async def search_models(self, query: str, limit: int) -> list[HuggingFaceSearchResult]: ...
async def run(self): ...
async def run_api(self, ev: anyio.Event): ...
async def _apply_state(self): ...
def _save_merged_trace(self, event: TracesMerged) -> None: ...
async def _pause_on_new_election(self): ...
async def _cleanup_expired_images(self): ...
async def _send(self, command: Command): ...
async def _send_download(self, command: DownloadCommand): ...
async def start_download(self, payload: StartDownloadParams) -> StartDownloadResponse: ...
async def delete_download(self, node_id: NodeId, model_id: ModelId) -> DeleteDownloadResponse: ...
def _get_trace_path(task_id: str) -> Path: ...
async def list_traces(self) -> TraceListResponse: ...
async def get_trace(self, task_id: str) -> TraceResponse: ...
async def get_trace_stats(self, task_id: str) -> TraceStatsResponse: ...
async def get_trace_raw(self, task_id: str) -> FileResponse: ...
async def delete_traces(self, request: DeleteTracesRequest) -> DeleteTracesResponse: ...
async def get_onboarding(self) -> JSONResponse: ...
async def complete_onboarding(self) -> JSONResponse: ...
# Task:
Write a Python async method `bench_image_edits` for the class `API` to handle benchmark image editing requests with generation stats.
Parameters: request: Request, image: UploadFile, prompt: str, model: str, n: int, size: str | None, response_format: Literal['url', 'b64_json'], input_fidelity: Literal['low', 'high'], quality: Literal['high', 'medium', 'low'], output_format: Literal['png', 'jpeg', 'webp'], advanced_params: str | None
Returns: BenchImageGenerationResponse | async def bench_image_edits(
self,
request: Request,
image: UploadFile = File(...), # noqa: B008
prompt: str = Form(...),
model: str = Form(...),
n: int = Form(1),
size: str | None = Form(None),
response_format: Literal["url", "b64_json"] = Form("b64_json"),
input_fidelity: Literal["low", "high"] = Form("low"),
quality: Literal["high", "medium", "low"] = Form("medium"),
output_format: Literal["png", "jpeg", "webp"] = Form("png"),
advanced_params: str | None = Form(None),
) -> BenchImageGenerationResponse:
"""Handle benchmark image editing requests with generation stats."""
parsed_advanced_params: AdvancedImageParams | None = None
if advanced_params:
with contextlib.suppress(Exception):
parsed_advanced_params = AdvancedImageParams.model_validate_json(
advanced_params
)
command = await self._send_image_edits_command(
image=image,
prompt=prompt,
model=ModelId(model),
n=n,
size=normalize_image_size(size),
response_format=response_format,
input_fidelity=input_fidelity,
stream=False,
partial_images=0,
bench=True,
quality=quality,
output_format=output_format,
advanced_params=parsed_advanced_params,
)
return await self._collect_image_generation_with_stats(
request=request,
command_id=command.command_id,
num_images=n,
response_format=response_format,
) | function_simple | 0 | {"cognitive_complexity": 1, "loc": 44, "code_loc": 27, "docstring_loc": 1, "function_name": "bench_image_edits", "class_name": "API", "qualname": "API.bench_image_edits", "file_path": "src/exo/master/api.py", "repo_id": "exo-explore/exo", "has_docstring": true, "runnable_level": "project_runnable"} |
vllm-project/vllm:tests/test_envs.py:TestEnvWithChoices.test_valid_lowercase_value_returned_case_insensitive | # Context:
import os
from unittest.mock import patch
from vllm.envs import (
disable_envs_cache,
enable_envs_cache,
env_list_with_choices,
env_set_with_choices,
env_with_choices,
environment_variables,
)
def test_getattr_without_cache(monkeypatch: pytest.MonkeyPatch): ...
def test_getattr_with_cache(monkeypatch: pytest.MonkeyPatch): ...
def test_getattr_with_reset(monkeypatch: pytest.MonkeyPatch) -> None: ...
def test_is_envs_cache_enabled() -> None: ...
class TestEnvListWithChoices: ...
class TestEnvSetWithChoices: ...
class TestVllmConfigureLogging: ...
class TestEnvWithChoices:
def test_default_value_returned_when_env_not_set(self): ...
def test_none_default_returned_when_env_not_set(self): ...
def test_valid_value_returned_case_sensitive(self): ...
def test_valid_uppercase_value_returned_case_insensitive(self): ...
def test_invalid_value_raises_error_case_sensitive(self): ...
def test_case_mismatch_raises_error_case_sensitive(self): ...
def test_invalid_value_raises_error_case_insensitive(self): ...
def test_callable_choices_resolved_correctly(self): ...
def test_callable_choices_with_invalid_value(self): ...
# Task:
Write a Python test method `test_valid_lowercase_value_returned_case_insensitive` in test class `TestEnvWithChoices` to test that lowercase value is accepted in case insensitive mode.
Module under test: vllm.envs | def test_valid_lowercase_value_returned_case_insensitive(self):
"""Test that lowercase value is accepted in case insensitive mode."""
with patch.dict(os.environ, {"TEST_ENV": "option1"}):
env_func = env_with_choices(
"TEST_ENV", "default", ["OPTION1", "OPTION2"], case_sensitive=False
)
assert env_func() == "option1" | test | 1 | {"function_name": "test_valid_lowercase_value_returned_case_insensitive", "class_name": "TestEnvWithChoices", "qualname": "TestEnvWithChoices.test_valid_lowercase_value_returned_case_insensitive", "file_path": "tests/test_envs.py", "repo_id": "vllm-project/vllm", "loc": 7, "tested_modules": ["vllm.envs"], "has_docstring": true, "runnable_level": "project_runnable"} |
exo-explore/exo:bench/eval_tool_calls.py:_claude_parse_response | # Context:
import json
from typing import Any, Literal
class Scenario: ...
def load_scenarios(path: Path) -> list[Scenario]: ...
class ParsedResponse: ...
class ScenarioResult: ...
def validate_args(args_str: str, required_keys: list[str]) -> tuple[bool, str | None]: ...
def validate_nested_args(args_str: str, array_key: str, required_item_keys: list[str]) -> tuple[bool, str | None]: ...
def call_api(client: httpx.Client, host: str, port: int, path: str, body: dict[str, Any], timeout: float) -> tuple[dict[str, Any], float]: ...
def _openai_build_request(model: str, messages: list[dict[str, Any]], tools: list[dict[str, Any]]) -> tuple[str, dict[str, Any]]: ...
def _openai_parse_response(data: dict[str, Any]) -> ParsedResponse: ...
def _openai_build_followup(messages: list[dict[str, Any]], tools: list[dict[str, Any]], model: str, parsed: ParsedResponse, tool_result: str) -> tuple[str, dict[str, Any]]: ...
def _claude_translate_tools(tools: list[dict[str, Any]]) -> list[dict[str, Any]]: ...
def _claude_translate_messages(messages: list[dict[str, Any]]) -> list[dict[str, Any]]: ...
def _claude_build_request(model: str, messages: list[dict[str, Any]], tools: list[dict[str, Any]]) -> tuple[str, dict[str, Any]]: ...
def _claude_build_followup(messages: list[dict[str, Any]], tools: list[dict[str, Any]], model: str, parsed: ParsedResponse, tool_result: str) -> tuple[str, dict[str, Any]]: ...
def _responses_translate_input(messages: list[dict[str, Any]]) -> list[dict[str, Any]]: ...
def _responses_build_request(model: str, messages: list[dict[str, Any]], tools: list[dict[str, Any]]) -> tuple[str, dict[str, Any]]: ...
def _responses_parse_response(data: dict[str, Any]) -> ParsedResponse: ...
def _responses_build_followup(messages: list[dict[str, Any]], tools: list[dict[str, Any]], model: str, parsed: ParsedResponse, tool_result: str) -> tuple[str, dict[str, Any]]: ...
def run_scenario(client: httpx.Client, host: str, port: int, model: str, scenario: Scenario, api_name: ApiName, timeout: float, verbose: bool) -> list[ScenarioResult]: ...
def result_to_dict(result: ScenarioResult) -> dict[str, Any]: ...
def _placement_sort_key(p: dict[str, Any]) -> tuple[int, int]: ...
def main() -> None: ...
# Task:
Write a Python function `_claude_parse_response` to parse Claude Messages response into common format.
Parameters: data: dict[str, Any]
Returns: ParsedResponse | def _claude_parse_response(data: dict[str, Any]) -> ParsedResponse:
"""Parse Claude Messages response into common format."""
stop_reason = data.get("stop_reason", "")
content_blocks = data.get("content", [])
if stop_reason == "tool_use":
finish_reason = "tool_calls"
elif stop_reason == "end_turn":
finish_reason = "stop"
else:
finish_reason = stop_reason
tool_call_info: dict[str, str] | None = None
text_parts: list[str] = []
has_tool_call = False
for block in content_blocks:
block_type = block.get("type")
if block_type == "tool_use":
has_tool_call = True
if tool_call_info is None:
input_data = block.get("input", {})
tool_call_info = {
"id": block.get("id", "call_0"),
"name": block.get("name", ""),
"arguments": json.dumps(input_data)
if isinstance(input_data, dict)
else str(input_data),
}
elif block_type == "text":
text = block.get("text", "")
if text.strip():
text_parts.append(text)
content = "\n".join(text_parts) if text_parts else None
return ParsedResponse(
finish_reason=finish_reason,
has_tool_call=has_tool_call,
tool_call=tool_call_info,
content=content,
) | function_complex | 0 | {"cognitive_complexity": 18, "loc": 42, "code_loc": 35, "docstring_loc": 1, "function_name": "_claude_parse_response", "class_name": null, "qualname": "_claude_parse_response", "file_path": "bench/eval_tool_calls.py", "repo_id": "exo-explore/exo", "has_docstring": true, "runnable_level": "file_runnable"} |
huggingface/transformers:tests/models/sam3/test_modeling_sam3.py:Sam3ModelTest.test_forward_with_both_pixel_values_and_vision_embeds_raises_error | # Context:
from transformers.testing_utils import (
backend_empty_cache,
require_deterministic_for_xpu,
require_torch,
slow,
torch_device,
)
import torch
class Sam3VisionModelTester: ...
class Sam3VisionModelTest(ModelTesterMixin, unittest.TestCase): ...
class Sam3ModelTester: ...
def prepare_coco_cat_image(): ...
def prepare_coco_kitchen_image(): ...
class Sam3ModelIntegrationTest(unittest.TestCase): ...
class Sam3ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (Sam3Model,) if is_torch_available() else ()
pipeline_model_mapping = {"mask-generation": Sam3Model} if is_torch_available() else {}
test_resize_embeddings = False
_is_composite = True
def setUp(self): ...
def test_config(self): ...
def test_inputs_embeds(self): ...
def test_model_get_set_embeddings(self): ...
def test_model(self): ...
def test_batching_equivalence(self, atol, rtol): ...
def test_attention_outputs(self): ...
def test_retain_grad_hidden_states_attentions(self): ...
def test_hidden_states_output(self): ...
def test_flex_attention_with_grads(self): ...
def test_flash_attn_2_inference_equivalence(self): ...
def test_flash_attn_2_inference_equivalence_right_padding(self): ...
def test_flash_attn_3_inference_equivalence(self): ...
def test_flash_attn_3_inference_equivalence_right_padding(self): ...
def test_flash_attn_kernels_inference_equivalence(self): ...
def test_flash_attn_kernels_mps_inference_equivalence(self): ...
def test_sdpa_can_dispatch_composite_models(self): ...
def test_forward_with_text_embeds(self): ...
def test_forward_with_both_input_ids_and_text_embeds_raises_error(self): ...
def test_forward_with_vision_embeds(self): ...
def test_custom_image_size(self): ...
def test_sdpa_can_compile_dynamic(self): ...
def test_sdpa_can_dispatch_on_flash(self): ...
def test_model_outputs_equivalence(self): ...
def _prepare_for_class(self, inputs_dict, model_class, return_labels): ...
# Task:
Write a Python test method `test_forward_with_both_pixel_values_and_vision_embeds_raises_error` in test class `Sam3ModelTest` to test that passing both pixel_values and vision_embeds raises an error.
def test_forward_with_both_pixel_values_and_vision_embeds_raises_error(self):
    """Passing pixel_values together with precomputed vision_embeds must raise a ValueError."""
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        model = model_class(config).to(torch_device)
        model.eval()
        # Precompute vision features so both image inputs can be supplied at once.
        with torch.no_grad():
            vision_embeds = model.get_vision_features(pixel_values=inputs_dict["pixel_values"])
        conflicting_inputs = {
            "pixel_values": inputs_dict["pixel_values"],
            "vision_embeds": vision_embeds,
            "input_ids": inputs_dict["input_ids"],
            "attention_mask": inputs_dict["attention_mask"],
        }
        # Supplying both sources of image information is ambiguous and must fail.
        with self.assertRaises(ValueError):
            model(**conflicting_inputs)
apache/airflow:dev/verify_release_calendar.py:find_matching_entry | # Context:
class Release: ...
class CalendarEntry: ...
def fetch_confluence_page() -> str: ...
def print_confluence_debug_info(soup: BeautifulSoup) -> None: ...
def get_release_sections() -> dict[str, list[str]]: ...
def find_table_for_heading(heading: Any) -> Any | None: ...
def find_section_and_parse(soup: BeautifulSoup, release_type: str, section_names: list[str]) -> list[Release]: ...
def parse_confluence_releases(html_content: str) -> list[Release]: ...
def get_table_headers(rows: list[Any]) -> tuple[list[str], bool]: ...
def find_column_indices(headers: list[str]) -> tuple[int | None, int | None, int | None]: ...
def parse_date_string(date_str: str) -> datetime | None: ...
def extract_manager_first_name(release_manager: str) -> str: ...
def generate_version_from_date(date: datetime) -> str: ...
def parse_table_row(cells: list[Any], row_num: int, version_idx: int | None, date_idx: int | None, manager_idx: int | None, release_type: str) -> Release | None: ...
def parse_table(table: Any, release_type: str) -> list[Release]: ...
def parse_calendar_component(component: Any) -> CalendarEntry | None: ...
def fetch_calendar_entries() -> list[CalendarEntry]: ...
def normalize_name(name: str) -> str: ...
def dates_match(release_date: datetime, entry_date: datetime) -> bool: ...
def check_release_type_match(release_type: str, summary: str) -> bool: ...
def check_version_match(version: str, summary: str) -> bool: ...
def check_manager_match(manager_name: str, summary: str) -> bool: ...
def is_matching_entry(release: Release, entry: CalendarEntry) -> bool: ...
def print_verification_header() -> None: ...
def print_matched_release(release: Release, entry: CalendarEntry) -> None: ...
def print_unmatched_release(release: Release) -> None: ...
def print_verification_summary(total_releases: int, matched_count: int, unmatched_releases: list[Release]) -> None: ...
def verify_releases(releases: list[Release], calendar_entries: list[CalendarEntry]) -> bool: ...
def load_html_content(args: argparse.Namespace) -> str: ...
def validate_releases(releases: list[Release]) -> None: ...
def validate_calendar_entries(calendar_entries: list[CalendarEntry]) -> None: ...
def print_final_result(all_matched: bool) -> None: ...
def main(): ...
# Task:
Write a Python function `find_matching_entry` to find a calendar entry that matches the given release, or None if not found.
Parameters: release: Release, calendar_entries: list[CalendarEntry]
def find_matching_entry(release: Release, calendar_entries: list[CalendarEntry]) -> CalendarEntry | None:
    """Return the first calendar entry matching *release*, or None when none match."""
    return next(
        (candidate for candidate in calendar_entries if is_matching_entry(release, candidate)),
        None,
    )
keras-team/keras:keras/src/export/litert_test.py:ExportLitertTest.test_export_invalid_filepath | # Context:
import os
import numpy as np
class CustomModel(models.Model): ...
def get_model(type, input_shape, layer_list): ...
def _convert_to_numpy(structure): ...
def _normalize_name(name): ...
def _set_interpreter_inputs(interpreter, inputs): ...
def _get_interpreter_outputs(interpreter): ...
class ExportLitertTest(testing.TestCase):
def test_standard_model_export(self, model_type): ...
def test_model_with_input_structure(self, struct_type): ...
def test_model_with_multiple_inputs(self): ...
def test_export_with_custom_input_signature(self): ...
def test_multi_output_model_export(self): ...
def test_export_with_verbose(self): ...
def test_export_error_handling(self): ...
def test_export_subclass_model(self): ...
def test_export_with_optimizations_default(self): ...
def test_export_with_optimizations_sparsity(self): ...
def test_export_with_optimizations_size(self): ...
def test_export_with_optimizations_latency(self): ...
def test_export_with_multiple_optimizations(self): ...
def test_export_with_representative_dataset(self): ...
def test_export_with_multiple_kwargs(self): ...
def test_export_optimization_file_size_comparison(self): ...
def test_signature_def_with_named_model(self): ...
def test_signature_def_with_functional_model(self): ...
def test_signature_def_with_multi_input_model(self): ...
def test_signature_def_with_multi_output_model(self): ...
def test_dict_input_adapter_creation(self): ...
def test_dict_input_signature_inference(self): ...
def test_dict_input_with_custom_signature(self): ...
def test_dict_input_numerical_accuracy(self): ...
def test_dict_input_preserves_variable_sharing(self): ...
def test_dict_input_multi_output_model(self): ...
# Task:
Write a Python test method `test_export_invalid_filepath` in test class `ExportLitertTest` to test that export fails with invalid file extension.
def test_export_invalid_filepath(self):
    """Exporting to a path with an unsupported extension must raise ValueError."""
    model = get_model("sequential")
    # Call the model once so it has concrete input/output shapes before export.
    sample = np.random.random((3, 10)).astype(np.float32)
    model(sample)
    bad_path = os.path.join(self.get_temp_dir(), "exported_model.txt")
    # ".txt" is not a valid LiteRT export target.
    with self.assertRaises(ValueError):
        model.export(bad_path, format="litert")
huggingface/transformers:tests/models/deepseek_v2/test_modeling_deepseek_v2.py:DeepseekV2ModelTest.test_model_rope_scaling_frequencies | # Context:
from transformers.testing_utils import require_torch, require_torch_accelerator, slow, torch_device
import torch
from transformers.models.deepseek_v2.modeling_deepseek_v2 import DeepseekV2RotaryEmbedding
class DeepseekV2ModelTester(CausalLMModelTester): ...
class DeepseekV2IntegrationTest(unittest.TestCase): ...
class DeepseekV2ModelTest(CausalLMModelTest, unittest.TestCase):
test_all_params_have_gradient = False
model_tester_class = DeepseekV2ModelTester
model_split_percents = [0.5, 0.7, 0.8]
_torch_compile_train_cls = DeepseekV2ForCausalLM if is_torch_available() else None
def _check_past_key_values_for_generate(self, batch_size, past_key_values, seq_length, config): ...
def test_tp_plan_matches_params(self): ...
# Task:
Write a Python test method `test_model_rope_scaling_frequencies` in test class `DeepseekV2ModelTest` to verify that DeepseekV2's RoPE — implemented in the complex domain rather than in the real domain with `sin` and `cos` — passes the standard rope-scaling checks.
def test_model_rope_scaling_frequencies(self):
    """
    Overwritten: DeepseekV2 implements RoPE in the complex domain, as opposed to in the real domain with
    `sin` and `cos`. Nevertheless, the checks are the same as in the original test.
    """
    config, _ = self.model_tester.prepare_config_and_inputs_for_common()
    factor = 10
    n_short = 10
    n_long = int(config.max_position_embeddings * 1.5)

    # `probe` is used exclusively to communicate dtype and device to the embedding.
    probe = torch.randn(1, dtype=torch.float32, device=torch_device)
    pos_short = torch.arange(n_short, dtype=torch.long, device=torch_device).unsqueeze(0)
    pos_long = torch.arange(n_long, dtype=torch.long, device=torch_device).unsqueeze(0)

    def build_rope():
        # Each RoPE module reads its scaling setup from `config.rope_parameters`.
        return DeepseekV2RotaryEmbedding(config=config).to(torch_device)

    # Sanity check original RoPE: the short sequence must be a prefix of the long one.
    base_rope = build_rope()
    base_short = base_rope(probe, pos_short)
    base_long = base_rope(probe, pos_long)
    torch.testing.assert_close(base_short, base_long[:, :n_short, :])

    # Linear scaling: new position "x" matches original position "x/factor", so the
    # prefix property must still hold within the scaled embedding.
    config.rope_parameters = {"rope_type": "linear", "rope_theta": 10000.0, "factor": factor}
    linear_rope = build_rope()
    linear_short = linear_rope(probe, pos_short)
    linear_long = linear_rope(probe, pos_long)
    torch.testing.assert_close(linear_short, linear_long[:, :n_short, :])

    # Dynamic NTK: scaling only kicks in once a long input is fed; frequencies grow
    # with the factor (equivalently, `inv_freq` shrinks).
    config.rope_parameters = {"rope_type": "dynamic", "rope_theta": 10000.0, "factor": factor}
    ntk_rope = build_rope()
    ntk_short = ntk_rope(probe, pos_short)
    ntk_long = ntk_rope(probe, pos_long)
    torch.testing.assert_close(ntk_short, base_short)
    with self.assertRaises(AssertionError):
        torch.testing.assert_close(ntk_long, base_long)
    self.assertTrue((ntk_rope.inv_freq <= base_rope.inv_freq).all())

    # Yarn: scaling applies over the entire input, so both short and long outputs
    # diverge from the original while keeping the prefix property.
    config.rope_parameters = {"rope_type": "yarn", "rope_theta": 10000.0, "factor": factor}
    yarn_rope = build_rope()
    yarn_short = yarn_rope(probe, pos_short)
    yarn_long = yarn_rope(probe, pos_long)
    torch.testing.assert_close(yarn_short, yarn_long[:, :n_short, :])
    with self.assertRaises(AssertionError):
        torch.testing.assert_close(yarn_short, base_short)
    with self.assertRaises(AssertionError):
        torch.testing.assert_close(yarn_long, base_long)
huggingface/transformers:src/transformers/models/qwen3_next/modular_qwen3_next.py:Qwen3NextDynamicCache.get_mask_sizes | # Context:
import torch
class Qwen3NextRMSNormGated(nn.Module): ...
class Qwen3NextRotaryEmbedding(Gemma2RotaryEmbedding): ...
class Qwen3NextRMSNorm(Gemma3RMSNorm): ...
class Qwen3NextAttention(Qwen3MoeAttention): ...
def torch_causal_conv1d_update(hidden_states, conv_state, weight, bias, activation): ...
def l2norm(x: torch.FloatTensor, dim: int, eps: float): ...
def torch_chunk_gated_delta_rule(query, key, value, g, beta, chunk_size, initial_state, output_final_state, use_qk_l2norm_in_kernel): ...
def torch_recurrent_gated_delta_rule(query, key, value, g, beta, initial_state, output_final_state, use_qk_l2norm_in_kernel): ...
class Qwen3NextGatedDeltaNet(nn.Module): ...
class Qwen3NextMLP(Qwen3MoeMLP): ...
class Qwen3NextExperts(Qwen2MoeExperts): ...
class Qwen3NextTopKRouter(Qwen2MoeTopKRouter): ...
class Qwen3NextSparseMoeBlock(Qwen2MoeSparseMoeBlock): ...
class Qwen3NextDecoderLayer(Qwen3MoeDecoderLayer): ...
class Qwen3NextPreTrainedModel(PreTrainedModel): ...
class Qwen3NextModel(Qwen3NextPreTrainedModel): ...
class Qwen3NextForCausalLM(MixtralForCausalLM): ...
class Qwen3NextForSequenceClassification(LlamaForSequenceClassification): ...
class Qwen3NextForTokenClassification(LlamaForTokenClassification): ...
class Qwen3NextForQuestionAnswering(LlamaForQuestionAnswering): ...
class Qwen3NextDynamicCache:
is_compileable = False
def __init__(self, config: Qwen3NextConfig):
    super().__init__()
    self.layer_types = config.layer_types
    # Indices of the layers that use standard (full) attention.
    self.transformer_layers = [
        idx for idx in range(config.num_hidden_layers) if self.layer_types[idx] == "full_attention"
    ]
    # Index of the last linear-attention layer in the stack.
    self.last_linear_layer = len(self.layer_types) - 1 - self.layer_types[::-1].index("linear_attention")
    # Initialize everything to None -> will be lazy initialized to allow multi-gpu (device_map) inference
    num_layers = config.num_hidden_layers
    self.conv_states = [None] * num_layers
    self.recurrent_states = [None] * num_layers
    self.key_cache = [None] * num_layers
    self.value_cache = [None] * num_layers
def __len__(self): ...
def update(self, key_states: torch.Tensor, value_states: torch.Tensor, layer_idx: int, cache_kwargs: dict[str, Any] | None) -> tuple[torch.Tensor, torch.Tensor]: ...
def reorder_cache(self, beam_idx: torch.LongTensor): ...
def get_seq_length(self, layer_idx: int | None) -> int: ...
def has_previous_state(self): ...
# Task:
Write a Python method `get_mask_sizes` for the class `Qwen3NextDynamicCache` to return a tuple (kv_length, kv_offset) corresponding to the length and offset that will be returned for the given layer at `layer_idx`.
Parameters: cache_position: torch.Tensor, layer_idx: int
def get_mask_sizes(self, cache_position: torch.Tensor, layer_idx: int) -> tuple[int, int]:
    """
    Return a tuple (kv_length, kv_offset) corresponding to the length and offset that will be returned for
    the given layer at `layer_idx`.
    The masks are then prepared according to the given lengths (kv_length, kv_offset) and patterns for each layer.
    """
    # Entries for this cache type always start at position 0.
    kv_offset = 0
    incoming_tokens = cache_position.shape[0]
    kv_length = self.get_seq_length(layer_idx) + incoming_tokens
    return kv_length, kv_offset
apache/airflow:airflow-ctl/tests/airflow_ctl/ctl/commands/test_config_command.py:TestCliConfigCommands.test_lint_detects_removed_configs | # Context:
import os
from unittest.mock import patch
from airflowctl.api.client import ClientKind
from airflowctl.api.datamodels.generated import Config, ConfigOption, ConfigSection
from airflowctl.ctl.commands import config_command
from airflowctl.ctl.commands.config_command import ConfigChange, ConfigParameter
class TestCliConfigCommands:
parser = cli_parser.get_parser()
def test_lint_no_issues(self, mock_rich_print, api_client_maker): ...
def test_lint_detects_default_changed_configs(self, mock_rich_print, api_client_maker): ...
def test_lint_detects_renamed_configs_different_section(self, mock_rich_print, api_client_maker): ...
def test_lint_detects_renamed_configs_same_section(self, mock_rich_print, api_client_maker): ...
def test_lint_detects_invalid_values(self, mock_rich_print, api_client_maker): ...
def test_lint_detects_configs_with_suggestions(self, mock_rich_print, api_client_maker): ...
def test_config_list_masking_preservation(self, mock_rich_print, _mock_credentials, api_client_maker, capsys): ...
# Task:
Write a Python test method `test_lint_detects_removed_configs` in test class `TestCliConfigCommands` to verify the behavior of `lint_detects_removed_configs`.
def test_lint_detects_removed_configs(self, mock_rich_print, api_client_maker):
    """`config lint` reports options that were removed from the config schema."""
    # A config payload containing an option the linter considers removed.
    stale_config = Config(
        sections=[
            ConfigSection(
                name="test_section",
                options=[ConfigOption(key="test_option", value="test_value")],
            )
        ]
    )
    api_client = api_client_maker(
        path="/api/v2/config",
        response_json=stale_config.model_dump(),
        expected_http_status_code=200,
        kind=ClientKind.CLI,
    )
    api_client.configs.list.return_value = stale_config
    config_command.lint(
        self.parser.parse_args(["config", "lint"]),
        api_client=api_client,
    )
    printed = [call[0][0] for call in mock_rich_print.call_args_list]
    assert "[red]Found issues in your airflow.cfg:[/red]" in printed[0]
    assert (
        "- [yellow]Removed deprecated `test_option` configuration parameter from `test_section` section.[/yellow]"
        in printed[1]
    )
crewAIInc/crewAI:lib/crewai-tools/tests/rag/test_mdx_loader.py:TestMDXLoader.test_load_basic_mdx_file | # Context:
from crewai_tools.rag.base_loader import LoaderResult
class TestMDXLoader:
def _write_temp_mdx(self, content): ...
def _load_from_file(self, content): ...
def test_mdx_multiple_imports_exports(self): ...
def test_complex_jsx_cleanup(self): ...
def test_whitespace_cleanup(self): ...
def test_only_jsx_content(self): ...
def test_load_mdx_from_url(self, mock_get): ...
def test_load_mdx_with_custom_headers(self, mock_get): ...
def test_mdx_url_fetch_error(self, mock_get): ...
def test_load_inline_mdx_text(self): ...
def test_empty_result_after_cleaning(self): ...
def test_edge_case_parsing(self): ...
# Task:
Write a Python test method `test_load_basic_mdx_file` in test class `TestMDXLoader` to verify the behavior of `load_basic_mdx_file`.
def test_load_basic_mdx_file(self):
    """Basic MDX content loads with JSX/ESM stripped and markdown preserved."""
    content = """
import Component from './Component'
export const meta = { title: 'Test' }
# Test MDX File
This is a **markdown** file with JSX.
<Component prop="value" />
Some more content.
<div className="container">
<p>Nested content</p>
</div>
"""
    result, path = self._load_from_file(content)
    assert isinstance(result, LoaderResult)
    # JSX and ESM constructs must be stripped out of the loaded content...
    for jsx_fragment in ("import", "export", "<Component", "<div", "</div>"):
        assert jsx_fragment not in result.content
    # ...while the markdown text (including text nested in JSX) survives.
    for kept_text in ("# Test MDX File", "markdown", "Some more content", "Nested content"):
        assert kept_text in result.content
    assert result.metadata["format"] == "mdx"
    assert result.source == path
github/spec-kit:tests/test_extensions.py:TestExtensionManifest.test_invalid_command_name | # Context:
import pytest
from specify_cli.extensions import (
ExtensionManifest,
ExtensionRegistry,
ExtensionManager,
CommandRegistrar,
ExtensionCatalog,
ExtensionError,
ValidationError,
CompatibilityError,
version_satisfies,
)
import yaml
def temp_dir(): ...
def valid_manifest_data(): ...
def extension_dir(temp_dir, valid_manifest_data): ...
def project_dir(temp_dir): ...
class TestExtensionRegistry: ...
class TestExtensionManager: ...
class TestCommandRegistrar: ...
class TestVersionSatisfies: ...
class TestIntegration: ...
class TestExtensionCatalog: ...
class TestExtensionManifest:
def test_valid_manifest(self, extension_dir): ...
def test_missing_required_field(self, temp_dir): ...
def test_invalid_extension_id(self, temp_dir, valid_manifest_data): ...
def test_invalid_version(self, temp_dir, valid_manifest_data): ...
def test_no_commands(self, temp_dir, valid_manifest_data): ...
def test_manifest_hash(self, extension_dir): ...
# Task:
Write a Python test method `test_invalid_command_name` in test class `TestExtensionManifest` to test manifest with invalid command name format.
def test_invalid_command_name(self, temp_dir, valid_manifest_data):
    """Test manifest with invalid command name format."""
    # Hyphenated names are not a valid command-name format.
    valid_manifest_data["provides"]["commands"][0]["name"] = "invalid-name"
    manifest_path = temp_dir / "extension.yml"
    with open(manifest_path, "w") as handle:
        yaml.dump(valid_manifest_data, handle)
    with pytest.raises(ValidationError, match="Invalid command name"):
        ExtensionManifest(manifest_path)
infiniflow/ragflow:common/data_source/gmail_connector.py:GmailConnector.poll_source | # Context:
from common.data_source.google_util.constant import DB_CREDENTIALS_PRIMARY_ADMIN_KEY, MISSING_SCOPES_ERROR_STR, SCOPE_INSTRUCTIONS, USER_FIELDS
from common.data_source.interfaces import LoadConnector, PollConnector, SecondsSinceUnixEpoch, SlimConnectorWithPermSync
from common.data_source.models import BasicExpertInfo, Document, ExternalAccess, GenerateDocumentsOutput, GenerateSlimDocumentOutput, SlimDocument, TextSection
def _get_owners_from_emails(emails: dict[str, str | None]) -> list[BasicExpertInfo]: ...
def message_to_section(message: dict[str, Any]) -> tuple[TextSection, dict[str, str]]: ...
def thread_to_document(full_thread: dict[str, Any], email_used_to_fetch_thread: str) -> Document | None: ...
class GmailConnector(LoadConnector, PollConnector, SlimConnectorWithPermSync):
def __init__(self, batch_size: int = INDEX_BATCH_SIZE) -> None:
    self.batch_size = batch_size
    # Both are populated later by load_credentials(); None until then.
    self._creds: OAuthCredentials | ServiceAccountCredentials | None = None
    self._primary_admin_email: str | None = None
def primary_admin_email(self) -> str: ...
def google_domain(self) -> str: ...
def creds(self) -> OAuthCredentials | ServiceAccountCredentials: ...
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, str] | None: ...
def _get_all_user_emails(self) -> list[str]: ...
def _fetch_threads(self, time_range_start: SecondsSinceUnixEpoch | None, time_range_end: SecondsSinceUnixEpoch | None) -> GenerateDocumentsOutput: ...
def load_from_state(self) -> GenerateDocumentsOutput: ...
def retrieve_all_slim_docs_perm_sync(self, start: SecondsSinceUnixEpoch | None, end: SecondsSinceUnixEpoch | None, callback) -> GenerateSlimDocumentOutput: ...
# Task:
Write a Python method `poll_source` for the class `GmailConnector` to poll Gmail for documents within the given time range.
Parameters: start: SecondsSinceUnixEpoch, end: SecondsSinceUnixEpoch
def poll_source(self, start: SecondsSinceUnixEpoch, end: SecondsSinceUnixEpoch) -> GenerateDocumentsOutput:
    """Yield Gmail document batches for threads within the given time window."""
    try:
        yield from self._fetch_threads(start, end)
    except Exception as exc:
        # A missing-scopes error means the credentials lack the Gmail scopes;
        # surface actionable setup instructions instead of the raw API error.
        if MISSING_SCOPES_ERROR_STR not in str(exc):
            raise
        raise PermissionError(SCOPE_INSTRUCTIONS) from exc
apache/airflow:shared/configuration/src/airflow_shared/configuration/parser.py:AirflowConfigParser._get_option_from_secrets | # Context:
class ValueNotFound: ...
def expand_env_var(env_var: None) -> None: ...
def expand_env_var(env_var: str) -> str: ...
def expand_env_var(env_var: str | None) -> str | None: ...
def run_command(command: str) -> str: ...
def _is_template(configuration_description: dict[str, dict[str, Any]], section: str, key: str) -> bool: ...
def configure_parser_from_configuration_description(parser: ConfigParser, configuration_description: dict[str, dict[str, Any]], all_vars: dict[str, Any]) -> None: ...
class AirflowConfigParser(ConfigParser):
def _lookup_sequence(self) -> list[Callable]: ...
def _validators(self) -> list[Callable[[], None]]: ...
def validate(self) -> None: ...
def _validate_deprecated_values(self) -> None: ...
def _using_old_value(self, old: Pattern, current_value: str) -> bool: ...
def _update_env_var(self, section: str, name: str, new_value: str) -> None: ...
def _create_future_warning(name: str, section: str, current_value: Any, new_value: Any) -> None: ...
def __init__(
    self,
    configuration_description: dict[str, dict[str, Any]],
    _default_values: ConfigParser,
    *args,
    **kwargs,
):
    """
    Initialize the parser.

    :param configuration_description: Description of configuration options
    :param _default_values: ConfigParser with default values
    """
    super().__init__(*args, **kwargs)
    self.configuration_description = configuration_description
    self._default_values = _default_values
    # Toggled via suppress_future_warnings() to temporarily silence deprecation warnings.
    self._suppress_future_warnings = False
    # Records (section, key) pairs whose deprecated values were auto-upgraded.
    self.upgraded_values: dict[tuple[str, str], str] = {}
def inversed_deprecated_options(self): ...
def inversed_deprecated_sections(self): ...
def sensitive_config_values(self) -> set[tuple[str, str]]: ...
def _update_defaults_from_string(self, config_string: str) -> None: ...
def get_default_value(self, section: str, key: str, fallback: Any, raw, **kwargs) -> Any: ...
def _get_custom_secret_backend(self, worker_mode: bool) -> Any | None: ...
def _get_config_value_from_secret_backend(self, config_key: str) -> str | None: ...
def _get_cmd_option_from_config_sources(self, config_sources: ConfigSourcesType, section: str, key: str) -> str | None: ...
def _get_secret_option_from_config_sources(self, config_sources: ConfigSourcesType, section: str, key: str) -> str | None: ...
def _include_secrets(self, config_sources: ConfigSourcesType, display_sensitive: bool, display_source: bool, raw: bool): ...
def _include_commands(self, config_sources: ConfigSourcesType, display_sensitive: bool, display_source: bool, raw: bool): ...
def _include_envs(self, config_sources: ConfigSourcesType, display_sensitive: bool, display_source: bool, raw: bool): ...
def _filter_by_source(self, config_sources: ConfigSourcesType, display_source: bool, getter_func): ...
def _deprecated_value_is_set_in_config(deprecated_section: str, deprecated_key: str, configs: Iterable[tuple[str, ConfigParser]]) -> bool: ...
def _deprecated_variable_is_set(deprecated_section: str, deprecated_key: str) -> bool: ...
def _deprecated_command_is_set_in_config(deprecated_section: str, deprecated_key: str, configs: Iterable[tuple[str, ConfigParser]]) -> bool: ...
def _deprecated_variable_command_is_set(deprecated_section: str, deprecated_key: str) -> bool: ...
def _deprecated_secret_is_set_in_config(deprecated_section: str, deprecated_key: str, configs: Iterable[tuple[str, ConfigParser]]) -> bool: ...
def _deprecated_variable_secret_is_set(deprecated_section: str, deprecated_key: str) -> bool: ...
def _replace_config_with_display_sources(config_sources: ConfigSourcesType, configs: Iterable[tuple[str, ConfigParser]], configuration_description: dict[str, dict[str, Any]], display_source: bool, raw: bool, deprecated_options: dict[tuple[str, str], tuple[str, str, str]], include_env: bool, include_cmds: bool, include_secret: bool): ...
def _replace_section_config_with_display_sources(config: ConfigParser, config_sources: ConfigSourcesType, configuration_description: dict[str, dict[str, Any]], display_source: bool, raw: bool, section: str, source_name: str, deprecated_options: dict[tuple[str, str], tuple[str, str, str]], configs: Iterable[tuple[str, ConfigParser]], include_env: bool, include_cmds: bool, include_secret: bool): ...
def _warn_deprecate(self, section: str, key: str, deprecated_section: str, deprecated_name: str, extra_stacklevel: int): ...
def suppress_future_warnings(self): ...
def _env_var_name(self, section: str, key: str, team_name: str | None) -> str: ...
def _get_env_var_option(self, section: str, key: str, team_name: str | None): ...
def _get_cmd_option(self, section: str, key: str): ...
def _get_secret_option(self, section: str, key: str) -> str | None: ...
def _get_environment_variables(self, deprecated_key: str | None, deprecated_section: str | None, key: str, section: str, issue_warning: bool, extra_stacklevel: int, **kwargs) -> str | ValueNotFound: ...
def _get_option_from_config_file(self, deprecated_key: str | None, deprecated_section: str | None, key: str, section: str, issue_warning: bool, extra_stacklevel: int, **kwargs) -> str | ValueNotFound: ...
def _get_option_from_commands(self, deprecated_key: str | None, deprecated_section: str | None, key: str, section: str, issue_warning: bool, extra_stacklevel: int, **kwargs) -> str | ValueNotFound: ...
def _get_option_from_defaults(self, deprecated_key: str | None, deprecated_section: str | None, key: str, section: str, issue_warning: bool, extra_stacklevel: int, team_name: str | None, **kwargs) -> str | ValueNotFound: ...
def _resolve_deprecated_lookup(self, section: str, key: str, lookup_from_deprecated: bool, extra_stacklevel: int) -> tuple[str, str, str | None, str | None, bool]: ...
def get(self, section: str, key: str, fallback: str, **kwargs) -> str: ...
def get(self, section: str, key: str, **kwargs) -> str | None: ...
def get(self, section: str, key: str, suppress_warnings: bool, lookup_from_deprecated: bool, _extra_stacklevel: int, team_name: str | None, **kwargs) -> str | None: ...
def getboolean(self, section: str, key: str, **kwargs) -> bool: ...
def getint(self, section: str, key: str, **kwargs) -> int: ...
def getfloat(self, section: str, key: str, **kwargs) -> float: ...
def getlist(self, section: str, key: str, delimiter, **kwargs): ...
E = TypeVar("E", bound=Enum)
def getenum(self, section: str, key: str, enum_class: type[E], **kwargs) -> E: ...
def getenumlist(self, section: str, key: str, enum_class: type[E], delimiter, **kwargs) -> list[E]: ...
def getimport(self, section: str, key: str, **kwargs) -> Any: ...
def getjson(self, section: str, key: str, fallback, **kwargs) -> dict | list | str | int | float | None: ...
def gettimedelta(self, section: str, key: str, fallback: Any, **kwargs) -> datetime.timedelta | None: ...
def get_mandatory_value(self, section: str, key: str, **kwargs) -> str: ...
def get_mandatory_list_value(self, section: str, key: str, **kwargs) -> list[str]: ...
def read(self, filenames: str | bytes | os.PathLike | Iterable[str | bytes | os.PathLike], encoding: str | None) -> list[str]: ...
def read_dict(self, dictionary: dict[str, dict[str, Any]], source: str) -> None: ...
def get_sections_including_defaults(self) -> list[str]: ...
def get_options_including_defaults(self, section: str) -> list[str]: ...
def has_option(self, section: str, option: str, lookup_from_deprecated: bool, **kwargs) -> bool: ...
def set(self, section: str, option: str, value: str | None) -> None: ...
def remove_option(self, section: str, option: str, remove_default: bool): ...
def optionxform(self, optionstr: str) -> str: ...
def _get_config_sources_for_as_dict(self) -> list[tuple[str, ConfigParser]]: ...
def as_dict(self, display_source: bool, display_sensitive: bool, raw: bool, include_env: bool, include_cmds: bool, include_secret: bool) -> ConfigSourcesType: ...
def _write_option_header(self, file: IO[str], option: str, extra_spacing: bool, include_descriptions: bool, include_env_vars: bool, include_examples: bool, include_sources: bool, section_config_description: dict[str, dict[str, Any]], section_to_write: str, sources_dict: ConfigSourcesType) -> tuple[bool, bool]: ...
def is_template(self, section: str, key) -> bool: ...
def getsection(self, section: str, team_name: str | None) -> ConfigOptionsDictType | None: ...
def _write_section_header(file: IO[str], include_descriptions: bool, section_config_description: dict[str, str], section_to_write: str) -> None: ...
def _write_value(self, file: IO[str], option: str, comment_out_everything: bool, needs_separation: bool, only_defaults: bool, section_to_write: str, hide_sensitive: bool, is_sensitive: bool, show_values: bool): ...
def write(self, file: IO[str], section: str | None, include_examples: bool, include_descriptions: bool, include_sources: bool, include_env_vars: bool, include_providers: bool, comment_out_everything: bool, hide_sensitive: bool, extra_spacing: bool, only_defaults: bool, show_values: bool, **kwargs) -> None: ...
def make_sure_configuration_loaded(self, with_providers: bool) -> Generator[None, None, None]: ...
def _ensure_providers_config_loaded(self) -> None: ...
def _ensure_providers_config_unloaded(self) -> bool: ...
def _reload_provider_configs(self) -> None: ...
# Task:
Write a Python method `_get_option_from_secrets` for the class `AirflowConfigParser` to get config option from secrets backend.
Parameters: deprecated_key: str | None, deprecated_section: str | None, key: str, section: str, issue_warning: bool, extra_stacklevel: int
Returns: str | ValueNotFound | def _get_option_from_secrets(
self,
deprecated_key: str | None,
deprecated_section: str | None,
key: str,
section: str,
issue_warning: bool = True,
extra_stacklevel: int = 0,
**kwargs,
) -> str | ValueNotFound:
"""Get config option from secrets backend."""
if kwargs.get("team_name", None):
# Secrets based team config fetching is not currently supported
return VALUE_NOT_FOUND_SENTINEL
option = self._get_secret_option(section, key)
if option:
return option
if deprecated_section and deprecated_key:
with self.suppress_future_warnings():
option = self._get_secret_option(deprecated_section, deprecated_key)
if option:
if issue_warning:
self._warn_deprecate(section, key, deprecated_section, deprecated_key, extra_stacklevel)
return option
return VALUE_NOT_FOUND_SENTINEL | function_complex | 1 | {"cognitive_complexity": 9, "loc": 25, "code_loc": 13, "docstring_loc": 1, "function_name": "_get_option_from_secrets", "class_name": "AirflowConfigParser", "qualname": "AirflowConfigParser._get_option_from_secrets", "file_path": "shared/configuration/src/airflow_shared/configuration/parser.py", "repo_id": "apache/airflow", "has_docstring": true, "runnable_level": "file_runnable"} |
ray-project/ray:python/ray/_common/tests/test_ray_option_utils.py:TestTaskActorOptionValidation.test_validate_actor_options_invalid_keyword | # Context:
import pytest
from ray._common.ray_option_utils import (
Option,
_check_deprecate_placement_group,
_counting_option,
_resource_option,
_validate_resource_quantity,
_validate_resources,
update_options,
validate_actor_options,
validate_task_options,
)
class TestOptionValidation: ...
class TestUpdateOptions: ...
class TestTaskActorOptionValidation:
def test_validate_task_options_valid(self): ...
def test_validate_task_options_invalid_keyword(self): ...
def test_validate_task_options_in_options_invalid(self): ...
def test_validate_actor_options_valid(self): ...
def test_validate_actor_options_in_options_invalid(self): ...
def test_validate_actor_get_if_exists_no_name(self): ...
def test_validate_actor_object_store_memory_warning(self): ...
def test_check_deprecate_placement_group(self): ...
# Task:
Write a Python test method `test_validate_actor_options_invalid_keyword` in test class `TestTaskActorOptionValidation` to verify the behavior of `validate_actor_options_invalid_keyword`.
Module under test: ray._common.ray_option_utils, ray.util.placement_group | def test_validate_actor_options_invalid_keyword(self):
with pytest.raises(ValueError, match="Invalid option keyword"):
validate_actor_options({"invalid_option": 1}, in_options=False) | test | 0 | {"function_name": "test_validate_actor_options_invalid_keyword", "class_name": "TestTaskActorOptionValidation", "qualname": "TestTaskActorOptionValidation.test_validate_actor_options_invalid_keyword", "file_path": "python/ray/_common/tests/test_ray_option_utils.py", "repo_id": "ray-project/ray", "loc": 3, "tested_modules": ["ray._common.ray_option_utils", "ray.util.placement_group"], "has_docstring": false, "runnable_level": "plib_runnable"} |
vllm-project/vllm:tests/models/language/pooling/test_bge_m3.py:test_bge_m3_api_server_multi_vector | # Context:
import httpx
import openai
import pytest
import torch
def server(): ...
async def client(server): ...
async def test_bge_m3_api_server_embedding(client: openai.AsyncOpenAI): ...
async def tokenize(client: openai.AsyncOpenAI, sentences: list[str]) -> list[list[int]]: ...
async def sparse_embeddings(client: openai.AsyncOpenAI, sentences: list[str]) -> list[dict[int, float]]: ...
def compute_lexical_matching_score(lw1: dict[int, float], lw2: dict[int, float]) -> float: ...
async def test_bge_m3_api_server_sparse_embedding(client: openai.AsyncOpenAI): ...
async def test_bge_m3_api_server_sparse_embedding_corner_case(client: openai.AsyncOpenAI): ...
def colbert_score(q_reps: torch.Tensor, p_reps: torch.Tensor) -> torch.Tensor: ...
# Task:
Write a Python test function `test_bge_m3_api_server_multi_vector` to verify the behavior of `bge_m3_api_server_multi_vector`.
Module under test: utils, embed_utils | async def test_bge_m3_api_server_multi_vector(client: openai.AsyncOpenAI):
result_1 = await client.post(
"../pooling",
body={"model": MODEL_NAME, "input": sentences_1, "task": "token_embed"},
cast_to=httpx.Response,
)
embeddings_1 = [torch.tensor(data["data"]) for data in result_1.json()["data"]]
result_2 = await client.post(
"../pooling",
body={"model": MODEL_NAME, "input": sentences_2, "task": "token_embed"},
cast_to=httpx.Response,
)
embeddings_2 = [torch.tensor(data["data"]) for data in result_2.json()["data"]]
colbert_score_1_0_x_2_0 = colbert_score(embeddings_1[0], embeddings_2[0])
assert colbert_score_1_0_x_2_0 == pytest.approx(
colbert_score_reference[0], rel=0.01
)
colbert_score_1_0_x_2_1 = colbert_score(embeddings_1[0], embeddings_2[1])
assert colbert_score_1_0_x_2_1 == pytest.approx(
colbert_score_reference[1], rel=0.01
) | test | 1 | {"function_name": "test_bge_m3_api_server_multi_vector", "class_name": null, "qualname": "test_bge_m3_api_server_multi_vector", "file_path": "tests/models/language/pooling/test_bge_m3.py", "repo_id": "vllm-project/vllm", "loc": 23, "tested_modules": ["utils", "embed_utils"], "has_docstring": false, "runnable_level": "project_runnable"} |
browser-use/browser-use:browser_use/skill_cli/python_session.py:PythonSession.execute | # Context:
import asyncio
import io
import traceback
from contextlib import redirect_stderr, redirect_stdout
class ExecutionResult: ...
class BrowserWrapper: ...
class PythonSession:
def __post_init__(self) -> None: ...
def reset(self) -> None: ...
def get_variables(self) -> dict[str, str]: ...
# Task:
Write a Python method `execute` for the class `PythonSession` to execute code in persistent namespace.
Parameters: code: str, browser_session: 'BrowserSession', loop: asyncio.AbstractEventLoop | None
Returns: ExecutionResult | def execute(
self, code: str, browser_session: 'BrowserSession', loop: asyncio.AbstractEventLoop | None = None
) -> ExecutionResult:
"""Execute code in persistent namespace.
The `browser` variable is injected into the namespace before each execution,
providing a convenient wrapper around the BrowserSession.
Args:
code: Python code to execute
browser_session: The browser session for browser operations
loop: The event loop for async operations (required for browser access)
"""
# Inject browser wrapper with the event loop for async operations
if loop is not None:
self.namespace['browser'] = BrowserWrapper(browser_session, loop)
self.execution_count += 1
stdout = io.StringIO()
stderr = io.StringIO()
try:
with redirect_stdout(stdout), redirect_stderr(stderr):
try:
# First try to compile as expression (for REPL-like behavior)
compiled = compile(code, '<input>', 'eval')
result = eval(compiled, self.namespace)
if result is not None:
print(repr(result))
except SyntaxError:
# Compile as statements
compiled = compile(code, '<input>', 'exec')
exec(compiled, self.namespace)
output = stdout.getvalue()
if stderr.getvalue():
output += stderr.getvalue()
result = ExecutionResult(success=True, output=output)
except Exception as e:
output = stdout.getvalue()
error_msg = traceback.format_exc()
result = ExecutionResult(success=False, output=output, error=error_msg)
self.history.append((code, result))
return result | function_complex | 0 | {"cognitive_complexity": 9, "loc": 47, "code_loc": 25, "docstring_loc": 10, "function_name": "execute", "class_name": "PythonSession", "qualname": "PythonSession.execute", "file_path": "browser_use/skill_cli/python_session.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "file_runnable"} |
google/langextract:tests/prompting_test.py:ContextAwarePromptBuilderTest.test_second_chunk_includes_previous_context | # Context:
from langextract import prompting
class QAPromptGeneratorTest(parameterized.TestCase): ...
class PromptBuilderTest(absltest.TestCase): ...
class ContextAwarePromptBuilderTest(absltest.TestCase):
def _create_generator(self): ...
def test_context_window_chars_property(self): ...
def test_first_chunk_has_no_previous_context(self): ...
def test_context_disabled_when_none(self): ...
def test_context_isolated_per_document(self): ...
def test_combines_previous_context_with_additional_context(self): ...
# Task:
Write a Python test method `test_second_chunk_includes_previous_context` in test class `ContextAwarePromptBuilderTest` to verify that the second chunk includes text from the first chunk.
Module under test: absl.testing, absl.testing, langextract | def test_second_chunk_includes_previous_context(self):
"""Verifies the second chunk includes text from the first chunk."""
generator = self._create_generator()
builder = prompting.ContextAwarePromptBuilder(
generator, context_window_chars=20
)
context_prefix = prompting.ContextAwarePromptBuilder._CONTEXT_PREFIX
builder.build_prompt(chunk_text="First chunk ending.", document_id="doc1")
second_prompt = builder.build_prompt(
chunk_text="Second chunk text.",
document_id="doc1",
)
self.assertIn(context_prefix, second_prompt)
self.assertIn("chunk ending.", second_prompt) | test | 1 | {"function_name": "test_second_chunk_includes_previous_context", "class_name": "ContextAwarePromptBuilderTest", "qualname": "ContextAwarePromptBuilderTest.test_second_chunk_includes_previous_context", "file_path": "tests/prompting_test.py", "repo_id": "google/langextract", "loc": 16, "tested_modules": ["absl.testing", "absl.testing", "langextract", "langextract.core", "langextract.core"], "has_docstring": true, "runnable_level": "project_runnable"} |
infiniflow/ragflow:test/testcases/test_sdk_api/test_file_management_within_dataset/test_list_documents.py:TestDocumentsList.test_invalid_params | # Context:
import pytest
class TestDocumentsList:
def test_default(self, add_documents): ...
def test_page(self, add_documents, params, expected_page_size, expected_message): ...
def test_page_size(self, add_documents, params, expected_page_size, expected_message): ...
def test_orderby(self, add_documents, params, expected_message): ...
def test_desc(self, add_documents, params, expected_message): ...
def test_keywords(self, add_documents, params, expected_num): ...
def test_name(self, add_documents, params, expected_num, expected_message): ...
def test_id(self, add_documents, document_id, expected_num, expected_message): ...
def test_name_and_id(self, add_documents, document_id, name, expected_num, expected_message): ...
def test_concurrent_list(self, add_documents): ...
# Task:
Write a Python test method `test_invalid_params` in test class `TestDocumentsList` to verify the behavior of `invalid_params`.
Module under test: concurrent.futures | def test_invalid_params(self, add_documents):
dataset, _ = add_documents
params = {"a": "b"}
with pytest.raises(TypeError) as exception_info:
dataset.list_documents(**params)
assert "got an unexpected keyword argument" in str(exception_info.value), str(exception_info.value) | test | 1 | {"function_name": "test_invalid_params", "class_name": "TestDocumentsList", "qualname": "TestDocumentsList.test_invalid_params", "file_path": "test/testcases/test_sdk_api/test_file_management_within_dataset/test_list_documents.py", "repo_id": "infiniflow/ragflow", "loc": 6, "tested_modules": ["concurrent.futures"], "has_docstring": false, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/dashboard/modules/aggregator/publisher/ray_event_publisher.py:RayEventPublisherInterface:class_doc | Write a class-level docstring for `RayEventPublisherInterface` (inherits from ABC) which has methods: `run_forever`, `wait_until_running`. | Abstract interface for publishing Ray event batches to external destinations. | documentation | 0 | {"doc_type": "class", "class_name": "RayEventPublisherInterface", "file_path": "python/ray/dashboard/modules/aggregator/publisher/ray_event_publisher.py", "repo_id": "ray-project/ray", "char_length": 77, "methods": ["run_forever", "wait_until_running"]} |
infiniflow/ragflow:test/unit_test/common/test_misc_utils.py:TestHashStr2Int.test_unicode_string | # Context:
from common.misc_utils import get_uuid, download_img, hash_str2int, convert_bytes
class TestGetUuid: ...
class TestDownloadImg: ...
class TestConvertBytes: ...
class TestHashStr2Int:
def test_basic_hashing(self): ...
def test_default_mod_value(self): ...
def test_custom_mod_value(self): ...
def test_same_input_same_output(self): ...
def test_different_input_different_output(self): ...
def test_empty_string(self): ...
def test_special_characters(self): ...
def test_large_string(self): ...
def test_mod_value_1(self): ...
def test_mod_value_2(self): ...
def test_very_large_mod(self): ...
def test_hash_algorithm_sha1(self): ...
def test_utf8_encoding(self): ...
def test_range_with_different_mods(self): ...
def test_hexdigest_conversion(self): ...
def test_consistent_with_direct_calculation(self): ...
def test_numeric_strings(self): ...
def test_whitespace_strings(self): ...
# Task:
Write a Python test method `test_unicode_string` in test class `TestHashStr2Int` to test hashing unicode strings.
Module under test: common.misc_utils | def test_unicode_string(self):
"""Test hashing unicode strings"""
test_strings = [
"中文",
"🚀火箭",
"café",
"🎉",
"Hello 世界"
]
for test_str in test_strings:
result = hash_str2int(test_str)
assert isinstance(result, int)
assert 0 <= result < 10 ** 8 | test | 1 | {"function_name": "test_unicode_string", "class_name": "TestHashStr2Int", "qualname": "TestHashStr2Int.test_unicode_string", "file_path": "test/unit_test/common/test_misc_utils.py", "repo_id": "infiniflow/ragflow", "loc": 14, "tested_modules": ["common.misc_utils"], "has_docstring": true, "runnable_level": "project_runnable"} |
huggingface/transformers:src/transformers/integrations/moe.py:ExpertsInterface:class_doc | Write a class-level docstring for `ExpertsInterface` (inherits from GeneralInterface) which has methods: `get_interface`. | Interface for registering custom experts implementations. | documentation | 0 | {"doc_type": "class", "class_name": "ExpertsInterface", "file_path": "src/transformers/integrations/moe.py", "repo_id": "huggingface/transformers", "char_length": 57, "methods": ["get_interface"]} |
langflow-ai/langflow:src/backend/tests/unit/components/processing/test_text_operations_component.py:TestBugFixTextJoinEmptyFirst:class_doc | Write a class-level docstring for `TestBugFixTextJoinEmptyFirst` which has methods: `test_process_text_allows_empty_for_text_join`. | Bug #9: Text Join should return second text when first is empty. | documentation | 1 | {"doc_type": "class", "class_name": "TestBugFixTextJoinEmptyFirst", "file_path": "src/backend/tests/unit/components/processing/test_text_operations_component.py", "repo_id": "langflow-ai/langflow", "char_length": 64, "methods": ["test_process_text_allows_empty_for_text_join"]} |
crewAIInc/crewAI:lib/crewai/tests/utilities/test_pydantic_schema_utils.py:TestForceAdditionalPropertiesFalse.test_adds_to_object | # Context:
from copy import deepcopy
from crewai.utilities.pydantic_schema_utils import (
build_rich_field_description,
convert_oneof_to_anyof,
create_model_from_schema,
ensure_all_properties_required,
ensure_type_in_schemas,
force_additional_properties_false,
resolve_refs,
strip_null_from_types,
strip_unsupported_formats,
)
class TestSimpleTypes: ...
class TestRequiredOptional: ...
class TestEnumLiteral: ...
class TestFormatMapping: ...
class TestNestedObjects: ...
class TestTypedArrays: ...
class TestUnionTypes: ...
class TestAllOfMerging: ...
class TestRefResolution: ...
class TestModelName: ...
class TestEnrichDescriptions: ...
class TestEdgeCases: ...
class TestBuildRichFieldDescription: ...
class TestResolveRefs: ...
class TestStripUnsupportedFormats: ...
class TestEnsureTypeInSchemas: ...
class TestConvertOneofToAnyof: ...
class TestEnsureAllPropertiesRequired: ...
class TestStripNullFromTypes: ...
class TestEndToEndMCPSchema: ...
class TestForceAdditionalPropertiesFalse:
def test_adds_empty_properties_and_required(self) -> None: ...
def test_recursive_nested_objects(self) -> None: ...
def test_does_not_affect_non_objects(self) -> None: ...
# Task:
Write a Python test method `test_adds_to_object` in test class `TestForceAdditionalPropertiesFalse` to verify the behavior of `adds_to_object`.
Module under test: __future__, copy, typing | def test_adds_to_object(self) -> None:
schema = {"type": "object", "properties": {"x": {"type": "integer"}}}
result = force_additional_properties_false(deepcopy(schema))
assert result["additionalProperties"] is False | test | 0 | {"function_name": "test_adds_to_object", "class_name": "TestForceAdditionalPropertiesFalse", "qualname": "TestForceAdditionalPropertiesFalse.test_adds_to_object", "file_path": "lib/crewai/tests/utilities/test_pydantic_schema_utils.py", "repo_id": "crewAIInc/crewAI", "loc": 4, "tested_modules": ["__future__", "copy", "typing", "pydantic", "crewai.utilities.pydantic_schema_utils"], "has_docstring": false, "runnable_level": "project_runnable"} |
langchain-ai/langchain:libs/partners/openrouter/tests/unit_tests/test_chat_models.py:TestErrorPaths.test_max_retries_scales_elapsed_time | # Context:
from unittest.mock import AsyncMock, MagicMock, patch
from pydantic import BaseModel, Field, SecretStr
from langchain_openrouter.chat_models import (
ChatOpenRouter,
_convert_chunk_to_message_chunk,
_convert_dict_to_message,
_convert_file_block_to_openrouter,
_convert_message_to_dict,
_convert_video_block_to_openrouter,
_create_usage_metadata,
_format_message_content,
_has_file_content_blocks,
_wrap_messages_for_sdk,
)
def _make_model(**kwargs) -> ChatOpenRouter: ...
class GetWeather(BaseModel): ...
class GenerateUsername(BaseModel): ...
def _make_sdk_response(response_dict: dict[str, Any]) -> MagicMock: ...
class _MockSyncStream: ...
class _MockAsyncStream: ...
class TestChatOpenRouterInstantiation: ...
class TestSerialization: ...
class TestMockedGenerate: ...
class TestRequestPayload: ...
class TestBindTools: ...
class TestWithStructuredOutput: ...
class TestMessageConversion: ...
class TestCreateChatResult: ...
class TestStreamingChunks: ...
class TestUsageMetadata: ...
class TestReasoningDetails: ...
class TestOpenRouterSpecificParams: ...
class TestFormatMessageContent: ...
class TestWrapMessagesForSdk: ...
class TestStructuredOutputIntegration: ...
class TestMultipleChoices: ...
class TestEnvironmentConfiguration: ...
class TestStreamingErrors: ...
class TestErrorPaths:
def test_n_less_than_1_raises(self) -> None: ...
def test_n_greater_than_1_with_streaming_raises(self) -> None: ...
def test_n_forwarded_in_params(self) -> None: ...
def test_n_default_excluded_from_params(self) -> None: ...
def test_error_response_raises(self) -> None: ...
def test_error_response_without_message(self) -> None: ...
def test_empty_choices_raises(self) -> None: ...
def test_missing_role_raises(self) -> None: ...
def test_unknown_message_type_raises(self) -> None: ...
def test_duplicate_model_kwargs_raises(self) -> None: ...
def test_known_field_in_model_kwargs_raises(self) -> None: ...
def test_max_retries_zero_disables_retries(self) -> None: ...
# Task:
Write a Python test method `test_max_retries_scales_elapsed_time` in test class `TestErrorPaths` to test that max_retries value scales max_elapsed_time.
Module under test: __future__, typing, langchain_core.load | def test_max_retries_scales_elapsed_time(self) -> None:
"""Test that max_retries value scales max_elapsed_time."""
with patch("openrouter.OpenRouter") as mock_cls:
mock_cls.return_value = MagicMock()
ChatOpenRouter(
model=MODEL_NAME,
api_key=SecretStr("test-key"),
max_retries=4,
)
call_kwargs = mock_cls.call_args[1]
retry_config = call_kwargs["retry_config"]
assert retry_config.backoff.max_elapsed_time == 4 * 150_000 | test | 1 | {"function_name": "test_max_retries_scales_elapsed_time", "class_name": "TestErrorPaths", "qualname": "TestErrorPaths.test_max_retries_scales_elapsed_time", "file_path": "libs/partners/openrouter/tests/unit_tests/test_chat_models.py", "repo_id": "langchain-ai/langchain", "loc": 12, "tested_modules": ["__future__", "typing", "langchain_core.load", "langchain_core.messages", "langchain_core.runnables"], "has_docstring": true, "runnable_level": "project_runnable"} |
fastapi/fastapi:tests/test_request_params/test_query/test_required_str.py:test_required_alias_and_validation_alias_schema | # Context:
import pytest
from inline_snapshot import snapshot
async def read_required_str(p: str): ...
class QueryModelRequiredStr(BaseModel): ...
async def read_model_required_str(p: Annotated[QueryModelRequiredStr, Query()]): ...
def test_required_str_schema(path: str): ...
def test_required_str_missing(path: str): ...
def test_required_str(path: str): ...
async def read_required_alias(p: Annotated[str, Query(alias='p_alias')]): ...
class QueryModelRequiredAlias(BaseModel): ...
async def read_model_required_alias(p: Annotated[QueryModelRequiredAlias, Query()]): ...
def test_required_str_alias_schema(path: str): ...
def test_required_alias_missing(path: str): ...
def test_required_alias_by_name(path: str): ...
def test_required_alias_by_alias(path: str): ...
def read_required_validation_alias(p: Annotated[str, Query(validation_alias='p_val_alias')]): ...
class QueryModelRequiredValidationAlias(BaseModel): ...
def read_model_required_validation_alias(p: Annotated[QueryModelRequiredValidationAlias, Query()]): ...
def test_required_validation_alias_schema(path: str): ...
def test_required_validation_alias_missing(path: str): ...
def test_required_validation_alias_by_name(path: str): ...
def test_required_validation_alias_by_validation_alias(path: str): ...
def read_required_alias_and_validation_alias(p: Annotated[str, Query(alias='p_alias', validation_alias='p_val_alias')]): ...
class QueryModelRequiredAliasAndValidationAlias(BaseModel): ...
def read_model_required_alias_and_validation_alias(p: Annotated[QueryModelRequiredAliasAndValidationAlias, Query()]): ...
def test_required_alias_and_validation_alias_missing(path: str): ...
def test_required_alias_and_validation_alias_by_name(path: str): ...
def test_required_alias_and_validation_alias_by_alias(path: str): ...
def test_required_alias_and_validation_alias_by_validation_alias(path: str): ...
# Task:
Write a Python test function `test_required_alias_and_validation_alias_schema` to verify the behavior of `required_alias_and_validation_alias_schema`.
Module under test: typing, dirty_equals, fastapi | def test_required_alias_and_validation_alias_schema(path: str):
assert app.openapi()["paths"][path]["get"]["parameters"] == snapshot(
[
{
"required": True,
"schema": {"title": "P Val Alias", "type": "string"},
"name": "p_val_alias",
"in": "query",
}
]
) | test | 1 | {"function_name": "test_required_alias_and_validation_alias_schema", "class_name": null, "qualname": "test_required_alias_and_validation_alias_schema", "file_path": "tests/test_request_params/test_query/test_required_str.py", "repo_id": "fastapi/fastapi", "loc": 11, "tested_modules": ["typing", "dirty_equals", "fastapi", "fastapi.testclient", "inline_snapshot"], "has_docstring": false, "runnable_level": "project_runnable"} |
vnpy/vnpy:tests/test_alpha101.py:TestAlpha101.test_alpha6 | # Context:
import polars as pl
from vnpy.alpha.dataset.utility import calculate_by_expression
def create_test_df(n_symbols: int, n_days: int) -> pl.DataFrame: ...
def test_df() -> pl.DataFrame: ...
class TestAlpha101:
def test_alpha1(self, test_df: pl.DataFrame) -> None: ...
def test_alpha2(self, test_df: pl.DataFrame) -> None: ...
def test_alpha3(self, test_df: pl.DataFrame) -> None: ...
def test_alpha4(self, test_df: pl.DataFrame) -> None: ...
def test_alpha5(self, test_df: pl.DataFrame) -> None: ...
def test_alpha7(self, test_df: pl.DataFrame) -> None: ...
def test_alpha8(self, test_df: pl.DataFrame) -> None: ...
def test_alpha9(self, test_df: pl.DataFrame) -> None: ...
def test_alpha10(self, test_df: pl.DataFrame) -> None: ...
def test_alpha11(self, test_df: pl.DataFrame) -> None: ...
def test_alpha12(self, test_df: pl.DataFrame) -> None: ...
def test_alpha13(self, test_df: pl.DataFrame) -> None: ...
def test_alpha14(self, test_df: pl.DataFrame) -> None: ...
def test_alpha15(self, test_df: pl.DataFrame) -> None: ...
def test_alpha16(self, test_df: pl.DataFrame) -> None: ...
def test_alpha17(self, test_df: pl.DataFrame) -> None: ...
def test_alpha18(self, test_df: pl.DataFrame) -> None: ...
def test_alpha19(self, test_df: pl.DataFrame) -> None: ...
def test_alpha20(self, test_df: pl.DataFrame) -> None: ...
def test_alpha21(self, test_df: pl.DataFrame) -> None: ...
def test_alpha22(self, test_df: pl.DataFrame) -> None: ...
def test_alpha23(self, test_df: pl.DataFrame) -> None: ...
def test_alpha24(self, test_df: pl.DataFrame) -> None: ...
def test_alpha25(self, test_df: pl.DataFrame) -> None: ...
def test_alpha26(self, test_df: pl.DataFrame) -> None: ...
def test_alpha27(self, test_df: pl.DataFrame) -> None: ...
def test_alpha28(self, test_df: pl.DataFrame) -> None: ...
def test_alpha29(self, test_df: pl.DataFrame) -> None: ...
def test_alpha30(self, test_df: pl.DataFrame) -> None: ...
def test_alpha31(self, test_df: pl.DataFrame) -> None: ...
def test_alpha32(self, test_df: pl.DataFrame) -> None: ...
def test_alpha33(self, test_df: pl.DataFrame) -> None: ...
def test_alpha34(self, test_df: pl.DataFrame) -> None: ...
def test_alpha35(self, test_df: pl.DataFrame) -> None: ...
def test_alpha36(self, test_df: pl.DataFrame) -> None: ...
def test_alpha37(self, test_df: pl.DataFrame) -> None: ...
def test_alpha38(self, test_df: pl.DataFrame) -> None: ...
def test_alpha39(self, test_df: pl.DataFrame) -> None: ...
def test_alpha40(self, test_df: pl.DataFrame) -> None: ...
def test_alpha41(self, test_df: pl.DataFrame) -> None: ...
def test_alpha42(self, test_df: pl.DataFrame) -> None: ...
def test_alpha43(self, test_df: pl.DataFrame) -> None: ...
def test_alpha44(self, test_df: pl.DataFrame) -> None: ...
def test_alpha45(self, test_df: pl.DataFrame) -> None: ...
def test_alpha46(self, test_df: pl.DataFrame) -> None: ...
def test_alpha47(self, test_df: pl.DataFrame) -> None: ...
def test_alpha48(self, test_df: pl.DataFrame) -> None: ...
def test_alpha49(self, test_df: pl.DataFrame) -> None: ...
def test_alpha50(self, test_df: pl.DataFrame) -> None: ...
def test_alpha51(self, test_df: pl.DataFrame) -> None: ...
def test_alpha52(self, test_df: pl.DataFrame) -> None: ...
def test_alpha53(self, test_df: pl.DataFrame) -> None: ...
def test_alpha54(self, test_df: pl.DataFrame) -> None: ...
def test_alpha55(self, test_df: pl.DataFrame) -> None: ...
def test_alpha57(self, test_df: pl.DataFrame) -> None: ...
def test_alpha58(self, test_df: pl.DataFrame) -> None: ...
def test_alpha59(self, test_df: pl.DataFrame) -> None: ...
def test_alpha60(self, test_df: pl.DataFrame) -> None: ...
def test_alpha61(self, test_df: pl.DataFrame) -> None: ...
def test_alpha62(self, test_df: pl.DataFrame) -> None: ...
def test_alpha63(self, test_df: pl.DataFrame) -> None: ...
def test_alpha64(self, test_df: pl.DataFrame) -> None: ...
def test_alpha65(self, test_df: pl.DataFrame) -> None: ...
def test_alpha66(self, test_df: pl.DataFrame) -> None: ...
def test_alpha67(self, test_df: pl.DataFrame) -> None: ...
def test_alpha68(self, test_df: pl.DataFrame) -> None: ...
def test_alpha69(self, test_df: pl.DataFrame) -> None: ...
def test_alpha70(self, test_df: pl.DataFrame) -> None: ...
def test_alpha71(self, test_df: pl.DataFrame) -> None: ...
def test_alpha72(self, test_df: pl.DataFrame) -> None: ...
def test_alpha73(self, test_df: pl.DataFrame) -> None: ...
def test_alpha74(self, test_df: pl.DataFrame) -> None: ...
def test_alpha75(self, test_df: pl.DataFrame) -> None: ...
def test_alpha76(self, test_df: pl.DataFrame) -> None: ...
def test_alpha77(self, test_df: pl.DataFrame) -> None: ...
def test_alpha78(self, test_df: pl.DataFrame) -> None: ...
def test_alpha79(self, test_df: pl.DataFrame) -> None: ...
def test_alpha80(self, test_df: pl.DataFrame) -> None: ...
def test_alpha81(self, test_df: pl.DataFrame) -> None: ...
def test_alpha82(self, test_df: pl.DataFrame) -> None: ...
def test_alpha83(self, test_df: pl.DataFrame) -> None: ...
def test_alpha84(self, test_df: pl.DataFrame) -> None: ...
def test_alpha85(self, test_df: pl.DataFrame) -> None: ...
def test_alpha86(self, test_df: pl.DataFrame) -> None: ...
def test_alpha87(self, test_df: pl.DataFrame) -> None: ...
def test_alpha88(self, test_df: pl.DataFrame) -> None: ...
def test_alpha89(self, test_df: pl.DataFrame) -> None: ...
def test_alpha90(self, test_df: pl.DataFrame) -> None: ...
def test_alpha91(self, test_df: pl.DataFrame) -> None: ...
def test_alpha92(self, test_df: pl.DataFrame) -> None: ...
def test_alpha93(self, test_df: pl.DataFrame) -> None: ...
def test_alpha94(self, test_df: pl.DataFrame) -> None: ...
def test_alpha95(self, test_df: pl.DataFrame) -> None: ...
def test_alpha96(self, test_df: pl.DataFrame) -> None: ...
def test_alpha97(self, test_df: pl.DataFrame) -> None: ...
def test_alpha98(self, test_df: pl.DataFrame) -> None: ...
def test_alpha99(self, test_df: pl.DataFrame) -> None: ...
def test_alpha100(self, test_df: pl.DataFrame) -> None: ...
def test_alpha101(self, test_df: pl.DataFrame) -> None: ...
# Task:
Write a Python test method `test_alpha6` in test class `TestAlpha101` to test Alpha#6.
Module under test: datetime, vnpy.alpha.dataset.utility | def test_alpha6(self, test_df: pl.DataFrame) -> None:
"""Test Alpha#6"""
expr = "(-1) * ts_corr(open, volume, 10)"
result = calculate_by_expression(test_df, expr)
assert "data" in result.columns | test | 1 | {"function_name": "test_alpha6", "class_name": "TestAlpha101", "qualname": "TestAlpha101.test_alpha6", "file_path": "tests/test_alpha101.py", "repo_id": "vnpy/vnpy", "loc": 5, "tested_modules": ["datetime", "vnpy.alpha.dataset.utility"], "has_docstring": true, "runnable_level": "project_runnable"} |
vllm-project/vllm:vllm/model_executor/layers/batch_invariant.py:rms_norm | # Context:
import torch
def _matmul_launch_metadata(grid: Callable[..., Any], kernel: Any, args: dict[str, Any]) -> dict[str, Any]: ...
def _compute_pid(tile_id, num_pid_in_group, num_pid_m, GROUP_SIZE_M, NUM_SMS): ...
def matmul_kernel_persistent(a_ptr, b_ptr, c_ptr, bias_ptr, M, N, K, stride_am, stride_ak, stride_bk, stride_bn, stride_cm, stride_cn, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M: tl.constexpr, NUM_SMS: tl.constexpr, A_LARGE: tl.constexpr, B_LARGE: tl.constexpr, C_LARGE: tl.constexpr, HAS_BIAS: tl.constexpr): ...
def matmul_persistent(a: torch.Tensor, b: torch.Tensor, bias: torch.Tensor | None): ...
def bmm_kernel(a_ptr, b_ptr, c_ptr, B, M, N, K, stride_ab, stride_am, stride_ak, stride_bb, stride_bk, stride_bn, stride_cb, stride_cm, stride_cn, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, A_LARGE: tl.constexpr, B_LARGE: tl.constexpr, C_LARGE: tl.constexpr): ...
def _log_softmax_kernel(input_ptr, output_ptr, input_row_stride, output_row_stride, n_cols, BLOCK_SIZE: tl.constexpr): ...
def log_softmax(input: torch.Tensor, dim: int) -> torch.Tensor: ...
def mean_kernel(input_ptr, output_ptr, input_stride0, input_stride1, input_stride2, output_stride0, output_stride1, M, N, K, BLOCK_SIZE: tl.constexpr): ...
def mean_dim(input: torch.Tensor, dim: int, keepdim: bool, dtype: torch.dtype | None) -> torch.Tensor: ...
def mm_batch_invariant(a, b): ...
def matmul_batch_invariant(a, b, out): ...
def bmm_batch_invariant(a, b, out): ...
def addmm_batch_invariant(bias, a, b): ...
def _log_softmax_batch_invariant(input, dim, _half_to_float): ...
def softmax_batch_invariant(input, dim, dtype): ...
def mean_batch_invariant(input, dim, keepdim, dtype: torch.dtype | None): ...
def _rms_norm_kernel(input_ptr, weight_ptr, output_ptr, input_row_stride, output_row_stride, n_cols, eps, BLOCK_SIZE: tl.constexpr): ...
def rms_norm_batch_invariant(input: torch.Tensor, weight: torch.Tensor, eps: float) -> torch.Tensor: ...
def linear_batch_invariant(input, weight, bias): ...
def enable_batch_invariant_mode(): ...
def _read_vllm_batch_invariant() -> bool: ...
def vllm_is_batch_invariant() -> bool: ...
def override_envs_for_invariance(attention_backend: AttentionBackendEnum | None): ...
def init_batch_invariance(attention_backend: AttentionBackendEnum | None): ...
# Task:
Write a Python function `rms_norm` to compute RMS normalization using Triton kernel.
Parameters: input: torch.Tensor, weight: torch.Tensor, eps: float
Returns: torch.Tensor | def rms_norm(
input: torch.Tensor, weight: torch.Tensor, eps: float = 1e-6
) -> torch.Tensor:
"""
Compute RMS normalization using Triton kernel.
RMS Norm normalizes the input by the root mean square and scales by weight:
output = input / sqrt(mean(input^2) + eps) * weight
Args:
input: Input tensor of shape (..., hidden_size)
weight: Weight tensor of shape (hidden_size,)
eps: Small constant for numerical stability
Returns:
Tensor with RMS normalization applied along the last dimension
"""
assert weight.dim() == 1, "Weight must be 1-dimensional"
assert input.shape[-1] == weight.shape[0], (
f"Input last dimension ({input.shape[-1]}) must match "
f"weight dimension ({weight.shape[0]})"
)
# Flatten all dimensions except the last one
original_shape = input.shape
input_2d = input.reshape(-1, input.shape[-1])
input_2d = input_2d.contiguous()
weight = weight.contiguous()
n_rows, n_cols = input_2d.shape
output = torch.empty_like(input_2d)
BLOCK_SIZE = 1024
grid = (n_rows,)
_rms_norm_kernel[grid](
input_2d,
weight,
output,
input_2d.stride(0),
output.stride(0),
n_cols,
eps,
BLOCK_SIZE=BLOCK_SIZE,
)
return output.reshape(original_shape) | function_simple | 1 | {"cognitive_complexity": 0, "loc": 45, "code_loc": 24, "docstring_loc": 14, "function_name": "rms_norm", "class_name": null, "qualname": "rms_norm", "file_path": "vllm/model_executor/layers/batch_invariant.py", "repo_id": "vllm-project/vllm", "has_docstring": true, "runnable_level": "file_runnable"} |
docling-project/docling:tests/test_backend_latex.py:test_latex_table_environment | # Context:
from io import BytesIO
from docling.backend.latex_backend import LatexDocumentBackend
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import ConversionResult, DoclingDocument, InputDocument
from docling.datamodel.document import InputDocument
def test_latex_basic_conversion(): ...
def test_latex_preamble_filter(): ...
def test_latex_table_parsing(): ...
def test_latex_math_parsing(): ...
def test_latex_escaped_chars(): ...
def test_latex_unknown_macro_fallback(): ...
def test_latex_abstract_environment(): ...
def test_latex_list_itemize(): ...
def test_latex_list_enumerate(): ...
def test_latex_description_list(): ...
def test_latex_verbatim_environment(): ...
def test_latex_lstlisting_environment(): ...
def test_latex_bibliography(): ...
def test_latex_caption(): ...
def test_latex_footnote(): ...
def test_latex_citet_macro(): ...
def test_latex_list_nested(): ...
def test_latex_label(): ...
def test_latex_includegraphics(): ...
def test_latex_includegraphics_missing_image(): ...
def test_latex_citations(): ...
def test_latex_title_macro(): ...
def test_latex_various_math_environments(): ...
def test_latex_heading_levels(): ...
def test_latex_text_formatting(): ...
def test_latex_figure_environment(): ...
def test_latex_is_valid(): ...
def test_latex_supports_pagination(): ...
def test_latex_supported_formats(): ...
def test_latex_file_path_loading(tmp_path): ...
def test_latex_empty_table(): ...
def test_latex_marginpar(): ...
def test_latex_no_document_env(): ...
def test_latex_starred_table_and_figure(): ...
def test_latex_newline_macro(): ...
def test_latex_filecontents_ignored(): ...
def test_latex_tilde_macro(): ...
def test_latex_math_environment(): ...
def test_latex_displaymath_brackets(): ...
def test_latex_citet_macro_2(): ...
def latex_paths() -> list[Path]: ...
def get_latex_converter(): ...
def test_e2e_latex_conversions(latex_paths): ...
def test_latex_document_with_leading_comments(): ...
def test_latex_custom_macro_with_backslash(): ...
def test_latex_figure_with_caption(): ...
def extract_macro_name_old(raw_string): ...
def extract_macro_name_new(raw_string): ...
def test_macro_extraction(): ...
def test_edge_cases(): ...
def test_debug_macro_extraction(): ...
# Task:
Write a Python test function `test_latex_table_environment` to test table environment (wrapper around tabular).
Module under test: io, pathlib, docling_core.types.doc | def test_latex_table_environment():
"""Test table environment (wrapper around tabular)"""
latex_content = b"""
\\documentclass{article}
\\begin{document}
\\begin{table}
\\begin{tabular}{cc}
A & B \\\\
C & D
\\end{tabular}
\\caption{Sample table}
\\end{table}
\\end{document}
"""
in_doc = InputDocument(
path_or_stream=BytesIO(latex_content),
format=InputFormat.LATEX,
backend=LatexDocumentBackend,
filename="test.tex",
)
backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content))
doc = backend.convert()
assert len(doc.tables) >= 1 | test | 1 | {"function_name": "test_latex_table_environment", "class_name": null, "qualname": "test_latex_table_environment", "file_path": "tests/test_backend_latex.py", "repo_id": "docling-project/docling", "loc": 24, "tested_modules": ["io", "pathlib", "docling_core.types.doc", "docling.backend.latex_backend", "docling.datamodel.base_models"], "has_docstring": true, "runnable_level": "project_runnable"} |
huggingface/transformers:src/transformers/models/jamba/modular_jamba.py:JambaSparseMoeBlock:class_doc | Write a class-level docstring for `JambaSparseMoeBlock` (inherits from nn.Module) which has methods: `__init__`, `route_tokens_to_experts`, `forward`. | This implementation is
strictly equivalent to standard MoE with full capacity (no
dropped tokens). It's faster since it formulates MoE operations
in terms of block-sparse operations to accommodate imbalanced
assignments of tokens to experts, whereas standard MoE either
(1) drop tokens at the cost of reduced performance or (2) set
capacity factor to number of experts and thus waste computation
and memory on padding. | documentation | 0 | {"doc_type": "class", "class_name": "JambaSparseMoeBlock", "file_path": "src/transformers/models/jamba/modular_jamba.py", "repo_id": "huggingface/transformers", "char_length": 418, "methods": ["__init__", "route_tokens_to_experts", "forward"]} |
huggingface/transformers:tests/models/doge/test_modeling_doge.py:DogeModelTest.test_doge_sequence_classification_model | # Context:
from transformers.testing_utils import (
require_torch,
require_torch_accelerator,
slow,
torch_device,
)
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from transformers import (
DogeForCausalLM,
DogeForSequenceClassification,
DogeModel,
)
class DogeModelTester: ...
class DogeIntegrationTest(unittest.TestCase): ...
class DogeModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
all_generative_model_classes = (DogeForCausalLM,) if is_torch_available() else ()
pipeline_model_mapping = (
has_attentions = False
model_split_percents = [0.5, 0.7, 0.8]
_torch_compile_train_cls = DogeForCausalLM if is_torch_available() else None
def setUp(self): ...
def test_config(self): ...
def test_model(self): ...
def test_doge_sequence_classification_model_for_single_label(self): ...
def test_doge_sequence_classification_model_for_multi_label(self): ...
def test_save_load_fast_init_from_base(self): ...
def test_tp_plan_matches_params(self): ...
# Task:
Write a Python test method `test_doge_sequence_classification_model` in test class `DogeModelTest` to verify the behavior of `doge_sequence_classification_model`.
Module under test: transformers, transformers.testing_utils, generation.test_utils | def test_doge_sequence_classification_model(self):
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
model = DogeForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) | test | 0 | {"function_name": "test_doge_sequence_classification_model", "class_name": "DogeModelTest", "qualname": "DogeModelTest.test_doge_sequence_classification_model", "file_path": "tests/models/doge/test_modeling_doge.py", "repo_id": "huggingface/transformers", "loc": 11, "tested_modules": ["transformers", "transformers.testing_utils", "generation.test_utils", "test_configuration_common", "test_modeling_common"], "has_docstring": false, "runnable_level": "project_runnable"} |
geekcomputers/Python:Tic-Tac-Toe Games/tic-tac-toe3.py:module_doc | Write a module-level docstring for the Python module `tic-tac-toe3` which contains function `check_winner`, function `is_board_full`, function `minimax`, function `best_move`, function `make_move`. | Tic-Tac-Toe with AI (Minimax) using CustomTkinter.
Player = "X", AI = "O". Click a button to play.
>>> check_winner([['X','X','X'],[' ',' ',' '],[' ',' ',' ']], 'X')
True
>>> check_winner([['X','O','X'],['O','O','O'],['X',' ',' ']], 'O')
True
>>> check_winner([['X','O','X'],['O','X','O'],['O','X','O']], 'X')
False | documentation | 1 | {"doc_type": "module", "module_name": "tic-tac-toe3", "file_path": "Tic-Tac-Toe Games/tic-tac-toe3.py", "repo_id": "geekcomputers/Python", "char_length": 317} |
streamlit/streamlit:lib/tests/streamlit/web/server/starlette/starlette_app_test.py:test_websocket_ignores_debug_disconnect_in_production | # Context:
from pathlib import Path
import pytest
from starlette.testclient import TestClient
from streamlit import file_util
from streamlit.proto.BackMsg_pb2 import BackMsg
from streamlit.web.server.starlette.starlette_app import (
_RESERVED_ROUTE_PREFIXES,
App,
create_starlette_app,
)
from tests.testutil import patch_config_options
class _DummyStatsManager: ...
class _DummyComponentRegistry: ...
class _DummyBidiComponentRegistry: ...
class _DummyRuntime: ...
def starlette_client(tmp_path: Path) -> Iterator[tuple[TestClient, _DummyRuntime]]: ...
def test_health_endpoint(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_metrics_endpoint(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_metrics_endpoint_filters_single_family(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_metrics_endpoint_filters_multiple_families(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_metrics_endpoint_unknown_family_returns_eof(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_metrics_endpoint_protobuf(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_media_endpoint_serves_file(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_media_endpoint_download_headers(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_media_endpoint_supports_range_requests(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_media_endpoint_rejects_invalid_ranges(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_media_endpoint_supports_head_requests(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_media_endpoint_no_content_encoding_for_video(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_media_endpoint_no_content_encoding_for_audio(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_media_endpoint_no_content_encoding_for_range_requests(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_upload_put_adds_file(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_upload_put_enforces_max_size(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_component_endpoint(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_component_endpoint_sets_content_type(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_bidi_component_endpoint(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_script_health_endpoint(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_websocket_rejects_text_frames(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_upload_delete_removes_file(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_upload_rejects_without_xsrf_token(tmp_path: Path) -> None: ...
def test_upload_accepts_with_valid_xsrf_token(tmp_path: Path) -> None: ...
def test_host_config_excludes_localhost_when_not_dev(tmp_path: Path) -> None: ...
def test_host_config_includes_localhost_in_dev(tmp_path: Path) -> None: ...
def test_host_config_custom_allowed_origins(tmp_path: Path) -> None: ...
def test_host_config_empty_allowed_origins(tmp_path: Path) -> None: ...
def test_host_config_custom_origins_with_dev_mode(tmp_path: Path) -> None: ...
def test_static_files_skipped_in_dev_mode(tmp_path: Path) -> None: ...
def test_websocket_auth_cookie_yields_user_info(tmp_path: Path) -> None: ...
def test_websocket_accepts_existing_session(tmp_path: Path) -> None: ...
def test_static_files_fall_back_to_index(tmp_path: Path) -> None: ...
def test_static_files_apply_cache_headers(tmp_path: Path) -> None: ...
def test_websocket_rejects_auth_cookie_without_valid_xsrf(tmp_path: Path) -> None: ...
def test_websocket_ignores_debug_shutdown_in_production(tmp_path: Path) -> None: ...
def test_websocket_allows_debug_shutdown_in_dev_mode(tmp_path: Path) -> None: ...
class TestAppInit: ...
class TestAppRouteValidation: ...
class TestAppLifespan: ...
class TestAppServerModeTracking: ...
class TestAppScriptPathResolution: ...
class TestAppExports: ...
def simple_script(tmp_path: Path) -> Path: ...
def reset_runtime() -> Iterator[None]: ...
class TestAppAsgi: ...
# Task:
Write a Python test function `test_websocket_ignores_debug_disconnect_in_production` to test that debug_disconnect_websocket is ignored in production mode.
Module under test: __future__, contextlib, http | def test_websocket_ignores_debug_disconnect_in_production(tmp_path: Path) -> None:
"""Test that debug_disconnect_websocket is ignored in production mode."""
component_dir = tmp_path / "component"
component_dir.mkdir()
(component_dir / "index.html").write_text("component")
static_dir = tmp_path / "static"
static_dir.mkdir()
monkeypatch = pytest.MonkeyPatch()
monkeypatch.setattr(file_util, "get_static_dir", lambda: str(static_dir))
runtime = _DummyRuntime(component_dir)
app = create_starlette_app(runtime)
client = TestClient(app)
with client.websocket_connect("/_stcore/stream") as websocket:
# Send a debug_disconnect_websocket message
back_msg = BackMsg()
back_msg.debug_disconnect_websocket = True
websocket.send_bytes(back_msg.SerializeToString())
# Send a valid rerun message to verify connection is still alive
back_msg2 = BackMsg()
back_msg2.rerun_script.query_string = ""
websocket.send_bytes(back_msg2.SerializeToString())
# Close gracefully
websocket.close(code=1000)
# The runtime should have received the rerun message (connection wasn't closed)
assert runtime.last_backmsg is not None
_session_id, msg = runtime.last_backmsg
assert msg.WhichOneof("type") == "rerun_script"
monkeypatch.undo() | test | 1 | {"function_name": "test_websocket_ignores_debug_disconnect_in_production", "class_name": null, "qualname": "test_websocket_ignores_debug_disconnect_in_production", "file_path": "lib/tests/streamlit/web/server/starlette/starlette_app_test.py", "repo_id": "streamlit/streamlit", "loc": 36, "tested_modules": ["__future__", "contextlib", "http", "pathlib", "typing"], "has_docstring": true, "runnable_level": "project_runnable"} |
commaai/openpilot:system/ui/lib/egl.py:init_egl | # Context:
import cffi
from openpilot.common.swaglog import cloudlog
class EGLImage: ...
class EGLState: ...
def create_egl_image(width: int, height: int, stride: int, fd: int, uv_offset: int) -> EGLImage | None: ...
def destroy_egl_image(egl_image: EGLImage) -> None: ...
def bind_egl_image_to_texture(texture_id: int, egl_image: EGLImage) -> None: ...
# Task:
Write a Python function `init_egl` to initialize EGL and load necessary functions.
Returns: bool | def init_egl() -> bool:
"""Initialize EGL and load necessary functions"""
global _egl
# Don't re-initialize if already done
if _egl.initialized:
return True
try:
_egl.ffi = cffi.FFI()
_egl.ffi.cdef("""
typedef int EGLint;
typedef unsigned int EGLBoolean;
typedef unsigned int EGLenum;
typedef unsigned int GLenum;
typedef void *EGLContext;
typedef void *EGLDisplay;
typedef void *EGLClientBuffer;
typedef void *EGLImageKHR;
typedef void *GLeglImageOES;
EGLDisplay eglGetCurrentDisplay(void);
EGLint eglGetError(void);
EGLImageKHR eglCreateImageKHR(EGLDisplay dpy, EGLContext ctx,
EGLenum target, EGLClientBuffer buffer,
const EGLint *attrib_list);
EGLBoolean eglDestroyImageKHR(EGLDisplay dpy, EGLImageKHR image);
void glEGLImageTargetTexture2DOES(GLenum target, GLeglImageOES image);
void glBindTexture(GLenum target, unsigned int texture);
void glActiveTexture(GLenum texture);
""")
# Load libraries
_egl.egl_lib = _egl.ffi.dlopen("libEGL.so")
_egl.gles_lib = _egl.ffi.dlopen("libGLESv2.so")
# Cast NULL pointers
_egl.NO_CONTEXT = _egl.ffi.cast("void *", 0)
_egl.NO_DISPLAY = _egl.ffi.cast("void *", 0)
_egl.NO_IMAGE_KHR = _egl.ffi.cast("void *", 0)
# Bind functions
_egl.get_current_display = _egl.egl_lib.eglGetCurrentDisplay
_egl.create_image_khr = _egl.egl_lib.eglCreateImageKHR
_egl.destroy_image_khr = _egl.egl_lib.eglDestroyImageKHR
_egl.image_target_texture = _egl.gles_lib.glEGLImageTargetTexture2DOES
_egl.get_error = _egl.egl_lib.eglGetError
_egl.bind_texture = _egl.gles_lib.glBindTexture
_egl.active_texture = _egl.gles_lib.glActiveTexture
# Initialize EGL display once here
_egl.display = _egl.get_current_display()
if _egl.display == _egl.NO_DISPLAY:
raise RuntimeError("Failed to get EGL display")
_egl.initialized = True
return True
except Exception as e:
cloudlog.exception(f"EGL initialization failed: {e}")
_egl.initialized = False
return False | function_simple | 0 | {"cognitive_complexity": 4, "loc": 61, "code_loc": 46, "docstring_loc": 1, "function_name": "init_egl", "class_name": null, "qualname": "init_egl", "file_path": "system/ui/lib/egl.py", "repo_id": "commaai/openpilot", "has_docstring": true, "runnable_level": "project_runnable"} |
mem0ai/mem0:mem0/vector_stores/cassandra.py:CassandraDB.list | # Context:
import json
from typing import Any, Dict, List, Optional
class OutputData(BaseModel): ...
class CassandraDB(VectorStoreBase):
def __init__(
self,
contact_points: List[str],
port: int = 9042,
username: Optional[str] = None,
password: Optional[str] = None,
keyspace: str = "mem0",
collection_name: str = "memories",
embedding_model_dims: int = 1536,
secure_connect_bundle: Optional[str] = None,
protocol_version: int = 4,
load_balancing_policy: Optional[Any] = None,
):
"""
Initialize the Apache Cassandra vector store.
Args:
contact_points (List[str]): List of contact point addresses (e.g., ['127.0.0.1'])
port (int): Cassandra port (default: 9042)
username (str, optional): Database username
password (str, optional): Database password
keyspace (str): Keyspace name (default: "mem0")
collection_name (str): Table name (default: "memories")
embedding_model_dims (int): Dimension of the embedding vector (default: 1536)
secure_connect_bundle (str, optional): Path to secure connect bundle for Astra DB
protocol_version (int): CQL protocol version (default: 4)
load_balancing_policy (Any, optional): Custom load balancing policy
"""
self.contact_points = contact_points
self.port = port
self.username = username
self.password = password
self.keyspace = keyspace
self.collection_name = collection_name
self.embedding_model_dims = embedding_model_dims
self.secure_connect_bundle = secure_connect_bundle
self.protocol_version = protocol_version
self.load_balancing_policy = load_balancing_policy
# Initialize connection
self.cluster = None
self.session = None
self._setup_connection()
# Create keyspace and table if they don't exist
self._create_keyspace()
self._create_table()
def _setup_connection(self): ...
def _create_keyspace(self): ...
def _create_table(self): ...
def create_col(self, name: str, vector_size: int, distance: str): ...
def insert(self, vectors: List[List[float]], payloads: Optional[List[Dict]], ids: Optional[List[str]]): ...
def search(self, query: str, vectors: List[float], limit: int, filters: Optional[Dict]) -> List[OutputData]: ...
def delete(self, vector_id: str): ...
def update(self, vector_id: str, vector: Optional[List[float]], payload: Optional[Dict]): ...
def get(self, vector_id: str) -> Optional[OutputData]: ...
def list_cols(self) -> List[str]: ...
def delete_col(self): ...
def col_info(self) -> Dict[str, Any]: ...
def reset(self): ...
def __del__(self): ...
# Task:
Write a Python method `list` for the class `CassandraDB` to list all vectors in the collection.
Parameters: filters: Optional[Dict], limit: int
Returns: List[List[OutputData]] | def list(
self,
filters: Optional[Dict] = None,
limit: int = 100
) -> List[List[OutputData]]:
"""
List all vectors in the collection.
Args:
filters (Dict, optional): Filters to apply
limit (int): Number of vectors to return
Returns:
List[List[OutputData]]: List of vectors
"""
try:
query = f"""
SELECT id, vector, payload
FROM {self.keyspace}.{self.collection_name}
LIMIT {limit}
"""
rows = self.session.execute(query)
results = []
for row in rows:
# Apply filters if provided
if filters:
try:
payload = json.loads(row.payload) if row.payload else {}
match = all(payload.get(k) == v for k, v in filters.items())
if not match:
continue
except json.JSONDecodeError:
continue
results.append(
OutputData(
id=row.id,
score=None,
payload=json.loads(row.payload) if row.payload else {}
)
)
return [results]
except Exception as e:
logger.error(f"Failed to list vectors: {e}")
return [[]] | function_complex | 1 | {"cognitive_complexity": 17, "loc": 47, "code_loc": 28, "docstring_loc": 10, "function_name": "list", "class_name": "CassandraDB", "qualname": "CassandraDB.list", "file_path": "mem0/vector_stores/cassandra.py", "repo_id": "mem0ai/mem0", "has_docstring": true, "runnable_level": "file_runnable"} |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/agent.py:AgentExecutor.input_keys | # Context:
class BaseSingleActionAgent(BaseModel): ...
class BaseMultiActionAgent(BaseModel): ...
class AgentOutputParser(BaseOutputParser[AgentAction | AgentFinish]): ...
class MultiActionAgentOutputParser(BaseOutputParser[list[AgentAction] | AgentFinish]): ...
class RunnableAgent(BaseSingleActionAgent): ...
class RunnableMultiActionAgent(BaseMultiActionAgent): ...
class LLMSingleActionAgent(BaseSingleActionAgent): ...
class Agent(BaseSingleActionAgent): ...
class ExceptionTool(BaseTool): ...
class AgentExecutor(Chain):
def from_agent_and_tools(cls, agent: BaseSingleActionAgent | BaseMultiActionAgent | Runnable, tools: Sequence[BaseTool], callbacks: Callbacks, **kwargs) -> AgentExecutor: ...
def validate_tools(self) -> Self: ...
def validate_runnable_agent(cls, values: dict) -> Any: ...
def _action_agent(self) -> BaseSingleActionAgent | BaseMultiActionAgent: ...
def save(self, file_path: Path | str) -> None: ...
def save_agent(self, file_path: Path | str) -> None: ...
def iter(self, inputs: Any, callbacks: Callbacks, include_run_info: bool, async_: bool) -> AgentExecutorIterator: ...
def output_keys(self) -> list[str]: ...
def lookup_tool(self, name: str) -> BaseTool: ...
def _should_continue(self, iterations: int, time_elapsed: float) -> bool: ...
def _return(self, output: AgentFinish, intermediate_steps: list, run_manager: CallbackManagerForChainRun | None) -> dict[str, Any]: ...
async def _areturn(self, output: AgentFinish, intermediate_steps: list, run_manager: AsyncCallbackManagerForChainRun | None) -> dict[str, Any]: ...
def _consume_next_step(self, values: NextStepOutput) -> AgentFinish | list[tuple[AgentAction, str]]: ...
def _take_next_step(self, name_to_tool_map: dict[str, BaseTool], color_mapping: dict[str, str], inputs: dict[str, str], intermediate_steps: list[tuple[AgentAction, str]], run_manager: CallbackManagerForChainRun | None) -> AgentFinish | list[tuple[AgentAction, str]]: ...
def _iter_next_step(self, name_to_tool_map: dict[str, BaseTool], color_mapping: dict[str, str], inputs: dict[str, str], intermediate_steps: list[tuple[AgentAction, str]], run_manager: CallbackManagerForChainRun | None) -> Iterator[AgentFinish | AgentAction | AgentStep]: ...
def _perform_agent_action(self, name_to_tool_map: dict[str, BaseTool], color_mapping: dict[str, str], agent_action: AgentAction, run_manager: CallbackManagerForChainRun | None) -> AgentStep: ...
async def _atake_next_step(self, name_to_tool_map: dict[str, BaseTool], color_mapping: dict[str, str], inputs: dict[str, str], intermediate_steps: list[tuple[AgentAction, str]], run_manager: AsyncCallbackManagerForChainRun | None) -> AgentFinish | list[tuple[AgentAction, str]]: ...
async def _aiter_next_step(self, name_to_tool_map: dict[str, BaseTool], color_mapping: dict[str, str], inputs: dict[str, str], intermediate_steps: list[tuple[AgentAction, str]], run_manager: AsyncCallbackManagerForChainRun | None) -> AsyncIterator[AgentFinish | AgentAction | AgentStep]: ...
async def _aperform_agent_action(self, name_to_tool_map: dict[str, BaseTool], color_mapping: dict[str, str], agent_action: AgentAction, run_manager: AsyncCallbackManagerForChainRun | None) -> AgentStep: ...
def _call(self, inputs: dict[str, str], run_manager: CallbackManagerForChainRun | None) -> dict[str, Any]: ...
async def _acall(self, inputs: dict[str, str], run_manager: AsyncCallbackManagerForChainRun | None) -> dict[str, str]: ...
def _get_tool_return(self, next_step_output: tuple[AgentAction, str]) -> AgentFinish | None: ...
def _prepare_intermediate_steps(self, intermediate_steps: list[tuple[AgentAction, str]]) -> list[tuple[AgentAction, str]]: ...
def stream(self, input: dict[str, Any] | Any, config: RunnableConfig | None, **kwargs) -> Iterator[AddableDict]: ...
async def astream(self, input: dict[str, Any] | Any, config: RunnableConfig | None, **kwargs) -> AsyncIterator[AddableDict]: ...
# Task:
Write a Python method `input_keys` for the class `AgentExecutor` to return the input keys.
Returns: list[str] | def input_keys(self) -> list[str]:
"""Return the input keys."""
return self._action_agent.input_keys | function_simple | 1 | {"cognitive_complexity": 0, "loc": 3, "code_loc": 1, "docstring_loc": 1, "function_name": "input_keys", "class_name": "AgentExecutor", "qualname": "AgentExecutor.input_keys", "file_path": "libs/langchain/langchain_classic/agents/agent.py", "repo_id": "langchain-ai/langchain", "has_docstring": true, "runnable_level": "class_runnable"} |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-alibabacloud-mysql/tests/test_alibabacloud_mysql.py:test_initialize_success | # Context:
from unittest.mock import Mock, patch
from llama_index.vector_stores.alibabacloud_mysql import AlibabaCloudMySQLVectorStore
def test_class_name() -> None: ...
def test_client_property() -> None: ...
def test_create_engine() -> None: ...
def test_get_connection_context_manager() -> None: ...
def test_check_vector_support_success() -> None: ...
def test_check_vector_support_no_vector_functions() -> None: ...
def test_check_vector_support_old_release_date() -> None: ...
def test_check_vector_support_no_release_date_variable() -> None: ...
def test_check_vector_support_function_error() -> None: ...
def test_initialize_invalid_distance_method() -> None: ...
def test_initialize_without_setup() -> None: ...
def test_node_to_table_row() -> None: ...
def test_to_mysql_operator() -> None: ...
def test_build_filter_clause() -> None: ...
def test_filters_to_where_clause() -> None: ...
def test_db_rows_to_query_result() -> None: ...
def test_query_unsupported_mode() -> None: ...
def test_close() -> None: ...
def test_from_params() -> None: ...
def test_get_nodes() -> None: ...
def test_add() -> None: ...
def test_query() -> None: ...
def test_query_with_filters() -> None: ...
def test_delete() -> None: ...
def test_delete_nodes() -> None: ...
def test_count() -> None: ...
def test_drop() -> None: ...
def test_clear() -> None: ...
def test_create_table_if_not_exists() -> None: ...
def test_validate_identifier() -> None: ...
def test_validate_positive_int() -> None: ...
def test_validate_table_name() -> None: ...
def test_init_validation_table_name() -> None: ...
def test_init_validation_embed_dim() -> None: ...
def test_init_validation_default_m() -> None: ...
# Task:
Write a Python test function `test_initialize_success` to test _initialize method success case.
Module under test: llama_index.core.schema, llama_index.core.vector_stores.types, llama_index.vector_stores.alibabacloud_mysql | def test_initialize_success() -> None:
"""Test _initialize method success case."""
with patch.object(
AlibabaCloudMySQLVectorStore, "_check_vector_support"
) as mock_check:
with patch.object(
AlibabaCloudMySQLVectorStore, "_create_table_if_not_exists"
) as mock_create_table:
with patch.object(AlibabaCloudMySQLVectorStore, "_connect"):
store = AlibabaCloudMySQLVectorStore(
table_name="test_table",
host="localhost",
port=3306,
user="test_user",
password="test_password",
database="test_db",
perform_setup=True,
)
# Verify methods were called
mock_check.assert_called_once()
mock_create_table.assert_called_once()
assert store._is_initialized is True | test | 1 | {"function_name": "test_initialize_success", "class_name": null, "qualname": "test_initialize_success", "file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-alibabacloud-mysql/tests/test_alibabacloud_mysql.py", "repo_id": "run-llama/llama_index", "loc": 23, "tested_modules": ["llama_index.core.schema", "llama_index.core.vector_stores.types", "llama_index.vector_stores.alibabacloud_mysql", "llama_index.vector_stores.alibabacloud_mysql.base"], "has_docstring": true, "runnable_level": "project_runnable"} |
langchain-ai/langchain:libs/langchain/langchain_classic/chains/natbot/base.py:NatBotChain.from_default | # Context:
from typing import Any
class NatBotChain(Chain):
model_config = ConfigDict(
def _raise_deprecation(cls, values: dict) -> Any: ...
def from_llm(cls, llm: BaseLanguageModel, objective: str, **kwargs) -> NatBotChain: ...
def input_keys(self) -> list[str]: ...
def output_keys(self) -> list[str]: ...
def _call(self, inputs: dict[str, str], run_manager: CallbackManagerForChainRun | None) -> dict[str, str]: ...
def execute(self, url: str, browser_content: str) -> str: ...
def _chain_type(self) -> str: ...
# Task:
Write a Python method `from_default` for the class `NatBotChain` to load with default LLMChain.
Parameters: objective: str
Returns: NatBotChain | def from_default(cls, objective: str, **kwargs: Any) -> NatBotChain:
"""Load with default LLMChain."""
msg = (
"This method is no longer implemented. Please use from_llm."
"model = OpenAI(temperature=0.5, best_of=10, n=3, max_tokens=50)"
"For example, NatBotChain.from_llm(model, objective)"
)
raise NotImplementedError(msg) | function_simple | 1 | {"cognitive_complexity": 0, "loc": 8, "code_loc": 6, "docstring_loc": 1, "function_name": "from_default", "class_name": "NatBotChain", "qualname": "NatBotChain.from_default", "file_path": "libs/langchain/langchain_classic/chains/natbot/base.py", "repo_id": "langchain-ai/langchain", "has_docstring": true, "runnable_level": "file_runnable"} |
apache/airflow:providers/teradata/tests/unit/teradata/utils/test_bteq_util.py:TestBteqUtils.test_prepare_bteq_script_for_remote_execution | # Context:
from airflow.providers.teradata.utils.bteq_util import (
identify_os,
is_valid_encoding,
is_valid_file,
is_valid_remote_bteq_script_file,
prepare_bteq_script_for_local_execution,
prepare_bteq_script_for_remote_execution,
read_file,
transfer_file_sftp,
verify_bteq_installed,
verify_bteq_installed_remote,
)
class TestBteqUtils:
def test_identify_os_linux(self): ...
def test_identify_os_windows(self): ...
def test_identify_os_macos(self): ...
def test_identify_os_empty_response(self): ...
def test_verify_bteq_installed_success(self, mock_which): ...
def test_verify_bteq_installed_fail(self, mock_which): ...
def test_prepare_bteq_script_for_local_execution(self): ...
def test_verify_bteq_installed_remote_linux(self, mock_os): ...
def test_verify_bteq_installed_remote_windows(self, mock_os): ...
def test_verify_bteq_installed_remote_macos(self, mock_os): ...
def test_verify_bteq_installed_remote_macos_which_called_when_no_zsh(self, mock_os): ...
def test_verify_bteq_installed_remote_macos_which_fails_no_zsh(self, mock_os): ...
def test_verify_bteq_installed_remote_fail(self, mock_os): ...
def test_verify_bteq_installed_remote_success(self, mock_exec): ...
def test_transfer_file_sftp(self, mock_open_sftp): ...
def test_is_valid_file(self): ...
def test_is_valid_encoding(self): ...
def test_read_file_success(self): ...
def test_read_file_file_not_found(self): ...
def test_is_valid_remote_bteq_script_file_exists(self, mock_open_sftp): ...
def test_is_valid_remote_bteq_script_file_not_exists(self, mock_open_sftp): ...
def test_is_valid_remote_bteq_script_file_none_path(self): ...
# Task:
Write a Python test method `test_prepare_bteq_script_for_remote_execution` in test class `TestBteqUtils` to verify the behavior of `prepare_bteq_script_for_remote_execution`.
Module under test: __future__, airflow.providers.common.compat.sdk, airflow.providers.teradata.utils.bteq_util | def test_prepare_bteq_script_for_remote_execution(self):
conn = {"host": "myhost", "login": "user", "password": "pass"}
sql = "SELECT * FROM DUAL;"
script = prepare_bteq_script_for_remote_execution(conn, sql)
assert ".LOGON myhost/user,pass" in script
assert "SELECT * FROM DUAL;" in script
assert ".EXIT" in script | test | 1 | {"function_name": "test_prepare_bteq_script_for_remote_execution", "class_name": "TestBteqUtils", "qualname": "TestBteqUtils.test_prepare_bteq_script_for_remote_execution", "file_path": "providers/teradata/tests/unit/teradata/utils/test_bteq_util.py", "repo_id": "apache/airflow", "loc": 7, "tested_modules": ["__future__", "airflow.providers.common.compat.sdk", "airflow.providers.teradata.utils.bteq_util"], "has_docstring": false, "runnable_level": "project_runnable"} |
huggingface/transformers:src/transformers/models/glm4v/image_processing_glm4v.py:Glm4vImageProcessor.preprocess | # Context:
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
infer_channel_dimension_format,
is_scaled_image,
make_flat_list_of_images,
to_numpy_array,
valid_images,
validate_preprocess_arguments,
)
from ...utils import TensorType, logging
class Glm4vImageProcessorKwargs(ImagesKwargs): ...
def smart_resize(num_frames: int, height: int, width: int, temporal_factor: int, factor: int, min_pixels: int, max_pixels: int): ...
class Glm4vImageProcessor(BaseImageProcessor):
model_input_names = ["pixel_values", "image_grid_thw"]
valid_kwargs = Glm4vImageProcessorKwargs
def __init__(
self,
do_resize: bool = True,
size: dict[str, int] | None = None,
resample: PILImageResampling = PILImageResampling.BICUBIC,
do_rescale: bool = True,
rescale_factor: int | float = 1 / 255,
do_normalize: bool = True,
image_mean: float | list[float] | None = None,
image_std: float | list[float] | None = None,
do_convert_rgb: bool = True,
patch_size: int = 14,
temporal_patch_size: int = 2,
merge_size: int = 2,
**kwargs,
) -> None:
super().__init__(**kwargs)
if size is not None and ("shortest_edge" not in size or "longest_edge" not in size):
raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.")
elif size is None:
size = {"shortest_edge": 112 * 112, "longest_edge": 28 * 28 * 15000}
self.size = size
self.do_resize = do_resize
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
self.patch_size = patch_size
self.temporal_patch_size = temporal_patch_size
self.merge_size = merge_size
self.do_convert_rgb = do_convert_rgb
def _preprocess(self, images: ImageInput | VideoInput, do_resize: bool | None, size: dict[str, int] | None, resample: PILImageResampling | None, do_rescale: bool | None, rescale_factor: float | None, do_normalize: bool | None, image_mean: float | list[float] | None, image_std: float | list[float] | None, patch_size: int | None, temporal_patch_size: int | None, merge_size: int | None, do_convert_rgb: bool | None, data_format: ChannelDimension | None, input_data_format: str | ChannelDimension | None): ...
def get_number_of_image_patches(self, height: int, width: int, images_kwargs): ...
# Task:
Write a Python method `preprocess` for the class `Glm4vImageProcessor` to preprocess an image or batch of images into patch tensors and grid dimensions, returning a `BatchFeature`.
Parameters: images: ImageInput, do_resize: bool | None, size: dict[str, int] | None, resample: PILImageResampling | None, do_rescale: bool | None, rescale_factor: float | None, do_normalize: bool | None, image_mean: float | list[float] | None, image_std: float | list[float] | None, patch_size: int | None, temporal_patch_size: int | None, merge_size: int | None, do_convert_rgb: bool | None, return_tensors: str | TensorType | None, data_format: ChannelDimension | None, input_data_format: str | ChannelDimension | None | def preprocess(
self,
images: ImageInput,
do_resize: bool | None = None,
size: dict[str, int] | None = None,
resample: PILImageResampling | None = None,
do_rescale: bool | None = None,
rescale_factor: float | None = None,
do_normalize: bool | None = None,
image_mean: float | list[float] | None = None,
image_std: float | list[float] | None = None,
patch_size: int | None = None,
temporal_patch_size: int | None = None,
merge_size: int | None = None,
do_convert_rgb: bool | None = None,
return_tensors: str | TensorType | None = None,
data_format: ChannelDimension | None = ChannelDimension.FIRST,
input_data_format: str | ChannelDimension | None = None,
):
"""
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`Dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
the longest edge resized to keep the input aspect ratio.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
`True`.
patch_size (`int`, *optional*, defaults to `self.patch_size`):
The spatial patch size of the vision encoder.
temporal_patch_size (`int`, *optional*, defaults to `self.temporal_patch_size`):
The temporal patch size of the vision encoder.
merge_size (`int`, *optional*, defaults to `self.merge_size`):
The merge size of the vision encoder to llm encoder.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
size = size if size is not None else self.size
if size is not None and ("shortest_edge" not in size or "longest_edge" not in size):
raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.")
do_resize = do_resize if do_resize is not None else self.do_resize
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
patch_size = patch_size if patch_size is not None else self.patch_size
temporal_patch_size = temporal_patch_size if temporal_patch_size is not None else self.temporal_patch_size
merge_size = merge_size if merge_size is not None else self.merge_size
do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
if images is not None:
images = self.fetch_images(images)
images = make_flat_list_of_images(images)
if images is not None and not valid_images(images):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
validate_preprocess_arguments(
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_resize=do_resize,
size=size,
resample=resample,
)
data = {}
if images is not None:
pixel_values, vision_grid_thws = [], []
for image in images:
patches, image_grid_thw = self._preprocess(
image,
do_resize=do_resize,
size=size,
resample=resample,
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
patch_size=patch_size,
temporal_patch_size=temporal_patch_size,
merge_size=merge_size,
data_format=data_format,
do_convert_rgb=do_convert_rgb,
input_data_format=input_data_format,
)
pixel_values.extend(patches)
vision_grid_thws.append(image_grid_thw)
pixel_values = np.array(pixel_values)
vision_grid_thws = np.array(vision_grid_thws)
data.update({"pixel_values": pixel_values, "image_grid_thw": vision_grid_thws})
return BatchFeature(data=data, tensor_type=return_tensors) | function_complex | 0 | {"cognitive_complexity": 21, "loc": 130, "code_loc": 55, "docstring_loc": 50, "function_name": "preprocess", "class_name": "Glm4vImageProcessor", "qualname": "Glm4vImageProcessor.preprocess", "file_path": "src/transformers/models/glm4v/image_processing_glm4v.py", "repo_id": "huggingface/transformers", "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/serve/task_processor.py:CeleryTaskProcessorAdapter._handle_unknown_task | # Context:
from typing import Any, Dict, List, Optional
class CeleryTaskProcessorAdapter(TaskProcessorAdapter):
def __init__(self, config: TaskProcessorConfig, *args, **kwargs):
super().__init__(*args, **kwargs)
if not isinstance(config.adapter_config, CeleryAdapterConfig):
raise TypeError(
"TaskProcessorConfig.adapter_config must be an instance of CeleryAdapterConfig"
)
# Check if any app_custom_config keys conflict with default Celery app config
if config.adapter_config.app_custom_config:
conflicting_keys = set(
config.adapter_config.app_custom_config.keys()
) & set(CELERY_DEFAULT_APP_CONFIG)
if conflicting_keys:
raise ValueError(
f"The following configuration keys cannot be changed via app_custom_config: {sorted(conflicting_keys)}. "
f"These are managed internally by the CeleryTaskProcessorAdapter."
)
self._config = config
def initialize(self, consumer_concurrency: int): ...
def register_task_handle(self, func, name): ...
def enqueue_task_sync(self, task_name, args, kwargs, **options) -> TaskResult: ...
def get_task_status_sync(self, task_id) -> TaskResult: ...
def start_consumer(self, **kwargs): ...
def stop_consumer(self, timeout: float): ...
def cancel_task_sync(self, task_id): ...
def get_metrics_sync(self) -> Dict[str, Any]: ...
def health_check_sync(self) -> List[Dict]: ...
def _handle_task_failure(self, sender: Any, task_id: str, args: Any, kwargs: Any, einfo: Any, **kw): ...
def _move_task_to_queue(self, queue_name: str, task_name: str, args: list): ...
# Task:
Write a Python method `_handle_unknown_task` for the class `CeleryTaskProcessorAdapter` to handle unknown or unregistered tasks received by Celery.
Parameters: sender: Any, name: str, id: str, message: Any, exc: Any | def _handle_unknown_task(
self,
sender: Any = None,
name: str = None,
id: str = None,
message: Any = None,
exc: Any = None,
**kwargs,
):
"""Handle unknown or unregistered tasks received by Celery.
This method is called when Celery receives a task that it doesn't recognize
(i.e., a task that hasn't been registered with the Celery app). These tasks
are moved to the unprocessable task queue if configured.
Args:
sender: The Celery app or worker that detected the unknown task
name: Name of the unknown task
id: Task ID of the unknown task
message: The raw message received for the unknown task
exc: The exception raised when trying to process the unknown task
**kwargs: Additional context information from Celery
"""
logger.info(
f"Unknown task detected by Celery. Name: {name}, ID: {id}, Exc: {str(exc)}"
)
if self._config.unprocessable_task_queue_name:
self._move_task_to_queue(
self._config.unprocessable_task_queue_name,
name,
[
name,
id,
str(message),
str(exc),
str(kwargs),
],
) | function_simple | 0 | {"cognitive_complexity": 1, "loc": 39, "code_loc": 15, "docstring_loc": 14, "function_name": "_handle_unknown_task", "class_name": "CeleryTaskProcessorAdapter", "qualname": "CeleryTaskProcessorAdapter._handle_unknown_task", "file_path": "python/ray/serve/task_processor.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "file_runnable"} |
crewAIInc/crewAI:lib/crewai-files/tests/processing/test_validators.py:TestValidateImage.test_validate_valid_image | # Context:
from crewai_files import AudioFile, FileBytes, ImageFile, PDFFile, TextFile, VideoFile
from crewai_files.processing.constraints import (
ANTHROPIC_CONSTRAINTS,
AudioConstraints,
ImageConstraints,
PDFConstraints,
ProviderConstraints,
VideoConstraints,
)
from crewai_files.processing.validators import (
_get_audio_duration,
_get_video_duration,
validate_audio,
validate_file,
validate_image,
validate_pdf,
validate_text,
validate_video,
)
class TestValidatePDF: ...
class TestValidateText: ...
class TestValidateFile: ...
class TestValidateAudio: ...
class TestValidateVideo: ...
class TestGetAudioDuration: ...
class TestGetVideoDuration: ...
class TestRealVideoFile: ...
class TestValidateImage:
def test_validate_image_too_large(self): ...
def test_validate_image_unsupported_format(self): ...
def test_validate_image_no_raise(self): ...
# Task:
Write a Python test method `test_validate_valid_image` in test class `TestValidateImage` to test validating a valid image within constraints.
Module under test: crewai_files, crewai_files.processing.constraints, crewai_files.processing.exceptions | def test_validate_valid_image(self):
"""Test validating a valid image within constraints."""
constraints = ImageConstraints(
max_size_bytes=10 * 1024 * 1024,
supported_formats=("image/png",),
)
file = ImageFile(source=FileBytes(data=MINIMAL_PNG, filename="test.png"))
errors = validate_image(file, constraints, raise_on_error=False)
assert len(errors) == 0 | test | 0 | {"function_name": "test_validate_valid_image", "class_name": "TestValidateImage", "qualname": "TestValidateImage.test_validate_valid_image", "file_path": "lib/crewai-files/tests/processing/test_validators.py", "repo_id": "crewAIInc/crewAI", "loc": 11, "tested_modules": ["crewai_files", "crewai_files.processing.constraints", "crewai_files.processing.exceptions", "crewai_files.processing.validators", "pathlib"], "has_docstring": true, "runnable_level": "project_runnable"} |
apache/airflow:task-sdk/src/airflow/sdk/configuration.py:get_custom_secret_backend | # Context:
from airflow.sdk.configuration import conf
def _default_config_file_path(file_name: str) -> str: ...
def retrieve_configuration_description() -> dict[str, dict[str, Any]]: ...
def create_default_config_parser(configuration_description: dict[str, dict[str, Any]]) -> ConfigParser: ...
def get_sdk_expansion_variables() -> dict[str, Any]: ...
def get_airflow_config() -> str: ...
class AirflowSDKConfigParser(_SharedAirflowConfigParser): ...
def initialize_secrets_backends(default_backends: list[str]): ...
def ensure_secrets_loaded(default_backends: list[str]) -> list: ...
def initialize_config() -> AirflowSDKConfigParser: ...
def __getattr__(name: str): ...
# Task:
Write a Python function `get_custom_secret_backend` to get Secret Backend if defined in airflow.cfg.
Parameters: worker_mode: bool | def get_custom_secret_backend(worker_mode: bool = False):
"""
Get Secret Backend if defined in airflow.cfg.
Conditionally selects the section, key and kwargs key based on whether it is called from worker or not.
This is a convenience function that calls conf._get_custom_secret_backend().
Uses SDK's conf instead of Core's conf.
"""
# Lazy import to trigger __getattr__ and lazy initialization
from airflow.sdk.configuration import conf
return conf._get_custom_secret_backend(worker_mode=worker_mode) | function_simple | 1 | {"cognitive_complexity": 0, "loc": 13, "code_loc": 2, "docstring_loc": 8, "function_name": "get_custom_secret_backend", "class_name": null, "qualname": "get_custom_secret_backend", "file_path": "task-sdk/src/airflow/sdk/configuration.py", "repo_id": "apache/airflow", "has_docstring": true, "runnable_level": "project_runnable"} |
browser-use/browser-use:browser_use/browser/session.py:BrowserSession.is_local | # Context:
class Target(BaseModel): ...
class CDPSession(BaseModel): ...
class BrowserSession(BaseModel):
model_config = ConfigDict(
def __init__(
self,
*,
# Cloud browser params - use these for cloud mode
cloud_profile_id: UUID | str | None = None,
cloud_proxy_country_code: ProxyCountryCode | None = None,
cloud_timeout: int | None = None,
# Backward compatibility aliases
profile_id: UUID | str | None = None,
proxy_country_code: ProxyCountryCode | None = None,
timeout: int | None = None,
use_cloud: bool | None = None,
cloud_browser: bool | None = None, # Backward compatibility alias
cloud_browser_params: CloudBrowserParams | None = None,
# Common params that work with cloud
id: str | None = None,
headers: dict[str, str] | None = None,
allowed_domains: list[str] | None = None,
prohibited_domains: list[str] | None = None,
keep_alive: bool | None = None,
minimum_wait_page_load_time: float | None = None,
wait_for_network_idle_page_load_time: float | None = None,
wait_between_actions: float | None = None,
captcha_solver: bool | None = None,
auto_download_pdfs: bool | None = None,
cookie_whitelist_domains: list[str] | None = None,
cross_origin_iframes: bool | None = None,
highlight_elements: bool | None = None,
dom_highlight_elements: bool | None = None,
paint_order_filtering: bool | None = None,
max_iframes: int | None = None,
max_iframe_depth: int | None = None,
) -> None: ...
def __init__(
self,
*,
# Core configuration for local
id: str | None = None,
cdp_url: str | None = None,
browser_profile: BrowserProfile | None = None,
# Local browser launch params
executable_path: str | Path | None = None,
headless: bool | None = None,
user_data_dir: str | Path | None = None,
args: list[str] | None = None,
downloads_path: str | Path | None = None,
# Common params
headers: dict[str, str] | None = None,
allowed_domains: list[str] | None = None,
prohibited_domains: list[str] | None = None,
keep_alive: bool | None = None,
minimum_wait_page_load_time: float | None = None,
wait_for_network_idle_page_load_time: float | None = None,
wait_between_actions: float | None = None,
auto_download_pdfs: bool | None = None,
cookie_whitelist_domains: list[str] | None = None,
cross_origin_iframes: bool | None = None,
highlight_elements: bool | None = None,
dom_highlight_elements: bool | None = None,
paint_order_filtering: bool | None = None,
max_iframes: int | None = None,
max_iframe_depth: int | None = None,
# All other local params
env: dict[str, str | float | bool] | None = None,
ignore_default_args: list[str] | Literal[True] | None = None,
channel: str | None = None,
chromium_sandbox: bool | None = None,
devtools: bool | None = None,
traces_dir: str | Path | None = None,
accept_downloads: bool | None = None,
permissions: list[str] | None = None,
user_agent: str | None = None,
screen: dict | None = None,
viewport: dict | None = None,
no_viewport: bool | None = None,
device_scale_factor: float | None = None,
record_har_content: str | None = None,
record_har_mode: str | None = None,
record_har_path: str | Path | None = None,
record_video_dir: str | Path | None = None,
record_video_framerate: int | None = None,
record_video_size: dict | None = None,
storage_state: str | Path | dict[str, Any] | None = None,
disable_security: bool | None = None,
deterministic_rendering: bool | None = None,
proxy: ProxySettings | None = None,
enable_default_extensions: bool | None = None,
captcha_solver: bool | None = None,
window_size: dict | None = None,
window_position: dict | None = None,
filter_highlight_ids: bool | None = None,
profile_directory: str | None = None,
) -> None: ...
def __init__(
self,
# Core configuration
id: str | None = None,
cdp_url: str | None = None,
is_local: bool = False,
browser_profile: BrowserProfile | None = None,
# Cloud browser params (don't mix with local browser params)
cloud_profile_id: UUID | str | None = None,
cloud_proxy_country_code: ProxyCountryCode | None = None,
cloud_timeout: int | None = None,
# Backward compatibility aliases for cloud params
profile_id: UUID | str | None = None,
proxy_country_code: ProxyCountryCode | None = None,
timeout: int | None = None,
# BrowserProfile fields that can be passed directly
# From BrowserConnectArgs
headers: dict[str, str] | None = None,
# From BrowserLaunchArgs
env: dict[str, str | float | bool] | None = None,
executable_path: str | Path | None = None,
headless: bool | None = None,
args: list[str] | None = None,
ignore_default_args: list[str] | Literal[True] | None = None,
channel: str | None = None,
chromium_sandbox: bool | None = None,
devtools: bool | None = None,
downloads_path: str | Path | None = None,
traces_dir: str | Path | None = None,
# From BrowserContextArgs
accept_downloads: bool | None = None,
permissions: list[str] | None = None,
user_agent: str | None = None,
screen: dict | None = None,
viewport: dict | None = None,
no_viewport: bool | None = None,
device_scale_factor: float | None = None,
record_har_content: str | None = None,
record_har_mode: str | None = None,
record_har_path: str | Path | None = None,
record_video_dir: str | Path | None = None,
record_video_framerate: int | None = None,
record_video_size: dict | None = None,
# From BrowserLaunchPersistentContextArgs
user_data_dir: str | Path | None = None,
# From BrowserNewContextArgs
storage_state: str | Path | dict[str, Any] | None = None,
# BrowserProfile specific fields
## Cloud Browser Fields
use_cloud: bool | None = None,
cloud_browser: bool | None = None, # Backward compatibility alias
cloud_browser_params: CloudBrowserParams | None = None,
## Other params
disable_security: bool | None = None,
deterministic_rendering: bool | None = None,
allowed_domains: list[str] | None = None,
prohibited_domains: list[str] | None = None,
keep_alive: bool | None = None,
proxy: ProxySettings | None = None,
enable_default_extensions: bool | None = None,
captcha_solver: bool | None = None,
window_size: dict | None = None,
window_position: dict | None = None,
minimum_wait_page_load_time: float | None = None,
wait_for_network_idle_page_load_time: float | None = None,
wait_between_actions: float | None = None,
filter_highlight_ids: bool | None = None,
auto_download_pdfs: bool | None = None,
profile_directory: str | None = None,
cookie_whitelist_domains: list[str] | None = None,
# DOM extraction layer configuration
cross_origin_iframes: bool | None = None,
highlight_elements: bool | None = None,
dom_highlight_elements: bool | None = None,
paint_order_filtering: bool | None = None,
# Iframe processing limits
max_iframes: int | None = None,
max_iframe_depth: int | None = None,
):
# Following the same pattern as AgentSettings in service.py
# Only pass non-None values to avoid validation errors
profile_kwargs = {
k: v
for k, v in locals().items()
if k
not in [
'self',
'browser_profile',
'id',
'cloud_profile_id',
'cloud_proxy_country_code',
'cloud_timeout',
'profile_id',
'proxy_country_code',
'timeout',
]
and v is not None
}
# Handle backward compatibility: prefer cloud_* params over old names
final_profile_id = cloud_profile_id if cloud_profile_id is not None else profile_id
final_proxy_country_code = cloud_proxy_country_code if cloud_proxy_country_code is not None else proxy_country_code
final_timeout = cloud_timeout if cloud_timeout is not None else timeout
# If any cloud params are provided, create cloud_browser_params
if final_profile_id is not None or final_proxy_country_code is not None or final_timeout is not None:
cloud_params = CreateBrowserRequest(
cloud_profile_id=final_profile_id,
cloud_proxy_country_code=final_proxy_country_code,
cloud_timeout=final_timeout,
)
profile_kwargs['cloud_browser_params'] = cloud_params
profile_kwargs['use_cloud'] = True
# Handle backward compatibility: map cloud_browser to use_cloud
if 'cloud_browser' in profile_kwargs:
profile_kwargs['use_cloud'] = profile_kwargs.pop('cloud_browser')
# If cloud_browser_params is set, force use_cloud=True
if cloud_browser_params is not None:
profile_kwargs['use_cloud'] = True
# if is_local is False but executable_path is provided, set is_local to True
if is_local is False and executable_path is not None:
profile_kwargs['is_local'] = True
# Only set is_local=True when cdp_url is missing if we're not using cloud browser
# (cloud browser will provide cdp_url later)
use_cloud = profile_kwargs.get('use_cloud') or profile_kwargs.get('cloud_browser')
if not cdp_url and not use_cloud:
profile_kwargs['is_local'] = True
# Create browser profile from direct parameters or use provided one
if browser_profile is not None:
# Merge any direct kwargs into the provided browser_profile (direct kwargs take precedence)
merged_kwargs = {**browser_profile.model_dump(exclude_unset=True), **profile_kwargs}
resolved_browser_profile = BrowserProfile(**merged_kwargs)
else:
resolved_browser_profile = BrowserProfile(**profile_kwargs)
# Initialize the Pydantic model
super().__init__(
id=id or str(uuid7str()),
browser_profile=resolved_browser_profile,
)
def from_system_chrome(cls, profile_directory: str | None, **kwargs) -> Self: ...
def list_chrome_profiles(cls) -> list[dict[str, str]]: ...
def cdp_url(self) -> str | None: ...
def is_cdp_connected(self) -> bool: ...
async def wait_if_captcha_solving(self, timeout: float | None) -> 'CaptchaWaitResult | None': ...
def is_reconnecting(self) -> bool: ...
def cloud_browser(self) -> bool: ...
def demo_mode(self) -> 'DemoMode | None': ...
def logger(self) -> Any: ...
def _id_for_logs(self) -> str: ...
def _tab_id_for_logs(self) -> str: ...
def __repr__(self) -> str: ...
def __str__(self) -> str: ...
async def reset(self) -> None: ...
def model_post_init(self, __context) -> None: ...
async def start(self) -> None: ...
async def kill(self) -> None: ...
async def stop(self) -> None: ...
async def on_BrowserStartEvent(self, event: BrowserStartEvent) -> dict[str, str]: ...
async def on_NavigateToUrlEvent(self, event: NavigateToUrlEvent) -> None: ...
async def _navigate_and_wait(self, url: str, target_id: str, timeout: float | None, wait_until: str) -> None: ...
async def on_SwitchTabEvent(self, event: SwitchTabEvent) -> TargetID: ...
async def on_CloseTabEvent(self, event: CloseTabEvent) -> None: ...
async def on_TabCreatedEvent(self, event: TabCreatedEvent) -> None: ...
async def on_TabClosedEvent(self, event: TabClosedEvent) -> None: ...
async def on_AgentFocusChangedEvent(self, event: AgentFocusChangedEvent) -> None: ...
async def on_FileDownloadedEvent(self, event: FileDownloadedEvent) -> None: ...
async def on_BrowserStopEvent(self, event: BrowserStopEvent) -> None: ...
def cdp_client(self) -> CDPClient: ...
async def new_page(self, url: str | None) -> 'Page': ...
async def get_current_page(self) -> 'Page | None': ...
async def must_get_current_page(self) -> 'Page': ...
async def get_pages(self) -> list['Page']: ...
def get_focused_target(self) -> 'Target | None': ...
def get_page_targets(self) -> list['Target']: ...
async def close_page(self, page: 'Union[Page, str]') -> None: ...
async def cookies(self) -> list['Cookie']: ...
async def clear_cookies(self) -> None: ...
async def export_storage_state(self, output_path: str | Path | None) -> dict[str, Any]: ...
async def get_or_create_cdp_session(self, target_id: TargetID | None, focus: bool) -> CDPSession: ...
async def set_extra_headers(self, headers: dict[str, str], target_id: TargetID | None) -> None: ...
async def get_browser_state_summary(self, include_screenshot: bool, cached: bool, include_recent_events: bool) -> BrowserStateSummary: ...
async def get_state_as_text(self) -> str: ...
async def attach_all_watchdogs(self) -> None: ...
async def connect(self, cdp_url: str | None) -> Self: ...
async def _setup_proxy_auth(self) -> None: ...
async def reconnect(self) -> None: ...
async def _auto_reconnect(self, max_attempts: int) -> None: ...
def _attach_ws_drop_callback(self) -> None: ...
async def get_tabs(self) -> list[TabInfo]: ...
async def get_current_target_info(self) -> TargetInfo | None: ...
async def get_current_page_url(self) -> str: ...
async def get_current_page_title(self) -> str: ...
async def navigate_to(self, url: str, new_tab: bool) -> None: ...
async def get_dom_element_by_index(self, index: int) -> EnhancedDOMTreeNode | None: ...
def update_cached_selector_map(self, selector_map: dict[int, EnhancedDOMTreeNode]) -> None: ...
async def get_element_by_index(self, index: int) -> EnhancedDOMTreeNode | None: ...
async def get_dom_element_at_coordinates(self, x: int, y: int) -> EnhancedDOMTreeNode | None: ...
async def get_target_id_from_tab_id(self, tab_id: str) -> TargetID: ...
async def get_target_id_from_url(self, url: str) -> TargetID: ...
async def get_most_recently_opened_target_id(self) -> TargetID: ...
def is_file_input(self, element: Any) -> bool: ...
async def get_selector_map(self) -> dict[int, EnhancedDOMTreeNode]: ...
async def get_index_by_id(self, element_id: str) -> int | None: ...
async def get_index_by_class(self, class_name: str) -> int | None: ...
async def remove_highlights(self) -> None: ...
async def get_element_coordinates(self, backend_node_id: int, cdp_session: CDPSession) -> DOMRect | None: ...
async def highlight_interaction_element(self, node: 'EnhancedDOMTreeNode') -> None: ...
async def highlight_coordinate_click(self, x: int, y: int) -> None: ...
async def add_highlights(self, selector_map: dict[int, 'EnhancedDOMTreeNode']) -> None: ...
async def _close_extension_options_pages(self) -> None: ...
async def send_demo_mode_log(self, message: str, level: str, metadata: dict[str, Any] | None) -> None: ...
def downloaded_files(self) -> list[str]: ...
async def _cdp_get_all_pages(self, include_http: bool, include_about: bool, include_pages: bool, include_iframes: bool, include_workers: bool, include_chrome: bool, include_chrome_extensions: bool, include_chrome_error: bool) -> list[TargetInfo]: ...
async def _cdp_create_new_page(self, url: str, background: bool, new_window: bool) -> str: ...
async def _cdp_close_page(self, target_id: TargetID) -> None: ...
async def _cdp_get_cookies(self) -> list[Cookie]: ...
async def _cdp_set_cookies(self, cookies: list[Cookie]) -> None: ...
async def _cdp_clear_cookies(self) -> None: ...
async def _cdp_grant_permissions(self, permissions: list[str], origin: str | None) -> None: ...
async def _cdp_set_geolocation(self, latitude: float, longitude: float, accuracy: float) -> None: ...
async def _cdp_clear_geolocation(self) -> None: ...
async def _cdp_add_init_script(self, script: str) -> str: ...
async def _cdp_remove_init_script(self, identifier: str) -> None: ...
async def _cdp_set_viewport(self, width: int, height: int, device_scale_factor: float, mobile: bool, target_id: str | None) -> None: ...
async def _cdp_get_origins(self) -> list[dict[str, Any]]: ...
async def _cdp_get_storage_state(self) -> dict: ...
async def _cdp_navigate(self, url: str, target_id: TargetID | None) -> None: ...
def _is_valid_target(target_info: TargetInfo, include_http: bool, include_chrome: bool, include_chrome_extensions: bool, include_chrome_error: bool, include_about: bool, include_iframes: bool, include_pages: bool, include_workers: bool) -> bool: ...
async def get_all_frames(self) -> tuple[dict[str, dict], dict[str, str]]: ...
async def _populate_frame_metadata(self, all_frames: dict[str, dict], target_sessions: dict[str, str]) -> None: ...
async def find_frame_target(self, frame_id: str, all_frames: dict[str, dict] | None) -> dict | None: ...
async def cdp_client_for_target(self, target_id: TargetID) -> CDPSession: ...
async def cdp_client_for_frame(self, frame_id: str) -> CDPSession: ...
async def cdp_client_for_node(self, node: EnhancedDOMTreeNode) -> CDPSession: ...
async def take_screenshot(self, path: str | None, full_page: bool, format: str, quality: int | None, clip: dict | None) -> bytes: ...
async def screenshot_element(self, selector: str, path: str | None, format: str, quality: int | None) -> bytes: ...
async def _get_element_bounds(self, selector: str) -> dict | None: ...
# Task:
Write a Python method `is_local` for the class `BrowserSession` to return whether this is a local browser instance from browser profile.
Returns: bool | def is_local(self) -> bool:
"""Whether this is a local browser instance from browser profile."""
return self.browser_profile.is_local | function_simple | 0 | {"cognitive_complexity": 0, "loc": 3, "code_loc": 1, "docstring_loc": 1, "function_name": "is_local", "class_name": "BrowserSession", "qualname": "BrowserSession.is_local", "file_path": "browser_use/browser/session.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "class_runnable"} |
google/langextract:langextract/data_lib.py:annotated_document_to_dict | # Context:
import dataclasses
from typing import Any, Iterable, Mapping
from langextract.core import data
def enum_asdict_factory(items: Iterable[tuple[str, Any]]) -> dict[str, Any]: ...
def dict_to_annotated_document(adoc_dic: Mapping[str, Any]) -> data.AnnotatedDocument: ...
# Task:
Write a Python function `annotated_document_to_dict` to convert an AnnotatedDocument into a Python dict.
Parameters: adoc: data.AnnotatedDocument | None
Returns: dict[str, Any] | def annotated_document_to_dict(
adoc: data.AnnotatedDocument | None,
) -> dict[str, Any]:
"""Converts an AnnotatedDocument into a Python dict.
This function converts an AnnotatedDocument object into a Python dict, making
it easier to serialize or deserialize the document. Enum values and NumPy
integers are converted to their underlying values, while other data types are
left unchanged. Private fields with an underscore prefix are not included in
the output.
Args:
adoc: The AnnotatedDocument object to convert.
Returns:
A Python dict representing the AnnotatedDocument.
"""
if not adoc:
return {}
result = dataclasses.asdict(adoc, dict_factory=enum_asdict_factory)
result["document_id"] = adoc.document_id
return result | function_simple | 1 | {"cognitive_complexity": 1, "loc": 26, "code_loc": 5, "docstring_loc": 14, "function_name": "annotated_document_to_dict", "class_name": null, "qualname": "annotated_document_to_dict", "file_path": "langextract/data_lib.py", "repo_id": "google/langextract", "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/data/tests/test_shuffle_diagnostics.py:test_sort_inlined_objects_warnings | # Context:
import logging
import pytest
import ray
from ray.data.context import DataContext, ShuffleStrategy
def test_debug_limit_shuffle_execution_to_num_blocks(ray_start_regular, restore_data_context, configure_shuffle_method, shuffle_op): ...
def test_memory_usage(ray_start_regular, restore_data_context, configure_shuffle_method): ...
def test_sort_object_ref_warnings(ray_start_regular, restore_data_context, configure_shuffle_method, under_threshold, propagate_logs, caplog): ...
# Task:
Write a Python test function `test_sort_inlined_objects_warnings` to verify the behavior of `sort_inlined_objects_warnings`.
Module under test: ray.data.context, ray.data.dataset | def test_sort_inlined_objects_warnings(
ray_start_regular,
restore_data_context,
configure_shuffle_method,
under_threshold,
propagate_logs,
caplog,
):
# Test that we warn iff expected driver memory usage from
# storing tiny Ray objects on driver heap is higher than
# the configured threshold.
if configure_shuffle_method == ShuffleStrategy.SORT_SHUFFLE_PUSH_BASED:
warning_strs = [
"More than 3MB of driver memory used",
"More than 7MB of driver memory used",
]
else:
warning_strs = [
"More than 8MB of driver memory used",
]
if not under_threshold:
DataContext.get_current().warn_on_driver_memory_usage_bytes = 3_000_000
ds = ray.data.range(int(1e6), override_num_blocks=10)
with caplog.at_level(logging.WARNING, logger="ray.data.dataset"):
ds = ds.random_shuffle().materialize()
if under_threshold:
assert all(warning_str not in caplog.text for warning_str in warning_strs)
else:
assert all(warning_str in caplog.text for warning_str in warning_strs) | test | 0 | {"function_name": "test_sort_inlined_objects_warnings", "class_name": null, "qualname": "test_sort_inlined_objects_warnings", "file_path": "python/ray/data/tests/test_shuffle_diagnostics.py", "repo_id": "ray-project/ray", "loc": 32, "tested_modules": ["ray.data.context", "ray.data.dataset"], "has_docstring": false, "runnable_level": "project_runnable"} |
gradio-app/gradio:gradio/mcp.py:GradioMCPServer.launch_mcp_on_sse | # Context:
from starlette.applications import Starlette
from starlette.responses import JSONResponse, Response
from starlette.routing import Mount, Route
def resource(uri_template: str, description: str | None, mime_type: str | None): ...
def prompt(name: str | None, description: str | None): ...
def tool(name: str | None, description: str | None, structured_output: bool, _meta: dict[str, Any] | None): ...
class GradioMCPServer:
def __init__(self, blocks: "Blocks"):
try:
import mcp # noqa: F401
except ImportError as e:
raise ImportError(
"The `mcp` package is required to use the Gradio MCP integration. Please install it with the `mcp` extra: `pip install gradio[mcp]`."
) from e
self.blocks = blocks
self.api_info = self.blocks.get_api_info()
self.mcp_server = self.create_mcp_server()
self.root_path = ""
space_id = utils.get_space()
self.tool_prefix = space_id.split("/")[-1] + "_" if space_id else ""
self.tool_to_endpoint = self.get_tool_to_endpoint()
self.warn_about_state_inputs()
self._local_url: str | None = None
self._client_instance: Client | None = None
manager = self.StreamableHTTPSessionManager( # type: ignore
app=self.mcp_server, json_response=False, stateless=True
)
async def handle_streamable_http(
scope: Scope, receive: Receive, send: Send
) -> None:
path = scope.get("path", "")
if not path.endswith(
(
"/gradio_api/mcp",
"/gradio_api/mcp/",
"/gradio_api/mcp/http",
"/gradio_api/mcp/http/",
)
):
response = Response(
content=f"Path '{path}' not found. The MCP HTTP transport is available at /gradio_api/mcp.",
status_code=404,
)
await response(scope, receive, send)
return
await manager.handle_request(scope, receive, send)
@contextlib.asynccontextmanager
async def lifespan(app: Starlette) -> AsyncIterator[None]: # noqa: ARG001
"""Context manager for managing session manager lifecycle."""
async with manager.run():
try:
yield
finally:
pass
self.lifespan = lifespan
self.manager = manager
self.handle_streamable_http = handle_streamable_http
def local_url(self) -> str | None: ...
def get_route_path(self, request: Request) -> str: ...
def get_selected_tools_from_request(self) -> list[str] | None: ...
def valid_and_unique_tool_name(tool_name: str, existing_tool_names: set[str]) -> str: ...
def get_tool_to_endpoint(self) -> dict[str, str]: ...
def warn_about_state_inputs(self) -> None: ...
def _get_or_create_client(self) -> Client: ...
def _prepare_tool_call_args(self, name: str, arguments: dict[str, Any]) -> tuple[str, list[Any], dict[str, str], 'BlockFunction']: ...
async def _execute_tool_without_progress(self, job: Any) -> list[Any]: ...
def _format_progress_message(update: StatusUpdate) -> str | None: ...
async def _execute_tool_with_progress(self, job: Any, progress_token: str) -> dict[str, Any]: ...
def create_mcp_server(self) -> 'Server': ...
def get_block_fn_from_endpoint_name(self, endpoint_name: str) -> 'BlockFunction | None': ...
def _file_data_tool_description(self) -> str: ...
def get_fn_description(self, block_fn: 'BlockFunction', tool_name: str) -> tuple[str, dict[str, str]]: ...
def insert_empty_state(inputs: Sequence['Component | BlockContext'], data: list) -> list: ...
def pop_returned_state(components: Sequence['Component | BlockContext'], data: Any) -> list: ...
def get_input_schema(self, tool_name: str, parameters: dict[str, str] | None) -> tuple[dict[str, Any], list[list[str | int]]]: ...
async def get_complete_schema(self, request) -> JSONResponse: ...
def simplify_filedata_schema(self, schema: dict[str, Any]) -> tuple[dict[str, Any], list[list[str | int]]]: ...
def convert_strings_to_filedata(self, value: Any, filedata_positions: list[list[str | int]]) -> Any: ...
def get_image(file_path: str) -> Image.Image | None: ...
def get_svg(file_data: Any) -> bytes | None: ...
def get_base64_data(image: Image.Image, format: str) -> str: ...
def postprocess_output_data(self, data: Any, root_url: str) -> list['types.TextContent | types.ImageContent']: ...
# Task:
Write a Python method `launch_mcp_on_sse` for the class `GradioMCPServer` to launch the MCP server on the SSE transport.
Parameters: app: Starlette, subpath: str, root_path: str
Returns: None | def launch_mcp_on_sse(self, app: Starlette, subpath: str, root_path: str) -> None:
"""
Launch the MCP server on the SSE transport.
Parameters:
app: The Gradio app to mount the MCP server on.
subpath: The subpath to mount the MCP server on. E.g. "/gradio_api/mcp"
root_path: The root path of the Gradio Blocks app.
"""
messages_path = "/messages/"
sse = self.SseServerTransport(messages_path) # type: ignore
self.root_path = root_path
async def handle_sse(request):
try:
async with sse.connect_sse(
request.scope, request.receive, request._send
) as streams:
await self.mcp_server.run(
streams[0],
streams[1],
self.mcp_server.create_initialization_options(),
)
return Response()
except Exception as e:
print(f"MCP SSE connection error: {str(e)}")
raise
app.mount(
subpath,
Starlette(
routes=[
Route(
"/schema",
endpoint=self.get_complete_schema, # Not required for MCP but used by the Hugging Face MCP server to get the schema for MCP Spaces without needing to establish an SSE connection
),
Route("/sse", endpoint=handle_sse),
Mount("/messages/", app=sse.handle_post_message),
Mount("/", app=self.handle_streamable_http),
],
),
) | function_simple | 1 | {"cognitive_complexity": 1, "loc": 42, "code_loc": 31, "docstring_loc": 8, "function_name": "launch_mcp_on_sse", "class_name": "GradioMCPServer", "qualname": "GradioMCPServer.launch_mcp_on_sse", "file_path": "gradio/mcp.py", "repo_id": "gradio-app/gradio", "has_docstring": true, "runnable_level": "file_runnable"} |
browser-use/browser-use:browser_use/skill_cli/commands/cloud_task.py:poll_until_complete | # Context:
from browser_use_sdk.types.task_view import TaskView
from browser_use.skill_cli.commands.utils import format_duration, get_sdk_client
import asyncio
def _filter_none(kwargs: dict[str, Any]) -> dict[str, Any]: ...
def create_task(task: str, **kwargs) -> TaskCreatedResponse: ...
def get_task(task_id: str) -> TaskView: ...
def list_tasks(limit: int, status: str | None, session_id: str | None) -> list[TaskItemView]: ...
def stop_task(task_id: str) -> TaskView: ...
def get_task_logs(task_id: str) -> TaskLogFileResponse: ...
def handle_task_command(args: argparse.Namespace) -> int: ...
def _task_item_to_dict(task: Any) -> dict[str, Any]: ...
def _task_to_dict(task: Any) -> dict[str, Any]: ...
def _step_to_dict(step: Any) -> dict[str, Any]: ...
def _handle_list(args: argparse.Namespace) -> int: ...
def _handle_status(args: argparse.Namespace) -> int: ...
def _print_step(step: Any, verbose: bool) -> None: ...
def _handle_stop(args: argparse.Namespace) -> int: ...
def _handle_logs(args: argparse.Namespace) -> int: ...
# Task:
Write a Python async function `poll_until_complete` to poll task status until finished.
Parameters: task_id: str, stream: bool, poll_interval: float
Returns: TaskView | async def poll_until_complete(
task_id: str,
stream: bool = False,
poll_interval: float = 1.0,
) -> TaskView:
"""Poll task status until finished."""
import asyncio
client = get_sdk_client()
last_status = None
while True:
# Run blocking SDK call in thread to avoid blocking event loop
task = await asyncio.to_thread(client.tasks.get_task, task_id)
current_status = task.status
if stream and current_status != last_status:
print(f'Status: {current_status}')
last_status = current_status
if current_status in ('finished', 'stopped', 'failed'):
return task
await asyncio.sleep(poll_interval) | function_complex | 0 | {"cognitive_complexity": 6, "loc": 24, "code_loc": 12, "docstring_loc": 1, "function_name": "poll_until_complete", "class_name": null, "qualname": "poll_until_complete", "file_path": "browser_use/skill_cli/commands/cloud_task.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "project_runnable"} |
langchain-ai/langchain:libs/langchain/langchain_classic/evaluation/loading.py:load_dataset | Write a Python function `load_dataset` to load a dataset from the [LangChainDatasets on HuggingFace](https://huggingface.co/LangChainDatasets).
Parameters: uri: str
Returns: list[dict] | def load_dataset(uri: str) -> list[dict]:
"""Load a dataset from the [LangChainDatasets on HuggingFace](https://huggingface.co/LangChainDatasets).
Args:
uri: The uri of the dataset to load.
Returns:
A list of dictionaries, each representing a row in the dataset.
**Prerequisites**
```bash
pip install datasets
```
Examples:
--------
```python
from langchain_classic.evaluation import load_dataset
ds = load_dataset("llm-math")
```
"""
try:
from datasets import load_dataset
except ImportError as e:
msg = (
"load_dataset requires the `datasets` package."
" Please install with `pip install datasets`"
)
raise ImportError(msg) from e
dataset = load_dataset(f"LangChainDatasets/{uri}")
return list(dataset["train"]) | function_simple | 1 | {"cognitive_complexity": 1, "loc": 34, "code_loc": 10, "docstring_loc": 22, "function_name": "load_dataset", "class_name": null, "qualname": "load_dataset", "file_path": "libs/langchain/langchain_classic/evaluation/loading.py", "repo_id": "langchain-ai/langchain", "has_docstring": true, "runnable_level": "self_contained"} |
apache/airflow:providers/keycloak/src/airflow/providers/keycloak/auth_manager/cli/commands.py:create_permissions_command | # Context:
from airflow.utils import cli as cli_utils
def _get_resource_methods() -> list[str]: ...
def _get_extended_resource_methods() -> list[str]: ...
def create_scopes_command(args): ...
def create_resources_command(args): ...
def create_all_command(args): ...
def _get_client(args): ...
def _get_client_uuid(args): ...
def _create_group_membership_mapper(client: KeycloakAdmin, client_uuid: str, _dry_run: bool) -> None: ...
def _get_scopes_to_create() -> list[dict]: ...
def _parse_teams(teams: str | None) -> list[str]: ...
def _ensure_multi_team_enabled(teams: list[str], command_name: str) -> None: ...
def _preview_scopes(*args, **kwargs): ...
def _create_scopes(client: KeycloakAdmin, client_uuid: str, _dry_run: bool): ...
def _get_resources_to_create(client: KeycloakAdmin, client_uuid: str, teams: list[str]) -> tuple[list[tuple[str, list[dict]]], list[tuple[str, list[dict]]]]: ...
def _preview_resources(client: KeycloakAdmin, client_uuid: str, teams: list[str]): ...
def _create_resources(client: KeycloakAdmin, client_uuid: str, teams: list[str], _dry_run: bool): ...
def _get_permissions_to_create(client: KeycloakAdmin, client_uuid: str, teams: list[str], include_global_admin: bool) -> list[dict]: ...
def _preview_permissions(client: KeycloakAdmin, client_uuid: str, teams: list[str]): ...
def _create_permissions(client: KeycloakAdmin, client_uuid: str, teams: list[str], include_global_admin: bool, _dry_run: bool): ...
def _create_scope_based_permission(client: KeycloakAdmin, client_uuid: str, name: str, scope_ids: list[str], resource_ids: list[str] | None, decision_strategy: str): ...
def _create_resource_based_permission(client: KeycloakAdmin, client_uuid: str, name: str, resource_ids: list[str]): ...
def _ensure_scope_permission(client: KeycloakAdmin, client_uuid: str, name: str, scope_names: list[str], resource_names: list[str], decision_strategy: str, _dry_run: bool) -> None: ...
def _update_admin_permission_resources(client: KeycloakAdmin, client_uuid: str, _dry_run: bool) -> None: ...
def create_team_command(args): ...
def add_user_to_team_command(args): ...
def _ensure_team_policies(client: KeycloakAdmin, client_uuid: str, team: str, _dry_run: bool) -> None: ...
def _attach_team_permissions(client: KeycloakAdmin, client_uuid: str, team: str, _dry_run: bool) -> None: ...
def _attach_global_list_permissions(client: KeycloakAdmin, client_uuid: str, _dry_run: bool) -> None: ...
def _attach_team_menu_permissions(client: KeycloakAdmin, client_uuid: str, team: str, _dry_run: bool) -> None: ...
def _attach_superadmin_permissions(client: KeycloakAdmin, client_uuid: str, team: str, _dry_run: bool) -> None: ...
def _team_group_name(team: str) -> str: ...
def _team_group_policy_name(team: str) -> str: ...
def _role_policy_name(role_name: str) -> str: ...
def _team_role_policy_name(team: str, role_name: str) -> str: ...
def _ensure_group(client: KeycloakAdmin, team: str, _dry_run: bool) -> dict | None: ...
def _ensure_group_policy(client: KeycloakAdmin, client_uuid: str, team: str, _dry_run: bool) -> None: ...
def _policy_url(client: KeycloakAdmin, client_uuid: str, policy_type: str | None) -> str: ...
def _get_policy_id(client: KeycloakAdmin, client_uuid: str, policy_name: str, policy_type: str | None) -> str | None: ...
def _get_role_id(client: KeycloakAdmin, client_uuid: str, role_name: str) -> str: ...
def _ensure_role_policy(client: KeycloakAdmin, client_uuid: str, role_name: str, _dry_run: bool) -> None: ...
def _ensure_aggregate_policy(client: KeycloakAdmin, client_uuid: str, policy_name: str, policy_refs: list[tuple[str, str | None]], _dry_run: bool) -> None: ...
def _attach_policy_to_scope_permission(client: KeycloakAdmin, client_uuid: str, permission_name: str, policy_name: str, scope_names: list[str], resource_names: list[str], decision_strategy: str, _dry_run: bool) -> None: ...
def _get_permission_policy_ids(client: KeycloakAdmin, client_uuid: str, permission_id: str) -> list[str]: ...
def _attach_policy_to_resource_permission(client: KeycloakAdmin, client_uuid: str, permission_name: str, policy_name: str, resource_names: list[str], decision_strategy: str, _dry_run: bool) -> None: ...
def _get_resource_permission_policy_ids(client: KeycloakAdmin, client_uuid: str, permission_id: str) -> list[str]: ...
def _add_user_to_group(client: KeycloakAdmin, username: str, team: str, _dry_run: bool) -> None: ...
# Task:
Write a Python function `create_permissions_command` to create Keycloak auth manager permissions in Keycloak.
Parameters: args | def create_permissions_command(args):
"""Create Keycloak auth manager permissions in Keycloak."""
client = _get_client(args)
client_uuid = _get_client_uuid(args)
teams = _parse_teams(args.teams)
_ensure_multi_team_enabled(teams=teams, command_name="create-permissions")
if teams:
# Role policies are only needed for team-scoped (group+role) authorization.
for role_name in TEAM_ROLE_NAMES:
_ensure_role_policy(client, client_uuid, role_name, _dry_run=args.dry_run)
_ensure_role_policy(client, client_uuid, SUPER_ADMIN_ROLE_NAME, _dry_run=args.dry_run)
_create_permissions(client, client_uuid, teams=teams, _dry_run=args.dry_run) | function_simple | 1 | {"cognitive_complexity": 3, "loc": 13, "code_loc": 9, "docstring_loc": 1, "function_name": "create_permissions_command", "class_name": null, "qualname": "create_permissions_command", "file_path": "providers/keycloak/src/airflow/providers/keycloak/auth_manager/cli/commands.py", "repo_id": "apache/airflow", "has_docstring": true, "runnable_level": "project_runnable"} |
browser-use/browser-use:browser_use/browser/profile.py:BrowserProfile._download_extension | # Context:
from pathlib import Path
def _get_enable_default_extensions_default() -> bool: ...
class ViewportSize(BaseModel): ...
def get_display_size() -> ViewportSize | None: ...
def get_window_adjustments() -> tuple[int, int]: ...
def validate_url(url: str, schemes: Iterable[str]) -> str: ...
def validate_float_range(value: float, min_val: float, max_val: float) -> float: ...
def validate_cli_arg(arg: str) -> str: ...
class RecordHarContent(str, Enum): ...
class RecordHarMode(str, Enum): ...
class BrowserChannel(str, Enum): ...
class BrowserContextArgs(BaseModel): ...
class BrowserConnectArgs(BaseModel): ...
class BrowserLaunchArgs(BaseModel): ...
class BrowserNewContextArgs(BrowserContextArgs): ...
class BrowserLaunchPersistentContextArgs(BrowserLaunchArgs, BrowserContextArgs): ...
class ProxySettings(BaseModel): ...
class BrowserProfile(BrowserConnectArgs, BrowserLaunchPersistentContextArgs, BrowserLaunchArgs, BrowserNewContextArgs):
model_config = ConfigDict(
def cloud_browser(self) -> bool: ...
def __repr__(self) -> str: ...
def __str__(self) -> str: ...
def optimize_large_domain_lists(cls, v: list[str] | set[str] | None) -> list[str] | set[str] | None: ...
def copy_old_config_names_to_new(self) -> Self: ...
def warn_storage_state_user_data_dir_conflict(self) -> Self: ...
def warn_user_data_dir_non_default_version(self) -> Self: ...
def warn_deterministic_rendering_weirdness(self) -> Self: ...
def validate_proxy_settings(self) -> Self: ...
def validate_highlight_elements_conflict(self) -> Self: ...
def model_post_init(self, __context: Any) -> None: ...
def _copy_profile(self) -> None: ...
def get_args(self) -> list[str]: ...
def _get_extension_args(self) -> list[str]: ...
def _ensure_default_extensions_downloaded(self) -> list[str]: ...
def _apply_minimal_extension_patch(self, ext_dir: Path, whitelist_domains: list[str]) -> None: ...
def _extract_extension(self, crx_path: Path, extract_dir: Path) -> None: ...
def detect_display_configuration(self) -> None: ...
# Task:
Write a Python method `_download_extension` for the class `BrowserProfile` to download extension .crx file.
Parameters: url: str, output_path: Path
Returns: None | def _download_extension(self, url: str, output_path: Path) -> None:
"""Download extension .crx file."""
import urllib.request
try:
with urllib.request.urlopen(url) as response:
with open(output_path, 'wb') as f:
f.write(response.read())
except Exception as e:
raise Exception(f'Failed to download extension: {e}') | function_simple | 0 | {"cognitive_complexity": 1, "loc": 10, "code_loc": 7, "docstring_loc": 1, "function_name": "_download_extension", "class_name": "BrowserProfile", "qualname": "BrowserProfile._download_extension", "file_path": "browser_use/browser/profile.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "file_runnable"} |
browser-use/browser-use:tests/ci/test_multi_act_guards.py:TestTerminatesSequenceMetadata.test_navigate_terminates | # Context:
def http_server(): ...
def base_url(http_server): ...
async def browser_session(): ...
def tools(): ...
class TestStaticGuard: ...
class TestRuntimeGuard: ...
class TestSafeChain: ...
class TestTerminatesSequenceMetadata:
def test_search_terminates(self, tools): ...
def test_go_back_terminates(self, tools): ...
def test_switch_terminates(self, tools): ...
def test_click_does_not_terminate(self, tools): ...
def test_input_does_not_terminate(self, tools): ...
def test_scroll_does_not_terminate(self, tools): ...
def test_extract_does_not_terminate(self, tools): ...
def test_evaluate_terminates(self, tools): ...
# Task:
Write a Python test method `test_navigate_terminates` in test class `TestTerminatesSequenceMetadata` to verify the behavior of `navigate_terminates`.
Module under test: browser_use.agent.service, browser_use.browser, browser_use.browser.profile | def test_navigate_terminates(self, tools):
action = tools.registry.registry.actions.get('navigate')
assert action is not None
assert action.terminates_sequence is True | test | 0 | {"function_name": "test_navigate_terminates", "class_name": "TestTerminatesSequenceMetadata", "qualname": "TestTerminatesSequenceMetadata.test_navigate_terminates", "file_path": "tests/ci/test_multi_act_guards.py", "repo_id": "browser-use/browser-use", "loc": 4, "tested_modules": ["browser_use.agent.service", "browser_use.browser", "browser_use.browser.profile", "browser_use.tools.service", "tests.ci.conftest"], "has_docstring": false, "runnable_level": "file_runnable"} |
langflow-ai/langflow:src/backend/tests/unit/components/processing/test_text_operations_component.py:TestTextOperationsTail.test_tail_negative_characters | # Context:
import pytest
from lfx.components.processing.text_operations import TextOperations
class TestTextOperationsComponent(ComponentTestBaseWithoutClient): ...
class TestTextOperationsWordCount: ...
class TestTextOperationsCaseConversion: ...
class TestTextOperationsReplace: ...
class TestTextOperationsExtract: ...
class TestTextOperationsHead: ...
class TestTextOperationsStrip: ...
class TestTextOperationsJoin: ...
class TestTextOperationsClean: ...
class TestTextOperationsToDataFrame: ...
class TestTextOperationsUpdateBuildConfig: ...
class TestTextOperationsUpdateOutputs: ...
class TestTextOperationsOutputMethods: ...
class TestBugFixWordCountEmptyText: ...
class TestBugFixTextJoinEmptyFirst: ...
class TestBugFixTextStripTabs: ...
class TestBugFixDataFrameHeaderValidation: ...
class TestBugFixInputValidation: ...
class TestTextOperationsTail:
def test_tail_basic(self): ...
def test_tail_longer_than_text(self): ...
def test_tail_zero_characters(self): ...
# Task:
Write a Python test method `test_tail_negative_characters` in test class `TestTextOperationsTail` to test tail with negative characters raises ValueError (Bug #7 fix).
Module under test: lfx.components.processing.text_operations, lfx.schema.data, lfx.schema.dataframe | def test_tail_negative_characters(self):
"""Test tail with negative characters raises ValueError (Bug #7 fix)."""
component = TextOperations()
component.tail_characters = -5
with pytest.raises(ValueError, match="non-negative"):
component._text_tail("Hello") | test | 1 | {"function_name": "test_tail_negative_characters", "class_name": "TestTextOperationsTail", "qualname": "TestTextOperationsTail.test_tail_negative_characters", "file_path": "src/backend/tests/unit/components/processing/test_text_operations_component.py", "repo_id": "langflow-ai/langflow", "loc": 7, "tested_modules": ["lfx.components.processing.text_operations", "lfx.schema.data", "lfx.schema.dataframe", "lfx.schema.message", "tests.base"], "has_docstring": true, "runnable_level": "project_runnable"} |
huggingface/transformers:src/transformers/models/olmo_hybrid/convert_olmo_hybrid_weights_to_hf.py:get_layer_types_from_config | Write a Python function `get_layer_types_from_config` to determine the layer types (full_attention, linear_attention).
Parameters: olmo_config: dict
Returns: list[str] | def get_layer_types_from_config(olmo_config: dict) -> list[str]:
"""
Determine the layer types (full_attention, linear_attention)
from the OLMo config.
"""
model_config = olmo_config["model"]
block_config = model_config["block"]
n_layers = model_config["n_layers"]
fla_hybrid_attention_indices = block_config.get("fla_hybrid_attention_indices", [])
layer_types = []
for i in range(n_layers):
if i in fla_hybrid_attention_indices:
layer_types.append("full_attention")
else:
layer_types.append("linear_attention")
return layer_types | function_simple | 0 | {"cognitive_complexity": 4, "loc": 19, "code_loc": 11, "docstring_loc": 4, "function_name": "get_layer_types_from_config", "class_name": null, "qualname": "get_layer_types_from_config", "file_path": "src/transformers/models/olmo_hybrid/convert_olmo_hybrid_weights_to_hf.py", "repo_id": "huggingface/transformers", "has_docstring": true, "runnable_level": "self_contained"} |
crewAIInc/crewAI:lib/crewai/tests/llms/anthropic/test_anthropic.py:test_anthropic_thinking | # Context:
from unittest.mock import patch, MagicMock
import pytest
from crewai.llm import LLM
from crewai.llms.providers.anthropic.completion import AnthropicCompletion
from unittest.mock import patch
def mock_anthropic_api_key(): ...
def test_anthropic_completion_is_used_when_anthropic_provider(): ...
def test_anthropic_completion_is_used_when_claude_provider(): ...
def test_anthropic_completion_module_is_imported(): ...
def test_native_anthropic_raises_error_when_initialization_fails(): ...
def test_anthropic_completion_initialization_parameters(): ...
def test_anthropic_specific_parameters(): ...
def test_anthropic_completion_call(): ...
def test_anthropic_completion_called_during_crew_execution(): ...
def test_anthropic_completion_call_arguments(): ...
def test_multiple_anthropic_calls_in_crew(): ...
def test_anthropic_completion_with_tools(): ...
def test_anthropic_raises_error_when_model_not_supported(): ...
def test_anthropic_client_params_setup(): ...
def test_anthropic_client_params_override_defaults(): ...
def test_anthropic_client_params_none(): ...
def test_anthropic_client_params_empty_dict(): ...
def test_anthropic_model_detection(): ...
def test_anthropic_supports_stop_words(): ...
def test_anthropic_context_window_size(): ...
def test_anthropic_message_formatting(): ...
def test_anthropic_streaming_parameter(): ...
def test_anthropic_tool_conversion(): ...
def test_anthropic_environment_variable_api_key(): ...
def test_anthropic_token_usage_tracking(): ...
def test_anthropic_stop_sequences_sync(): ...
def test_anthropic_stop_sequences_sent_to_api(): ...
def test_anthropic_thinking_blocks_preserved_across_turns(): ...
def test_anthropic_function_calling(): ...
def test_anthropic_tool_execution_with_available_functions(): ...
def test_anthropic_tool_execution_returns_tool_result_directly(): ...
def test_anthropic_agent_kickoff_structured_output_without_tools(): ...
def test_anthropic_agent_kickoff_structured_output_with_tools(): ...
def test_anthropic_cached_prompt_tokens(): ...
def test_anthropic_streaming_cached_prompt_tokens(): ...
def test_anthropic_cached_prompt_tokens_with_tools(): ...
# Task:
Write a Python test function `test_anthropic_thinking` to test that thinking is properly handled and thinking params are passed to messages.create.
Module under test: crewai.llm, crewai.crew, crewai.agent | def test_anthropic_thinking():
"""Test that thinking is properly handled and thinking params are passed to messages.create"""
from unittest.mock import patch
from crewai.llms.providers.anthropic.completion import AnthropicCompletion
llm = LLM(
model="anthropic/claude-sonnet-4-5",
thinking={"type": "enabled", "budget_tokens": 5000},
max_tokens=10000
)
assert isinstance(llm, AnthropicCompletion)
original_create = llm.client.messages.create
captured_params = {}
def capture_and_call(**kwargs):
captured_params.update(kwargs)
return original_create(**kwargs)
with patch.object(llm.client.messages, 'create', side_effect=capture_and_call):
result = llm.call("What is the weather in Tokyo?")
assert result is not None
assert isinstance(result, str)
assert len(result) > 0
assert "thinking" in captured_params
assert captured_params["thinking"] == {"type": "enabled", "budget_tokens": 5000}
assert captured_params["model"] == "claude-sonnet-4-5"
assert captured_params["max_tokens"] == 10000
assert "messages" in captured_params
assert len(captured_params["messages"]) > 0 | test | 0 | {"function_name": "test_anthropic_thinking", "class_name": null, "qualname": "test_anthropic_thinking", "file_path": "lib/crewai/tests/llms/anthropic/test_anthropic.py", "repo_id": "crewAIInc/crewAI", "loc": 34, "tested_modules": ["crewai.llm", "crewai.crew", "crewai.agent", "crewai.task", "crewai.llms.providers.anthropic.completion"], "has_docstring": true, "runnable_level": "project_runnable"} |
vllm-project/vllm:tests/v1/metrics/test_perf_metrics.py:module_doc | Write a module-level docstring for the Python module `test_perf_metrics` which contains class `MockModelConfig`, function `create_mock_vllm_config`, function `test_base_config_parser`, function `test_base_attention_config_parser_with_gqa`, function `test_base_attention_config_parser_without_gqa`. | Tests for the analytic estimators in metrics/flops.py. | documentation | 1 | {"doc_type": "module", "module_name": "test_perf_metrics", "file_path": "tests/v1/metrics/test_perf_metrics.py", "repo_id": "vllm-project/vllm", "char_length": 54} |
ray-project/ray:python/ray/serve/task_processor.py:CeleryTaskProcessorAdapter.cancel_task_sync | # Context:
class CeleryTaskProcessorAdapter(TaskProcessorAdapter):
def __init__(self, config: TaskProcessorConfig, *args, **kwargs):
super().__init__(*args, **kwargs)
if not isinstance(config.adapter_config, CeleryAdapterConfig):
raise TypeError(
"TaskProcessorConfig.adapter_config must be an instance of CeleryAdapterConfig"
)
# Check if any app_custom_config keys conflict with default Celery app config
if config.adapter_config.app_custom_config:
conflicting_keys = set(
config.adapter_config.app_custom_config.keys()
) & set(CELERY_DEFAULT_APP_CONFIG)
if conflicting_keys:
raise ValueError(
f"The following configuration keys cannot be changed via app_custom_config: {sorted(conflicting_keys)}. "
f"These are managed internally by the CeleryTaskProcessorAdapter."
)
self._config = config
def initialize(self, consumer_concurrency: int): ...
def register_task_handle(self, func, name): ...
def enqueue_task_sync(self, task_name, args, kwargs, **options) -> TaskResult: ...
def get_task_status_sync(self, task_id) -> TaskResult: ...
def start_consumer(self, **kwargs): ...
def stop_consumer(self, timeout: float): ...
def get_metrics_sync(self) -> Dict[str, Any]: ...
def health_check_sync(self) -> List[Dict]: ...
def _handle_task_failure(self, sender: Any, task_id: str, args: Any, kwargs: Any, einfo: Any, **kw): ...
def _handle_unknown_task(self, sender: Any, name: str, id: str, message: Any, exc: Any, **kwargs): ...
def _move_task_to_queue(self, queue_name: str, task_name: str, args: list): ...
# Task:
Write a Python method `cancel_task_sync` for the class `CeleryTaskProcessorAdapter` to cancels a task synchronously. Only supported for Redis and RabbitMQ brokers by Celery.
Parameters: task_id | def cancel_task_sync(self, task_id):
"""
Cancels a task synchronously. Only supported for Redis and RabbitMQ brokers by Celery.
More details can be found here: https://docs.celeryq.dev/en/stable/userguide/workers.html#revoke-revoking-tasks
"""
self._app.control.revoke(task_id) | function_simple | 0 | {"cognitive_complexity": 0, "loc": 6, "code_loc": 1, "docstring_loc": 4, "function_name": "cancel_task_sync", "class_name": "CeleryTaskProcessorAdapter", "qualname": "CeleryTaskProcessorAdapter.cancel_task_sync", "file_path": "python/ray/serve/task_processor.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "class_runnable"} |
ray-project/ray:python/ray/_common/tests/test_signature.py:TestExtractSignature.test_method_with_ignore_first | # Context:
from ray._common.signature import (
DUMMY_TYPE,
extract_signature,
flatten_args,
get_signature,
recover_args,
validate_args,
)
class TestGetSignature: ...
class TestValidateArgs: ...
class TestFlattenArgs: ...
class TestRecoverArgs: ...
class TestIntegration: ...
class TestExtractSignature:
def test_function_without_ignore_first(self): ...
def test_function_with_ignore_first(self): ...
def test_empty_parameters_with_ignore_first(self): ...
def test_single_parameter_with_ignore_first(self): ...
def test_varargs_and_kwargs(self): ...
# Task:
Write a Python test method `test_method_with_ignore_first` in test class `TestExtractSignature` to test extracting signature from method ignoring 'self' parameter.
Module under test: typing, ray._common.signature | def test_method_with_ignore_first(self):
"""Test extracting signature from method ignoring 'self' parameter."""
class TestClass:
def test_method(self, a, b=20):
return a + b
params = extract_signature(TestClass.test_method, ignore_first=True)
assert len(params) == 2
assert params[0].name == "a"
assert params[1].name == "b"
assert params[1].default == 20 | test | 0 | {"function_name": "test_method_with_ignore_first", "class_name": "TestExtractSignature", "qualname": "TestExtractSignature.test_method_with_ignore_first", "file_path": "python/ray/_common/tests/test_signature.py", "repo_id": "ray-project/ray", "loc": 12, "tested_modules": ["typing", "ray._common.signature"], "has_docstring": true, "runnable_level": "file_runnable"} |
ray-project/ray:python/ray/data/namespace_expressions/string_namespace.py:_StringNamespace.extract | # Context:
from typing import TYPE_CHECKING, Any, Callable, Literal
import pyarrow.compute as pc
from ray.data.datatype import DataType
def _create_str_udf(pc_func: Callable[..., pyarrow.Array], return_dtype: DataType) -> Callable[..., 'UDFExpr']: ...
class _StringNamespace:
def len(self) -> 'UDFExpr': ...
def byte_len(self) -> 'UDFExpr': ...
def upper(self) -> 'UDFExpr': ...
def lower(self) -> 'UDFExpr': ...
def capitalize(self) -> 'UDFExpr': ...
def title(self) -> 'UDFExpr': ...
def swapcase(self) -> 'UDFExpr': ...
def is_alpha(self) -> 'UDFExpr': ...
def is_alnum(self) -> 'UDFExpr': ...
def is_digit(self) -> 'UDFExpr': ...
def is_decimal(self) -> 'UDFExpr': ...
def is_numeric(self) -> 'UDFExpr': ...
def is_space(self) -> 'UDFExpr': ...
def is_lower(self) -> 'UDFExpr': ...
def is_upper(self) -> 'UDFExpr': ...
def is_title(self) -> 'UDFExpr': ...
def is_printable(self) -> 'UDFExpr': ...
def is_ascii(self) -> 'UDFExpr': ...
def starts_with(self, pattern: str, *args, **kwargs) -> 'UDFExpr': ...
def ends_with(self, pattern: str, *args, **kwargs) -> 'UDFExpr': ...
def contains(self, pattern: str, *args, **kwargs) -> 'UDFExpr': ...
def match(self, pattern: str, *args, **kwargs) -> 'UDFExpr': ...
def find(self, pattern: str, *args, **kwargs) -> 'UDFExpr': ...
def count(self, pattern: str, *args, **kwargs) -> 'UDFExpr': ...
def find_regex(self, pattern: str, *args, **kwargs) -> 'UDFExpr': ...
def count_regex(self, pattern: str, *args, **kwargs) -> 'UDFExpr': ...
def match_regex(self, pattern: str, *args, **kwargs) -> 'UDFExpr': ...
def reverse(self) -> 'UDFExpr': ...
def slice(self, *args, **kwargs) -> 'UDFExpr': ...
def replace(self, pattern: str, replacement: str, *args, **kwargs) -> 'UDFExpr': ...
def replace_regex(self, pattern: str, replacement: str, *args, **kwargs) -> 'UDFExpr': ...
def replace_slice(self, start: int, stop: int, replacement: str, *args, **kwargs) -> 'UDFExpr': ...
def split(self, pattern: str, *args, **kwargs) -> 'UDFExpr': ...
def split_regex(self, pattern: str, *args, **kwargs) -> 'UDFExpr': ...
def split_whitespace(self, *args, **kwargs) -> 'UDFExpr': ...
def repeat(self, n: int, *args, **kwargs) -> 'UDFExpr': ...
def center(self, width: int, padding: str, *args, **kwargs) -> 'UDFExpr': ...
def lpad(self, width: int, padding: str, *args, **kwargs) -> 'UDFExpr': ...
def rpad(self, width: int, padding: str, *args, **kwargs) -> 'UDFExpr': ...
def strip(self, characters: str | None) -> 'UDFExpr': ...
def lstrip(self, characters: str | None) -> 'UDFExpr': ...
def rstrip(self, characters: str | None) -> 'UDFExpr': ...
def pad(self, width: int, fillchar: str, side: Literal['left', 'right', 'both']) -> 'UDFExpr': ...
# Task:
Write a Python method `extract` for the class `_StringNamespace` to extract a substring matching a regex pattern.
Parameters: pattern: str
Returns: 'UDFExpr' | def extract(self, pattern: str, *args: Any, **kwargs: Any) -> "UDFExpr":
"""Extract a substring matching a regex pattern."""
return _create_str_udf(pc.extract_regex, DataType.string())(
self._expr, pattern, *args, **kwargs
) | function_simple | 0 | {"cognitive_complexity": 0, "loc": 5, "code_loc": 3, "docstring_loc": 1, "function_name": "extract", "class_name": "_StringNamespace", "qualname": "_StringNamespace.extract", "file_path": "python/ray/data/namespace_expressions/string_namespace.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "project_runnable"} |
github/spec-kit:src/specify_cli/extensions.py:ExtensionManifest._load_yaml | # Context:
from pathlib import Path
import yaml
class ExtensionError(Exception): ...
class ValidationError(ExtensionError): ...
class CompatibilityError(ExtensionError): ...
class ExtensionRegistry: ...
class ExtensionManager: ...
def version_satisfies(current: str, required: str) -> bool: ...
class CommandRegistrar: ...
class ExtensionCatalog: ...
class ConfigManager: ...
class HookExecutor: ...
class ExtensionManifest:
SCHEMA_VERSION = "1.0"
REQUIRED_FIELDS = ["schema_version", "extension", "requires", "provides"]
def __init__(self, manifest_path: Path):
"""Load and validate extension manifest.
Args:
manifest_path: Path to extension.yml file
Raises:
ValidationError: If manifest is invalid
"""
self.path = manifest_path
self.data = self._load_yaml(manifest_path)
self._validate()
def _validate(self): ...
def id(self) -> str: ...
def name(self) -> str: ...
def version(self) -> str: ...
def description(self) -> str: ...
def requires_speckit_version(self) -> str: ...
def commands(self) -> List[Dict[str, Any]]: ...
def hooks(self) -> Dict[str, Any]: ...
def get_hash(self) -> str: ...
# Task:
Write a Python method `_load_yaml` for the class `ExtensionManifest` to load YAML file safely.
Parameters: path: Path
Returns: dict | def _load_yaml(self, path: Path) -> dict:
"""Load YAML file safely."""
try:
with open(path, 'r') as f:
return yaml.safe_load(f) or {}
except yaml.YAMLError as e:
raise ValidationError(f"Invalid YAML in {path}: {e}")
except FileNotFoundError:
raise ValidationError(f"Manifest not found: {path}") | function_simple | 0 | {"cognitive_complexity": 3, "loc": 9, "code_loc": 7, "docstring_loc": 1, "function_name": "_load_yaml", "class_name": "ExtensionManifest", "qualname": "ExtensionManifest._load_yaml", "file_path": "src/specify_cli/extensions.py", "repo_id": "github/spec-kit", "has_docstring": true, "runnable_level": "file_runnable"} |
huggingface/transformers:src/transformers/models/dinov3_convnext/convert_dinov3_convnext_to_hf.py:convert_old_keys_to_new_keys | # Context:
import re
def get_dinov3_config(model_name: str) -> DINOv3ConvNextConfig: ...
def prepare_img(): ...
def get_transform(resize_size: int): ...
def get_image_processor(resize_size: int): ...
def convert_and_test_dinov3_checkpoint(args): ...
# Task:
Write a Python function `convert_old_keys_to_new_keys` to this function should be applied only once, on the concatenated keys to efficiently rename using.
Parameters: state_dict_keys: dict | None | def convert_old_keys_to_new_keys(state_dict_keys: dict | None = None):
"""
This function should be applied only once, on the concatenated keys to efficiently rename using
the key mappings.
"""
output_dict = {}
if state_dict_keys is not None:
old_text = "\n".join(state_dict_keys)
new_text = old_text
for pattern, replacement in ORIGINAL_TO_CONVERTED_KEY_MAPPING.items():
if replacement is None:
new_text = re.sub(pattern, "", new_text) # an empty line
continue
new_text = re.sub(pattern, replacement, new_text)
output_dict = dict(zip(old_text.split("\n"), new_text.split("\n")))
return output_dict | function_complex | 0 | {"cognitive_complexity": 6, "loc": 16, "code_loc": 11, "docstring_loc": 4, "function_name": "convert_old_keys_to_new_keys", "class_name": null, "qualname": "convert_old_keys_to_new_keys", "file_path": "src/transformers/models/dinov3_convnext/convert_dinov3_convnext_to_hf.py", "repo_id": "huggingface/transformers", "has_docstring": true, "runnable_level": "file_runnable"} |
ray-project/ray:python/ray/data/tests/test_map_batches.py:test_map_batches_async_exception_propagation | # Context:
import pytest
import ray
def process_timestamp_data(row): ...
def process_timestamp_data_batch_arrow(batch: pa.Table) -> pa.Table: ...
def process_timestamp_data_batch_pandas(batch: pd.DataFrame) -> pd.DataFrame: ...
def test_map_batches_basic(ray_start_regular_shared, tmp_path, restore_data_context, target_max_block_size_infinite_or_default): ...
def test_map_batches_extra_args(shutdown_only, tmp_path, target_max_block_size_infinite_or_default): ...
def test_map_with_memory_resources(method, shutdown_only, target_max_block_size_infinite_or_default): ...
def test_map_batches_generator(ray_start_regular_shared, tmp_path, target_max_block_size_infinite_or_default): ...
def test_map_batches_actors_preserves_order(shutdown_only, target_max_block_size_infinite_or_default): ...
def test_map_batches_batch_mutation(ray_start_regular_shared, num_rows, num_blocks, batch_size, restore_data_context, target_max_block_size_infinite_or_default): ...
def test_map_batches_batch_zero_copy(ray_start_regular_shared, num_rows, num_blocks, batch_size, target_max_block_size_infinite_or_default): ...
def test_map_batches_block_bundling_auto(ray_start_regular_shared, block_size, batch_size, target_max_block_size_infinite_or_default): ...
def test_map_batches_block_bundling_skewed_manual(ray_start_regular_shared, block_sizes, batch_size, expected_num_blocks, target_max_block_size_infinite_or_default): ...
def test_map_batches_block_bundling_skewed_auto(ray_start_regular_shared, block_sizes, batch_size, target_max_block_size_infinite_or_default): ...
def test_map_batches_preserve_empty_blocks(ray_start_regular_shared, target_max_block_size_infinite_or_default): ...
def test_map_batches_combine_empty_blocks(ray_start_regular_shared, target_max_block_size_infinite_or_default): ...
def test_map_batches_timestamp_nanosecs(df, expected_df, ray_start_regular_shared, target_max_block_size_infinite_or_default): ...
def test_map_batches_async_generator_fast_yield(shutdown_only, target_max_block_size_infinite_or_default): ...
def test_map_batches_struct_field_type_divergence(shutdown_only): ...
# Task:
Write a Python test function `test_map_batches_async_exception_propagation` to verify the behavior of `map_batches_async_exception_propagation`.
Module under test: typing, ray.data._internal.arrow_ops.transform_pyarrow, ray.data._internal.utils.arrow_utils | def test_map_batches_async_exception_propagation(shutdown_only):
ray.shutdown()
ray.init(num_cpus=2)
class MyUDF:
def __init__(self):
pass
async def __call__(self, batch):
# This will trigger an assertion error.
assert False
yield batch
ds = ray.data.range(20)
ds = ds.map_batches(MyUDF, concurrency=2)
with pytest.raises(ray.exceptions.RayTaskError) as exc_info:
ds.materialize()
assert "AssertionError" in str(exc_info.value)
assert "assert False" in str(exc_info.value) | test | 0 | {"function_name": "test_map_batches_async_exception_propagation", "class_name": null, "qualname": "test_map_batches_async_exception_propagation", "file_path": "python/ray/data/tests/test_map_batches.py", "repo_id": "ray-project/ray", "loc": 21, "tested_modules": ["typing", "ray.data._internal.arrow_ops.transform_pyarrow", "ray.data._internal.utils.arrow_utils", "ray.data.context", "ray.data.dataset"], "has_docstring": false, "runnable_level": "project_runnable"} |
ray-project/ray:doc/source/ray-overview/examples/multi_agent_a2a/ci/nb2sh.py:_extract_config_path | # Context:
def _is_shell_cell(source: str) -> bool: ...
def _extract_bash_fences(source: str) -> list[str]: ...
def _postprocess(lines: list[str]) -> list[str]: ...
def nb2sh(notebook_path: str, output_path: str) -> None: ...
# Task:
Write a Python function `_extract_config_path` to return the config file path from deploy-style CLI flags.
Parameters: flags: str
Returns: str | def _extract_config_path(flags: str) -> str:
"""Return the config file path from deploy-style CLI flags."""
m = _CONFIG_PATH_RE.search(flags)
return m.group(1) if m else flags.strip() | function_simple | 0 | {"cognitive_complexity": 1, "loc": 4, "code_loc": 2, "docstring_loc": 1, "function_name": "_extract_config_path", "class_name": null, "qualname": "_extract_config_path", "file_path": "doc/source/ray-overview/examples/multi_agent_a2a/ci/nb2sh.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "file_runnable"} |
crewAIInc/crewAI:lib/crewai/src/crewai/a2a/updates/push_notifications/handler.py:module_doc | Write a module-level docstring for the Python module `handler` which contains function `_handle_push_error`, class `PushNotificationHandler`. | Push notification (webhook) update mechanism handler. | documentation | 0 | {"doc_type": "module", "module_name": "handler", "file_path": "lib/crewai/src/crewai/a2a/updates/push_notifications/handler.py", "repo_id": "crewAIInc/crewAI", "char_length": 53} |
browser-use/browser-use:browser_use/llm/groq/serializer.py:GroqMessageSerializer.serialize | # Context:
from groq.types.chat import (
ChatCompletionAssistantMessageParam,
ChatCompletionContentPartImageParam,
ChatCompletionContentPartTextParam,
ChatCompletionMessageParam,
ChatCompletionMessageToolCallParam,
ChatCompletionSystemMessageParam,
ChatCompletionUserMessageParam,
)
from browser_use.llm.messages import (
AssistantMessage,
BaseMessage,
ContentPartImageParam,
ContentPartRefusalParam,
ContentPartTextParam,
SystemMessage,
ToolCall,
UserMessage,
)
class GroqMessageSerializer:
def _serialize_content_part_text(part: ContentPartTextParam) -> ChatCompletionContentPartTextParam: ...
def _serialize_content_part_image(part: ContentPartImageParam) -> ChatCompletionContentPartImageParam: ...
def _serialize_user_content(content: str | list[ContentPartTextParam | ContentPartImageParam]) -> str | list[ChatCompletionContentPartTextParam | ChatCompletionContentPartImageParam]: ...
def _serialize_system_content(content: str | list[ContentPartTextParam]) -> str: ...
def _serialize_assistant_content(content: str | list[ContentPartTextParam | ContentPartRefusalParam] | None) -> str | None: ...
def _serialize_tool_call(tool_call: ToolCall) -> ChatCompletionMessageToolCallParam: ...
def serialize(message: UserMessage) -> ChatCompletionUserMessageParam: ...
def serialize(message: SystemMessage) -> ChatCompletionSystemMessageParam: ...
def serialize(message: AssistantMessage) -> ChatCompletionAssistantMessageParam: ...
def serialize_messages(messages: list[BaseMessage]) -> list[ChatCompletionMessageParam]: ...
# Task:
Write a Python method `serialize` for the class `GroqMessageSerializer` to serialize a custom message to an OpenAI message param.
Parameters: message: BaseMessage
Returns: ChatCompletionMessageParam | def serialize(message: BaseMessage) -> ChatCompletionMessageParam:
"""Serialize a custom message to an OpenAI message param."""
if isinstance(message, UserMessage):
user_result: ChatCompletionUserMessageParam = {
'role': 'user',
'content': GroqMessageSerializer._serialize_user_content(message.content),
}
if message.name is not None:
user_result['name'] = message.name
return user_result
elif isinstance(message, SystemMessage):
system_result: ChatCompletionSystemMessageParam = {
'role': 'system',
'content': GroqMessageSerializer._serialize_system_content(message.content),
}
if message.name is not None:
system_result['name'] = message.name
return system_result
elif isinstance(message, AssistantMessage):
# Handle content serialization
content = None
if message.content is not None:
content = GroqMessageSerializer._serialize_assistant_content(message.content)
assistant_result: ChatCompletionAssistantMessageParam = {'role': 'assistant'}
# Only add content if it's not None
if content is not None:
assistant_result['content'] = content
if message.name is not None:
assistant_result['name'] = message.name
if message.tool_calls:
assistant_result['tool_calls'] = [GroqMessageSerializer._serialize_tool_call(tc) for tc in message.tool_calls]
return assistant_result
else:
raise ValueError(f'Unknown message type: {type(message)}') | function_complex | 0 | {"cognitive_complexity": 16, "loc": 43, "code_loc": 30, "docstring_loc": 1, "function_name": "serialize", "class_name": "GroqMessageSerializer", "qualname": "GroqMessageSerializer.serialize", "file_path": "browser_use/llm/groq/serializer.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/data/tests/test_dataset_limits.py:test_limit_no_redundant_read | # Context:
import sys
import pandas as pd
import pyarrow as pa
import pytest
import ray
from ray.data.block import BlockMetadata
from ray.data.datasource.datasource import Datasource, ReadTask
def test_limit_execution(ray_start_regular): ...
def test_limit(ray_start_regular_shared, lazy): ...
def test_limit_no_num_row_info(ray_start_regular_shared): ...
def test_per_task_row_limit_basic(ray_start_regular_shared, restore_data_context): ...
def test_per_task_row_limit_with_custom_readtask(ray_start_regular_shared): ...
def test_per_task_row_limit_multiple_blocks_per_task(ray_start_regular_shared): ...
def test_per_task_row_limit_larger_than_data(ray_start_regular_shared, restore_data_context): ...
def test_per_task_row_limit_exact_block_boundary(ray_start_regular_shared, restore_data_context): ...
def test_per_task_row_limit_various_sizes(ray_start_regular_shared, limit, restore_data_context): ...
def test_per_task_row_limit_with_transformations(ray_start_regular_shared, restore_data_context): ...
def test_per_task_row_limit_with_filter(ray_start_regular_shared, restore_data_context): ...
def test_per_task_row_limit_readtask_properties(ray_start_regular_shared): ...
def test_per_task_row_limit_edge_cases(ray_start_regular_shared, restore_data_context): ...
# Task:
Write a Python test function `test_limit_no_redundant_read` to verify the behavior of `limit_no_redundant_read`.
Module under test: ray.data.block, ray.data.context, ray.data.datasource.datasource | def test_limit_no_redundant_read(
ray_start_regular_shared,
limit,
):
# Test that dataset truncation eliminates redundant reads.
@ray.remote
class Counter:
def __init__(self):
self.count = 0
def increment(self):
self.count += 1
def get(self):
return self.count
def reset(self):
self.count = 0
class CountingRangeDatasource(Datasource):
def __init__(self):
self.counter = Counter.remote()
def prepare_read(self, parallelism, n):
def range_(i):
ray.get(self.counter.increment.remote())
return [
pd.DataFrame({"id": range(parallelism * i, parallelism * i + n)})
]
return [
ReadTask(
lambda i=i: range_(i),
BlockMetadata(
num_rows=n,
size_bytes=sum(
sys.getsizeof(i)
for i in range(parallelism * i, parallelism * i + n)
),
input_files=None,
exec_stats=None,
),
schema=pa.lib.Schema.from_pandas(pd.DataFrame({"id": []})),
)
for i in range(parallelism)
]
source = CountingRangeDatasource()
total_rows = 1000
override_num_blocks = 100
ds = ray.data.read_datasource(
source,
override_num_blocks=override_num_blocks,
n=total_rows // override_num_blocks,
)
# Apply multiple limit ops.
# Once the smallest limit is reached, the entire dataset should stop execution.
ds = ds.limit(total_rows)
ds = ds.limit(limit)
ds = ds.limit(total_rows)
# Check content.
assert len(ds.take(limit)) == limit
# Check number of read tasks launched.
# min_read_tasks is the minimum number of read tasks needed for the limit.
# We may launch more tasks than this number, in order to to maximize throughput.
# But the actual number of read tasks should be less than the parallelism.
count = ray.get(source.counter.get.remote())
min_read_tasks = limit // (total_rows // override_num_blocks)
assert min_read_tasks <= count < override_num_blocks | test | 0 | {"function_name": "test_limit_no_redundant_read", "class_name": null, "qualname": "test_limit_no_redundant_read", "file_path": "python/ray/data/tests/test_dataset_limits.py", "repo_id": "ray-project/ray", "loc": 70, "tested_modules": ["ray.data.block", "ray.data.context", "ray.data.datasource.datasource", "ray.data.tests.conftest", "ray.data.tests.conftest"], "has_docstring": false, "runnable_level": "project_runnable"} |
commaai/openpilot:selfdrive/ui/mici/onroad/torque_bar.py:arc_bar_pts | # Context:
import math
import time
import numpy as np
import pyray as rl
def quantized_lru_cache(maxsize): ...
class TorqueBar(Widget): ...
# Task:
Write a Python function `arc_bar_pts` to return Nx2 np.float32 points for a single closed polygon (rounded thick arc).
Parameters: cx: float, cy: float, r_mid: float, thickness: float, a0_deg: float, a1_deg: float
def arc_bar_pts(cx: float, cy: float,
                r_mid: float, thickness: float,
                a0_deg: float, a1_deg: float,
                *, max_points: int = 100, cap_segs: int = 10,
                cap_radius: float = 7, px_per_seg: float = 2.0) -> np.ndarray:
  """Return Nx2 np.float32 points for a single closed polygon (rounded thick arc)."""

  def get_cap(left: bool, a_deg: float):
    # Rounded end cap at a_deg: two quarter-circles of fixed pixel radius
    # cap_radius (outer corner + inner corner), joined in polygon order.
    nx, ny = math.cos(math.radians(a_deg)), math.sin(math.radians(a_deg))  # outward normal
    tx, ty = -ny, nx  # tangent (CCW)
    mx, my = cx + nx * r_mid, cy + ny * r_mid  # bar mid-point at a_deg
    if DEBUG:
      rl.draw_circle(int(mx), int(my), 4, rl.PURPLE)
    # Center of the outer quarter sits cap_radius inside the outer edge.
    ex = mx + nx * (half - cap_radius)
    ey = my + ny * (half - cap_radius)
    if DEBUG:
      rl.draw_circle(int(ex), int(ey), 2, rl.WHITE)
    # Outer quarter: 90 deg sweep in the local (t, n) frame; endpoints are
    # skipped so adjacent segments don't duplicate points.
    if not left:
      alpha = np.deg2rad(np.linspace(90, 0, cap_segs + 2))[1:-1]
    else:
      alpha = np.deg2rad(np.linspace(180, 90, cap_segs + 2))[1:-1]
    cap_end = np.c_[ex + np.cos(alpha) * cap_radius * tx + np.sin(alpha) * cap_radius * nx,
                    ey + np.cos(alpha) * cap_radius * ty + np.sin(alpha) * cap_radius * ny]
    # Inner quarter, centered cap_radius inside the inner edge.
    ex2 = mx + nx * (-half + cap_radius)
    ey2 = my + ny * (-half + cap_radius)
    if DEBUG:
      rl.draw_circle(int(ex2), int(ey2), 2, rl.WHITE)
    if not left:
      alpha2 = np.deg2rad(np.linspace(0, -90, cap_segs + 1))[:-1]  # include 0 once, exclude -90
    else:
      alpha2 = np.deg2rad(np.linspace(-90, -180, cap_segs + 1))[:-1]
    cap_end_bot = np.c_[ex2 + np.cos(alpha2) * cap_radius * tx + np.sin(alpha2) * cap_radius * nx,
                        ey2 + np.cos(alpha2) * cap_radius * ty + np.sin(alpha2) * cap_radius * ny]
    # Stack the two quarters in the order the closed polygon walks them.
    if not left:
      cap_end = np.vstack((cap_end, cap_end_bot))
    else:
      cap_end = np.vstack((cap_end_bot, cap_end))
    return cap_end

  # Normalize so a0 <= a1.
  if a1_deg < a0_deg:
    a0_deg, a1_deg = a1_deg, a0_deg
  half = thickness * 0.5
  cap_radius = min(cap_radius, half)
  span = max(1e-3, a1_deg - a0_deg)
  # Pick arc segment count from arc length, clamped to the shader points[] budget.
  arc_len = r_mid * math.radians(span)
  arc_segs = max(6, int(arc_len / px_per_seg))
  max_arc = (max_points - (4 * cap_segs + 3)) // 2
  arc_segs = max(6, min(arc_segs, max_arc))
  # Outer arc a0 -> a1.
  ang_o = np.deg2rad(np.linspace(a0_deg, a1_deg, arc_segs + 1))
  outer = np.c_[cx + np.cos(ang_o) * (r_mid + half),
                cy + np.sin(ang_o) * (r_mid + half)]
  # End cap at a1.
  cap_end = get_cap(False, a1_deg)
  # Inner arc a1 -> a0.
  ang_i = np.deg2rad(np.linspace(a1_deg, a0_deg, arc_segs + 1))
  inner = np.c_[cx + np.cos(ang_i) * (r_mid - half),
                cy + np.sin(ang_i) * (r_mid - half)]
  # Start cap at a0.
  cap_start = get_cap(True, a0_deg)
  pts = np.vstack((outer, cap_end, inner, cap_start, outer[:1])).astype(np.float32)
  # Rotate so the polygon starts mid-cap, which triangulates cleanly.
  pts = np.roll(pts, cap_segs, axis=0)
  if DEBUG:
    n = len(pts)
    idx = int(time.monotonic() * 12) % max(1, n)  # speed: 12 pts/sec
    for i, (x, y) in enumerate(pts):
      j = (i - idx) % n  # rotate the gradient
      t = j / n
      color = rl.Color(255, int(255 * (1 - t)), int(255 * t), 255)
      rl.draw_circle(int(x), int(y), 2, color)
  return pts
langflow-ai/langflow:src/lfx/tests/unit/run/test_base.py:TestRunFlowPythonScript.test_no_graph_variable_raises_error | # Context:
import pytest
from lfx.run.base import RunError, output_error, run_flow
class TestRunError: ...
class TestOutputError: ...
class TestRunFlowInputValidation: ...
class TestRunFlowFileValidation: ...
class TestRunFlowJsonInput: ...
class TestRunFlowStdinInput: ...
class TestRunFlowGlobalVariables: ...
class TestRunFlowOutputFormats: ...
class TestRunFlowTiming: ...
class TestRunFlowVerbosity: ...
class TestRunFlowVariableValidation: ...
class TestRunFlowInputValueHandling: ...
class TestRunFlowJsonFileExecution: ...
class TestRunFlowEnvironmentIntegration: ...
class TestRunFlowExecutionErrors: ...
class TestRunFlowPythonScript:
def valid_script(self, tmp_path): ...
def no_graph_script(self, tmp_path): ...
# Task:
Write a Python test method `test_no_graph_variable_raises_error` in test class `TestRunFlowPythonScript` to test that a script without graph variable raises RunError.
async def test_no_graph_variable_raises_error(self, no_graph_script):
    """A script that never defines a ``graph`` variable must fail with RunError."""
    with pytest.raises(RunError) as exc_info:
        await run_flow(script_path=no_graph_script)
    message = str(exc_info.value)
    assert "No 'graph' variable found" in message
browser-use/browser-use:browser_use/skills/service.py:SkillService.__init__ | # Context:
import os
from typing import Any, Literal
from browser_use_sdk import AsyncBrowserUse
from browser_use.skills.views import (
MissingCookieException,
Skill,
)
class SkillService:
async def async_init(self) -> None: ...
async def get_skill(self, skill_id: str) -> Skill | None: ...
async def get_all_skills(self) -> list[Skill]: ...
async def execute_skill(self, skill_id: str, parameters: dict[str, Any] | BaseModel, cookies: list[Cookie]) -> ExecuteSkillResponse: ...
async def close(self) -> None: ...
# Task:
Write a Python method `__init__` for the class `SkillService` to initialize the skills service.
def __init__(self, skill_ids: list[str | Literal['*']], api_key: str | None = None):
    """Initialize the skills service.

    Args:
        skill_ids: Skill IDs to fetch and cache, or ['*'] to fetch every available skill.
        api_key: Browser Use API key; falls back to the BROWSER_USE_API_KEY env var.
    """
    self.skill_ids = skill_ids
    # Explicit key wins; otherwise read the environment, defaulting to ''.
    self.api_key = api_key or os.getenv('BROWSER_USE_API_KEY') or ''
    if not self.api_key:
        raise ValueError('BROWSER_USE_API_KEY environment variable is not set')
    # Cache of fetched skills, keyed by skill ID; populated lazily.
    self._skills: dict[str, Skill] = {}
    # SDK client, created on first async initialization.
    self._client: AsyncBrowserUse | None = None
    self._initialized = False
unclecode/crawl4ai:docs/examples/link_head_extraction_example.py:module_doc | Write a module-level docstring for the Python module `link_head_extraction_example` which contains various utilities. | Link Head Extraction & Scoring Example
This example demonstrates Crawl4AI's advanced link analysis capabilities:
1. Basic link head extraction
2. Three-layer scoring system (intrinsic, contextual, total)
3. Pattern-based filtering
4. Multiple practical use cases
Requirements:
- crawl4ai installed
- Internet connection
Usage:
python link_head_extraction_example.py | documentation | 1 | {"doc_type": "module", "module_name": "link_head_extraction_example", "file_path": "docs/examples/link_head_extraction_example.py", "repo_id": "unclecode/crawl4ai", "char_length": 372} |
ray-project/ray:python/ray/train/v2/tests/test_config.py:test_storage_filesystem_repr | # Context:
from ray.train import RunConfig, ScalingConfig
def test_scaling_config_validation(): ...
def test_scaling_config_accelerator_type(): ...
def test_scaling_config_tpu_min_workers_multiple(): ...
def test_scaling_config_default_workers(): ...
# Task:
Write a Python test function `test_storage_filesystem_repr` to test for https://github.com/ray-project/ray/pull/40851.
Module under test: ray.train | def test_storage_filesystem_repr():
"""Test for https://github.com/ray-project/ray/pull/40851"""
config = RunConfig(storage_filesystem=pyarrow.fs.S3FileSystem())
repr(config) | test | 0 | {"function_name": "test_storage_filesystem_repr", "class_name": null, "qualname": "test_storage_filesystem_repr", "file_path": "python/ray/train/v2/tests/test_config.py", "repo_id": "ray-project/ray", "loc": 4, "tested_modules": ["ray.train"], "has_docstring": true, "runnable_level": "file_runnable"} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.