Dataset columns:
- sample_id (string)
- instruction (string)
- completion (string)
- category (string)
- label (int64)
- metadata (string)
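A minimal sketch of reading one split of a dump with this schema, assuming it is published as a Hugging Face `datasets` dataset; the dataset id "org/code-samples" is a hypothetical placeholder:

from datasets import load_dataset

# Load the split and index a row; each row carries the six columns listed above.
ds = load_dataset("org/code-samples", split="test")  # hypothetical dataset id
row = ds[0]
print(row["sample_id"], row["category"], row["label"])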
ray-project/ray:python/ray/data/tests/test_dataset_limits.py:test_per_task_row_limit_with_custom_readtask
# Context: import pandas as pd import pyarrow as pa from ray.data.block import BlockMetadata from ray.data.datasource.datasource import Datasource, ReadTask def test_limit_execution(ray_start_regular): ... def test_limit(ray_start_regular_shared, lazy): ... def test_limit_no_redundant_read(ray_start_regular_shared, li...
def test_per_task_row_limit_with_custom_readtask(ray_start_regular_shared):
    """Test per-block limiting directly with ReadTask implementation."""
    def read_data_with_limit():
        # This simulates a ReadTask that reads 200 rows
        return [pd.DataFrame({"id": range(200)})]
    # Create ReadTask with per-...
test
0
{"function_name": "test_per_task_row_limit_with_custom_readtask", "class_name": null, "qualname": "test_per_task_row_limit_with_custom_readtask", "file_path": "python/ray/data/tests/test_dataset_limits.py", "repo_id": "ray-project/ray", "loc": 24, "tested_modules": ["ray.data.block", "ray.data.context", "ray.data.datas...
crewAIInc/crewAI:lib/crewai/tests/llms/openai/test_openai.py:test_extra_arguments_are_passed_to_openai_completion
# Context: from unittest.mock import patch, MagicMock from crewai.llm import LLM def test_openai_completion_is_used_when_openai_provider(): ... def test_openai_completion_is_used_when_no_provider_prefix(): ... def test_openai_is_default_provider_without_explicit_llm_set_on_agent(): ... def test_openai_completion_modul...
def test_extra_arguments_are_passed_to_openai_completion():
    """
    Test that extra arguments are passed to OpenAICompletion
    """
    llm = LLM(model="gpt-4o", temperature=0.7, max_tokens=1000, top_p=0.5, max_retries=3)
    with patch.object(llm.client.chat.completions, 'create') as mock_create:
        mock_cr...
test
0
{"function_name": "test_extra_arguments_are_passed_to_openai_completion", "class_name": null, "qualname": "test_extra_arguments_are_passed_to_openai_completion", "file_path": "lib/crewai/tests/llms/openai/test_openai.py", "repo_id": "crewAIInc/crewAI", "loc": 21, "tested_modules": ["typing", "crewai.llm", "crewai.llms....
Zie619/n8n-workflows:src/user_management.py:update_user
# Context: from fastapi import FastAPI, HTTPException, Depends, status class User(BaseModel): ... class UserCreate(BaseModel): ... class UserLogin(BaseModel): ... class UserUpdate(BaseModel): ... class Token(BaseModel): ... class UserManager: ... def get_current_user(credentials: HTTPAuthorizationCredentials) -> User:...
async def update_user(
    user_id: int,
    update_data: UserUpdate,
    current_user: User = Depends(get_current_user),
):
    """Update user data."""
    # Users can only update their own profile unless they're admin
    if current_user.id != user_id and current_user.role != "admin":
        raise HTTPException(stat...
function_simple
0
{"cognitive_complexity": 5, "loc": 19, "code_loc": 8, "docstring_loc": 1, "function_name": "update_user", "class_name": null, "qualname": "update_user", "file_path": "src/user_management.py", "repo_id": "Zie619/n8n-workflows", "has_docstring": true, "runnable_level": "file_runnable"}
huggingface/transformers:tests/models/dia/test_modeling_dia.py:DiaModelTest.test_sdpa_can_dispatch_composite_models
# Context: import tempfile from transformers import ( DiaForConditionalGeneration, DiaModel, DiaProcessor, PreTrainedConfig, PreTrainedModel, ) class DiaModelTester: ... class DiaForConditionalGenerationIntegrationTest(unittest.TestCase): ... class DiaModelTest(ModelTesterM...
def test_sdpa_can_dispatch_composite_models(self):
    """
    Overwritten as it relies on hardcoded namings atm - checking for our case here specifically
    """
    for model_class in self.all_model_classes:
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        m...
test
0
{"function_name": "test_sdpa_can_dispatch_composite_models", "class_name": "DiaModelTest", "qualname": "DiaModelTest.test_sdpa_can_dispatch_composite_models", "file_path": "tests/models/dia/test_modeling_dia.py", "repo_id": "huggingface/transformers", "loc": 32, "tested_modules": ["transformers.models.dia", "transforme...
Zie619/n8n-workflows:src/performance_monitor.py:PerformanceMonitor._check_alerts
# Context: class PerformanceMetrics(BaseModel): ... class Alert(BaseModel): ... async def get_current_metrics(): ... async def get_historical_metrics(hours: int): ... async def get_alerts(): ... async def resolve_alert(alert_id: str): ... async def websocket_endpoint(websocket: WebSocket): ... async def get_monitoring...
def _check_alerts(self, metrics: PerformanceMetrics):
    """Check metrics against alert thresholds."""
    # CPU alert
    if metrics.cpu_usage > 80:
        self._create_alert(
            "high_cpu", "warning", f"High CPU usage: {metrics.cpu_usage}%"
        )
    # Memory alert
    ...
function_complex
0
{"cognitive_complexity": 7, "loc": 34, "code_loc": 23, "docstring_loc": 1, "function_name": "_check_alerts", "class_name": "PerformanceMonitor", "qualname": "PerformanceMonitor._check_alerts", "file_path": "src/performance_monitor.py", "repo_id": "Zie619/n8n-workflows", "has_docstring": true, "runnable_level": "file_ru...
vllm-project/vllm:tests/lora/test_gptoss_tp.py:test_gpt_oss_lora
# Context: import pytest import vllm def generate_and_test(llm: vllm.LLM, lora_path: str, lora_id: int) -> None: ... def test_gpt_oss_lora_tp2(monkeypatch: pytest.MonkeyPatch, gptoss20b_lora_files, fully_sharded_loras, mxfp4_use_marlin): ... # Task: Write a Python test function `test_gpt_oss_lora` to verify the behav...
def test_gpt_oss_lora(
    monkeypatch: pytest.MonkeyPatch, gptoss20b_lora_files, mxfp4_use_marlin
):
    with monkeypatch.context() as m:
        m.setenv("VLLM_MXFP4_USE_MARLIN", "1" if mxfp4_use_marlin else "0")
        llm = vllm.LLM(
            MODEL_PATH,
            max_model_len=1024,
            enable_lora=T...
test
1
{"function_name": "test_gpt_oss_lora", "class_name": null, "qualname": "test_gpt_oss_lora", "file_path": "tests/lora/test_gptoss_tp.py", "repo_id": "vllm-project/vllm", "loc": 20, "tested_modules": ["vllm.lora.request", "utils"], "has_docstring": false, "runnable_level": "project_runnable"}
vllm-project/vllm:vllm/utils/torch_utils.py:guard_cuda_initialization
# Context: import contextlib import os from vllm.platforms import current_platform def is_strictly_contiguous(t: torch.Tensor) -> bool: ... def set_default_torch_dtype(dtype: torch.dtype): ... def set_default_torch_num_threads(num_threads: int | None): ... def get_dtype_size(dtype: torch.dtype) -> int: ... def _get_pr...
def guard_cuda_initialization():
    """Avoid unexpected CUDA initialization."""
    from vllm.platforms import current_platform

    if not current_platform.is_cuda():
        yield
        return
    old_value = os.environ.get("CUDA_VISIBLE_DEVICES")
    os.environ["CUDA_VISIBLE_DEVICES"] = ""
    try:
        yield...
function_complex
1
{"cognitive_complexity": 8, "loc": 23, "code_loc": 19, "docstring_loc": 1, "function_name": "guard_cuda_initialization", "class_name": null, "qualname": "guard_cuda_initialization", "file_path": "vllm/utils/torch_utils.py", "repo_id": "vllm-project/vllm", "has_docstring": true, "runnable_level": "project_runnable"}
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/agents/middleware/implementations/test_model_call_limit.py:test_thread_limit_with_create_agent
# Context: from langchain_core.messages import AIMessage, HumanMessage, ToolMessage from langgraph.checkpoint.memory import InMemorySaver from langchain.agents.factory import create_agent from langchain.agents.middleware.model_call_limit import ( ModelCallLimitExceededError, ModelCallLimitMiddleware, ModelC...
def test_thread_limit_with_create_agent() -> None:
    """Test that thread limits work correctly with create_agent."""
    model = FakeToolCallingModel()
    # Set thread limit to 1 (should be exceeded after 1 call)
    agent = create_agent(
        model=model,
        tools=[simple_tool],
        middleware=[ModelCa...
test
1
{"function_name": "test_thread_limit_with_create_agent", "class_name": null, "qualname": "test_thread_limit_with_create_agent", "file_path": "libs/langchain_v1/tests/unit_tests/agents/middleware/implementations/test_model_call_limit.py", "repo_id": "langchain-ai/langchain", "loc": 36, "tested_modules": ["langchain_core...
apache/airflow:providers/alibaba/src/airflow/providers/alibaba/cloud/hooks/base_alibaba.py:AlibabaBaseHook._get_field
# Context: from typing import Any, NamedTuple class AccessKeyCredentials(NamedTuple): ... class AlibabaBaseHook(BaseHook): conn_name_attr = "alibabacloud_conn_id" default_conn_name = "alibabacloud_default" conn_type = "alibaba_cloud" hook_name = "Alibaba Cloud" def __init__( self, ...
def _get_field(self, field_name: str, default: Any = None) -> Any:
    """Fetch a field from extras, and returns it."""
    value = self.extras.get(field_name)
    return value if value is not None else default
function_simple
1
{"cognitive_complexity": 1, "loc": 4, "code_loc": 2, "docstring_loc": 1, "function_name": "_get_field", "class_name": "AlibabaBaseHook", "qualname": "AlibabaBaseHook._get_field", "file_path": "providers/alibaba/src/airflow/providers/alibaba/cloud/hooks/base_alibaba.py", "repo_id": "apache/airflow", "has_docstring": tru...
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-solr/llama_index/readers/solr/base.py:SolrReader.load_data
# Context: from typing import Any, Optional from llama_index.core.schema import Document class SolrReader(BasePydanticReader): def __init__( self, endpoint: str, ): """Initialize with parameters.""" super().__init__(endpoint=endpoint) self._client = pysolr.Solr(endpoint)...
def load_data(
    self,
    query: dict[str, Any],
    field: str,
    id_field: str = "id",
    metadata_fields: Optional[list[str]] = None,
    embedding: Optional[str] = None,
) -> list[Document]:
    r"""
    Read data from the Solr index.

    At least one field argument must be spe...
function_complex
1
{"cognitive_complexity": 12, "loc": 68, "code_loc": 34, "docstring_loc": 22, "function_name": "load_data", "class_name": "SolrReader", "qualname": "SolrReader.load_data", "file_path": "llama-index-integrations/readers/llama-index-readers-solr/llama_index/readers/solr/base.py", "repo_id": "run-llama/llama_index", "has_d...
crewAIInc/crewAI:lib/crewai/tests/hooks/test_human_approval.py:TestToolHookHumanInput.test_request_human_input_returns_user_response
# Context: from unittest.mock import Mock, patch from crewai.hooks.tool_hooks import ToolCallHookContext def mock_executor(): ... def mock_tool(): ... def mock_agent(): ... def mock_task(): ... class TestLLMHookHumanInput: ... class TestApprovalHookIntegration: ... class TestCostControlApproval: ... class TestToolHoo...
def test_request_human_input_returns_user_response(
    self, mock_event_listener, mock_input, mock_tool, mock_agent, mock_task
):
    """Test that request_human_input returns the user's input."""
    mock_formatter = Mock()
    mock_event_listener.formatter = mock_formatter
    context = ToolC...
test
0
{"function_name": "test_request_human_input_returns_user_response", "class_name": "TestToolHookHumanInput", "qualname": "TestToolHookHumanInput.test_request_human_input_returns_user_response", "file_path": "lib/crewai/tests/hooks/test_human_approval.py", "repo_id": "crewAIInc/crewAI", "loc": 21, "tested_modules": ["__f...
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-anthropic/tests/test_anthropic_utils.py:TestAnthropicPromptCachingSupport.test_claude_3_5_sonnet_supported
# Context: from llama_index.llms.anthropic.utils import ( is_anthropic_prompt_caching_supported_model, ANTHROPIC_PROMPT_CACHING_SUPPORTED_MODELS, update_tool_calls, is_anthropic_structured_output_supported, STRUCTURED_OUTPUT_SUPPORT, messages_to_anthropic_beta_messages, ) def test_update_tool_c...
def test_claude_3_5_sonnet_supported(self):
    """Test Claude 3.5 Sonnet models support prompt caching."""
    assert is_anthropic_prompt_caching_supported_model("claude-3-5-sonnet-20241022")
    assert is_anthropic_prompt_caching_supported_model("claude-3-5-sonnet-20240620")
    assert is_anthropic_pr...
test
1
{"function_name": "test_claude_3_5_sonnet_supported", "class_name": "TestAnthropicPromptCachingSupport", "qualname": "TestAnthropicPromptCachingSupport.test_claude_3_5_sonnet_supported", "file_path": "llama-index-integrations/llms/llama-index-llms-anthropic/tests/test_anthropic_utils.py", "repo_id": "run-llama/llama_in...
crewAIInc/crewAI:lib/crewai/tests/test_human_feedback_decorator.py:TestHumanFeedbackResult.test_result_creation
# Context: from datetime import datetime from crewai.flow.human_feedback import ( HumanFeedbackConfig, HumanFeedbackResult, ) class TestHumanFeedbackValidation: ... class TestHumanFeedbackConfig: ... class TestDecoratorAttributePreservation: ... class TestAsyncSupport: ... class TestHumanFeedbackExecution: ......
def test_result_creation(self):
    """Test HumanFeedbackResult can be created with all fields."""
    result = HumanFeedbackResult(
        output={"title": "Test"},
        feedback="Looks good",
        outcome="approved",
        method_name="test_method",
    )
    assert result.ou...
test
0
{"function_name": "test_result_creation", "class_name": "TestHumanFeedbackResult", "qualname": "TestHumanFeedbackResult.test_result_creation", "file_path": "lib/crewai/tests/test_human_feedback_decorator.py", "repo_id": "crewAIInc/crewAI", "loc": 15, "tested_modules": ["__future__", "datetime", "typing", "crewai.flow",...
crewAIInc/crewAI:lib/crewai/src/crewai/llms/base_llm.py:BaseLLM._apply_stop_words
# Context: import logging def llm_call_context() -> Generator[str, None, None]: ... def get_current_call_id() -> str: ... class BaseLLM(ABC): def __init__( self, model: str, temperature: float | None = None, api_key: str | None = None, base_url: str | None = None, p...
def _apply_stop_words(self, content: str) -> str:
    """Apply stop words to truncate response content.

    This method provides consistent stop word behavior across all native SDK providers.
    Native providers should call this method to post-process their responses.

    Args:
        content: T...
function_complex
0
{"cognitive_complexity": 7, "loc": 42, "code_loc": 16, "docstring_loc": 19, "function_name": "_apply_stop_words", "class_name": "BaseLLM", "qualname": "BaseLLM._apply_stop_words", "file_path": "lib/crewai/src/crewai/llms/base_llm.py", "repo_id": "crewAIInc/crewAI", "has_docstring": true, "runnable_level": "class_runnab...
exo-explore/exo:src/exo/utils/keyed_backoff.py:KeyedBackoff.reset
# Context: class KeyedBackoff(Generic[K]): def __init__(self, base: float = 0.5, cap: float = 10.0): self._base = base self._cap = cap self._attempts: dict[K, int] = {} self._last_time: dict[K, float] = {} def should_proceed(self, key: K) -> bool: ... def record_attempt(self...
def reset(self, key: K) -> None:
    """Reset backoff state for a key (e.g., on success)."""
    self._attempts.pop(key, None)
    self._last_time.pop(key, None)
function_simple
0
{"cognitive_complexity": 0, "loc": 4, "code_loc": 2, "docstring_loc": 1, "function_name": "reset", "class_name": "KeyedBackoff", "qualname": "KeyedBackoff.reset", "file_path": "src/exo/utils/keyed_backoff.py", "repo_id": "exo-explore/exo", "has_docstring": true, "runnable_level": "file_runnable"}
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/ai_news_and_podcast_agents/beifong/routers/social_media_router.py:read_top_users
# Context: from fastapi import APIRouter, Query from typing import List, Optional, Dict, Any from services.social_media_service import social_media_service async def read_posts(page: int, per_page: int, platform: Optional[str], user_handle: Optional[str], sentiment: Optional[str], category: Optional[str], date_from: O...
async def read_top_users(
    platform: Optional[str] = Query(None, description="Filter by platform"),
    limit: int = Query(10, ge=1, le=50, description="Number of top users to return"),
    date_from: Optional[str] = Query(None, description="Filter by start date (format: YYYY-MM-DD)"),
    date_to: Optional[str] = Q...
function_simple
0
{"cognitive_complexity": 0, "loc": 8, "code_loc": 1, "docstring_loc": 1, "function_name": "read_top_users", "class_name": null, "qualname": "read_top_users", "file_path": "advanced_ai_agents/multi_agent_apps/ai_news_and_podcast_agents/beifong/routers/social_media_router.py", "repo_id": "Shubhamsaboo/awesome-llm-apps", ...
langchain-ai/langchain:libs/langchain_v1/langchain/agents/middleware/model_retry.py:ModelRetryMiddleware._handle_failure
# Context: from langchain_core.messages import AIMessage from langchain.agents.middleware.types import ( AgentMiddleware, AgentState, ContextT, ModelRequest, ModelResponse, ResponseT, ) class ModelRetryMiddleware(AgentMiddleware[AgentState[ResponseT], ContextT, ResponseT]): def __init__( ...
def _handle_failure(self, exc: Exception, attempts_made: int) -> ModelResponse[ResponseT]:
    """Handle failure when all retries are exhausted.

    Args:
        exc: The exception that caused the failure.
        attempts_made: Number of attempts actually made.

    Returns:
        `ModelRes...
function_simple
1
{"cognitive_complexity": 3, "loc": 23, "code_loc": 8, "docstring_loc": 12, "function_name": "_handle_failure", "class_name": "ModelRetryMiddleware", "qualname": "ModelRetryMiddleware._handle_failure", "file_path": "libs/langchain_v1/langchain/agents/middleware/model_retry.py", "repo_id": "langchain-ai/langchain", "has_...
crewAIInc/crewAI:lib/crewai/tests/utilities/events/test_async_event_bus.py:test_aemit_with_async_handlers
# Context: import asyncio import pytest from crewai.events.base_events import BaseEvent from crewai.events.event_bus import crewai_event_bus class AsyncTestEvent(BaseEvent): ... async def test_async_handler_execution(): ... async def test_multiple_async_handlers(): ... async def test_mixed_sync_and_async_handlers(): ....
async def test_aemit_with_async_handlers():
    received_events = []
    with crewai_event_bus.scoped_handlers():
        @crewai_event_bus.on(AsyncTestEvent)
        async def async_handler(source: object, event: BaseEvent) -> None:
            await asyncio.sleep(0.01)
            received_events.append(event)
        ...
test
0
{"function_name": "test_aemit_with_async_handlers", "class_name": null, "qualname": "test_aemit_with_async_handlers", "file_path": "lib/crewai/tests/utilities/events/test_async_event_bus.py", "repo_id": "crewAIInc/crewAI", "loc": 15, "tested_modules": ["crewai.events.base_events", "crewai.events.event_bus"], "has_docst...
Zie619/n8n-workflows:src/analytics_engine.py:WorkflowAnalytics.get_workflow_analytics
# Context: from typing import List, Dict, Any import json from datetime import datetime from collections import Counter, defaultdict class AnalyticsResponse(BaseModel): ... async def get_analytics_overview(): ... async def get_trend_analysis(days: int): ... async def get_usage_insights(): ... async def get_analytics_d...
def get_workflow_analytics(self) -> Dict[str, Any]:
    """Get comprehensive workflow analytics."""
    conn = self.get_db_connection()
    # Basic statistics
    cursor = conn.execute("SELECT COUNT(*) as total FROM workflows")
    total_workflows = cursor.fetchone()["total"]
    cursor = conn...
function_simple
0
{"cognitive_complexity": 3, "loc": 92, "code_loc": 72, "docstring_loc": 1, "function_name": "get_workflow_analytics", "class_name": "WorkflowAnalytics", "qualname": "WorkflowAnalytics.get_workflow_analytics", "file_path": "src/analytics_engine.py", "repo_id": "Zie619/n8n-workflows", "has_docstring": true, "runnable_lev...
browser-use/browser-use:examples/use-cases/onepassword.py:fill_field
# Context: from browser_use import ActionResult, Agent, Browser, ChatOpenAI, Tools from browser_use.browser.session import BrowserSession async def main(): ... # Task: Write a Python async function `fill_field` to fills in a specific field for a website using the value from 1Password. Parameters: vault_name: str, it...
async def fill_field(vault_name: str, item_name: str, field_name: str, browser_session: BrowserSession):
    """
    Fills in a specific field for a website using the value from 1Password.

    Note: Use blur_page before calling this if you want visual security.
    """
    try:
        # Resolve field value from 1Password
        field_va...
function_simple
0
{"cognitive_complexity": 1, "loc": 21, "code_loc": 10, "docstring_loc": 4, "function_name": "fill_field", "class_name": null, "qualname": "fill_field", "file_path": "examples/use-cases/onepassword.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "project_runnable"}
vllm-project/vllm:vllm/tracing/otel.py:instrument_otel
# Context: import functools import inspect from vllm.tracing.utils import TRACE_HEADERS, LoadingSpanAttributes from opentelemetry import trace def is_otel_available() -> bool: ... def init_otel_tracer(instrumenting_module_name: str, otlp_traces_endpoint: str, extra_attributes: dict[str, str] | None) -> Tracer: ... def...
def instrument_otel(func, span_name, attributes, record_exception):
    """Internal wrapper logic for sync and async functions."""
    # Pre-calculate static code attributes once (these don't change)
    code_attrs = {
        LoadingSpanAttributes.CODE_FUNCTION: func.__qualname__,
        LoadingSpanAttributes.CODE_N...
function_simple
1
{"cognitive_complexity": 3, "loc": 47, "code_loc": 39, "docstring_loc": 1, "function_name": "instrument_otel", "class_name": null, "qualname": "instrument_otel", "file_path": "vllm/tracing/otel.py", "repo_id": "vllm-project/vllm", "has_docstring": true, "runnable_level": "project_runnable"}
langchain-ai/langchain:libs/langchain_v1/langchain/agents/middleware/tool_selection.py:LLMToolSelectorMiddleware:class_doc
Write a class-level docstring for `LLMToolSelectorMiddleware` (inherits from AgentMiddleware[AgentState[ResponseT], ContextT, ResponseT]) which has methods: `__init__`, `_prepare_selection_request`, `_process_selection_response`, `wrap_model_call`, `awrap_model_call`.
Uses an LLM to select relevant tools before calling the main model.

When an agent has many tools available, this middleware filters them down to only
the most relevant ones for the user's query. This reduces token usage and helps
the main model focus on the right tools.

Examples:
    !!! example "Limit to 3 tools"
        ...
documentation
1
{"doc_type": "class", "class_name": "LLMToolSelectorMiddleware", "file_path": "libs/langchain_v1/langchain/agents/middleware/tool_selection.py", "repo_id": "langchain-ai/langchain", "char_length": 823, "methods": ["__init__", "_prepare_selection_request", "_process_selection_response", "wrap_model_call", "awrap_model_c...
ray-project/ray:python/ray/data/tests/unit/expressions/test_predicate.py:TestIsIn.test_is_in_structural_equality
# Context: from ray.data.expressions import BinaryExpr, Operation, UnaryExpr, col, lit class TestIsNull: ... class TestIsNotNull: ... class TestNullPredicateCombinations: ... class TestNotIn: ... class TestMembershipWithNulls: ... class TestMembershipCombinations: ... class TestIsIn: def sample_data(self): ... ...
def test_is_in_structural_equality(self):
    """Test structural equality for is_in expressions."""
    expr1 = col("status").is_in(["active", "pending"])
    expr2 = col("status").is_in(["active", "pending"])
    expr3 = col("status").is_in(["active"])
    assert expr1.structurally_equals(expr2)
    ...
test
0
{"function_name": "test_is_in_structural_equality", "class_name": "TestIsIn", "qualname": "TestIsIn.test_is_in_structural_equality", "file_path": "python/ray/data/tests/unit/expressions/test_predicate.py", "repo_id": "ray-project/ray", "loc": 8, "tested_modules": ["ray.data._internal.planner.plan_expression.expression_...
huggingface/transformers:tests/models/ministral/test_modeling_ministral.py:MinistralIntegrationTest.test_model_8b_logits
# Context: import gc from transformers.testing_utils import ( backend_empty_cache, cleanup, require_bitsandbytes, require_flash_attn, require_torch, require_torch_accelerator, slow, torch_device, ) import torch from transformers import ( AutoModelForCausalLM, MinistralFor...
def test_model_8b_logits(self):
    input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
    model = AutoModelForCausalLM.from_pretrained("mistralai/Ministral-8B-Instruct-2410", device_map="auto")
    assert isinstance(model, MinistralForCausalLM)
    input_ids = torch.tensor([input_ids]).to(model.mode...
test
0
{"function_name": "test_model_8b_logits", "class_name": "MinistralIntegrationTest", "qualname": "MinistralIntegrationTest.test_model_8b_logits", "file_path": "tests/models/ministral/test_modeling_ministral.py", "repo_id": "huggingface/transformers", "loc": 20, "tested_modules": ["transformers", "transformers.testing_ut...
666ghj/BettaFish:MindSpider/DeepSentimentCrawling/keyword_manager.py:KeywordManager.__init__
# Context: from sqlalchemy.engine import Engine class KeywordManager: def connect(self): ... def get_latest_keywords(self, target_date: date, max_keywords: int) -> List[str]: ... def get_daily_topics(self, extract_date: date) -> Optional[Dict]: ... def get_recent_topics(self, days: int) -> List[Dict]: ...
def __init__(self):
    """Initialize the keyword manager."""
    self.engine: Engine = None
    self.connect()
function_simple
1
{"cognitive_complexity": 0, "loc": 4, "code_loc": 2, "docstring_loc": 1, "function_name": "__init__", "class_name": "KeywordManager", "qualname": "KeywordManager.__init__", "file_path": "MindSpider/DeepSentimentCrawling/keyword_manager.py", "repo_id": "666ghj/BettaFish", "has_docstring": true, "runnable_level": "class_...
crewAIInc/crewAI:lib/devtools/src/crewai_devtools/cli.py:translate_release_notes
# Context: from openai import OpenAI from crewai_devtools.prompts import RELEASE_NOTES_PROMPT, TRANSLATE_RELEASE_NOTES_PROMPT def run_command(cmd: list[str], cwd: Path | None) -> str: ... def check_gh_installed() -> None: ... def check_git_clean() -> None: ... def update_version_in_file(file_path: Path, new_version: s...
def translate_release_notes(
    release_notes: str,
    lang: str,
    client: OpenAI,
) -> str:
    """Translate release notes into the target language using OpenAI.

    Args:
        release_notes: English release notes markdown.
        lang: Language code (e.g., "pt-BR", "ko").
        client: OpenAI client insta...
function_simple
0
{"cognitive_complexity": 3, "loc": 43, "code_loc": 26, "docstring_loc": 10, "function_name": "translate_release_notes", "class_name": null, "qualname": "translate_release_notes", "file_path": "lib/devtools/src/crewai_devtools/cli.py", "repo_id": "crewAIInc/crewAI", "has_docstring": true, "runnable_level": "project_runn...
ray-project/ray:python/ray/data/tests/test_repartition_e2e.py:test_key_based_repartition_shuffle
# Context: import numpy as np import ray from ray.data.context import DataContext, ShuffleStrategy def test_repartition_shuffle(ray_start_regular_shared_2_cpus, disable_fallback_to_object_extension): ... def test_repartition_noshuffle(ray_start_regular_shared_2_cpus, disable_fallback_to_object_extension): ... def test...
def test_key_based_repartition_shuffle(
    ray_start_regular_shared_2_cpus,
    restore_data_context,
    disable_fallback_to_object_extension,
):
    context = DataContext.get_current()
    context.shuffle_strategy = ShuffleStrategy.HASH_SHUFFLE
    context.hash_shuffle_operator_actor_num_cpus_override = 0.001
    ...
test
0
{"function_name": "test_key_based_repartition_shuffle", "class_name": null, "qualname": "test_key_based_repartition_shuffle", "file_path": "python/ray/data/tests/test_repartition_e2e.py", "repo_id": "ray-project/ray", "loc": 32, "tested_modules": ["ray.data._internal.logical.optimizers", "ray.data._internal.planner", "...
ray-project/ray:python/ray/train/v2/_internal/execution/controller/placement_group_cleaner.py:PlacementGroupCleaner.start_monitoring
# Context: import threading class PlacementGroupCleaner: def __init__(self, check_interval_s: float = 1.0): self._check_interval_s = check_interval_s self._pg_queue: queue.Queue = queue.Queue() self._stop_event = threading.Event() self._controller_actor_id: Optional[str] = None ...
def start_monitoring(self):
    """Start monitoring the controller and placement group."""
    if self._monitor_thread is not None and self._monitor_thread.is_alive():
        # Thread already running, just return True
        logger.debug("Monitor thread already running")
        return True
    ...
function_simple
0
{"cognitive_complexity": 2, "loc": 15, "code_loc": 11, "docstring_loc": 1, "function_name": "start_monitoring", "class_name": "PlacementGroupCleaner", "qualname": "PlacementGroupCleaner.start_monitoring", "file_path": "python/ray/train/v2/_internal/execution/controller/placement_group_cleaner.py", "repo_id": "ray-proje...
ansible/ansible:test/units/_internal/_yaml/test_dumper.py:test_yaml_dump_iterables
# Context: import pytest from ansible.template import Templar, trust_as_template, is_trusted_as_template def test_yaml_dump(filter_name: str, _vault_secrets_context: VaultTestHelper) -> None: ... def test_yaml_dump_undefined() -> None: ... # Task: Write a Python test function `test_yaml_dump_iterables` to verify the ...
def test_yaml_dump_iterables(value: object, expected: object) -> None:
    result = Templar(variables=dict(value=value)).template(trust_as_template("{{ value | to_yaml }}"))
    assert result == expected
test
1
{"function_name": "test_yaml_dump_iterables", "class_name": null, "qualname": "test_yaml_dump_iterables", "file_path": "test/units/_internal/_yaml/test_dumper.py", "repo_id": "ansible/ansible", "loc": 4, "tested_modules": ["__future__", "ansible.errors", "ansible.parsing.utils.yaml", "ansible.parsing.vault", "ansible.t...
vllm-project/vllm:tests/v1/e2e/test_pooling_chunked_prefill.py:test_pooling_prefix_cache
# Context: import pytest from vllm.platforms import current_platform class WrapperPooler(nn.Module): ... def inject_pooler(self): ... def retrieve_chunks(self): ... def test_pooling_chunked_prefill(vllm_runner, monkeypatch): ... # Task: Write a Python test function `test_pooling_prefix_cache` to test chunked prefill ...
def test_pooling_prefix_cache(vllm_runner, monkeypatch):
    """Test chunked prefill for pooling models with LastPool."""
    verses = prompt.split("\n\n")
    with monkeypatch.context() as m:
        m.setenv("VLLM_ALLOW_INSECURE_SERIALIZATION", "1")
        model_id = "Qwen/Qwen3-Embedding-0.6B"
        with vllm_...
test
1
{"function_name": "test_pooling_prefix_cache", "class_name": null, "qualname": "test_pooling_prefix_cache", "file_path": "tests/v1/e2e/test_pooling_chunked_prefill.py", "repo_id": "vllm-project/vllm", "loc": 44, "tested_modules": ["vllm.platforms"], "has_docstring": true, "runnable_level": "project_runnable"}
exo-explore/exo:src/exo/master/api.py:API.bench_image_edits
# Context: import contextlib from typing import Annotated, Literal, cast from fastapi import FastAPI, File, Form, HTTPException, Query, Request, UploadFile from exo.shared.models.model_cards import ( ModelCard, ModelId, delete_custom_card, get_model_cards, is_custom_card, ) from exo.shared.types.api...
async def bench_image_edits(
    self,
    request: Request,
    image: UploadFile = File(...),  # noqa: B008
    prompt: str = Form(...),
    model: str = Form(...),
    n: int = Form(1),
    size: str | None = Form(None),
    response_format: Literal["url", "b64_json"] = Form("b64_json...
function_simple
0
{"cognitive_complexity": 1, "loc": 44, "code_loc": 27, "docstring_loc": 1, "function_name": "bench_image_edits", "class_name": "API", "qualname": "API.bench_image_edits", "file_path": "src/exo/master/api.py", "repo_id": "exo-explore/exo", "has_docstring": true, "runnable_level": "project_runnable"}
vllm-project/vllm:tests/test_envs.py:TestEnvWithChoices.test_valid_lowercase_value_returned_case_insensitive
# Context: import os from unittest.mock import patch from vllm.envs import ( disable_envs_cache, enable_envs_cache, env_list_with_choices, env_set_with_choices, env_with_choices, environment_variables, ) def test_getattr_without_cache(monkeypatch: pytest.MonkeyPatch): ... def test_getattr_with_...
def test_valid_lowercase_value_returned_case_insensitive(self):
    """Test that lowercase value is accepted in case insensitive mode."""
    with patch.dict(os.environ, {"TEST_ENV": "option1"}):
        env_func = env_with_choices(
            "TEST_ENV", "default", ["OPTION1", "OPTION2"], case_sensiti...
test
1
{"function_name": "test_valid_lowercase_value_returned_case_insensitive", "class_name": "TestEnvWithChoices", "qualname": "TestEnvWithChoices.test_valid_lowercase_value_returned_case_insensitive", "file_path": "tests/test_envs.py", "repo_id": "vllm-project/vllm", "loc": 7, "tested_modules": ["vllm.envs"], "has_docstrin...
exo-explore/exo:bench/eval_tool_calls.py:_claude_parse_response
# Context: import json from typing import Any, Literal class Scenario: ... def load_scenarios(path: Path) -> list[Scenario]: ... class ParsedResponse: ... class ScenarioResult: ... def validate_args(args_str: str, required_keys: list[str]) -> tuple[bool, str | None]: ... def validate_nested_args(args_str: str, array_k...
def _claude_parse_response(data: dict[str, Any]) -> ParsedResponse:
    """Parse Claude Messages response into common format."""
    stop_reason = data.get("stop_reason", "")
    content_blocks = data.get("content", [])
    if stop_reason == "tool_use":
        finish_reason = "tool_calls"
    elif stop_reason == "end...
function_complex
0
{"cognitive_complexity": 18, "loc": 42, "code_loc": 35, "docstring_loc": 1, "function_name": "_claude_parse_response", "class_name": null, "qualname": "_claude_parse_response", "file_path": "bench/eval_tool_calls.py", "repo_id": "exo-explore/exo", "has_docstring": true, "runnable_level": "file_runnable"}
huggingface/transformers:tests/models/sam3/test_modeling_sam3.py:Sam3ModelTest.test_forward_with_both_pixel_values_and_vision_embeds_raises_error
# Context: from transformers.testing_utils import ( backend_empty_cache, require_deterministic_for_xpu, require_torch, slow, torch_device, ) import torch class Sam3VisionModelTester: ... class Sam3VisionModelTest(ModelTesterMixin, unittest.TestCase): ... class Sam3ModelTester: ... def prepare_coco_...
def test_forward_with_both_pixel_values_and_vision_embeds_raises_error(self):
    """Test that passing both pixel_values and vision_embeds raises an error."""
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        model ...
test
0
{"function_name": "test_forward_with_both_pixel_values_and_vision_embeds_raises_error", "class_name": "Sam3ModelTest", "qualname": "Sam3ModelTest.test_forward_with_both_pixel_values_and_vision_embeds_raises_error", "file_path": "tests/models/sam3/test_modeling_sam3.py", "repo_id": "huggingface/transformers", "loc": 23,...
apache/airflow:dev/verify_release_calendar.py:find_matching_entry
# Context: class Release: ... class CalendarEntry: ... def fetch_confluence_page() -> str: ... def print_confluence_debug_info(soup: BeautifulSoup) -> None: ... def get_release_sections() -> dict[str, list[str]]: ... def find_table_for_heading(heading: Any) -> Any | None: ... def find_section_and_parse(soup: Beautiful...
def find_matching_entry(release: Release, calendar_entries: list[CalendarEntry]) -> CalendarEntry | None:
    """Find a calendar entry that matches the given release, or None if not found."""
    for entry in calendar_entries:
        if is_matching_entry(release, entry):
            return entry
    return None
function_simple
1
{"cognitive_complexity": 3, "loc": 6, "code_loc": 4, "docstring_loc": 1, "function_name": "find_matching_entry", "class_name": null, "qualname": "find_matching_entry", "file_path": "dev/verify_release_calendar.py", "repo_id": "apache/airflow", "has_docstring": true, "runnable_level": "file_runnable"}
keras-team/keras:keras/src/export/litert_test.py:ExportLitertTest.test_export_invalid_filepath
# Context: import os import numpy as np class CustomModel(models.Model): ... def get_model(type, input_shape, layer_list): ... def _convert_to_numpy(structure): ... def _normalize_name(name): ... def _set_interpreter_inputs(interpreter, inputs): ... def _get_interpreter_outputs(interpreter): ... class ExportLitertTes...
def test_export_invalid_filepath(self):
    """Test that export fails with invalid file extension."""
    model = get_model("sequential")
    dummy_input = np.random.random((3, 10)).astype(np.float32)
    model(dummy_input)
    temp_filepath = os.path.join(self.get_temp_dir(), "exported_model.txt")...
test
1
{"function_name": "test_export_invalid_filepath", "class_name": "ExportLitertTest", "qualname": "ExportLitertTest.test_export_invalid_filepath", "file_path": "keras/src/export/litert_test.py", "repo_id": "keras-team/keras", "loc": 11, "tested_modules": ["absl.testing", "keras.src", "keras.src", "keras.src", "keras.src"...
huggingface/transformers:tests/models/deepseek_v2/test_modeling_deepseek_v2.py:DeepseekV2ModelTest.test_model_rope_scaling_frequencies
# Context: from transformers.testing_utils import require_torch, require_torch_accelerator, slow, torch_device import torch from transformers.models.deepseek_v2.modeling_deepseek_v2 import DeepseekV2RotaryEmbedding class DeepseekV2ModelTester(CausalLMModelTester): ... class DeepseekV2IntegrationTest(unittest.TestCase)...
def test_model_rope_scaling_frequencies(self):
    """
    Overwritten: DeepseekV2 implements RoPE in the complex domain, as opposed to in the real
    domain with `sin` and `cos`. Nevertheless, the checks are the same as in the original test.
    """
    config, _ = self.model_tester.prepare_config...
test
0
{"function_name": "test_model_rope_scaling_frequencies", "class_name": "DeepseekV2ModelTest", "qualname": "DeepseekV2ModelTest.test_model_rope_scaling_frequencies", "file_path": "tests/models/deepseek_v2/test_modeling_deepseek_v2.py", "repo_id": "huggingface/transformers", "loc": 56, "tested_modules": ["transformers", ...
huggingface/transformers:src/transformers/models/qwen3_next/modular_qwen3_next.py:Qwen3NextDynamicCache.get_mask_sizes
# Context: import torch class Qwen3NextRMSNormGated(nn.Module): ... class Qwen3NextRotaryEmbedding(Gemma2RotaryEmbedding): ... class Qwen3NextRMSNorm(Gemma3RMSNorm): ... class Qwen3NextAttention(Qwen3MoeAttention): ... def torch_causal_conv1d_update(hidden_states, conv_state, weight, bias, activation): ... def l2norm(...
def get_mask_sizes(self, cache_position: torch.Tensor, layer_idx: int) -> tuple[int, int]:
    """
    Return a tuple (kv_length, kv_offset) corresponding to the length and offset that will be returned
    for the given layer at `layer_idx`. The masks are then prepared according to the given lengths...
function_simple
0
{"cognitive_complexity": 0, "loc": 11, "code_loc": 5, "docstring_loc": 5, "function_name": "get_mask_sizes", "class_name": "Qwen3NextDynamicCache", "qualname": "Qwen3NextDynamicCache.get_mask_sizes", "file_path": "src/transformers/models/qwen3_next/modular_qwen3_next.py", "repo_id": "huggingface/transformers", "has_doc...
apache/airflow:airflow-ctl/tests/airflow_ctl/ctl/commands/test_config_command.py:TestCliConfigCommands.test_lint_detects_removed_configs
# Context: import os from unittest.mock import patch from airflowctl.api.client import ClientKind from airflowctl.api.datamodels.generated import Config, ConfigOption, ConfigSection from airflowctl.ctl.commands import config_command from airflowctl.ctl.commands.config_command import ConfigChange, ConfigParameter class...
def test_lint_detects_removed_configs(self, mock_rich_print, api_client_maker):
    response_config = Config(
        sections=[
            ConfigSection(
                name="test_section",
                options=[
                    ConfigOption(
                        key="test_optio...
test
1
{"function_name": "test_lint_detects_removed_configs", "class_name": "TestCliConfigCommands", "qualname": "TestCliConfigCommands.test_lint_detects_removed_configs", "file_path": "airflow-ctl/tests/airflow_ctl/ctl/commands/test_config_command.py", "repo_id": "apache/airflow", "loc": 34, "tested_modules": ["__future__", ...
crewAIInc/crewAI:lib/crewai-tools/tests/rag/test_mdx_loader.py:TestMDXLoader.test_load_basic_mdx_file
# Context: from crewai_tools.rag.base_loader import LoaderResult class TestMDXLoader: def _write_temp_mdx(self, content): ... def _load_from_file(self, content): ... def test_mdx_multiple_imports_exports(self): ... def test_complex_jsx_cleanup(self): ... def test_whitespace_cleanup(self): ... d...
def test_load_basic_mdx_file(self):
    content = """
import Component from './Component'
export const meta = { title: 'Test' }

# Test MDX File

This is a **markdown** file with JSX.

<Component prop="value" />

Some more content.

<div className="container">
    <p>Nested content</p>
</div>
"""
    result, pa...
test
0
{"function_name": "test_load_basic_mdx_file", "class_name": "TestMDXLoader", "qualname": "TestMDXLoader.test_load_basic_mdx_file", "file_path": "lib/crewai-tools/tests/rag/test_mdx_loader.py", "repo_id": "crewAIInc/crewAI", "loc": 35, "tested_modules": ["crewai_tools.rag.base_loader", "crewai_tools.rag.loaders.mdx_load...
github/spec-kit:tests/test_extensions.py:TestExtensionManifest.test_invalid_command_name
# Context: import pytest from specify_cli.extensions import ( ExtensionManifest, ExtensionRegistry, ExtensionManager, CommandRegistrar, ExtensionCatalog, ExtensionError, ValidationError, CompatibilityError, version_satisfies, ) import yaml def temp_dir(): ... def valid_manifest_data...
def test_invalid_command_name(self, temp_dir, valid_manifest_data):
    """Test manifest with invalid command name format."""
    import yaml
    valid_manifest_data["provides"]["commands"][0]["name"] = "invalid-name"
    manifest_path = temp_dir / "extension.yml"
    with open(manifest_path, 'w')...
test
0
{"function_name": "test_invalid_command_name", "class_name": "TestExtensionManifest", "qualname": "TestExtensionManifest.test_invalid_command_name", "file_path": "tests/test_extensions.py", "repo_id": "github/spec-kit", "loc": 12, "tested_modules": ["pathlib", "datetime", "specify_cli.extensions"], "has_docstring": tru...
infiniflow/ragflow:common/data_source/gmail_connector.py:GmailConnector.poll_source
# Context: from common.data_source.google_util.constant import DB_CREDENTIALS_PRIMARY_ADMIN_KEY, MISSING_SCOPES_ERROR_STR, SCOPE_INSTRUCTIONS, USER_FIELDS from common.data_source.interfaces import LoadConnector, PollConnector, SecondsSinceUnixEpoch, SlimConnectorWithPermSync from common.data_source.models import BasicE...
def poll_source(self, start: SecondsSinceUnixEpoch, end: SecondsSinceUnixEpoch) -> GenerateDocumentsOutput:
    """Poll Gmail for documents within time range."""
    try:
        yield from self._fetch_threads(start, end)
    except Exception as e:
        if MISSING_SCOPES_ERROR_STR in str(e):
            ...
function_simple
1
{"cognitive_complexity": 3, "loc": 8, "code_loc": 6, "docstring_loc": 1, "function_name": "poll_source", "class_name": "GmailConnector", "qualname": "GmailConnector.poll_source", "file_path": "common/data_source/gmail_connector.py", "repo_id": "infiniflow/ragflow", "has_docstring": true, "runnable_level": "project_runn...
apache/airflow:shared/configuration/src/airflow_shared/configuration/parser.py:AirflowConfigParser._get_option_from_secrets
# Context: class ValueNotFound: ... def expand_env_var(env_var: None) -> None: ... def expand_env_var(env_var: str) -> str: ... def expand_env_var(env_var: str | None) -> str | None: ... def run_command(command: str) -> str: ... def _is_template(configuration_description: dict[str, dict[str, Any]], section: str, key: ...
def _get_option_from_secrets(
    self,
    deprecated_key: str | None,
    deprecated_section: str | None,
    key: str,
    section: str,
    issue_warning: bool = True,
    extra_stacklevel: int = 0,
    **kwargs,
) -> str | ValueNotFound:
    """Get config option from secrets...
function_complex
1
{"cognitive_complexity": 9, "loc": 25, "code_loc": 13, "docstring_loc": 1, "function_name": "_get_option_from_secrets", "class_name": "AirflowConfigParser", "qualname": "AirflowConfigParser._get_option_from_secrets", "file_path": "shared/configuration/src/airflow_shared/configuration/parser.py", "repo_id": "apache/airf...
ray-project/ray:python/ray/_common/tests/test_ray_option_utils.py:TestTaskActorOptionValidation.test_validate_actor_options_invalid_keyword
# Context: import pytest from ray._common.ray_option_utils import ( Option, _check_deprecate_placement_group, _counting_option, _resource_option, _validate_resource_quantity, _validate_resources, update_options, validate_actor_options, validate_task_options, ) class TestOptionValida...
def test_validate_actor_options_invalid_keyword(self):
    with pytest.raises(ValueError, match="Invalid option keyword"):
        validate_actor_options({"invalid_option": 1}, in_options=False)
test
0
{"function_name": "test_validate_actor_options_invalid_keyword", "class_name": "TestTaskActorOptionValidation", "qualname": "TestTaskActorOptionValidation.test_validate_actor_options_invalid_keyword", "file_path": "python/ray/_common/tests/test_ray_option_utils.py", "repo_id": "ray-project/ray", "loc": 3, "tested_modul...
vllm-project/vllm:tests/models/language/pooling/test_bge_m3.py:test_bge_m3_api_server_multi_vector
# Context: import httpx import openai import pytest import torch def server(): ... async def client(server): ... async def test_bge_m3_api_server_embedding(client: openai.AsyncOpenAI): ... async def tokenize(client: openai.AsyncOpenAI, sentences: list[str]) -> list[list[int]]: ... async def sparse_embeddings(client: o...
async def test_bge_m3_api_server_multi_vector(client: openai.AsyncOpenAI):
    result_1 = await client.post(
        "../pooling",
        body={"model": MODEL_NAME, "input": sentences_1, "task": "token_embed"},
        cast_to=httpx.Response,
    )
    embeddings_1 = [torch.tensor(data["data"]) for data in result_1.js...
test
1
{"function_name": "test_bge_m3_api_server_multi_vector", "class_name": null, "qualname": "test_bge_m3_api_server_multi_vector", "file_path": "tests/models/language/pooling/test_bge_m3.py", "repo_id": "vllm-project/vllm", "loc": 23, "tested_modules": ["utils", "embed_utils"], "has_docstring": false, "runnable_level": "p...
browser-use/browser-use:browser_use/skill_cli/python_session.py:PythonSession.execute
# Context: import asyncio import io import traceback from contextlib import redirect_stderr, redirect_stdout class ExecutionResult: ... class BrowserWrapper: ... class PythonSession: def __post_init__(self) -> None: ... def reset(self) -> None: ... def get_variables(self) -> dict[str, str]: ... # Task: W...
def execute(
    self, code: str, browser_session: 'BrowserSession', loop: asyncio.AbstractEventLoop | None = None
) -> ExecutionResult:
    """Execute code in persistent namespace.

    The `browser` variable is injected into the namespace before each execution,
    providing a convenient wrapper around the BrowserSession.
    ...
function_complex
0
{"cognitive_complexity": 9, "loc": 47, "code_loc": 25, "docstring_loc": 10, "function_name": "execute", "class_name": "PythonSession", "qualname": "PythonSession.execute", "file_path": "browser_use/skill_cli/python_session.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "file_runnabl...
google/langextract:tests/prompting_test.py:ContextAwarePromptBuilderTest.test_second_chunk_includes_previous_context
# Context: from langextract import prompting class QAPromptGeneratorTest(parameterized.TestCase): ... class PromptBuilderTest(absltest.TestCase): ... class ContextAwarePromptBuilderTest(absltest.TestCase): def _create_generator(self): ... def test_context_window_chars_property(self): ... def test_first_ch...
def test_second_chunk_includes_previous_context(self):
    """Verifies the second chunk includes text from the first chunk."""
    generator = self._create_generator()
    builder = prompting.ContextAwarePromptBuilder(
        generator, context_window_chars=20
    )
    context_prefix = prompting.ContextAwarePromptBui...
test
1
{"function_name": "test_second_chunk_includes_previous_context", "class_name": "ContextAwarePromptBuilderTest", "qualname": "ContextAwarePromptBuilderTest.test_second_chunk_includes_previous_context", "file_path": "tests/prompting_test.py", "repo_id": "google/langextract", "loc": 16, "tested_modules": ["absl.testing", ...
infiniflow/ragflow:test/testcases/test_sdk_api/test_file_management_within_dataset/test_list_documents.py:TestDocumentsList.test_invalid_params
# Context: import pytest class TestDocumentsList: def test_default(self, add_documents): ... def test_page(self, add_documents, params, expected_page_size, expected_message): ... def test_page_size(self, add_documents, params, expected_page_size, expected_message): ... def test_orderby(self, add_docume...
def test_invalid_params(self, add_documents):
    dataset, _ = add_documents
    params = {"a": "b"}
    with pytest.raises(TypeError) as exception_info:
        dataset.list_documents(**params)
    assert "got an unexpected keyword argument" in str(exception_info.value), str(exception_info.value)
test
1
{"function_name": "test_invalid_params", "class_name": "TestDocumentsList", "qualname": "TestDocumentsList.test_invalid_params", "file_path": "test/testcases/test_sdk_api/test_file_management_within_dataset/test_list_documents.py", "repo_id": "infiniflow/ragflow", "loc": 6, "tested_modules": ["concurrent.futures"], "ha...
ray-project/ray:python/ray/dashboard/modules/aggregator/publisher/ray_event_publisher.py:RayEventPublisherInterface:class_doc
Write a class-level docstring for `RayEventPublisherInterface` (inherits from ABC) which has methods: `run_forever`, `wait_until_running`.
Abstract interface for publishing Ray event batches to external destinations.
documentation
0
{"doc_type": "class", "class_name": "RayEventPublisherInterface", "file_path": "python/ray/dashboard/modules/aggregator/publisher/ray_event_publisher.py", "repo_id": "ray-project/ray", "char_length": 77, "methods": ["run_forever", "wait_until_running"]}
infiniflow/ragflow:test/unit_test/common/test_misc_utils.py:TestHashStr2Int.test_unicode_string
# Context: from common.misc_utils import get_uuid, download_img, hash_str2int, convert_bytes class TestGetUuid: ... class TestDownloadImg: ... class TestConvertBytes: ... class TestHashStr2Int: def test_basic_hashing(self): ... def test_default_mod_value(self): ... def test_custom_mod_value(self): ... ...
def test_unicode_string(self):
    """Test hashing unicode strings"""
    test_strings = [
        "中文",
        "🚀火箭",
        "café",
        "🎉",
        "Hello 世界"
    ]
    for test_str in test_strings:
        result = hash_str2int(test_str)
        assert isinstance...
test
1
{"function_name": "test_unicode_string", "class_name": "TestHashStr2Int", "qualname": "TestHashStr2Int.test_unicode_string", "file_path": "test/unit_test/common/test_misc_utils.py", "repo_id": "infiniflow/ragflow", "loc": 14, "tested_modules": ["common.misc_utils"], "has_docstring": true, "runnable_level": "project_run...
huggingface/transformers:src/transformers/integrations/moe.py:ExpertsInterface:class_doc
Write a class-level docstring for `ExpertsInterface` (inherits from GeneralInterface) which has methods: `get_interface`.
Interface for registering custom experts implementations.
documentation
0
{"doc_type": "class", "class_name": "ExpertsInterface", "file_path": "src/transformers/integrations/moe.py", "repo_id": "huggingface/transformers", "char_length": 57, "methods": ["get_interface"]}
langflow-ai/langflow:src/backend/tests/unit/components/processing/test_text_operations_component.py:TestBugFixTextJoinEmptyFirst:class_doc
Write a class-level docstring for `TestBugFixTextJoinEmptyFirst` which has methods: `test_process_text_allows_empty_for_text_join`.
Bug #9: Text Join should return second text when first is empty.
documentation
1
{"doc_type": "class", "class_name": "TestBugFixTextJoinEmptyFirst", "file_path": "src/backend/tests/unit/components/processing/test_text_operations_component.py", "repo_id": "langflow-ai/langflow", "char_length": 64, "methods": ["test_process_text_allows_empty_for_text_join"]}
crewAIInc/crewAI:lib/crewai/tests/utilities/test_pydantic_schema_utils.py:TestForceAdditionalPropertiesFalse.test_adds_to_object
# Context: from copy import deepcopy from crewai.utilities.pydantic_schema_utils import ( build_rich_field_description, convert_oneof_to_anyof, create_model_from_schema, ensure_all_properties_required, ensure_type_in_schemas, force_additional_properties_false, resolve_refs, strip_null_fr...
def test_adds_to_object(self) -> None:
    schema = {"type": "object", "properties": {"x": {"type": "integer"}}}
    result = force_additional_properties_false(deepcopy(schema))
    assert result["additionalProperties"] is False
test
0
{"function_name": "test_adds_to_object", "class_name": "TestForceAdditionalPropertiesFalse", "qualname": "TestForceAdditionalPropertiesFalse.test_adds_to_object", "file_path": "lib/crewai/tests/utilities/test_pydantic_schema_utils.py", "repo_id": "crewAIInc/crewAI", "loc": 4, "tested_modules": ["__future__", "copy", "t...
langchain-ai/langchain:libs/partners/openrouter/tests/unit_tests/test_chat_models.py:TestErrorPaths.test_max_retries_scales_elapsed_time
# Context: from unittest.mock import AsyncMock, MagicMock, patch from pydantic import BaseModel, Field, SecretStr from langchain_openrouter.chat_models import ( ChatOpenRouter, _convert_chunk_to_message_chunk, _convert_dict_to_message, _convert_file_block_to_openrouter, _convert_message_to_dict, ...
def test_max_retries_scales_elapsed_time(self) -> None:
    """Test that max_retries value scales max_elapsed_time."""
    with patch("openrouter.OpenRouter") as mock_cls:
        mock_cls.return_value = MagicMock()
        ChatOpenRouter(
            model=MODEL_NAME,
            api_key=Secret...
test
1
{"function_name": "test_max_retries_scales_elapsed_time", "class_name": "TestErrorPaths", "qualname": "TestErrorPaths.test_max_retries_scales_elapsed_time", "file_path": "libs/partners/openrouter/tests/unit_tests/test_chat_models.py", "repo_id": "langchain-ai/langchain", "loc": 12, "tested_modules": ["__future__", "typ...
fastapi/fastapi:tests/test_request_params/test_query/test_required_str.py:test_required_alias_and_validation_alias_schema
# Context: import pytest from inline_snapshot import snapshot async def read_required_str(p: str): ... class QueryModelRequiredStr(BaseModel): ... async def read_model_required_str(p: Annotated[QueryModelRequiredStr, Query()]): ... def test_required_str_schema(path: str): ... def test_required_str_missing(path: str): ...
def test_required_alias_and_validation_alias_schema(path: str):
    assert app.openapi()["paths"][path]["get"]["parameters"] == snapshot(
        [
            {
                "required": True,
                "schema": {"title": "P Val Alias", "type": "string"},
                "name": "p_val_alias",
                ...
test
1
{"function_name": "test_required_alias_and_validation_alias_schema", "class_name": null, "qualname": "test_required_alias_and_validation_alias_schema", "file_path": "tests/test_request_params/test_query/test_required_str.py", "repo_id": "fastapi/fastapi", "loc": 11, "tested_modules": ["typing", "dirty_equals", "fastapi...
vnpy/vnpy:tests/test_alpha101.py:TestAlpha101.test_alpha6
# Context: import polars as pl from vnpy.alpha.dataset.utility import calculate_by_expression def create_test_df(n_symbols: int, n_days: int) -> pl.DataFrame: ... def test_df() -> pl.DataFrame: ... class TestAlpha101: def test_alpha1(self, test_df: pl.DataFrame) -> None: ... def test_alpha2(self, test_df: pl....
def test_alpha6(self, test_df: pl.DataFrame) -> None:
    """Test Alpha#6"""
    expr = "(-1) * ts_corr(open, volume, 10)"
    result = calculate_by_expression(test_df, expr)
    assert "data" in result.columns
test
1
{"function_name": "test_alpha6", "class_name": "TestAlpha101", "qualname": "TestAlpha101.test_alpha6", "file_path": "tests/test_alpha101.py", "repo_id": "vnpy/vnpy", "loc": 5, "tested_modules": ["datetime", "vnpy.alpha.dataset.utility"], "has_docstring": true, "runnable_level": "project_runnable"}
vllm-project/vllm:vllm/model_executor/layers/batch_invariant.py:rms_norm
# Context: import torch def _matmul_launch_metadata(grid: Callable[..., Any], kernel: Any, args: dict[str, Any]) -> dict[str, Any]: ... def _compute_pid(tile_id, num_pid_in_group, num_pid_m, GROUP_SIZE_M, NUM_SMS): ... def matmul_kernel_persistent(a_ptr, b_ptr, c_ptr, bias_ptr, M, N, K, stride_am, stride_ak, stride_bk...
def rms_norm(
    input: torch.Tensor, weight: torch.Tensor, eps: float = 1e-6
) -> torch.Tensor:
    """
    Compute RMS normalization using Triton kernel.

    RMS Norm normalizes the input by the root mean square and scales by weight:
        output = input / sqrt(mean(input^2) + eps) * weight

    Args:
        input: ...
function_simple
1
{"cognitive_complexity": 0, "loc": 45, "code_loc": 24, "docstring_loc": 14, "function_name": "rms_norm", "class_name": null, "qualname": "rms_norm", "file_path": "vllm/model_executor/layers/batch_invariant.py", "repo_id": "vllm-project/vllm", "has_docstring": true, "runnable_level": "file_runnable"}
docling-project/docling:tests/test_backend_latex.py:test_latex_table_environment
# Context: from io import BytesIO from docling.backend.latex_backend import LatexDocumentBackend from docling.datamodel.base_models import InputFormat from docling.datamodel.document import ConversionResult, DoclingDocument, InputDocument from docling.datamodel.document import InputDocument def test_latex_basic_conver...
def test_latex_table_environment():
    """Test table environment (wrapper around tabular)"""
    latex_content = b"""
\\documentclass{article}
\\begin{document}
\\begin{table}
\\begin{tabular}{cc}
A & B \\\\
C & D
\\end{tabular}
\\caption{Sample table}
\\end{table}
\\end{documen...
test
1
{"function_name": "test_latex_table_environment", "class_name": null, "qualname": "test_latex_table_environment", "file_path": "tests/test_backend_latex.py", "repo_id": "docling-project/docling", "loc": 24, "tested_modules": ["io", "pathlib", "docling_core.types.doc", "docling.backend.latex_backend", "docling.datamodel...
huggingface/transformers:src/transformers/models/jamba/modular_jamba.py:JambaSparseMoeBlock:class_doc
Write a class-level docstring for `JambaSparseMoeBlock` (inherits from nn.Module) which has methods: `__init__`, `route_tokens_to_experts`, `forward`.
This implementation is strictly equivalent to standard MoE with full capacity (no dropped tokens). It's faster since it formulates MoE operations in terms of block-sparse operations to accommodate imbalanced assignments of tokens to experts, whereas standard MoE either (1) drop tokens at the cost of reduced performance...
documentation
0
{"doc_type": "class", "class_name": "JambaSparseMoeBlock", "file_path": "src/transformers/models/jamba/modular_jamba.py", "repo_id": "huggingface/transformers", "char_length": 418, "methods": ["__init__", "route_tokens_to_experts", "forward"]}
huggingface/transformers:tests/models/doge/test_modeling_doge.py:DogeModelTest.test_doge_sequence_classification_model
# Context: from transformers.testing_utils import ( require_torch, require_torch_accelerator, slow, torch_device, ) from ...test_modeling_common import ModelTesterMixin, ids_tensor from transformers import ( DogeForCausalLM, DogeForSequenceClassification, DogeModel, ) class ...
def test_doge_sequence_classification_model(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_te...
test
0
{"function_name": "test_doge_sequence_classification_model", "class_name": "DogeModelTest", "qualname": "DogeModelTest.test_doge_sequence_classification_model", "file_path": "tests/models/doge/test_modeling_doge.py", "repo_id": "huggingface/transformers", "loc": 11, "tested_modules": ["transformers", "transformers.test...
geekcomputers/Python:Tic-Tac-Toe Games/tic-tac-toe3.py:module_doc
Write a module-level docstring for the Python module `tic-tac-toe3` which contains function `check_winner`, function `is_board_full`, function `minimax`, function `best_move`, function `make_move`.
Tic-Tac-Toe with AI (Minimax) using CustomTkinter.
Player = "X", AI = "O". Click a button to play.
>>> check_winner([['X','X','X'],[' ',' ',' '],[' ',' ',' ']], 'X')
True
>>> check_winner([['X','O','X'],['O','O','O'],['X',' ',' ']], 'O')
True
>>> check_winner([['X','O','X'],['O','X','O'],['O','X','O']], 'X')
False
documentation
1
{"doc_type": "module", "module_name": "tic-tac-toe3", "file_path": "Tic-Tac-Toe Games/tic-tac-toe3.py", "repo_id": "geekcomputers/Python", "char_length": 317}
streamlit/streamlit:lib/tests/streamlit/web/server/starlette/starlette_app_test.py:test_websocket_ignores_debug_disconnect_in_production
# Context: from pathlib import Path import pytest from starlette.testclient import TestClient from streamlit import file_util from streamlit.proto.BackMsg_pb2 import BackMsg from streamlit.web.server.starlette.starlette_app import ( _RESERVED_ROUTE_PREFIXES, App, create_starlette_app, ) from tests.testutil ...
def test_websocket_ignores_debug_disconnect_in_production(tmp_path: Path) -> None: """Test that debug_disconnect_websocket is ignored in production mode.""" component_dir = tmp_path / "component" component_dir.mkdir() (component_dir / "index.html").write_text("component") static_dir = tmp_path / "...
test
1
{"function_name": "test_websocket_ignores_debug_disconnect_in_production", "class_name": null, "qualname": "test_websocket_ignores_debug_disconnect_in_production", "file_path": "lib/tests/streamlit/web/server/starlette/starlette_app_test.py", "repo_id": "streamlit/streamlit", "loc": 36, "tested_modules": ["__future__",...
commaai/openpilot:system/ui/lib/egl.py:init_egl
# Context: import cffi from openpilot.common.swaglog import cloudlog class EGLImage: ... class EGLState: ... def create_egl_image(width: int, height: int, stride: int, fd: int, uv_offset: int) -> EGLImage | None: ... def destroy_egl_image(egl_image: EGLImage) -> None: ... def bind_egl_image_to_texture(texture_id: int,...
def init_egl() -> bool: """Initialize EGL and load necessary functions""" global _egl # Don't re-initialize if already done if _egl.initialized: return True try: _egl.ffi = cffi.FFI() _egl.ffi.cdef(""" typedef int EGLint; typedef unsigned int EGLBoolean; typedef unsigned int EG...
function_simple
0
{"cognitive_complexity": 4, "loc": 61, "code_loc": 46, "docstring_loc": 1, "function_name": "init_egl", "class_name": null, "qualname": "init_egl", "file_path": "system/ui/lib/egl.py", "repo_id": "commaai/openpilot", "has_docstring": true, "runnable_level": "project_runnable"}
mem0ai/mem0:mem0/vector_stores/cassandra.py:CassandraDB.list
# Context: import json from typing import Any, Dict, List, Optional class OutputData(BaseModel): ... class CassandraDB(VectorStoreBase): def __init__( self, contact_points: List[str], port: int = 9042, username: Optional[str] = None, password: Optional[str] = None, ...
def list( self, filters: Optional[Dict] = None, limit: int = 100 ) -> List[List[OutputData]]: """ List all vectors in the collection. Args: filters (Dict, optional): Filters to apply limit (int): Number of vectors to return Returns: ...
function_complex
1
{"cognitive_complexity": 17, "loc": 47, "code_loc": 28, "docstring_loc": 10, "function_name": "list", "class_name": "CassandraDB", "qualname": "CassandraDB.list", "file_path": "mem0/vector_stores/cassandra.py", "repo_id": "mem0ai/mem0", "has_docstring": true, "runnable_level": "file_runnable"}
langchain-ai/langchain:libs/langchain/langchain_classic/agents/agent.py:AgentExecutor.input_keys
# Context: class BaseSingleActionAgent(BaseModel): ... class BaseMultiActionAgent(BaseModel): ... class AgentOutputParser(BaseOutputParser[AgentAction | AgentFinish]): ... class MultiActionAgentOutputParser(BaseOutputParser[list[AgentAction] | AgentFinish]): ... class RunnableAgent(BaseSingleActionAgent): ... class Ru...
def input_keys(self) -> list[str]:
    """Return the input keys."""
    return self._action_agent.input_keys
function_simple
1
{"cognitive_complexity": 0, "loc": 3, "code_loc": 1, "docstring_loc": 1, "function_name": "input_keys", "class_name": "AgentExecutor", "qualname": "AgentExecutor.input_keys", "file_path": "libs/langchain/langchain_classic/agents/agent.py", "repo_id": "langchain-ai/langchain", "has_docstring": true, "runnable_level": "c...
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-alibabacloud-mysql/tests/test_alibabacloud_mysql.py:test_initialize_success
# Context: from unittest.mock import Mock, patch from llama_index.vector_stores.alibabacloud_mysql import AlibabaCloudMySQLVectorStore def test_class_name() -> None: ... def test_client_property() -> None: ... def test_create_engine() -> None: ... def test_get_connection_context_manager() -> None: ... def test_check_v...
def test_initialize_success() -> None: """Test _initialize method success case.""" with patch.object( AlibabaCloudMySQLVectorStore, "_check_vector_support" ) as mock_check: with patch.object( AlibabaCloudMySQLVectorStore, "_create_table_if_not_exists" ) as mock_create_tab...
test
1
{"function_name": "test_initialize_success", "class_name": null, "qualname": "test_initialize_success", "file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-alibabacloud-mysql/tests/test_alibabacloud_mysql.py", "repo_id": "run-llama/llama_index", "loc": 23, "tested_modules": ["llama_index.core...
langchain-ai/langchain:libs/langchain/langchain_classic/chains/natbot/base.py:NatBotChain.from_default
# Context: from typing import Any class NatBotChain(Chain): model_config = ConfigDict( def _raise_deprecation(cls, values: dict) -> Any: ... def from_llm(cls, llm: BaseLanguageModel, objective: str, **kwargs) -> NatBotChain: ... def input_keys(self) -> list[str]: ... def output_keys(self) -> list[s...
def from_default(cls, objective: str, **kwargs: Any) -> NatBotChain: """Load with default LLMChain.""" msg = ( "This method is no longer implemented. Please use from_llm." "model = OpenAI(temperature=0.5, best_of=10, n=3, max_tokens=50)" "For example, NatBotChain.from...
function_simple
1
{"cognitive_complexity": 0, "loc": 8, "code_loc": 6, "docstring_loc": 1, "function_name": "from_default", "class_name": "NatBotChain", "qualname": "NatBotChain.from_default", "file_path": "libs/langchain/langchain_classic/chains/natbot/base.py", "repo_id": "langchain-ai/langchain", "has_docstring": true, "runnable_leve...
apache/airflow:providers/teradata/tests/unit/teradata/utils/test_bteq_util.py:TestBteqUtils.test_prepare_bteq_script_for_remote_execution
# Context: from airflow.providers.teradata.utils.bteq_util import ( identify_os, is_valid_encoding, is_valid_file, is_valid_remote_bteq_script_file, prepare_bteq_script_for_local_execution, prepare_bteq_script_for_remote_execution, read_file, transfer_file_sftp, verify_bteq_installed...
def test_prepare_bteq_script_for_remote_execution(self):
    conn = {"host": "myhost", "login": "user", "password": "pass"}
    sql = "SELECT * FROM DUAL;"
    script = prepare_bteq_script_for_remote_execution(conn, sql)
    assert ".LOGON myhost/user,pass" in script
    assert "SELECT * FROM DUAL;"...
test
1
{"function_name": "test_prepare_bteq_script_for_remote_execution", "class_name": "TestBteqUtils", "qualname": "TestBteqUtils.test_prepare_bteq_script_for_remote_execution", "file_path": "providers/teradata/tests/unit/teradata/utils/test_bteq_util.py", "repo_id": "apache/airflow", "loc": 7, "tested_modules": ["__future_...
huggingface/transformers:src/transformers/models/glm4v/image_processing_glm4v.py:Glm4vImageProcessor.preprocess
# Context: import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make...
def preprocess( self, images: ImageInput, do_resize: bool | None = None, size: dict[str, int] | None = None, resample: PILImageResampling | None = None, do_rescale: bool | None = None, rescale_factor: float | None = None, do_normalize: bool | None = None, ...
function_complex
0
{"cognitive_complexity": 21, "loc": 130, "code_loc": 55, "docstring_loc": 50, "function_name": "preprocess", "class_name": "Glm4vImageProcessor", "qualname": "Glm4vImageProcessor.preprocess", "file_path": "src/transformers/models/glm4v/image_processing_glm4v.py", "repo_id": "huggingface/transformers", "has_docstring": ...
ray-project/ray:python/ray/serve/task_processor.py:CeleryTaskProcessorAdapter._handle_unknown_task
# Context: from typing import Any, Dict, List, Optional class CeleryTaskProcessorAdapter(TaskProcessorAdapter): def __init__(self, config: TaskProcessorConfig, *args, **kwargs): super().__init__(*args, **kwargs) if not isinstance(config.adapter_config, CeleryAdapterConfig): raise TypeE...
def _handle_unknown_task( self, sender: Any = None, name: str = None, id: str = None, message: Any = None, exc: Any = None, **kwargs, ): """Handle unknown or unregistered tasks received by Celery. This method is called when Celery receives a t...
function_simple
0
{"cognitive_complexity": 1, "loc": 39, "code_loc": 15, "docstring_loc": 14, "function_name": "_handle_unknown_task", "class_name": "CeleryTaskProcessorAdapter", "qualname": "CeleryTaskProcessorAdapter._handle_unknown_task", "file_path": "python/ray/serve/task_processor.py", "repo_id": "ray-project/ray", "has_docstring"...
crewAIInc/crewAI:lib/crewai-files/tests/processing/test_validators.py:TestValidateImage.test_validate_valid_image
# Context: from crewai_files import AudioFile, FileBytes, ImageFile, PDFFile, TextFile, VideoFile from crewai_files.processing.constraints import ( ANTHROPIC_CONSTRAINTS, AudioConstraints, ImageConstraints, PDFConstraints, ProviderConstraints, VideoConstraints, ) from crewai_files.processing.val...
def test_validate_valid_image(self): """Test validating a valid image within constraints.""" constraints = ImageConstraints( max_size_bytes=10 * 1024 * 1024, supported_formats=("image/png",), ) file = ImageFile(source=FileBytes(data=MINIMAL_PNG, filename="test.png...
test
0
{"function_name": "test_validate_valid_image", "class_name": "TestValidateImage", "qualname": "TestValidateImage.test_validate_valid_image", "file_path": "lib/crewai-files/tests/processing/test_validators.py", "repo_id": "crewAIInc/crewAI", "loc": 11, "tested_modules": ["crewai_files", "crewai_files.processing.constrai...
apache/airflow:task-sdk/src/airflow/sdk/configuration.py:get_custom_secret_backend
# Context: from airflow.sdk.configuration import conf def _default_config_file_path(file_name: str) -> str: ... def retrieve_configuration_description() -> dict[str, dict[str, Any]]: ... def create_default_config_parser(configuration_description: dict[str, dict[str, Any]]) -> ConfigParser: ... def get_sdk_expansion_va...
def get_custom_secret_backend(worker_mode: bool = False): """ Get Secret Backend if defined in airflow.cfg. Conditionally selects the section, key and kwargs key based on whether it is called from worker or not. This is a convenience function that calls conf._get_custom_secret_backend(). Uses SDK'...
function_simple
1
{"cognitive_complexity": 0, "loc": 13, "code_loc": 2, "docstring_loc": 8, "function_name": "get_custom_secret_backend", "class_name": null, "qualname": "get_custom_secret_backend", "file_path": "task-sdk/src/airflow/sdk/configuration.py", "repo_id": "apache/airflow", "has_docstring": true, "runnable_level": "project_ru...
browser-use/browser-use:browser_use/browser/session.py:BrowserSession.is_local
# Context: class Target(BaseModel): ... class CDPSession(BaseModel): ... class BrowserSession(BaseModel): model_config = ConfigDict( def __init__( self, *, # Cloud browser params - use these for cloud mode cloud_profile_id: UUID | str | None = None, cloud_proxy_country_code: ProxyCountryCode | None = N...
def is_local(self) -> bool:
    """Whether this is a local browser instance from browser profile."""
    return self.browser_profile.is_local
function_simple
0
{"cognitive_complexity": 0, "loc": 3, "code_loc": 1, "docstring_loc": 1, "function_name": "is_local", "class_name": "BrowserSession", "qualname": "BrowserSession.is_local", "file_path": "browser_use/browser/session.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "class_runnable"}
google/langextract:langextract/data_lib.py:annotated_document_to_dict
# Context: import dataclasses from typing import Any, Iterable, Mapping from langextract.core import data def enum_asdict_factory(items: Iterable[tuple[str, Any]]) -> dict[str, Any]: ... def dict_to_annotated_document(adoc_dic: Mapping[str, Any]) -> data.AnnotatedDocument: ... # Task: Write a Python function `annotat...
def annotated_document_to_dict( adoc: data.AnnotatedDocument | None, ) -> dict[str, Any]: """Converts an AnnotatedDocument into a Python dict. This function converts an AnnotatedDocument object into a Python dict, making it easier to serialize or deserialize the document. Enum values and NumPy integers are...
function_simple
1
{"cognitive_complexity": 1, "loc": 26, "code_loc": 5, "docstring_loc": 14, "function_name": "annotated_document_to_dict", "class_name": null, "qualname": "annotated_document_to_dict", "file_path": "langextract/data_lib.py", "repo_id": "google/langextract", "has_docstring": true, "runnable_level": "project_runnable"}
ray-project/ray:python/ray/data/tests/test_shuffle_diagnostics.py:test_sort_inlined_objects_warnings
# Context: import logging import pytest import ray from ray.data.context import DataContext, ShuffleStrategy def test_debug_limit_shuffle_execution_to_num_blocks(ray_start_regular, restore_data_context, configure_shuffle_method, shuffle_op): ... def test_memory_usage(ray_start_regular, restore_data_context, configure_...
def test_sort_inlined_objects_warnings( ray_start_regular, restore_data_context, configure_shuffle_method, under_threshold, propagate_logs, caplog, ): # Test that we warn iff expected driver memory usage from # storing tiny Ray objects on driver heap is higher than # the configured t...
test
0
{"function_name": "test_sort_inlined_objects_warnings", "class_name": null, "qualname": "test_sort_inlined_objects_warnings", "file_path": "python/ray/data/tests/test_shuffle_diagnostics.py", "repo_id": "ray-project/ray", "loc": 32, "tested_modules": ["ray.data.context", "ray.data.dataset"], "has_docstring": false, "ru...
gradio-app/gradio:gradio/mcp.py:GradioMCPServer.launch_mcp_on_sse
# Context: from starlette.applications import Starlette from starlette.responses import JSONResponse, Response from starlette.routing import Mount, Route def resource(uri_template: str, description: str | None, mime_type: str | None): ... def prompt(name: str | None, description: str | None): ... def tool(name: str | ...
def launch_mcp_on_sse(self, app: Starlette, subpath: str, root_path: str) -> None: """ Launch the MCP server on the SSE transport. Parameters: app: The Gradio app to mount the MCP server on. subpath: The subpath to mount the MCP server on. E.g. "/gradio_api/mcp" ...
function_simple
1
{"cognitive_complexity": 1, "loc": 42, "code_loc": 31, "docstring_loc": 8, "function_name": "launch_mcp_on_sse", "class_name": "GradioMCPServer", "qualname": "GradioMCPServer.launch_mcp_on_sse", "file_path": "gradio/mcp.py", "repo_id": "gradio-app/gradio", "has_docstring": true, "runnable_level": "file_runnable"}
browser-use/browser-use:browser_use/skill_cli/commands/cloud_task.py:poll_until_complete
# Context: from browser_use_sdk.types.task_view import TaskView from browser_use.skill_cli.commands.utils import format_duration, get_sdk_client import asyncio def _filter_none(kwargs: dict[str, Any]) -> dict[str, Any]: ... def create_task(task: str, **kwargs) -> TaskCreatedResponse: ... def get_task(task_id: str) -> ...
async def poll_until_complete( task_id: str, stream: bool = False, poll_interval: float = 1.0, ) -> TaskView: """Poll task status until finished.""" import asyncio client = get_sdk_client() last_status = None while True: # Run blocking SDK call in thread to avoid blocking event loop task = await asyncio.t...
function_complex
0
{"cognitive_complexity": 6, "loc": 24, "code_loc": 12, "docstring_loc": 1, "function_name": "poll_until_complete", "class_name": null, "qualname": "poll_until_complete", "file_path": "browser_use/skill_cli/commands/cloud_task.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "project_r...
langchain-ai/langchain:libs/langchain/langchain_classic/evaluation/loading.py:load_dataset
Write a Python function `load_dataset` to load a dataset from the [LangChainDatasets on HuggingFace](https://huggingface.co/LangChainDatasets). Parameters: uri: str Returns: list[dict]
def load_dataset(uri: str) -> list[dict]: """Load a dataset from the [LangChainDatasets on HuggingFace](https://huggingface.co/LangChainDatasets). Args: uri: The uri of the dataset to load. Returns: A list of dictionaries, each representing a row in the dataset. **Prerequisites** ...
function_simple
1
{"cognitive_complexity": 1, "loc": 34, "code_loc": 10, "docstring_loc": 22, "function_name": "load_dataset", "class_name": null, "qualname": "load_dataset", "file_path": "libs/langchain/langchain_classic/evaluation/loading.py", "repo_id": "langchain-ai/langchain", "has_docstring": true, "runnable_level": "self_containe...
apache/airflow:providers/keycloak/src/airflow/providers/keycloak/auth_manager/cli/commands.py:create_permissions_command
# Context: from airflow.utils import cli as cli_utils def _get_resource_methods() -> list[str]: ... def _get_extended_resource_methods() -> list[str]: ... def create_scopes_command(args): ... def create_resources_command(args): ... def create_all_command(args): ... def _get_client(args): ... def _get_client_uuid(args)...
def create_permissions_command(args): """Create Keycloak auth manager permissions in Keycloak.""" client = _get_client(args) client_uuid = _get_client_uuid(args) teams = _parse_teams(args.teams) _ensure_multi_team_enabled(teams=teams, command_name="create-permissions") if teams: # Role ...
function_simple
1
{"cognitive_complexity": 3, "loc": 13, "code_loc": 9, "docstring_loc": 1, "function_name": "create_permissions_command", "class_name": null, "qualname": "create_permissions_command", "file_path": "providers/keycloak/src/airflow/providers/keycloak/auth_manager/cli/commands.py", "repo_id": "apache/airflow", "has_docstrin...
browser-use/browser-use:browser_use/browser/profile.py:BrowserProfile._download_extension
# Context: from pathlib import Path def _get_enable_default_extensions_default() -> bool: ... class ViewportSize(BaseModel): ... def get_display_size() -> ViewportSize | None: ... def get_window_adjustments() -> tuple[int, int]: ... def validate_url(url: str, schemes: Iterable[str]) -> str: ... def validate_float_rang...
def _download_extension(self, url: str, output_path: Path) -> None:
    """Download extension .crx file."""
    import urllib.request
    try:
        with urllib.request.urlopen(url) as response:
            with open(output_path, 'wb') as f:
                f.write(response.read())
    except Exception as e:
        raise Exception(f'Failed to download...
function_simple
0
{"cognitive_complexity": 1, "loc": 10, "code_loc": 7, "docstring_loc": 1, "function_name": "_download_extension", "class_name": "BrowserProfile", "qualname": "BrowserProfile._download_extension", "file_path": "browser_use/browser/profile.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level"...
browser-use/browser-use:tests/ci/test_multi_act_guards.py:TestTerminatesSequenceMetadata.test_navigate_terminates
# Context: def http_server(): ... def base_url(http_server): ... async def browser_session(): ... def tools(): ... class TestStaticGuard: ... class TestRuntimeGuard: ... class TestSafeChain: ... class TestTerminatesSequenceMetadata: def test_search_terminates(self, tools): ... def test_go_back_terminates(self...
def test_navigate_terminates(self, tools):
    action = tools.registry.registry.actions.get('navigate')
    assert action is not None
    assert action.terminates_sequence is True
test
0
{"function_name": "test_navigate_terminates", "class_name": "TestTerminatesSequenceMetadata", "qualname": "TestTerminatesSequenceMetadata.test_navigate_terminates", "file_path": "tests/ci/test_multi_act_guards.py", "repo_id": "browser-use/browser-use", "loc": 4, "tested_modules": ["browser_use.agent.service", "browser_...
langflow-ai/langflow:src/backend/tests/unit/components/processing/test_text_operations_component.py:TestTextOperationsTail.test_tail_negative_characters
# Context: import pytest from lfx.components.processing.text_operations import TextOperations class TestTextOperationsComponent(ComponentTestBaseWithoutClient): ... class TestTextOperationsWordCount: ... class TestTextOperationsCaseConversion: ... class TestTextOperationsReplace: ... class TestTextOperationsExtract: ....
def test_tail_negative_characters(self):
    """Test tail with negative characters raises ValueError (Bug #7 fix)."""
    component = TextOperations()
    component.tail_characters = -5
    with pytest.raises(ValueError, match="non-negative"):
        component._text_tail("Hello")
test
1
{"function_name": "test_tail_negative_characters", "class_name": "TestTextOperationsTail", "qualname": "TestTextOperationsTail.test_tail_negative_characters", "file_path": "src/backend/tests/unit/components/processing/test_text_operations_component.py", "repo_id": "langflow-ai/langflow", "loc": 7, "tested_modules": ["l...
huggingface/transformers:src/transformers/models/olmo_hybrid/convert_olmo_hybrid_weights_to_hf.py:get_layer_types_from_config
Write a Python function `get_layer_types_from_config` to determine the layer types (full_attention, linear_attention). Parameters: olmo_config: dict Returns: list[str]
def get_layer_types_from_config(olmo_config: dict) -> list[str]: """ Determine the layer types (full_attention, linear_attention) from the OLMo config. """ model_config = olmo_config["model"] block_config = model_config["block"] n_layers = model_config["n_layers"] fla_hybrid_attention_i...
function_simple
0
{"cognitive_complexity": 4, "loc": 19, "code_loc": 11, "docstring_loc": 4, "function_name": "get_layer_types_from_config", "class_name": null, "qualname": "get_layer_types_from_config", "file_path": "src/transformers/models/olmo_hybrid/convert_olmo_hybrid_weights_to_hf.py", "repo_id": "huggingface/transformers", "has_d...
crewAIInc/crewAI:lib/crewai/tests/llms/anthropic/test_anthropic.py:test_anthropic_thinking
# Context: from unittest.mock import patch, MagicMock import pytest from crewai.llm import LLM from crewai.llms.providers.anthropic.completion import AnthropicCompletion from unittest.mock import patch def mock_anthropic_api_key(): ... def test_anthropic_completion_is_used_when_anthropic_provider(): ... def test_anthr...
def test_anthropic_thinking(): """Test that thinking is properly handled and thinking params are passed to messages.create""" from unittest.mock import patch from crewai.llms.providers.anthropic.completion import AnthropicCompletion llm = LLM( model="anthropic/claude-sonnet-4-5", thinki...
test
0
{"function_name": "test_anthropic_thinking", "class_name": null, "qualname": "test_anthropic_thinking", "file_path": "lib/crewai/tests/llms/anthropic/test_anthropic.py", "repo_id": "crewAIInc/crewAI", "loc": 34, "tested_modules": ["crewai.llm", "crewai.crew", "crewai.agent", "crewai.task", "crewai.llms.providers.anthro...
vllm-project/vllm:tests/v1/metrics/test_perf_metrics.py:module_doc
Write a module-level docstring for the Python module `test_perf_metrics` which contains class `MockModelConfig`, function `create_mock_vllm_config`, function `test_base_config_parser`, function `test_base_attention_config_parser_with_gqa`, function `test_base_attention_config_parser_without_gqa`.
Tests for the analytic estimators in metrics/flops.py.
documentation
1
{"doc_type": "module", "module_name": "test_perf_metrics", "file_path": "tests/v1/metrics/test_perf_metrics.py", "repo_id": "vllm-project/vllm", "char_length": 54}
ray-project/ray:python/ray/serve/task_processor.py:CeleryTaskProcessorAdapter.cancel_task_sync
# Context: class CeleryTaskProcessorAdapter(TaskProcessorAdapter): def __init__(self, config: TaskProcessorConfig, *args, **kwargs): super().__init__(*args, **kwargs) if not isinstance(config.adapter_config, CeleryAdapterConfig): raise TypeError( "TaskProcessorConfig.ad...
def cancel_task_sync(self, task_id):
    """
    Cancels a task synchronously. Only supported for Redis and RabbitMQ brokers by Celery.
    More details can be found here: https://docs.celeryq.dev/en/stable/userguide/workers.html#revoke-revoking-tasks
    """
    self._app.control.revoke(task_id)
function_simple
0
{"cognitive_complexity": 0, "loc": 6, "code_loc": 1, "docstring_loc": 4, "function_name": "cancel_task_sync", "class_name": "CeleryTaskProcessorAdapter", "qualname": "CeleryTaskProcessorAdapter.cancel_task_sync", "file_path": "python/ray/serve/task_processor.py", "repo_id": "ray-project/ray", "has_docstring": true, "ru...
ray-project/ray:python/ray/_common/tests/test_signature.py:TestExtractSignature.test_method_with_ignore_first
# Context: from ray._common.signature import ( DUMMY_TYPE, extract_signature, flatten_args, get_signature, recover_args, validate_args, ) class TestGetSignature: ... class TestValidateArgs: ... class TestFlattenArgs: ... class TestRecoverArgs: ... class TestIntegration: ... class TestExtractSi...
def test_method_with_ignore_first(self): """Test extracting signature from method ignoring 'self' parameter.""" class TestClass: def test_method(self, a, b=20): return a + b params = extract_signature(TestClass.test_method, ignore_first=True) assert len(para...
test
0
{"function_name": "test_method_with_ignore_first", "class_name": "TestExtractSignature", "qualname": "TestExtractSignature.test_method_with_ignore_first", "file_path": "python/ray/_common/tests/test_signature.py", "repo_id": "ray-project/ray", "loc": 12, "tested_modules": ["typing", "ray._common.signature"], "has_docst...
ray-project/ray:python/ray/data/namespace_expressions/string_namespace.py:_StringNamespace.extract
# Context: from typing import TYPE_CHECKING, Any, Callable, Literal import pyarrow.compute as pc from ray.data.datatype import DataType def _create_str_udf(pc_func: Callable[..., pyarrow.Array], return_dtype: DataType) -> Callable[..., 'UDFExpr']: ... class _StringNamespace: def len(self) -> 'UDFExpr': ... de...
def extract(self, pattern: str, *args: Any, **kwargs: Any) -> "UDFExpr":
    """Extract a substring matching a regex pattern."""
    return _create_str_udf(pc.extract_regex, DataType.string())(
        self._expr, pattern, *args, **kwargs
    )
function_simple
0
{"cognitive_complexity": 0, "loc": 5, "code_loc": 3, "docstring_loc": 1, "function_name": "extract", "class_name": "_StringNamespace", "qualname": "_StringNamespace.extract", "file_path": "python/ray/data/namespace_expressions/string_namespace.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": ...
github/spec-kit:src/specify_cli/extensions.py:ExtensionManifest._load_yaml
# Context: from pathlib import Path import yaml class ExtensionError(Exception): ... class ValidationError(ExtensionError): ... class CompatibilityError(ExtensionError): ... class ExtensionRegistry: ... class ExtensionManager: ... def version_satisfies(current: str, required: str) -> bool: ... class CommandRegistrar: ...
def _load_yaml(self, path: Path) -> dict:
    """Load YAML file safely."""
    try:
        with open(path, 'r') as f:
            return yaml.safe_load(f) or {}
    except yaml.YAMLError as e:
        raise ValidationError(f"Invalid YAML in {path}: {e}")
    except FileNotFoundError:
        ...
function_simple
0
{"cognitive_complexity": 3, "loc": 9, "code_loc": 7, "docstring_loc": 1, "function_name": "_load_yaml", "class_name": "ExtensionManifest", "qualname": "ExtensionManifest._load_yaml", "file_path": "src/specify_cli/extensions.py", "repo_id": "github/spec-kit", "has_docstring": true, "runnable_level": "file_runnable"}
huggingface/transformers:src/transformers/models/dinov3_convnext/convert_dinov3_convnext_to_hf.py:convert_old_keys_to_new_keys
# Context: import re def get_dinov3_config(model_name: str) -> DINOv3ConvNextConfig: ... def prepare_img(): ... def get_transform(resize_size: int): ... def get_image_processor(resize_size: int): ... def convert_and_test_dinov3_checkpoint(args): ... # Task: Write a Python function `convert_old_keys_to_new_keys` to th...
def convert_old_keys_to_new_keys(state_dict_keys: dict | None = None): """ This function should be applied only once, on the concatenated keys to efficiently rename using the key mappings. """ output_dict = {} if state_dict_keys is not None: old_text = "\n".join(state_dict_keys) ...
function_complex
0
{"cognitive_complexity": 6, "loc": 16, "code_loc": 11, "docstring_loc": 4, "function_name": "convert_old_keys_to_new_keys", "class_name": null, "qualname": "convert_old_keys_to_new_keys", "file_path": "src/transformers/models/dinov3_convnext/convert_dinov3_convnext_to_hf.py", "repo_id": "huggingface/transformers", "has...
ray-project/ray:python/ray/data/tests/test_map_batches.py:test_map_batches_async_exception_propagation
# Context: import pytest import ray def process_timestamp_data(row): ... def process_timestamp_data_batch_arrow(batch: pa.Table) -> pa.Table: ... def process_timestamp_data_batch_pandas(batch: pd.DataFrame) -> pd.DataFrame: ... def test_map_batches_basic(ray_start_regular_shared, tmp_path, restore_data_context, target...
def test_map_batches_async_exception_propagation(shutdown_only): ray.shutdown() ray.init(num_cpus=2) class MyUDF: def __init__(self): pass async def __call__(self, batch): # This will trigger an assertion error. assert False yield batch ...
test
0
{"function_name": "test_map_batches_async_exception_propagation", "class_name": null, "qualname": "test_map_batches_async_exception_propagation", "file_path": "python/ray/data/tests/test_map_batches.py", "repo_id": "ray-project/ray", "loc": 21, "tested_modules": ["typing", "ray.data._internal.arrow_ops.transform_pyarro...
ray-project/ray:doc/source/ray-overview/examples/multi_agent_a2a/ci/nb2sh.py:_extract_config_path
# Context: def _is_shell_cell(source: str) -> bool: ... def _extract_bash_fences(source: str) -> list[str]: ... def _postprocess(lines: list[str]) -> list[str]: ... def nb2sh(notebook_path: str, output_path: str) -> None: ... # Task: Write a Python function `_extract_config_path` to return the config file path from d...
def _extract_config_path(flags: str) -> str:
    """Return the config file path from deploy-style CLI flags."""
    m = _CONFIG_PATH_RE.search(flags)
    return m.group(1) if m else flags.strip()
function_simple
0
{"cognitive_complexity": 1, "loc": 4, "code_loc": 2, "docstring_loc": 1, "function_name": "_extract_config_path", "class_name": null, "qualname": "_extract_config_path", "file_path": "doc/source/ray-overview/examples/multi_agent_a2a/ci/nb2sh.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "f...
crewAIInc/crewAI:lib/crewai/src/crewai/a2a/updates/push_notifications/handler.py:module_doc
Write a module-level docstring for the Python module `handler` which contains function `_handle_push_error`, class `PushNotificationHandler`.
Push notification (webhook) update mechanism handler.
documentation
0
{"doc_type": "module", "module_name": "handler", "file_path": "lib/crewai/src/crewai/a2a/updates/push_notifications/handler.py", "repo_id": "crewAIInc/crewAI", "char_length": 53}
browser-use/browser-use:browser_use/llm/groq/serializer.py:GroqMessageSerializer.serialize
# Context: from groq.types.chat import ( ChatCompletionAssistantMessageParam, ChatCompletionContentPartImageParam, ChatCompletionContentPartTextParam, ChatCompletionMessageParam, ChatCompletionMessageToolCallParam, ChatCompletionSystemMessageParam, ChatCompletionUserMessageParam, ) from browser_use.llm.messages ...
def serialize(message: BaseMessage) -> ChatCompletionMessageParam: """Serialize a custom message to an OpenAI message param.""" if isinstance(message, UserMessage): user_result: ChatCompletionUserMessageParam = { 'role': 'user', 'content': GroqMessageSerializer._serialize_user_content(message.content), ...
function_complex
0
{"cognitive_complexity": 16, "loc": 43, "code_loc": 30, "docstring_loc": 1, "function_name": "serialize", "class_name": "GroqMessageSerializer", "qualname": "GroqMessageSerializer.serialize", "file_path": "browser_use/llm/groq/serializer.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level"...
ray-project/ray:python/ray/data/tests/test_dataset_limits.py:test_limit_no_redundant_read
# Context: import sys import pandas as pd import pyarrow as pa import pytest import ray from ray.data.block import BlockMetadata from ray.data.datasource.datasource import Datasource, ReadTask def test_limit_execution(ray_start_regular): ... def test_limit(ray_start_regular_shared, lazy): ... def test_limit_no_num_row...
def test_limit_no_redundant_read( ray_start_regular_shared, limit, ): # Test that dataset truncation eliminates redundant reads. @ray.remote class Counter: def __init__(self): self.count = 0 def increment(self): self.count += 1 def get(self): ...
test
0
{"function_name": "test_limit_no_redundant_read", "class_name": null, "qualname": "test_limit_no_redundant_read", "file_path": "python/ray/data/tests/test_dataset_limits.py", "repo_id": "ray-project/ray", "loc": 70, "tested_modules": ["ray.data.block", "ray.data.context", "ray.data.datasource.datasource", "ray.data.tes...
commaai/openpilot:selfdrive/ui/mici/onroad/torque_bar.py:arc_bar_pts
# Context: import math import time import numpy as np import pyray as rl def quantized_lru_cache(maxsize): ... class TorqueBar(Widget): ... # Task: Write a Python function `arc_bar_pts` to return Nx2 np.float32 points for a single closed polygon (rounded thick arc). Parameters: cx: float, cy: float, r_mid: float, th...
def arc_bar_pts(cx: float, cy: float, r_mid: float, thickness: float, a0_deg: float, a1_deg: float, *, max_points: int = 100, cap_segs: int = 10, cap_radius: float = 7, px_per_seg: float = 2.0) -> np.ndarray: """Return Nx2 np.float32 points for a single ...
function_complex
0
{"cognitive_complexity": 19, "loc": 98, "code_loc": 59, "docstring_loc": 1, "function_name": "arc_bar_pts", "class_name": null, "qualname": "arc_bar_pts", "file_path": "selfdrive/ui/mici/onroad/torque_bar.py", "repo_id": "commaai/openpilot", "has_docstring": true, "runnable_level": "project_runnable"}
langflow-ai/langflow:src/lfx/tests/unit/run/test_base.py:TestRunFlowPythonScript.test_no_graph_variable_raises_error
# Context: import pytest from lfx.run.base import RunError, output_error, run_flow class TestRunError: ... class TestOutputError: ... class TestRunFlowInputValidation: ... class TestRunFlowFileValidation: ... class TestRunFlowJsonInput: ... class TestRunFlowStdinInput: ... class TestRunFlowGlobalVariables: ... class T...
async def test_no_graph_variable_raises_error(self, no_graph_script):
    """Test that a script without graph variable raises RunError."""
    with pytest.raises(RunError) as exc_info:
        await run_flow(script_path=no_graph_script)
    assert "No 'graph' variable found" in str(exc_info.value)
test
1
{"function_name": "test_no_graph_variable_raises_error", "class_name": "TestRunFlowPythonScript", "qualname": "TestRunFlowPythonScript.test_no_graph_variable_raises_error", "file_path": "src/lfx/tests/unit/run/test_base.py", "repo_id": "langflow-ai/langflow", "loc": 5, "tested_modules": ["io", "lfx.run.base"], "has_doc...
browser-use/browser-use:browser_use/skills/service.py:SkillService.__init__
# Context: import os from typing import Any, Literal from browser_use_sdk import AsyncBrowserUse from browser_use.skills.views import ( MissingCookieException, Skill, ) class SkillService: async def async_init(self) -> None: ... async def get_skill(self, skill_id: str) -> Skill | None: ... async def get_...
def __init__(self, skill_ids: list[str | Literal['*']], api_key: str | None = None): """Initialize the skills service Args: skill_ids: List of skill IDs to fetch and cache, or ['*'] to fetch all available skills api_key: Browser Use API key (optional, will use env var if not provided) """ self.skill_ids ...
function_simple
0
{"cognitive_complexity": 2, "loc": 16, "code_loc": 7, "docstring_loc": 6, "function_name": "__init__", "class_name": "SkillService", "qualname": "SkillService.__init__", "file_path": "browser_use/skills/service.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "project_runnable"}
unclecode/crawl4ai:docs/examples/link_head_extraction_example.py:module_doc
Write a module-level docstring for the Python module `link_head_extraction_example` which contains various utilities.
Link Head Extraction & Scoring Example This example demonstrates Crawl4AI's advanced link analysis capabilities: 1. Basic link head extraction 2. Three-layer scoring system (intrinsic, contextual, total) 3. Pattern-based filtering 4. Multiple practical use cases Requirements: - crawl4ai installed - Internet connectio...
documentation
1
{"doc_type": "module", "module_name": "link_head_extraction_example", "file_path": "docs/examples/link_head_extraction_example.py", "repo_id": "unclecode/crawl4ai", "char_length": 372}
ray-project/ray:python/ray/train/v2/tests/test_config.py:test_storage_filesystem_repr
# Context: from ray.train import RunConfig, ScalingConfig def test_scaling_config_validation(): ... def test_scaling_config_accelerator_type(): ... def test_scaling_config_tpu_min_workers_multiple(): ... def test_scaling_config_default_workers(): ... # Task: Write a Python test function `test_storage_filesystem_repr`...
def test_storage_filesystem_repr():
    """Test for https://github.com/ray-project/ray/pull/40851"""
    config = RunConfig(storage_filesystem=pyarrow.fs.S3FileSystem())
    repr(config)
test
0
{"function_name": "test_storage_filesystem_repr", "class_name": null, "qualname": "test_storage_filesystem_repr", "file_path": "python/ray/train/v2/tests/test_config.py", "repo_id": "ray-project/ray", "loc": 4, "tested_modules": ["ray.train"], "has_docstring": true, "runnable_level": "file_runnable"}