input string | label int64 | sample_id string | category string | repo_id string |
|---|---|---|---|---|
"""Tests for channel plugin discovery, merging, and config compatibility."""
from __future__ import annotations
from types import SimpleNamespace
from unittest.mock import patch
import pytest
from nanobot.bus.events import OutboundMessage
from nanobot.bus.queue import MessageBus
from nanobot.channels.base import BaseChannel
from nanobot.channels.manager import ChannelManager
from nanobot.config.schema import ChannelsConfig
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
class _FakePlugin(BaseChannel):
name = "fakeplugin"
display_name = "Fake Plugin"
async def start(self) -> None:
pass
async def stop(self) -> None:
pass
async def send(self, msg: OutboundMessage) -> None:
pass
class _FakeTelegram(BaseChannel):
"""Plugin that tries to shadow built-in telegram."""
name = "telegram"
display_name = "Fake Telegram"
async def start(self) -> None:
pass
async def stop(self) -> None:
pass
async def send(self, msg: OutboundMessage) -> None:
pass
def _make_entry_point(name: str, cls: type):
"""Create a mock entry point that returns *cls* on load()."""
| 0 | HKUDS/nanobot:tests/test_channel_plugins.py | unknown | HKUDS/nanobot |
#!/usr/bin/env python3
"""
Skill Initializer - Creates a new skill from template
Usage:
init_skill.py <skill-name> --path <path> [--resources scripts,references,assets] [--examples]
Examples:
init_skill.py my-new-skill --path skills/public
init_skill.py my-new-skill --path skills/public --resources scripts,references
init_skill.py my-api-helper --path skills/private --resources scripts --examples
init_skill.py custom-skill --path /custom/location
"""
import argparse
import re
import sys
from pathlib import Path
MAX_SKILL_NAME_LENGTH = 64
ALLOWED_RESOURCES = {"scripts", "references", "assets"}
SKILL_TEMPLATE = """---
name: {skill_name}
description: [TODO: Complete and informative explanation of what the skill does and when to use it. Include WHEN to use this skill - specific scenarios, file types, or tasks that trigger it.]
---
# {skill_title}
## Overview
[TODO: 1-2 sentences explaining what this skill enables]
## Structuring This Skill
[TODO: Choose the structure that best fits this skill's purpose. Common patterns:
**1. Workflow-Based** (best for sequential processes)
| 0 | HKUDS/nanobot:nanobot/skills/skill-creator/scripts/init_skill.py | unknown | HKUDS/nanobot |
# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from http.cookies import SimpleCookie
from typing import TYPE_CHECKING, Any
from starlette.applications import Starlette
from starlette.middleware.sessions import SessionMiddleware
from starlette.responses import PlainTextResponse, RedirectResponse
from starlette.routing import Route
from starlette.testclient import TestClient
from streamlit.web.server.starlette import starlette_app_utils, starlette_auth_routes
from streamlit.web.server.starlette.starlette_auth_routes import (
| 1 | streamlit/streamlit:lib/tests/streamlit/web/server/starlette/starlette_auth_routes_test.py | test | streamlit/streamlit |
#!/usr/bin/env python3
# Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
PaddleOCR Text Recognition Caller
Simple CLI wrapper for the PaddleOCR text recognition library.
Usage:
python scripts/paddleocr-text-recognition/ocr_caller.py --file-url "URL"
python scripts/paddleocr-text-recognition/ocr_caller.py --file-path "image.png" --pretty
"""
import argparse
import io
import json
import sys
import tempfile
import uuid
from datetime import datetime
from pathlib import Path
# Fix Windows console encoding
if sys.platform == "win32 | 0 | PaddlePaddle/PaddleOCR:skills/paddleocr-text-recognition/scripts/ocr_caller.py | unknown | PaddlePaddle/PaddleOCR |
import os
import re
from collections.abc import Iterable, Sequence
from typing import Any
from django.apps.config import AppConfig
from django.conf import settings
from django.core import checks
def check_required_settings(
app_configs: Sequence[AppConfig] | None,
databases: Sequence[str] | None,
**kwargs: Any,
) -> Iterable[checks.CheckMessage]:
# These are the settings that we will check that the user has filled in for
# production deployments before starting the app. It consists of a series
# of pairs of (setting name, default value that it must be changed from)
required_settings = [
("EXTERNAL_HOST", "zulip.example.com"),
("ZULIP_ADMINISTRATOR", "zulip-admin@example.com"),
# SECRET_KEY doesn't really need to be here, in
# that we set it automatically, but just in
# case, it seems worth having in this list
("SECRET_KEY", ""),
("AUTHENTICATION_BACKENDS", ()),
]
errors = []
for setting_name, default in required_settings:
if (
hasattr(settings, setting_name)
and getattr(settings, setting_name) != default
| 1 | zulip/zulip:zerver/checks.py | function_complex | zulip/zulip |
"""
Base classes for MLflow GenAI tools that can be used by judges.
This module provides the foundational interfaces for tools that judges can use
to enhance their evaluation capabilities.
"""
from abc import ABC, abstractmethod
from typing import Any
from mlflow.entities.trace import Trace
from mlflow.types.llm import ToolDefinition
from mlflow.utils.annotations import experimental
@experimental(version="3.4.0")
class JudgeTool(ABC):
"""
Abstract base class for tools that can be used by MLflow judges.
Tools provide additional capabilities to judges for analyzing traces,
performing calculations, or accessing external data sources during evaluation.
"""
@property
@abstractmethod
def name(self) -> str:
"""
Return the unique name of the tool.
Returns:
Tool name used for registration and invocation
"""
@abstractmethod
def get_definition(self) -> ToolDefinition:
"""
Get the tool definition in LiteLLM/OpenAI function calling format.
Returns:
ToolDefinition object containing the tool specification
"""
@abstractmethod
def invoke(self, trace: Trace, **kwargs) -> Any:
"""
Invoke the tool with the provided trace and arguments.
Args:
trace | 1 | mlflow/mlflow:mlflow/genai/judges/tools/base.py | documentation | mlflow/mlflow |
from typing import Any
def bubble_sort_iterative(collection: list[Any]) -> list[Any]:
"""Pure implementation of bubble sort algorithm in Python
:param collection: some mutable ordered collection with heterogeneous
comparable items inside
:return: the same collection ordered in ascending order
Examples:
>>> bubble_sort_iterative([0, 5, 2, 3, 2])
[0, 2, 2, 3, 5]
>>> bubble_sort_iterative([])
[]
>>> bubble_sort_iterative([-2, -45, -5])
[-45, -5, -2]
>>> bubble_sort_iterative([-23, 0, 6, -4, 34])
[-23, -4, 0, 6, 34]
>>> bubble_sort_iterative([1, 2, 3, 4])
[1, 2, 3, 4]
>>> bubble_sort_iterative([3, 3, 3, 3])
[3, 3, 3, 3]
>>> bubble_sort_iterative([56])
[56]
>>> bubble | 0 | TheAlgorithms/Python:sorts/bubble_sort.py | unknown | TheAlgorithms/Python |
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Evaluate Transcription API correctness by computing Word Error Rate (WER)
on a given ASR dataset. When provided, it will also compare the WER against
a baseline.
This simulates real work usage of the API and makes sure that the frontend and
AsyncLLMEngine are working correctly.
"""
import asyncio
import io
import time
from statistics import mean, median
import librosa
import pytest
import soundfile
import torch
from datasets import load_dataset
from evaluate import load
from vllm.tokenizers import get_tokenizer
from ....models.registry import HF_EXAMPLE_MODELS
from ....utils import RemoteOpenAIServer
def to_bytes(y, sr):
buffer = io.BytesIO()
soundfile.write(buffer, y, sr, format="WAV")
buffer.seek(0)
return buffer
async def transcribe_audio(client, tokenizer, y, sr):
# Send loaded audio directly instead of loading from disk,
# don't account for that time though
with to_bytes(y, sr) as f:
start_time = time.perf_counter()
| 0 | vllm-project/vllm:tests/entrypoints/openai/correctness/test_transcription_api_correctness.py | unknown | vllm-project/vllm |
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
# Get PR title from environment
pr_title = os.environ.get("PR_TITLE", "").strip()
# Define rules
allowed_modules = ["fsdp", "megatron", "veomni", "sglang", "vllm", "trtllm", "rollout", "trainer"]
allowed_modules += ["tests", "training_utils", "recipe", "hardware", "deployment"]
allowed_modules += ["ray", "worker", "single_controller", "misc", "docker", "ci"]
allowed_modules += ["perf", "model", " | 0 | verl-project/verl:tests/special_sanity/check_pr_title.py | unknown | verl-project/verl |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
from typing import Optional
class PeftType(str, enum.Enum):
"""
Enum class for the different types of adapters in PEFT.
"""
PROMPT_TUNING = "PROMPT_TUNING"
MULTITASK_PROMPT_TUNING = "MULTITASK_PROMPT_TUNING"
P_TUNING = "P_TUNING"
PREFIX_TUNING = "PREFIX_TUNING"
LORA = "LORA"
ADALORA = "ADALORA"
BOFT = "BOFT"
ADAPTION_PROMPT = "AD | 0 | huggingface/peft:src/peft/utils/peft_types.py | unknown | huggingface/peft |
#!/usr/bin/env python3
"""Run trigger evaluation for a skill description.
Tests whether a skill's description causes Claude to trigger (read the skill)
for a set of queries. Outputs results as JSON.
"""
import argparse
import json
import os
import select
import subprocess
import sys
import time
import uuid
from concurrent.futures import ProcessPoolExecutor, as_completed
from pathlib import Path
from scripts.utils import parse_skill_md
def find_project_root() -> Path:
"""Find the project root by walking up from cwd looking for .claude/.
Mimics how Claude Code discovers its project root, so the command file
we create ends up where claude -p will look for it.
"""
current = Path.cwd()
for parent in [current, *current.parents]:
if (parent / ".claude").is_dir():
return parent
return current
def run_single_query(
query: str,
skill_name: str,
skill_description: str,
timeout: int,
project_root: str,
model: str | None = None,
) -> bool:
"""Run a single query and return whether the skill was triggered.
Creates a command file in .claude | 0 | davila7/claude-code-templates:cli-tool/components/skills/development/skill-creator/scripts/run_eval.py | unknown | davila7/claude-code-templates |
from __future__ import annotations
from abc import abstractmethod
from collections.abc import AsyncIterator, Callable, Iterator, Sequence
from typing import Any, Generic, Literal, cast, overload
from langchain_core.runnables import Runnable, RunnableConfig
from langchain_core.runnables.graph import Graph as DrawableGraph
from typing_extensions import Self
from langgraph.types import (
All,
Command,
GraphOutput,
StateSnapshot,
StateUpdate,
StreamMode,
StreamPart,
)
from langgraph.typing import ContextT, InputT, OutputT, StateT
__all__ = ("PregelProtocol", "StreamProtocol")
class PregelProtocol(Runnable[InputT, Any], Generic[StateT, ContextT, InputT, OutputT]):
@abstractmethod
def with_config(
self, config: RunnableConfig | None = None, **kwargs: Any
) -> Self: ...
@abstractmethod
def get_graph(
self,
config: RunnableConfig | None = None,
*,
xray: int | bool = False,
) -> DrawableGraph: ...
@abstractmethod
async def aget_graph(
self,
config: Runnable | 0 | langchain-ai/langgraph:libs/langgraph/langgraph/pregel/protocol.py | unknown | langchain-ai/langgraph |
#!/usr/bin/env python3
"""Fetch num_key_value_heads from HuggingFace config.json and update TOML model cards.
Usage:
# Update only cards missing num_key_value_heads
uv run python scripts/fetch_kv_heads.py --missing
# Update all cards (overwrite existing values)
uv run python scripts/fetch_kv_heads.py --all
"""
from __future__ import annotations
import argparse
import json
import sys
import urllib.request
from concurrent.futures import ThreadPoolExecutor, as_completed
from pathlib import Path
import tomlkit
CARDS_DIR = (
Path(__file__).resolve().parent.parent / "resources" / "inference_model_cards"
)
MAX_WORKERS = 5
def fetch_kv_heads(model_id: str) -> int | None:
"""Fetch num_key_value_heads from HuggingFace config.json."""
url = f"https://huggingface.co/{model_id}/raw/main/config.json"
try:
with urllib.request.urlopen(url, timeout=15) as resp:
config = json.loads(resp.read())
except Exception as e:
print(f" ERROR fetching {url}: {e}", file=sys.stderr)
return None
for source in [config | 0 | exo-explore/exo:scripts/fetch_kv_heads.py | unknown | exo-explore/exo |
"""
Functions related to generating headers and fingerprints generally
"""
from functools import lru_cache
from platform import system as platform_system
from browserforge.headers import Browser, HeaderGenerator
from browserforge.headers.generator import SUPPORTED_OPERATING_SYSTEMS
from scrapling.core._types import Dict, Literal, Tuple
__OS_NAME__ = platform_system()
OSName = Literal["linux", "macos", "windows"]
# Current versions hardcoded for now (Playwright doesn't allow to know the version of a browser without launching it)
chromium_version = 145
chrome_version = 145
@lru_cache(1, typed=True)
def get_os_name() -> OSName | Tuple:
"""Get the current OS name in the same format needed for browserforge, if the OS is Unknown, return None so browserforge uses all.
:return: Current OS name or `None` otherwise
"""
match __OS_NAME__: # pragma: no cover
case "Linux":
return "linux"
case "Darwin":
return "macos"
case "Windows":
return "windows"
case _:
return SUPPORTED_OPERATING_SYSTEMS
def generate_headers(browser_mode: bool | | 0 | D4Vinci/Scrapling:scrapling/engines/toolbelt/fingerprints.py | unknown | D4Vinci/Scrapling |
from __future__ import annotations
from typing import TYPE_CHECKING, Any, cast
from lfx.graph.vertex.base import Vertex
from lfx.log.logger import logger
from lfx.processing.utils import validate_and_repair_json
from pydantic import BaseModel
from langflow.schema.graph import InputValue, Tweaks
from langflow.schema.schema import INPUT_FIELD_NAME
from langflow.services.deps import get_settings_service
if TYPE_CHECKING:
from lfx.events.event_manager import EventManager
from lfx.graph.graph.base import Graph
from lfx.graph.schema import RunOutputs
from lfx.schema.schema import InputValueRequest
class Result(BaseModel):
result: Any
session_id: str
async def run_graph_internal(
graph: Graph,
flow_id: str,
*,
stream: bool = False,
session_id: str | None = None,
inputs: list[InputValueRequest] | None = None,
outputs: list[str] | None = None,
event_manager: EventManager | None = None,
) -> tuple[list[RunOutputs], str]:
"""Run the graph and generate the result."""
inputs = inputs or []
effective_session_id = session_id | 0 | langflow-ai/langflow:src/backend/base/langflow/processing/process.py | unknown | langflow-ai/langflow |
from __future__ import annotations
import warnings
from abc import ABC, abstractmethod
import pytest
import scrapy
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.extensions.feedexport import FeedExporter
from scrapy.utils.test import get_crawler
class TestURIParams(ABC):
spider_name = "uri_params_spider"
deprecated_options = False
@abstractmethod
def build_settings(self, uri="file:///tmp/foobar", uri_params=None):
raise NotImplementedError
def _crawler_feed_exporter(self, settings):
if self.deprecated_options:
with pytest.warns(
ScrapyDeprecationWarning,
match="The `FEED_URI` and `FEED_FORMAT` settings have been deprecated",
):
crawler = get_crawler(settings_dict=settings)
else:
crawler = get_crawler(settings_dict=settings)
feed_exporter = crawler.get_extension(FeedExporter)
return crawler, feed_exporter
def test_default(self):
settings = self.build_settings(
uri="file:///tmp/%(name)s",
)
crawler, feed_exporter = self._crawler_feed_exporter(settings)
spider = scrapy.Spider(self.spider_name)
spider.crawler = | 0 | scrapy/scrapy:tests/test_feedexport_uri_params.py | unknown | scrapy/scrapy |
"""SSE Polling Demo Client
Demonstrates the client-side auto-reconnect for SSE polling pattern.
This client connects to the SSE Polling Demo server and calls process_batch,
which triggers periodic server-side stream closes. The client automatically
reconnects using Last-Event-ID and resumes receiving messages.
Run with:
# First start the server:
uv run mcp-sse-polling-demo --port 3000
# Then run this client:
uv run mcp-sse-polling-client --url http://localhost:3000/mcp
"""
import asyncio
import logging
import click
from mcp import ClientSession
from mcp.client.streamable_http import streamable_http_client
async def run_demo(url: str, items: int, checkpoint_every: int) -> None:
"""Run the SSE polling demo."""
print(f"\n{'=' * 60}")
print("SSE Polling Demo Client")
print(f"{'=' * 60}")
print(f"Server URL: {url}")
print(f"Processing {items} items with checkpoints every {checkpoint_every}")
print(f"{'=' * 60}\n")
async with stream | 1 | modelcontextprotocol/python-sdk:examples/clients/sse-polling-client/mcp_sse_polling_client/main.py | function_simple | modelcontextprotocol/python-sdk |
import abc
import contextlib
import datetime
from collections.abc import AsyncIterator
from typing import Literal, TypeVar
import httpx
import mcp.types
from mcp import ClientSession
from mcp.client.session import (
ElicitationFnT,
ListRootsFnT,
LoggingFnT,
MessageHandlerFnT,
SamplingFnT,
)
from typing_extensions import TypedDict, Unpack
# TypeVar for preserving specific ClientTransport subclass types
ClientTransportT = TypeVar("ClientTransportT", bound="ClientTransport")
class SessionKwargs(TypedDict, total=False):
"""Keyword arguments for the MCP ClientSession constructor."""
read_timeout_seconds: datetime.timedelta | None
sampling_callback: SamplingFnT | None
sampling_capabilities: mcp.types.SamplingCapability | None
list_roots_callback: ListRootsFnT | None
logging_callback: LoggingFnT | None
elicitation_callback: ElicitationFnT | None
message_handler: MessageHandlerFnT | None
client_info: mcp.types.Implementation | None
class ClientTransport(abc.ABC):
"""
Abstract base class for different MCP client transport mechanisms.
A Transport | 1 | PrefectHQ/fastmcp:src/fastmcp/client/transports/base.py | function_simple | PrefectHQ/fastmcp |
from django.utils.timezone import now as timezone_now
from zerver.lib.timestamp import datetime_to_timestamp
from zerver.models import UserProfile
from zerver.models.devices import Device
from zerver.tornado.django_api import send_event_on_commit
def do_register_push_device(
user_profile: UserProfile,
device: Device,
*,
token_kind: str,
push_key_bytes: bytes,
push_key_id: int,
token_id_int: int,
token_id_base64: str,
) -> None:
registered_at = timezone_now()
device.push_key = push_key_bytes
device.push_key_id = push_key_id
device.pending_push_token_id = token_id_int
device.push_token_kind = token_kind
device.push_token_last_updated_timestamp = registered_at
device.push_registration_error_code = None
device.save(
update_fields=[
"push_key",
"push_key_id",
"pending_push_token_id",
"push_token_kind",
"push_token_last_updated_timestamp",
"push_registration_error_code",
]
)
event = dict(
type="device",
op="update",
device_id=device.id,
push_key_id=device.push_key_id,
| 1 | zulip/zulip:zerver/actions/push_notifications.py | function_simple | zulip/zulip |
"""Tool search — deferred tool discovery at runtime.
Contains:
- DeferredToolRegistry: stores deferred tools and handles regex search
- tool_search: the LangChain tool the agent calls to discover deferred tools
The agent sees deferred tool names in <available-deferred-tools> but cannot
call them until it fetches their full schema via the tool_search tool.
Source-agnostic: no mention of MCP or tool origin.
"""
import json
import logging
import re
from dataclasses import dataclass
from langchain.tools import BaseTool
from langchain_core.tools import tool
from langchain_core.utils.function_calling import convert_to_openai_function
logger = logging.getLogger(__name__)
MAX_RESULTS = 5 # Max tools returned per search
# ── Registry ──
@dataclass
class DeferredToolEntry:
"""Lightweight metadata for a deferred tool (no full schema in context)."""
name: str
description: str
tool: BaseTool # Full tool object, returned only on search match
class DeferredToolRegistry:
"""Registry of deferred tools, searchable by regex pattern."""
def __init__(self):
self._entries: list[DeferredToolEntry] = []
def register(self | 0 | bytedance/deer-flow:backend/packages/harness/deerflow/tools/builtins/tool_search.py | unknown | bytedance/deer-flow |
#!/usr/bin/env python3
"""
Quick validation script for skills - minimal version
"""
import sys
import os
import re
import yaml
from pathlib import Path
def validate_skill(skill_path):
"""Basic validation of a skill"""
skill_path = Path(skill_path)
# Check SKILL.md exists
skill_md = skill_path / 'SKILL.md'
if not skill_md.exists():
return False, "SKILL.md not found"
# Read and validate frontmatter
content = skill_md.read_text()
if not content.startswith('---'):
return False, "No YAML frontmatter found"
# Extract frontmatter
match = re.match(r'^---\n(.*?)\n---', content, re.DOTALL)
if not match:
return False, "Invalid frontmatter format"
frontmatter_text = match.group(1)
# Parse YAML frontmatter
try:
frontmatter = yaml.safe_load(frontmatter_text)
if not isinstance(frontmatter, dict):
return False, "Frontmatter must be a YAML dictionary"
except yaml.YAMLError as e:
return False, f"Invalid YAML in frontmatter: {e}"
# Define allowed properties | 0 | davila7/claude-code-templates:cli-tool/components/skills/development/skill-creator/scripts/quick_validate.py | unknown | davila7/claude-code-templates |
import gradio as gr
with gr.Blocks() as demo:
with gr.HTML(html_template='''
<button class="maximize">⛶</button>
<h2>${form_name}</h2>
@children
<button class="submit">Submit</button>
''', css_template='''
border: 2px solid gray;
border-radius: 12px;
padding: 20px;
.maximize {
position: absolute;
top: 10px;
right: 10px;
background: none;
border: none;
z-index: 1000;
}
''', js_on_load='''
element.querySelector('.submit').addEventListener('click', () => {
trigger('submit');
});
element.querySelector('.maximize').addEventListener('click', () => {
element.requestFullscreen();
});
''', form_name="Custom Form") as form:
name = gr.Textbox(label="Name")
email = gr.Textbox(label="Email")
output = gr.Textbox(label="Output")
form.submit(lambda name, email: f"Name: {name}, Email: {email}", | 1 | gradio-app/gradio:demo/html_children/run.py | documentation | gradio-app/gradio |
from blake3 import blake3
from typing import IO
import os
import asyncio
DEFAULT_CHUNK = 8 * 1024 *1024 # 8MB
# NOTE: this allows hashing different representations of a file-like object
def blake3_hash(
fp: str | IO[bytes],
chunk_size: int = DEFAULT_CHUNK,
) -> str:
"""
Returns a BLAKE3 hex digest for ``fp``, which may be:
- a filename (str/bytes) or PathLike
- an open binary file object
If ``fp`` is a file object, it must be opened in **binary** mode and support
``read``, ``seek``, and ``tell``. The function will seek to the start before
reading and will attempt to restore the original position afterward.
"""
# duck typing to check if input is a file-like object
if hasattr(fp, "read"):
return _hash_file_obj(fp, chunk_size)
with open(os.fspath(fp), "rb") as f:
return _hash_file_obj(f, chunk_size)
async def blake3_hash_async(
fp: str | IO[bytes],
chunk | 1 | Comfy-Org/ComfyUI:app/assets/hashing.py | function_complex | Comfy-Org/ComfyUI |
import re
from pathlib import Path
from fastapi import APIRouter, HTTPException
from pydantic import BaseModel, constr
from utils.function_catalog import get_function_catalog
from utils.function_manager import FUNCTION_CALLING_DIR
router = APIRouter()
class LocalToolCreateRequest(BaseModel):
filename: constr(strip_whitespace=True, min_length=1, max_length=255)
content: str
overwrite: bool = False
@router.get("/api/tools/local")
def list_local_tools():
catalog = get_function_catalog()
metadata = catalog.list_metadata()
tools = []
for name, meta in metadata.items():
tools.append(
{
"name": name,
"description": meta.description,
"parameters": meta.parameters_schema,
"module": meta.module_name,
"file_path": meta.file_path,
}
)
tools.sort(key=lambda item: item["name"])
return {
"success": True,
"count": len(tools),
"tools": tools,
"load_error": str(catalog.load_error) if catalog.load_error else None,
}
@router.post("/api/tools/local")
def create_local_tool(payload: LocalToolCreateRequest):
raw_name = | 0 | OpenBMB/ChatDev:server/routes/tools.py | unknown | OpenBMB/ChatDev |
# Copyright (c) 2025, Jay Shah, Ganesh Bikshandi, Ying Zhang, Vijay Thakkar, Pradeep Ramani, Tri Dao.
# SM120 (Blackwell GeForce / DGX Spark) forward pass.
#
# SM120 uses the same SM80-era MMA instructions (mma.sync.aligned.m16n8k16) but has
# a smaller shared memory capacity (99 KB vs 163 KB on SM80). This module subclasses
# FlashAttentionForwardSm80 and overrides the SMEM capacity check accordingly.
import cutlass
import cutlass.utils as utils_basic
from flash_attn.cute.flash_fwd import FlashAttentionForwardSm80
class FlashAttentionForwardSm120(FlashAttentionForwardSm80):
# Keep arch = 80 to use CpAsync code paths (no TMA for output).
# The compilation target is determined by the GPU at compile time, not this field.
arch = 80
@staticmethod
def can_implement(
dtype,
head_dim,
head_dim_v,
tile_m,
tile_n,
num_stages,
num_threads | 0 | Dao-AILab/flash-attention:flash_attn/cute/flash_fwd_sm120.py | unknown | Dao-AILab/flash-attention |
#! /usr/bin/env python3
import argparse
import os
import subprocess
import sys
def run(cmd, *, cwd=None, env=None, dry_run=True):
"""Run a command with optional dry-run behavior."""
environ = os.environ.copy()
if env:
environ.update(env)
if dry_run:
print("[DRY RUN]", " ".join(cmd))
else:
print("[EXECUTE]", " ".join(cmd))
try:
result = subprocess.check_output(
cmd, cwd=cwd, env=environ, stderr=subprocess.STDOUT
)
except subprocess.CalledProcessError as e:
result = e.output
print(" [ERROR]", result)
raise
else:
print(" [RESULT]", result)
return result.decode().strip()
def validate_env(checkout_dir):
if not checkout_dir:
sys.exit("Error: checkout directory not provided (--checkout-dir).")
if not os.path.exists(checkout_dir):
sys.exit(f"Error: checkout directory '{checkout_dir}' does not exist.")
if not os.path.isdir(checkout_dir):
sys.exit(f"Error: '{checkout_dir}' is not a directory.")
def get_remote_branches | 1 | django/django:scripts/archive_eol_stable_branches.py | function_complex | django/django |
#!/usr/bin/env python3
__package__ = 'archivebox.cli'
import os
import rich_click as click
from rich import print
from archivebox.misc.util import docstring, enforce_types
@enforce_types
def install(binaries: tuple[str, ...] = (), binproviders: str = '*', dry_run: bool = False) -> None:
"""Detect and install ArchiveBox dependencies by running a dependency-check crawl
Examples:
archivebox install # Install all dependencies
archivebox install wget curl # Install only wget and curl
archivebox install --binproviders=pip yt-dlp # Install yt-dlp using only pip
archivebox install --binproviders=brew,apt # Install all deps using only brew or apt
"""
from archivebox.config.permissions import IS_ROOT, ARCHIVEBOX_USER, ARCHIVEBOX_GROUP
from archivebox.config.paths import ARCHIVE_DIR
from archivebox.misc.logging import stderr
from archivebox.cli.archivebox_init import init
if not (os.access(ARCHIVE_DIR, os.R_OK) and ARCHIVE_DIR.is_dir()):
init() # must init full index because we need a db to store Binary entries in
| 0 | ArchiveBox/ArchiveBox:archivebox/cli/archivebox_install.py | unknown | ArchiveBox/ArchiveBox |
from collections.abc import Generator
from functools import cache
from typing import Any
from mlx_lm.models.deepseek_v32 import Model as DeepseekV32Model
from mlx_lm.models.gpt_oss import Model as GptOssModel
from mlx_lm.tokenizer_utils import TokenizerWrapper
from openai_harmony import ( # pyright: ignore[reportMissingTypeStubs]
HarmonyEncodingName,
HarmonyError, # pyright: ignore[reportUnknownVariableType]
Role,
StreamableParser,
load_harmony_encoding,
)
from exo.shared.types.api import ToolCallItem
from exo.shared.types.common import ModelId
from exo.shared.types.mlx import Model
from exo.shared.types.worker.runner_response import GenerationResponse, ToolCallResponse
from exo.worker.engines.mlx.utils_mlx import (
detect_thinking_prompt_suffix,
)
from exo.worker.runner.bootstrap import logger
from exo.worker.runner.llm_inference.tool_parsers import ToolParser
@cache
def get_gpt_oss_encoding():
encoding = load_harmony_encoding(HarmonyEncodingName.HARMONY_GPT_OSS)
return encoding
def apply_all_parsers | 0 | exo-explore/exo:src/exo/worker/runner/llm_inference/model_output_parsers.py | unknown | exo-explore/exo |
import math, os
if __name__ == "__main__":
os.environ["DEFAULT_FLOAT"] = "bfloat16"
os.environ["OPTIM_DTYPE"] = "bfloat16"
os.environ["DEV"] = "NULL"
from tinygrad import Tensor, nn, function, getenv, dtypes, TinyJit
from tinygrad.helpers import Timing, colored, GlobalCounters
from extra.models.llama import apply_rotary_emb, precompute_freqs_cis
def rmsnorm(x_in:Tensor, eps:float):
x = x_in.float()
x = x * (x.square().mean(-1, keepdim=True) + eps).rsqrt()
return x.cast(x_in.dtype)
class FlatTransformer:
def __init__(self, dim:int, hidden_dim:int, n_heads:int, n_layers:int, norm_eps:float, vocab_size:int, n_kv_heads:int|None=None,
rope_theta:int=10000, max_context:int=1024):
self.vocab_size = vocab_size
self.n_layers = n_layers
self.n_heads = n_heads
self.n_kv_heads = n_kv_heads if n_kv_heads is not None | 0 | tinygrad/tinygrad:examples/mlperf/models/flat_llama.py | unknown | tinygrad/tinygrad |
"""
Recursive Language Model (RLM) module for DSPy.
RLMs are an inference strategy where LLMs treat long contexts as part of an external
environment rather than feeding them directly to the model. The LLM writes Python code
to programmatically examine, decompose, and recursively call sub-LLMs over snippets.
Reference: "Recursive Language Models" (Zhang, Kraska, Khattab, 2025)
"""
from __future__ import annotations
import logging
import threading
from concurrent.futures import ThreadPoolExecutor, as_completed
from contextlib import contextmanager
from typing import TYPE_CHECKING, Any, Callable, Iterator
import pydantic
import dspy
from dspy.adapters.types.tool import Tool
from dspy.adapters.utils import parse_value, translate_field_type
from dspy.primitives.code_interpreter import SIMPLE_TYPES, CodeInterpreter, CodeInterpreterError, FinalOutput
from dspy.primitives.module import Module
from dspy.primitives.prediction import Prediction
from dspy.primitives.python_interpreter import PythonInterpreter
from dspy.primitives.repl_types import REPLEntry, REPLHistory, REPLVariable
from dspy.signatures.signature import ensure_signature
from dspy.utils.annotation import | 1 | stanfordnlp/dspy:dspy/predict/rlm.py | function_complex | stanfordnlp/dspy |
from __future__ import annotations
from typing import Any
from unittest import mock
import pytest
from testfixtures import LogCapture
from scrapy import signals
from scrapy.crawler import Crawler
from scrapy.http import Response, TextResponse, XmlResponse
from scrapy.settings import Settings
from scrapy.spiders import CSVFeedSpider, Spider, XMLFeedSpider
from scrapy.spiders.init import InitSpider
from scrapy.utils.test import get_crawler, get_reactor_settings
from tests import get_testdata
from tests.utils.decorators import coroutine_test, inline_callbacks_test
class TestSpider:
spider_class = Spider
def test_base_spider(self):
spider = self.spider_class("example.com")
assert spider.name == "example.com"
assert spider.start_urls == [] # pylint: disable=use-implicit-booleaness-not-comparison
def test_spider_args(self):
"""``__init__`` method arguments are assigned to spider attributes"""
spider = self.spider_class("example.com", foo="bar")
assert spider.foo == "bar"
def test_spider_without_name(self):
"""``__init__`` method arguments are assigned to spider attributes"""
msg = "must have a name"
| 0 | scrapy/scrapy:tests/test_spider.py | unknown | scrapy/scrapy |
#!/usr/bin/env python3
"""
Shader Blueprint Updater
Syncs GLSL shader files between this folder and blueprint JSON files.
File naming convention:
{Blueprint Name}_{node_id}.frag
Usage:
python update_blueprints.py extract # Extract shaders from JSONs to here
python update_blueprints.py patch # Patch shaders back into JSONs
python update_blueprints.py # Same as patch (default)
"""
import json
import logging
import sys
import re
from pathlib import Path
logging.basicConfig(level=logging.INFO, format='%(message)s')
logger = logging.getLogger(__name__)
GLSL_DIR = Path(__file__).parent
BLUEPRINTS_DIR = GLSL_DIR.parent
def get_blueprint_files():
"""Get all blueprint JSON files."""
return sorted(BLUEPRINTS_DIR.glob("*.json"))
def sanitize_filename(name):
"""Convert blueprint name to safe filename."""
return re.sub(r'[^\w\-]', '_', name)
def extract_shaders():
"""Extract all shaders from blueprint JSONs to this folder."""
extracted = 0
for json_path in get_blueprint_files():
blueprint_name = json_path.stem
try:
with open(json_path, 'r') as | 1 | Comfy-Org/ComfyUI:blueprints/.glsl/update_blueprints.py | function_complex | Comfy-Org/ComfyUI |
"""
Interactive TemperatureGrid sensor visualization with keyboard teleop.
A platform has a temperature grid sensor on its top surface. Move a "hot" pusher
and drop objects onto the platform; the grid shows temperature (blue=cool, red=hot)
from contact-based blending of each body's base_temperature and conductivity.
"""
import argparse
import os
import numpy as np
import genesis as gs
from genesis.utils.misc import tensor_to_array
from genesis.vis.keybindings import Key, KeyAction, Keybind
# Teleop
KEY_DPOS = 0.08
KEY_DPOS_Z = 0.01
FORCE_SCALE = 100.0
PUSHER_SIZE = 0.1
# Temperature grid
GRID_SIZE = (10, 10, 1)
# Objects
SANDBOX_SIZE = 1.5
WALL_THICKNESS = 0.08
WALL_HEIGHT = 0.3
PLATFORM_HEIGHT = 0.1
OBJ_Z = PLATFORM_HEIGHT * 1.4
OBJ_SIZE = 0.1
def main():
parser = argparse.ArgumentParser(description="Interactive TemperatureGrid sensor visualization")
parser.add_argument("--vis", "-v", action=" | 0 | Genesis-Embodied-AI/Genesis:examples/sensors/temperature_grid.py | unknown | Genesis-Embodied-AI/Genesis |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
import frappe
from frappe import _, bold
from frappe.model.document import Document
from frappe.utils import (
add_days,
cint,
comma_and,
flt,
formatdate,
get_link_to_form,
get_time,
get_url_to_form,
getdate,
time_diff_in_hours,
time_diff_in_seconds,
to_timedelta,
)
from frappe.utils.data import DateTimeLikeObject
from erpnext.support.doctype.issue.issue import get_holidays
class WorkstationHolidayError(frappe.ValidationError):
pass
class NotInWorkingHoursError(frappe.ValidationError):
pass
class OverlapError(frappe.ValidationError):
pass
class Workstation(Document):
# begin: auto-generated types
# This code is auto-generated. Do not modify anything in this block.
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from frappe.types import DF
from erpnext.manufacturing.doctype.workstation_cost.workstation_cost import WorkstationCost
from erpnext.manufacturing.doctype.workstation_working_hour.workstation_working_hour import | 0 | frappe/erpnext:erpnext/manufacturing/doctype/workstation/workstation.py | unknown | frappe/erpnext |
"""Abstract base class for sandbox state persistence.
The state store handles cross-process persistence of thread_id → sandbox mappings,
enabling different processes (gateway, langgraph, multiple workers) to find the same
sandbox for a given thread.
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from collections.abc import Generator
from contextlib import contextmanager
from .sandbox_info import SandboxInfo
class SandboxStateStore(ABC):
"""Abstract base for persisting thread_id → sandbox mappings across processes.
Implementations:
- FileSandboxStateStore: JSON files + fcntl file locking (single-host)
- TODO: RedisSandboxStateStore: Redis-based for distributed multi-host deployments
"""
@abstractmethod
def save(self, thread_id: str, info: SandboxInfo) -> None:
"""Save sandbox state for a thread.
Args:
thread_id: The thread ID.
info: Sandbox metadata to persist.
"""
...
@abstractmethod
def load(self, thread_id: str) -> SandboxInfo | None:
"""Load sandbox state for a thread.
Args:
thread_id: The thread ID.
Returns:
SandboxInfo if found, None otherwise.
| 1 | bytedance/deer-flow:backend/src/community/aio_sandbox/state_store.py | documentation | bytedance/deer-flow |
import functools
import math
import operator
import re
import warnings
def _convert_conv_transpose_padding_args_from_keras_to_jax(
kernel_size, stride, dilation_rate, padding, output_padding
):
"""Convert the padding arguments from Keras to the ones used by JAX.
JAX starts with an shape of size `(input-1) * stride - kernel_size + 2`,
then adds `left_pad` on the left, and `right_pad` on the right.
In Keras, the `padding` argument determines a base shape, to which
`output_padding` is added on the right. If `output_padding` is None, it will
be given a default value.
"""
if padding.lower() not in {"valid", "same"}:
raise ValueError(
f"The `padding` argument must be one of 'valid', 'same'. "
f"Received: padding={padding}"
)
kernel_size = (kernel_size - 1) * dilation_rate + 1
if padding.lower() == "valid":
# If output_padding is None, we fill it so that the shape of the output
# is `(input-1)*s | 0 | keras-team/keras:keras/src/backend/common/backend_utils.py | unknown | keras-team/keras |
"""Async client for managing recurrent runs (cron jobs) in LangGraph."""
from __future__ import annotations
import warnings
from collections.abc import Mapping, Sequence
from datetime import datetime
from typing import Any
from langgraph_sdk._async.http import HttpClient
from langgraph_sdk.schema import (
All,
Config,
Context,
Cron,
CronSelectField,
CronSortBy,
Durability,
Input,
OnCompletionBehavior,
QueryParamTypes,
Run,
SortOrder,
StreamMode,
)
class CronClient:
"""Client for managing recurrent runs (cron jobs) in LangGraph.
A run is a single invocation of an assistant with optional input, config, and context.
This client allows scheduling recurring runs to occur automatically.
???+ example "Example Usage"
```python
client = get_client(url="http://localhost:2024"))
cron_job = await client.crons.create_for_thread(
thread_id="thread_123",
assistant_id="asst_456",
schedule="0 9 * * *",
input={"message": "Daily update"}
)
```
!!! note "Feature Availability"
The cr | 1 | langchain-ai/langgraph:libs/sdk-py/langgraph_sdk/_async/cron.py | documentation | langchain-ai/langgraph |
# encoding:utf-8
import json
import time
import requests
from models.bot import Bot
from models.session_manager import SessionManager
from bridge.context import ContextType
from bridge.reply import Reply, ReplyType
from common.log import logger
from config import conf, load_config
from .doubao_session import DoubaoSession
# Doubao (火山方舟 / Volcengine Ark) API Bot
class DoubaoBot(Bot):
def __init__(self):
super().__init__()
self.sessions = SessionManager(DoubaoSession, model=conf().get("model") or "doubao-seed-2-0-pro-260215")
model = conf().get("model") or "doubao-seed-2-0-pro-260215"
self.args = {
"model": model,
"temperature": conf().get("temperature", 0.8),
"top_p": conf().get("top_p", 1.0),
}
self.api_key = conf().get("ark_api_key")
self.base_url = conf().get("ark_base_url", "https://ark.cn-beijing.volces.com/api/v | 1 | zhayujie/chatgpt-on-wechat:models/doubao/doubao_bot.py | function_complex | zhayujie/chatgpt-on-wechat |
from __future__ import annotations
import bz2
import gzip
import lzma
import marshal
import pickle
import sys
from io import BytesIO
from pathlib import Path
from typing import TYPE_CHECKING, Any
import pytest
from scrapy.utils.test import get_crawler
from tests.test_feedexport import TestFeedExportBase, path_to_url, printf_escape
from tests.utils.decorators import coroutine_test
if TYPE_CHECKING:
from scrapy import Spider
class TestFeedPostProcessedExports(TestFeedExportBase):
items = [{"foo": "bar"}]
expected = b"foo\r\nbar\r\n"
class MyPlugin1:
def __init__(self, file, feed_options):
self.file = file
self.feed_options = feed_options
self.char = self.feed_options.get("plugin1_char", b"")
def write(self, data):
written_count = self.file.write(data)
written_count += self.file.write(self.char)
return written_count
def close(self):
self.file.close()
def _named_tempfile(self, name) -> str:
return str(Path(self.temp_dir, name))
async def run_and_export(
self, spider_cls: type[ | 0 | scrapy/scrapy:tests/test_feedexport_postprocess.py | unknown | scrapy/scrapy |
#!/usr/bin/env python
"""
Example script demonstrating the integration of MinerU parser with RAGAnything
This example shows how to:
1. Process parsed documents with RAGAnything
2. Perform multimodal queries on the processed documents
3. Handle different types of content (text, images, tables)
"""
import os
import argparse
import asyncio
import logging
import logging.config
from pathlib import Path
# Add project root directory to Python path
import sys
sys.path.append(str(Path(__file__).parent.parent))
from lightrag.llm.openai import openai_complete_if_cache, openai_embed
from lightrag.utils import EmbeddingFunc, logger, set_verbose_debug
from raganything import RAGAnything, RAGAnythingConfig
def configure_logging():
"""Configure logging for the application"""
# Get log directory path from environment variable or use current directory
log_dir = os.getenv("LOG_DIR", os.getcwd())
log_file_path = os.path.abspath(os.path.join(log_dir, "raganything_example.log"))
print(f"\nRAGAnything example log file: {log_file_path}\n")
os.makedirs(os.path.dirname(log_dir), exist_ok=True)
# Get log file max size and backup count | 1 | HKUDS/LightRAG:examples/raganything_example.py | function_complex | HKUDS/LightRAG |
from django.db import NotSupportedError
from django.db.models.expressions import Func
from django.db.models.fields import UUIDField
class UUID4(Func):
function = "UUIDV4"
arity = 0
output_field = UUIDField()
def as_sql(self, compiler, connection, **extra_context):
if connection.features.supports_uuid4_function:
return super().as_sql(compiler, connection, **extra_context)
raise NotSupportedError("UUID4 is not supported on this database backend.")
def as_postgresql(self, compiler, connection, **extra_context):
if connection.features.is_postgresql_18:
return self.as_sql(compiler, connection, **extra_context)
return self.as_sql(
compiler, connection, function="GEN_RANDOM_UUID", **extra_context
)
def as_mysql(self, compiler, connection, **extra_context):
if not connection.features.supports_uuid4_function:
if connection.mysql_is_mariadb:
raise NotSupportedError("UUID4 requires MariaDB version 11.7 or later.")
raise NotSupportedError("UUID4 is not supported on MySQL.")
return self.as_sql(compiler, connection, function="UUID_V4", **extra | 1 | django/django:django/db/models/functions/uuid.py | function_complex | django/django |
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Integration tests for MCP tool support in the Responses API."""
from __future__ import annotations
import pytest
import pytest_asyncio
from openai import OpenAI
from openai_harmony import ToolDescription, ToolNamespaceConfig
from tests.utils import RemoteOpenAIServer
from vllm.entrypoints.mcp.tool_server import MCPToolServer
from .conftest import (
BASE_TEST_ENV,
events_contain_type,
log_response_diagnostics,
retry_for_tool_call,
retry_streaming_for,
validate_streaming_event_stack,
)
MODEL_NAME = "openai/gpt-oss-20b"
_BASE_SERVER_ARGS = [
"--enforce-eager",
"--tool-server",
"demo",
"--max_model_len",
"5000",
]
_PYTHON_TOOL_INSTRUCTION = (
"You must use the Python tool to execute code. Never simulate execution."
)
class TestMCPToolServerUnit:
"""Test MCPToolServer.get_tool_description filtering logic.
Note: The wildcard "*" is normalized to None by
_extract_allowed_tools_from | 0 | vllm-project/vllm:tests/entrypoints/openai/responses/test_mcp_tools.py | unknown | vllm-project/vllm |
import os
from pathlib import Path
import pytest
from solidlsp import SolidLanguageServer
from solidlsp.ls_config import Language
from solidlsp.ls_types import SymbolKind
from solidlsp.ls_utils import SymbolUtils
@pytest.mark.dart
class TestDartLanguageServer:
@pytest.mark.parametrize("language_server", [Language.DART], indirect=True)
@pytest.mark.parametrize("repo_path", [Language.DART], indirect=True)
def test_ls_is_running(self, language_server: SolidLanguageServer, repo_path: Path) -> None:
"""Test that the language server starts and stops successfully."""
# The fixture already handles start and stop
assert language_server.is_running()
assert Path(language_server.language_server.repository_root_path).resolve() == repo_path.resolve()
@pytest.mark.parametrize("language_server", [Language.DART], indirect=True)
@pytest.mark.parametrize("repo_path", [Language.DART], indirect=True)
def test_find_definition_within_file(self, language_server: SolidLanguageServer, repo_path: Path) -> None:
"""Test finding definition of a method within the same file."""
# In lib/main.dart:
# Line 105: final result1 = calc.add | 1 | oraios/serena:test/solidlsp/dart/test_dart_basic.py | test | oraios/serena |
from typing import Any
from browser_use.llm.messages import (
AssistantMessage,
BaseMessage,
ContentPartImageParam,
ContentPartTextParam,
SystemMessage,
UserMessage,
)
class LiteLLMMessageSerializer:
@staticmethod
def _serialize_user_content(
content: str | list[ContentPartTextParam | ContentPartImageParam],
) -> str | list[dict[str, Any]]:
if isinstance(content, str):
return content
parts: list[dict[str, Any]] = []
for part in content:
if part.type == 'text':
parts.append(
{
'type': 'text',
'text': part.text,
}
)
elif part.type == 'image_url':
parts.append(
{
'type': 'image_url',
'image_url': {
'url': part.image_url.url,
'detail': part.image_url.detail,
},
}
)
return parts
@staticmethod
def _serialize_system_content(
content: str | list[ContentPartTextParam],
) -> str | list[dict[str, Any]]:
if isinstance(content, | 0 | browser-use/browser-use:browser_use/llm/litellm/serializer.py | unknown | browser-use/browser-use |
"""
Tests for the Apache Iceberg format.
Tests in this file use a simple Iceberg catalog based on SQLite, with the same
data used for Parquet tests (``pandas/tests/io/data/parquet/simple.parquet``).
"""
import collections
import importlib
import pathlib
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.io.iceberg import read_iceberg
pytestmark = pytest.mark.single_cpu
pyiceberg = pytest.importorskip("pyiceberg")
pyiceberg_catalog = pytest.importorskip("pyiceberg.catalog")
pq = pytest.importorskip("pyarrow.parquet")
Catalog = collections.namedtuple("Catalog", ["name", "uri", "warehouse"])
@pytest.fixture
def catalog(request, tmp_path):
# the catalog stores the full path of data files, so the catalog needs to be
# created dynamically, and not saved in pandas/tests/io/data as other formats
uri = f"sqlite:///{tmp_path}/catalog.sqlite"
warehouse = f"file://{tmp_path}"
catalog_name = request.param if hasattr(request, "param") else None
catalog = pyiceberg_catalog.load_catalog(
catalog_name or "default",
| 1 | pandas-dev/pandas:pandas/tests/io/test_iceberg.py | test | pandas-dev/pandas |
import argparse
import json
from collections import defaultdict
import numpy as np
from openai import OpenAI
from mem0.memory.utils import extract_json
client = OpenAI()
ACCURACY_PROMPT = """
Your task is to label an answer to a question as ’CORRECT’ or ’WRONG’. You will be given the following data:
(1) a question (posed by one user to another user),
(2) a ’gold’ (ground truth) answer,
(3) a generated answer
which you will score as CORRECT/WRONG.
The point of the question is to ask about something one user should know about the other user based on their prior conversations.
The gold answer will usually be a concise and short answer that includes the referenced topic, for example:
Question: Do you remember what I got the last time I went to Hawaii?
Gold answer: A shell necklace
The generated answer might be much longer, but you should be generous with your grading - as long as it touches on the same topic as the gold answer, it should be counted as CORRECT.
For time related questions, the gold answer will be a specific date, month, year, etc. The generated answer might be much longer | 1 | mem0ai/mem0:evaluation/metrics/llm_judge.py | function_complex | mem0ai/mem0 |
# coding=utf-8
"""
@project: maxkb
@Author:虎
@file: base_question_node.py
@date:2024/6/4 14:30
@desc:
"""
import json
import re
import time
from functools import reduce
from typing import List, Dict
from application.flow.i_step_node import NodeResult, INode
from application.flow.step_node.ai_chat_step_node.i_chat_node import IChatNode
from application.flow.tools import Reasoning, mcp_response_generator
from application.models import Application, ApplicationApiKey, ApplicationAccessToken
from common.exception.app_exception import AppApiException
from common.utils.rsa_util import rsa_long_decrypt
from common.utils.shared_resource_auth import filter_authorized_ids
from common.utils.tool_code import ToolExecutor
from django.db.models import QuerySet
from django.utils.translation import gettext as _
from langchain_core.messages import BaseMessage, AIMessage, HumanMessage, SystemMessage
from models_provider.models import Model
from models_provider.tools import get_model_credential, get_model_instance_by_model_workspace_id
from tools.models import Tool
def _write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow | 0 | 1Panel-dev/MaxKB:apps/application/flow/step_node/ai_chat_step_node/impl/base_chat_node.py | unknown | 1Panel-dev/MaxKB |
"""Imports TreeNodes"""
from algorithms.common.tree_node import TreeNode
class AvlTree:
"""
An avl tree.
"""
def __init__(self):
# Root node of the tree.
self.node = None
self.height = -1
self.balance = 0
def insert(self, key):
"""
Insert new key into node
"""
# Create new node
node = TreeNode(key)
if not self.node:
self.node = node
self.node.left = AvlTree()
self.node.right = AvlTree()
elif key < self.node.val:
self.node.left.insert(key)
elif key > self.node.val:
self.node.right.insert(key)
self.re_balance()
def re_balance(self):
"""
Re balance tree. After inserting or deleting a node,
"""
self.update_heights(recursive=False)
self.update_balances(False)
while self.balance < -1 or self.balance > 1:
if self.balance > 1:
if self.node.left.balance < 0:
self.node.left.rotate_left()
self.update_heights()
self.update_balances()
self.rotate_right()
self.update_height | 0 | keon/algorithms:algorithms/data_structures/avl_tree.py | unknown | keon/algorithms |
"""Task key management for SEP-1686 background tasks.
Task keys encode security scoping and metadata in the Docket key format:
`{session_id}:{client_task_id}:{task_type}:{component_identifier}`
This format provides:
- Session-based security scoping (prevents cross-session access)
- Task type identification (tool/prompt/resource)
- Component identification (name or URI for result conversion)
"""
from urllib.parse import quote, unquote
def build_task_key(
session_id: str,
client_task_id: str,
task_type: str,
component_identifier: str,
) -> str:
"""Build Docket task key with embedded metadata.
Format: `{session_id}:{client_task_id}:{task_type}:{component_identifier}`
The component_identifier is URI-encoded to handle special characters (colons, slashes, etc.).
Args:
session_id: Session ID for security scoping
client_task_id: Client-provided task ID
task_type: Type of task ("tool", "prompt", "resource")
component_identifier: Tool name, prompt name, or resource URI
Returns:
Encoded task key for Docket
Examples:
>>> build_task_key("session123", | 1 | PrefectHQ/fastmcp:src/fastmcp/server/tasks/keys.py | documentation | PrefectHQ/fastmcp |
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023-2024 SGLang Team
# Copyright 2025 Search-R1 Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/PeterGriffinJin/Search-R1/blob/main/verl/utils/reward_score/qa_em.py
import random
import re
import string
def normalize_answer(s):
def remove_articles(text):
return re.sub(r"\b(a|an|the)\b", " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc | 1 | verl-project/verl:verl/utils/reward_score/search_r1_like_qa_em.py | license | verl-project/verl |
# SPDX-License-Identifier: AGPL-3.0-only
# Copyright 2026-present the Unsloth AI Inc. team. All rights reserved. See /studio/LICENSE.AGPL-3.0
"""
Export subprocess entry point.
Each export session runs in a persistent subprocess (mp.get_context("spawn")).
This gives us a clean Python interpreter with no stale module state —
solving the transformers version-switching problem completely.
The subprocess stays alive while a model is loaded, accepting commands
(load, export_merged, export_base, export_gguf, export_lora, cleanup,
shutdown) via mp.Queue.
Pattern follows core/inference/worker.py and core/training/worker.py.
"""
from __future__ import annotations
import structlog
from loggers import get_logger
import os
import sys
import time
import traceback
from pathlib import Path
from typing import Any
logger = get_logger(__name__)
def _activate_transformers_version(model_name: str) -> None:
"""Activate the correct transformers version BEFORE any ML imports.
If the model needs transformers 5.x, prepend the pre-installed .venv_t5/
directory to sys.path. Otherwise do nothing (default 4.57.x | 0 | unslothai/unsloth:studio/backend/core/export/worker.py | unknown | unslothai/unsloth |
import numpy as np
import torch
import genesis as gs
import genesis.utils.geom as gu
import genesis.utils.particle as pu
from genesis.repr_base import RBC
class Emitter(RBC):
"""
A particle emitter for fluid or material simulation.
The Emitter manages the generation of particles into the simulation domain, allowing directional or omnidirectional
emissions with various droplet shapes. It supports resetting, shape-based emission, and spherical omni-emission.
Parameters
----------
max_particles : int
The maximum number of particles that this emitter can handle.
"""
def __init__(self, max_particles):
self._uid = gs.UID()
self._entity = None
self._max_particles = max_particles
self._acc_droplet_len = 0.0 # accumulated droplet length to be emitted
gs.logger.info(
f"Creating ~<{self.__repr_name__()}>~. id: ~~~<{self._uid}>~~~, max_particles: ~<{max_particles}>~."
)
def set_entity(self, entity):
"""
Assign an entity to the emitter and initialize relevant simulation and solver references.
Parameters
----------
entity | 0 | Genesis-Embodied-AI/Genesis:genesis/engine/entities/emitter.py | unknown | Genesis-Embodied-AI/Genesis |
from strix.telemetry.flags import is_otel_enabled, is_posthog_enabled
def test_flags_fallback_to_strix_telemetry(monkeypatch) -> None:
monkeypatch.delenv("STRIX_OTEL_TELEMETRY", raising=False)
monkeypatch.delenv("STRIX_POSTHOG_TELEMETRY", raising=False)
monkeypatch.setenv("STRIX_TELEMETRY", "0")
assert is_otel_enabled() is False
assert is_posthog_enabled() is False
def test_otel_flag_overrides_global_telemetry(monkeypatch) -> None:
monkeypatch.setenv("STRIX_TELEMETRY", "0")
monkeypatch.setenv("STRIX_OTEL_TELEMETRY", "1")
monkeypatch.delenv("STRIX_POSTHOG_TELEMETRY", raising=False)
assert is_otel_enabled() is True
assert is_posthog_enabled() is False
def test_posthog_flag_overrides_global_telemetry(monkeypatch) -> None:
monkeypatch.setenv("STRIX_TELEMETRY", "0")
monkeypatch.setenv("STRIX_POSTHOG_TELEMETRY", "1")
monkeypatch.del | 0 | usestrix/strix:tests/telemetry/test_flags.py | unknown | usestrix/strix |
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from: https://github.com/NVIDIA-NeMo/Curator/blob/main/nemo_curator/stages/deduplication/shuffle_utils/rapidsmpf_shuffler.py
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Literal
if TYPE_CHECKING:
from collections.abc import Iterator
import pylibcudf as plc
from rapidsmpf.shuffler import Shuffler
def align_down_to_256(value: int) -> int:
return (value >> 8 | 0 | ray-project/ray:python/ray/data/_internal/gpu_shuffle/rapidsmpf_backend.py | unknown | ray-project/ray |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""SSH Remote Job Trigger for deferrable execution."""
from __future__ import annotations
import asyncio
from collections.abc import AsyncIterator
from typing import Any, Literal
import tenacity
from airflow.providers.ssh.hooks.ssh import SSHHookAsync
from airflow.providers.ssh.utils.remote_job import (
build_posix_completion_check_command,
build_posix_file_size_command,
| 1 | apache/airflow:providers/ssh/src/airflow/providers/ssh/triggers/ssh_remote_job.py | function_complex | apache/airflow |
from typing import TYPE_CHECKING, Any, Optional
import litellm
import pydantic
from dspy.adapters.types.base_type import Type
if TYPE_CHECKING:
from dspy.clients.lm import LM
from dspy.signatures.signature import Signature
class Reasoning(Type):
"""Reasoning type in DSPy.
This type is useful when you want the DSPy output to include the reasoning of the LM. We build this type so that
DSPy can support the reasoning model and non-reasoning model with the same code.
This is a str-like type, you can convert a string directly to a Reasoning object, and from DSPy adapters'
perspective, `Reasoning` is treated as a string.
"""
content: str
def format(self):
return f"{self.content}"
@pydantic.model_validator(mode="before")
@classmethod
def validate_input(cls, data: Any):
if isinstance(data, cls):
return data
if isinstance(data, str):
return {"content": data}
if isinstance(data, dict):
if "content" not in data:
raise ValueError("`content` field is required for `dspy.Reasoning | 1 | stanfordnlp/dspy:dspy/adapters/types/reasoning.py | function_complex | stanfordnlp/dspy |
"""Config flow for the Namecheap DynamicDNS integration."""
from __future__ import annotations
from collections.abc import Mapping
import logging
from typing import Any
from aiohttp import ClientError
import voluptuous as vol
from homeassistant.config_entries import SOURCE_REAUTH, ConfigFlow, ConfigFlowResult
from homeassistant.const import CONF_DOMAIN, CONF_HOST, CONF_NAME, CONF_PASSWORD
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.selector import (
TextSelector,
TextSelectorConfig,
TextSelectorType,
)
from .const import DOMAIN
from .helpers import AuthFailed, update_namecheapdns
from .issue import deprecate_yaml_issue
_LOGGER = logging.getLogger(__name__)
STEP_USER_DATA_SCHEMA = vol.Schema(
{
vol.Required(CONF_HOST, default="@"): cv.string,
vol.Required(CONF_DOMAIN): cv.string,
vol.Required(CONF_PASSWORD): TextSelector(
TextSelectorConfig(
type=TextSelectorType.PASSWORD, autocomplete="current-password"
)
),
}
)
STEP_RECONFIGURE_DATA_SCHEMA = vol.Schema(
{
vol.Required(CONF_PASSWORD): TextSelector(
TextSelectorConfig | 1 | home-assistant/core:homeassistant/components/namecheapdns/config_flow.py | function_complex | home-assistant/core |
from __future__ import annotations
import argparse
import json
import os
import typing as t
import pytest
import pytest_mock
if t.TYPE_CHECKING:
from ansible_test._internal.ci.azp import AzurePipelinesChanges
def create_azure_pipelines_changes(mocker: pytest_mock.MockerFixture) -> AzurePipelinesChanges:
"""Prepare an AzurePipelinesChanges instance for testing."""
from ansible_test._internal.ci.azp import AzurePipelinesChanges
from ansible_test._internal.config import CommonConfig
namespace = argparse.Namespace()
namespace.color = False
namespace.explain = False
namespace.verbosity = False
namespace.debug = False
namespace.truncate = False
namespace.redact = False
namespace.display_traceback = False
config = CommonConfig(namespace, 'sanity')
env = dict(
HOME=os.environ['HOME'],
SYSTEM_COLLECTIONURI='https://dev.azure.com/ansible/',
SYSTEM_TEAMPROJECT='ansible',
BUILD_REPOSITORY_PROVIDER='GitHub',
BUILD_SOURCEBRANCH='devel',
BUILD_SOURCEBRANCHNAME='devel',
)
mocker.patch.dict(os.environ, env, clear=True)
return AzurePipelinesChanges(config | 1 | ansible/ansible:test/units/ansible_test/_internal/ci/test_azp.py | test | ansible/ansible |
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from importlib.util import find_spec
import pytest
import torch
import vllm.envs as envs
from tests.compile.backend import TestBackend
from tests.utils import TestFP8Layer, has_module_attribute, multi_gpu_test
from vllm._custom_ops import cutlass_scaled_fp4_mm, scaled_fp4_quant
from vllm.compilation.passes.fusion.allreduce_rms_fusion import AllReduceFusionPass
from vllm.compilation.passes.utility.fix_functionalization import (
FixFunctionalizationPass,
)
from vllm.compilation.passes.utility.noop_elimination import NoOpEliminationPass
from vllm.compilation.passes.utility.post_cleanup import PostCleanupPass
from vllm.config import (
CompilationConfig,
CompilationMode,
DeviceConfig,
ModelConfig,
PassConfig,
VllmConfig,
set_current_vllm_config,
)
from vllm.distributed import tensor_model_parallel_all_reduce
from vllm.distributed.parallel_state import (
init_distributed_environment,
initialize_model_parallel,
)
from v | 0 | vllm-project/vllm:tests/compile/passes/distributed/test_fusion_all_reduce.py | unknown | vllm-project/vllm |
import shutil
from collections.abc import Generator
from pathlib import Path
import pytest
import reflex as rx
import reflex.constants as constants
from reflex.assets import remove_stale_external_asset_symlinks
@pytest.fixture
def mock_asset_path(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> Path:
    """Provide a scratch working directory for asset tests.

    A ``mock_asset_path`` subdirectory is created under ``tmp_path`` and the
    process working directory is switched to it for the test's duration.

    Args:
        tmp_path: A temporary directory provided by pytest.
        monkeypatch: A pytest fixture for patching.

    Returns:
        The path to a tmp cwd that will be used for assets.
    """
    scratch_cwd = tmp_path / "mock_asset_path"
    scratch_cwd.mkdir()
    monkeypatch.chdir(scratch_cwd)
    return scratch_cwd
def test_shared_asset(mock_asset_path: Path) -> None:
"""Test shared assets."""
# The asset function copies a file to the app's external assets directory.
asset = rx.asset(path="custom_script.js", shared=True, subfolder="subfolder")
assert asset == "/external/test_assets/subfolder/custom_script.js"
result_file = Path(
mock_asset_path,
"assets",
" | 0 | reflex-dev/reflex:tests/units/assets/test_assets.py | unknown | reflex-dev/reflex |
"""
AgentOS Demo
Prerequisites:
uv pip install -U fastapi uvicorn sqlalchemy pgvector psycopg openai ddgs
"""
from agno import __version__ as agno_version
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.knowledge.knowledge import Knowledge
from agno.models.openai import OpenAIChat
from agno.os import AgentOS
from agno.os.interfaces.a2a import A2A
from agno.os.interfaces.agui import AGUI
from agno.os.interfaces.slack import Slack
from agno.os.interfaces.telegram import Telegram
from agno.os.interfaces.whatsapp import Whatsapp
from agno.registry import Registry
from agno.team import Team
from agno.tools.mcp import MCPTools
from agno.vectordb.pgvector import PgVector
from agno.workflow import Workflow
from agno.workflow.step import Step
# ---------------------------------------------------------------------------
# Create Example
# ---------------------------------------------------------------------------
# Database connection
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
# Create Postgres-backed memory store
db = PostgresDb(db_url=db_url)
# Create Postgres-backed vector store
vector_db = Pg | 0 | agno-agi/agno:cookbook/05_agent_os/interfaces/all_interfaces.py | unknown | agno-agi/agno |
"""
Construct Tree from Preorder and Postorder Traversal
Given preorder and postorder traversals of a full binary tree, construct the
tree and return its inorder traversal. A full binary tree has either zero or
two children per node.
Reference: https://en.wikipedia.org/wiki/Binary_tree#Types_of_binary_trees
Complexity:
Time: O(n^2) due to linear search in postorder array
Space: O(n) for the constructed tree
"""
from __future__ import annotations
from algorithms.common.tree_node import TreeNode
pre_index = 0
def construct_tree_util(
pre: list[int], post: list[int], low: int, high: int, size: int
) -> TreeNode | None:
"""Recursively construct a binary tree from preorder and postorder arrays.
Uses a global pre_index to track the current position in the preorder
array during recursive construction.
Args:
pre: The preorder traversal array.
post: The postorder traversal array.
low: The lower bound index in the postorder array.
high: The upper bound index in the postorder array.
size: The total number of elements.
Returns:
The root of the constructed subtree, | 0 | keon/algorithms:algorithms/tree/construct_tree_postorder_preorder.py | unknown | keon/algorithms |
# Copyright 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
| 1 | FunAudioLLM/CosyVoice:runtime/triton_trtllm/model_repo/speaker_embedding/1/model.py | function_complex | FunAudioLLM/CosyVoice |
"""
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import logging
from typing import Any, ClassVar
from openai import AsyncAzureOpenAI, AsyncOpenAI
from openai.types.chat import ChatCompletionMessageParam
from pydantic import BaseModel
from .config import DEFAULT_MAX_TOKENS, LLMConfig
from .openai_base_client import BaseOpenAIClient
logger = logging.getLogger(__name__)
class AzureOpenAILLMClient(BaseOpenAIClient):
"""Wrapper class for Azure OpenAI that implements the LLMClient interface.
Supports both AsyncAzureOpenAI and AsyncOpenAI (with Azure v1 API endpoint).
"""
# Class-level constants
MAX | 1 | getzep/graphiti:graphiti_core/llm_client/azure_openai_client.py | function_complex | getzep/graphiti |
# flags: --minimum-version=3.14
x = t"foo"
x = t'foo {{ {2 + 2}bar {{ baz'
x = t"foo {f'abc'} bar"
x = t"""foo {{ a
foo {2 + 2}bar {{ baz
x = f"foo {{ {
2 + 2 # comment
}bar"
{{ baz
}} buzz
{print("abc" + "def"
)}
abc"""
t'{(abc:=10)}'
t'''This is a really long string, but just make sure that you reflow tstrings {
2+2:d
}'''
t'This is a really long string, but just make sure that you reflow tstrings correctly {2+2:d}'
t"{ 2 + 2 = }"
t'{
X
!r
}'
tr'\{{\}}'
t'''
WITH {f'''
{1}_cte AS ()'''}
'''
# output
x = t"foo"
x = t"foo {{ {2 + 2}bar {{ baz"
x = t"foo {f' | 1 | psf/black:tests/data/cases/pep_750.py | test | psf/black |
import json
import os
from itertools import islice
from typing import Iterable
import pyarrow as pa
import datasets
from datasets.builder import Key
logger = datasets.utils.logging.get_logger(__name__)
class Eval(datasets.GeneratorBasedBuilder):
NUM_EXAMPLES_FOR_FEATURES_INFERENCE = 5
def _info(self):
    """Return a bare DatasetInfo with no features declared; features appear
    to be inferred from the first examples instead (cf.
    NUM_EXAMPLES_FOR_FEATURES_INFERENCE)."""
    return datasets.DatasetInfo()
def _split_generators(self, dl_manager):
"""We handle string, list and dicts in datafiles"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
dl_manager.download_config.extract_on_the_fly = True
base_data_files = dl_manager.download(self.config.data_files)
extracted_data_files = dl_manager.extract(base_data_files)
splits = []
for split_name, logs in extracted_data_files.items():
logs_files_iterables = [dl_manager.iter_files(log) for log in logs]
splits.append(
datasets.SplitGenerator(
name=split_name,
gen_kwargs={
"logs_files_iterables": logs_files_iterables,
"base_files": base_data_files[split_name],
},
)
)
if not | 1 | huggingface/datasets:src/datasets/packaged_modules/eval/eval.py | function_complex | huggingface/datasets |
# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom Components v2 manager and supporting orchestration.
This module composes the registry, manifest handling, and file watching
capabilities for Streamlit's Custom Components v2. It provides a unified
interface to register components from manifests or individual definitions, query
component metadata and asset paths, and react to on-disk changes by re-resolving
component definitions.
"""
from __future__ import annotations
import threading
from dataclasses import dataclass
from typing import TYPE_CHECKING, Final
from | 1 | streamlit/streamlit:lib/streamlit/components/v2/component_manager.py | license | streamlit/streamlit |
from __future__ import annotations
import numpy as np
import numpy.typing as npt
from supervision.detection.utils.iou_and_nms import box_iou_batch
def clip_boxes(
xyxy: npt.NDArray[np.number],
resolution_wh: tuple[int, int],
) -> npt.NDArray[np.number]:
"""
Clips bounding boxes coordinates to fit within the frame resolution.
Args:
xyxy: A numpy array of shape `(N, 4)` where each
row corresponds to a bounding box in
the format `(x_min, y_min, x_max, y_max)`.
resolution_wh: A tuple of the form
`(width, height)` representing the resolution of the frame.
Returns:
A numpy array of shape `(N, 4)` where each row
corresponds to a bounding box with coordinates clipped to fit
within the frame resolution.
Examples:
```pycon
>>> import numpy as np
>>> import supervision as sv
>>> xyxy = np.array([
... [10, 20, 300, 200],
... [15, 25, 3 | 0 | roboflow/supervision:src/supervision/detection/utils/boxes.py | unknown | roboflow/supervision |
from typing import NotRequired, override
from langchain.agents import AgentState
from langchain.agents.middleware import AgentMiddleware
from langgraph.runtime import Runtime
from src.agents.thread_state import SandboxState, ThreadDataState
from src.sandbox import get_sandbox_provider
class SandboxMiddlewareState(AgentState):
    """Compatible with the `ThreadState` schema."""

    # Sandbox state for this thread; optional (NotRequired) and may be None
    # until a sandbox has been acquired.
    sandbox: NotRequired[SandboxState | None]
    # Thread-scoped data payload; optional and may be None.
    thread_data: NotRequired[ThreadDataState | None]
class SandboxMiddleware(AgentMiddleware[SandboxMiddlewareState]):
"""Create a sandbox environment and assign it to an agent.
Lifecycle Management:
- With lazy_init=True (default): Sandbox is acquired on first tool call
- With lazy_init=False: Sandbox is acquired on first agent invocation (before_agent)
- Sandbox is reused across multiple turns within the same thread
- Sandbox is NOT released after each agent call to avoid wasteful recreation
- Cleanup happens at application shutdown via SandboxProvider.shutdown()
"""
state_schema = SandboxMiddlewareState
def __init__(self, lazy_init: bool = True):
"""Initialize sandbox middleware.
Args:
lazy_init: If True, defer sandbox acquisition until first tool call.
If False | 1 | bytedance/deer-flow:backend/src/sandbox/middleware.py | function_simple | bytedance/deer-flow |
import json
import sys
import types
from pathlib import Path
from typing import Any, ClassVar
import pytest
from opentelemetry.sdk.trace.export import SimpleSpanProcessor, SpanExportResult
from strix.telemetry import tracer as tracer_module
from strix.telemetry import utils as telemetry_utils
from strix.telemetry.tracer import Tracer, set_global_tracer
def _load_events(events_path: Path) -> list[dict[str, Any]]:
    """Read a JSONL file and return its decoded records, skipping blank lines."""
    events: list[dict[str, Any]] = []
    for raw_line in events_path.read_text(encoding="utf-8").splitlines():
        if raw_line:
            events.append(json.loads(raw_line))
    return events
@pytest.fixture(autouse=True)
def _reset_tracer_globals(monkeypatch) -> None:
monkeypatch.setattr(tracer_module, "_global_tracer", None)
monkeypatch.setattr(tracer_module, "_OTEL_BOOTSTRAPPED", False)
monkeypatch.setattr(tracer_module, "_OTEL_REMOTE_ENABLED", False)
telemetry_utils.reset_events_write_locks()
monkeypatch.delenv("STRIX_TELEMETRY", raising=False)
monkeypatch.delenv("STRIX_OTEL_TELEMETRY", raising=False)
monkeypatch.delenv("STRIX_POSTHOG_TELEMETRY", | 0 | usestrix/strix:tests/telemetry/test_tracer.py | unknown | usestrix/strix |
# SPDX-License-Identifier: AGPL-3.0-only
# Copyright 2026-present the Unsloth AI Inc. team. All rights reserved. See /studio/LICENSE.AGPL-3.0
"""
Audio codec loading and decoding for TTS inference.
Supports: SNAC (Orpheus), CSM (Sesame), BiCodec (Spark), DAC (OuteTTS)
"""
import io
import re
import wave
import structlog
from loggers import get_logger
from typing import Optional, Tuple
import numpy as np
import torch
logger = get_logger(__name__)
def _numpy_to_wav_bytes(waveform: np.ndarray, sample_rate: int) -> bytes:
"""Convert a float32 numpy waveform to WAV bytes (16-bit PCM)."""
waveform = waveform.flatten()
peak = max(abs(waveform.max()), abs(waveform.min()))
if peak > 1.0:
waveform = waveform / peak
pcm = (waveform * 32767).astype(np.int16)
buf = io.BytesIO()
with wave.open(buf, "wb") as wf:
wf.setnchannels(1)
wf.set | 0 | unslothai/unsloth:studio/backend/core/inference/audio_codecs.py | unknown | unslothai/unsloth |
from __future__ import annotations
import logging
import pickle
import re
import warnings
from hashlib import sha256
from pathlib import Path
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from collections.abc import Callable
from collections.abc import Iterator
from datetime import datetime
from numpy import ndarray
from django.conf import settings
from django.core.cache import cache
from django.core.cache import caches
from documents.caching import CACHE_5_MINUTES
from documents.caching import CACHE_50_MINUTES
from documents.caching import CLASSIFIER_HASH_KEY
from documents.caching import CLASSIFIER_MODIFIED_KEY
from documents.caching import CLASSIFIER_VERSION_KEY
from documents.caching import StoredLRUCache
from documents.models import Document
from documents.models import MatchingModel
logger = logging.getLogger("paperless.classifier")
ADVANCED_TEXT_PROCESSING_ENABLED = (
settings.NLTK_LANGUAGE is not None and settings.NLTK_ENABLED
)
read_cache = caches["read-cache"]
RE_DIGIT = re.compile(r"\d")
RE_WORD = re.compile(r"\b[\w]+\b") # words that may contain digits
class IncompatibleClassifierVersionError(Exception):
def __init__( | 0 | paperless-ngx/paperless-ngx:src/documents/classifier.py | unknown | paperless-ngx/paperless-ngx |
from django import forms
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.db import connection
from django.db.models.signals import post_save
from django.utils.translation import gettext_lazy as _
from dcim.constants import LOCATION_SCOPE_TYPES
from dcim.models import PortMapping, PortTemplateMapping, Site
from utilities.forms import get_field_value
from utilities.forms.fields import (
ContentTypeChoiceField,
CSVContentTypeField,
DynamicModelChoiceField,
)
from utilities.forms.widgets import HTMXSelect
from utilities.templatetags.builtins.filters import bettertitle
__all__ = (
'FrontPortFormMixin',
'ScopedBulkEditForm',
'ScopedForm',
'ScopedImportForm',
)
class ScopedForm(forms.Form):
scope_type = ContentTypeChoiceField(
queryset=ContentType.objects.filter(model__in=LOCATION_SCOPE_TYPES),
widget=HTMXSelect(),
required=False,
label=_('Scope type')
)
scope = DynamicModelChoiceField(
label=_('Scope'),
queryset=Site.objects.none(), # Initial queryset
required=False,
disabled=True,
selector=True
)
def __init__(self, *args, **kwargs | 0 | netbox-community/netbox:netbox/dcim/forms/mixins.py | unknown | netbox-community/netbox |
# HumanEval/120
# Loki Mode Multi-Agent Solution
# Attempts: 1
# Passed: True
def maximum(arr, k):
"""
Given an array arr of integers and a positive integer k, return a sorted list
of length k with the maximum k numbers in arr.
Example 1:
Input: arr = [-3, -4, 5], k = 3
Output: [-4, -3, 5]
Example 2:
Input: arr = [4, -4, 4], k = 2
Output: [4, 4]
Example 3:
Input: arr = [-3, 2, 1, 2, -1, -2, 1], k = 1
Output: [2]
Note:
1. The length of the array will be in the range of [1, 1000].
2. The elements in the array will be in the range of [-1000, 1000].
3. 0 <= k <= len(arr)
"""
if k == 0:
return []
return sorted | 1 | davila7/claude-code-templates:cli-tool/components/skills/ai-research/loki-mode/benchmarks/results/humaneval-loki-solutions/120.py | documentation | davila7/claude-code-templates |
"""Anthropic prompt caching middleware.
Requires:
- `langchain`: For agent middleware framework
- `langchain-anthropic`: For `ChatAnthropic` model (already a dependency)
"""
from collections.abc import Awaitable, Callable
from typing import Literal
from warnings import warn
from langchain_anthropic.chat_models import ChatAnthropic
try:
from langchain.agents.middleware.types import (
AgentMiddleware,
ModelCallResult,
ModelRequest,
ModelResponse,
)
except ImportError as e:
msg = (
"AnthropicPromptCachingMiddleware requires 'langchain' to be installed. "
"This middleware is designed for use with LangChain agents. "
"Install it with: pip install langchain"
)
raise ImportError(msg) from e
class AnthropicPromptCachingMiddleware(AgentMiddleware):
"""Prompt Caching Middleware.
Optimizes API usage by caching conversation prefixes for Anthropic models.
Requires both `langchain` and `langchain-anthropic` packages to be installed.
Learn more about Anthropic prompt caching
[here](https://platform.claude.com/docs/en/build-with-claude/prompt-caching).
"""
def | 1 | langchain-ai/langchain:libs/partners/anthropic/langchain_anthropic/middleware/prompt_caching.py | function_complex | langchain-ai/langchain |
from dataclasses import dataclass
from itertools import product
from operator import itemgetter
import numpy as np
import pytest
from numpy.testing import assert_allclose
from scipy.sparse import csc_array
from scipy.special import xlogy
from sklearn.metrics import mean_poisson_deviance
from sklearn.tree import (
DecisionTreeClassifier,
DecisionTreeRegressor,
ExtraTreeClassifier,
ExtraTreeRegressor,
)
from sklearn.utils.stats import _weighted_percentile
CLF_CRITERIONS = ("gini", "log_loss")
REG_CRITERIONS = ("squared_error", "absolute_error", "poisson")
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"ExtraTreeRegressor": ExtraTreeRegressor,
}
@dataclass
class NaiveSplitter:
criterion: str
n_classes: int = 0
def compute_node_value_and_impurity(self, y, w):
sum_weights = np.sum(w)
if sum_weights < 1e-7:
return np.nan, np.inf # invalid split
if self.c | 1 | scikit-learn/scikit-learn:sklearn/tree/tests/test_split.py | test | scikit-learn/scikit-learn |
"""Utils for built-in HTTP download handlers."""
from __future__ import annotations
from abc import ABC
from contextlib import contextmanager
from typing import TYPE_CHECKING, Any
from twisted.internet.defer import CancelledError
from twisted.internet.error import ConnectionRefusedError as TxConnectionRefusedError
from twisted.internet.error import DNSLookupError
from twisted.internet.error import TimeoutError as TxTimeoutError
from twisted.python.failure import Failure
from twisted.web.client import ResponseFailed
from twisted.web.error import SchemeNotSupported
from scrapy import responsetypes
from scrapy.core.downloader.handlers.base import BaseDownloadHandler
from scrapy.exceptions import (
CannotResolveHostError,
DownloadCancelledError,
DownloadConnectionRefusedError,
DownloadFailedError,
DownloadTimeoutError,
StopDownload,
UnsupportedURLSchemeError,
)
from scrapy.utils.log import logger
if TYPE_CHECKING:
from collections.abc import Iterator
from ipaddress import IPv4Address, IPv6Address
from twisted.internet.ssl import Certificate
from scrapy import Request
from scrapy.crawler import Crawler
from scrapy.http import Headers, Response
class BaseHttpDownloadHandler(BaseDownloadHandler, ABC):
"""Base class for built-in HTTP download | 1 | scrapy/scrapy:scrapy/utils/_download_handlers.py | function_complex | scrapy/scrapy |
import os
from pathlib import Path
import pytest
from fastapi.testclient import TestClient
from inline_snapshot import snapshot
from tests.utils import workdir_lock
@pytest.fixture(scope="module")
def client():
    """Yield a TestClient for the static-files tutorial app.

    ``static/sample.txt`` is created in the current working directory before
    the tutorial app is imported (the tests fetch it back via
    ``/static/sample.txt``). Cleanup now runs in ``finally`` so the file and
    directory do not leak if the app import or client startup raises before
    the fixture yields.
    """
    static_dir: Path = Path.cwd() / "static"
    static_dir.mkdir(exist_ok=True)
    sample_file = static_dir / "sample.txt"
    sample_file.write_text("This is a sample static file.")
    try:
        from docs_src.static_files.tutorial001_py310 import app

        with TestClient(app) as client:
            yield client
    finally:
        # Remove the fixture artifacts unconditionally.
        sample_file.unlink()
        static_dir.rmdir()
@workdir_lock
def test_static_files(client: TestClient):
    """The mounted sample file is served with a 200 and its exact contents."""
    resp = client.get("/static/sample.txt")
    assert resp.status_code == 200, resp.text
    assert resp.text == "This is a sample static file."
@workdir_lock
def test_static_files_not_found(client: TestClient):
    """Requesting a file that was never created yields a 404."""
    resp = client.get("/static/non_existent_file.txt")
    assert resp.status_code == 404, resp.text
@workdir_lock
def test_openapi_schema(client: TestClient):
response = client.get("/openapi.json")
assert response.status_code | 1 | fastapi/fastapi:tests/test_tutorial/test_static_files/test_tutorial001.py | test | fastapi/fastapi |
## taken from: https://github.com/yarikoptic/nitest-balls1/blob/2cd07d86e2cc2d3c612d5d4d659daccd7a58f126/NIFTI/T1.nii.gz
from pathlib import Path
import pyarrow as pa
import pytest
from datasets import Dataset, Features, Nifti, load_dataset
from src.datasets.features.nifti import encode_nibabel_image
from ..utils import require_nibabel
@require_nibabel
@pytest.mark.parametrize("nifti_file", ["test_nifti.nii", "test_nifti.nii.gz"])
@pytest.mark.parametrize(
"build_example",
[
lambda nifti_path: nifti_path,
lambda nifti_path: Path(nifti_path),
lambda nifti_path: open(nifti_path, "rb").read(),
lambda nifti_path: {"path": nifti_path},
lambda nifti_path: {"path": nifti_path, "bytes": None},
lambda nifti_path: {"path": nifti_path, "bytes": open(nifti_path, | 1 | huggingface/datasets:tests/features/test_nifti.py | test | huggingface/datasets |
#!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from collections.abc import Iterator
import torch
logger = logging.getLogger(__name__)
class EpisodeAwareSampler:
def __init__(
self,
dataset_from_indices: list[int],
dataset_to_indices: list[int],
episode_indices_to_use: list | None = None,
drop_n_first_frames: int = 0,
drop_n_last_frames: int = 0,
shuffle: bool = False,
):
"""Sampler that optionally incorporates episode boundary information.
Args:
dataset_from_indices: List | 0 | huggingface/lerobot:src/lerobot/datasets/sampler.py | unknown | huggingface/lerobot |
import os
import time
from dataclasses import dataclass
from enum import Enum
import mlx.core as mx
from anyio import WouldBlock
from mlx_lm.tokenizer_utils import TokenizerWrapper
from exo.shared.models.model_cards import ModelTask
from exo.shared.types.chunks import (
ErrorChunk,
TokenChunk,
ToolCallChunk,
)
from exo.shared.types.common import CommandId, ModelId
from exo.shared.types.events import (
ChunkGenerated,
Event,
RunnerStatusUpdated,
TaskAcknowledged,
TaskStatusUpdated,
)
from exo.shared.types.mlx import Model
from exo.shared.types.tasks import (
ConnectToGroup,
LoadModel,
Shutdown,
StartWarmup,
Task,
TaskId,
TaskStatus,
TextGeneration,
)
from exo.shared.types.worker.instances import BoundInstance
from exo.shared.types.worker.runner_response import (
GenerationResponse,
ToolCallResponse,
)
from exo.shared.types.worker.runners import (
RunnerConnected,
RunnerConnecting,
RunnerFailed,
RunnerIdle,
RunnerLoaded,
RunnerLoading,
RunnerReady,
RunnerRunning,
RunnerShutdown,
RunnerShuttingDown,
RunnerStatus | 0 | exo-explore/exo:src/exo/worker/runner/llm_inference/runner.py | unknown | exo-explore/exo |
"""
Tests for the pip binary provider plugin.
Tests cover:
1. Hook script execution
2. pip package detection
3. Virtual environment handling
4. JSONL output format
"""
import json
import os
import subprocess
import sys
import tempfile
from pathlib import Path
from unittest.mock import patch, MagicMock
import pytest
from django.test import TestCase
# Get the path to the pip provider hook
PLUGIN_DIR = Path(__file__).parent.parent
INSTALL_HOOK = next(PLUGIN_DIR.glob('on_Binary__*_pip_install.py'), None)
class TestPipProviderHook(TestCase):
"""Test the pip binary provider installation hook."""
def setUp(self):
    """Create a temp workspace with output/ and lib/x86_64-linux/ dirs."""
    self.temp_dir = tempfile.mkdtemp()
    self.output_dir = Path(self.temp_dir) / 'output'
    self.output_dir.mkdir()
    # Fix: this directory was previously created twice with two identical
    # statement pairs; a single mkdir(parents=True, exist_ok=True) suffices.
    self.lib_dir = Path(self.temp_dir) / 'lib' / 'x86_64-linux'
    self.lib_dir.mkdir(parents=True, exist_ok=True)
def tearDown(self):
""" | 1 | ArchiveBox/ArchiveBox:archivebox/plugins/pip/tests/test_pip_provider.py | test | ArchiveBox/ArchiveBox |
t = (
{"foo": "very long string", "bar": "another very long string", "baz": "we should run out of space by now"}, # fmt: skip
{"foo": "bar"},
)
t = (
{
"foo": "very long string",
"bar": "another very long string",
"baz": "we should run out of space by now",
}, # fmt: skip
{"foo": "bar"},
)
t = (
{"foo": "very long string", "bar": "another very long string", "baz": "we should run out of space by now"}, # fmt: skip
{"foo": "bar",},
)
t = (
{
"foo": "very long string",
"bar": "another very long string",
"baz": "we should run out of space by now",
}, # fmt: skip
{"foo": "bar",},
)
# output
t = (
{"foo": "very long string", "bar": "another very long string", "baz": "we should run out of space by now"}, # fmt: skip
{"foo": "bar"},
| 1 | psf/black:tests/data/cases/fmtskip13.py | test | psf/black |
from pathlib import Path
from lightrag.api.runtime_validation import (
RuntimeEnvironment,
validate_runtime_target,
validate_runtime_target_from_env_file,
)
def test_validate_runtime_target_skips_when_not_declared() -> None:
    """With no declared target, validation is a no-op that passes."""
    ok, message = validate_runtime_target(None)
    assert ok is True
    assert message is None
def test_validate_runtime_target_accepts_host_on_host() -> None:
    """A 'host' target is accepted when running outside any container."""
    bare_metal_env = RuntimeEnvironment(
        in_container=False,
        in_docker=False,
        in_kubernetes=False,
    )
    ok, message = validate_runtime_target("host", bare_metal_env)
    assert ok is True
    assert message is None
def test_validate_runtime_target_rejects_host_in_container() -> None:
is_valid, error_message = validate_runtime_target(
"host",
RuntimeEnvironment(
in_container=True,
in_docker=True,
in_kubernetes=False,
),
)
assert is_valid is False
assert "\n" in error_message
assert "Configuration error in .env" in error_message
assert "LIGHTRAG_RUNTIME_TARGET=host" in error_message
assert "This value from .env" in error_message
assert | 0 | HKUDS/LightRAG:tests/test_runtime_target_validation.py | unknown | HKUDS/LightRAG |
#!/usr/bin/env python3
""" Default configurations for models """
import gettext
import logging
import os
from dataclasses import dataclass
from lib.config import ConfigItem, FaceswapConfig, GlobalSection
from plugins.plugin_loader import PluginLoader
from plugins.train.trainer import trainer_config
# LOCALES
_LANG = gettext.translation("plugins.train._config", localedir="locales", fallback=True)
_ = _LANG.gettext
logger = logging.getLogger(__name__)
_ADDITIONAL_INFO = _("\nNB: Unless specifically stated, values changed here will only take effect "
"when creating a new model.")
class _Config(FaceswapConfig):
""" Config File for Models """
# pylint:disable=too-many-statements
def set_defaults(self, helptext="") -> None:
""" Set the default values for config """
super().set_defaults(helptext=_("Options that apply to all models") + _ADDITIONAL_INFO)
self._defaults_from_plugin(os.path.dirname(__file__))
train_helptext, section, train_opts = trainer_config.get_defaults()
self.add_section(section, train_helptext)
for k, v in train_opts.items():
self.add_item | 1 | deepfakes/faceswap:plugins/train/train_config.py | function_complex | deepfakes/faceswap |
"""
This file serves as a documentation example and CI test for autoscaling data parallel attention deployment.
Structure:
1. Monkeypatch setup: Ensures serve.run is non-blocking and removes accelerator requirements for CI testing.
2. Docs example (between __dp_autoscaling_example_start/end__): Embedded in Sphinx docs via literalinclude.
3. Test validation (deployment status polling + cleanup)
"""
import time
from ray import serve
from ray.serve.schema import ApplicationStatus
from ray.serve._private.constants import SERVE_DEFAULT_APP_NAME
from ray.serve import llm
_original_serve_run = serve.run
_original_build_dp_openai_app = llm.build_dp_openai_app
def _non_blocking_serve_run(app, **kwargs):
    """Delegate to the real serve.run with blocking forced to False (for CI)."""
    forwarded = {**kwargs, "blocking": False}
    return _original_serve_run(app, **forwarded)
def _testing_build_dp_openai_app(builder_config, **kwargs):
"""Removes accelerator requirements for testing"""
if "llm_config" in builder_config:
config = builder_config["llm_config"]
if hasattr(config, "accelerator_type") and config.accelerator_type is not None:
config.accelerator_type = None
return _original | 0 | ray-project/ray:doc/source/llm/doc_code/serve/multi_gpu/dp_autoscaling_example.py | unknown | ray-project/ray |
"""File discovery and module import utilities for filesystem-based routing.
This module provides functions to:
1. Discover Python files in a directory tree
2. Import modules (as packages if __init__.py exists, else directly)
3. Extract decorated components (Tool, Resource, Prompt objects) from imported modules
"""
from __future__ import annotations
import importlib.util
import sys
from dataclasses import dataclass, field
from pathlib import Path
from types import ModuleType
from fastmcp.utilities.components import FastMCPComponent
from fastmcp.utilities.logging import get_logger
logger = get_logger(__name__)
@dataclass
class DiscoveryResult:
    """Result of filesystem discovery."""

    # Components are real objects (Tool, Resource, ResourceTemplate, Prompt),
    # each paired with the path of the file it was discovered in.
    components: list[tuple[Path, FastMCPComponent]] = field(default_factory=list)
    # Files that failed to process, mapped to a human-readable error message.
    failed_files: dict[Path, str] = field(default_factory=dict)  # path -> error message
def discover_files(root: Path) -> list[Path]:
"""Recursively discover all Python files under a directory.
Excludes __init__.py files (they're for package structure, not components).
Args:
root: Root directory to scan.
Returns | 0 | PrefectHQ/fastmcp:src/fastmcp/server/providers/filesystem_discovery.py | unknown | PrefectHQ/fastmcp |
from typing import Annotated
from annotated_doc import Doc
from fastapi.openapi.models import APIKey, APIKeyIn
from fastapi.security.base import SecurityBase
from starlette.exceptions import HTTPException
from starlette.requests import Request
from starlette.status import HTTP_401_UNAUTHORIZED
class APIKeyBase(SecurityBase):
model: APIKey
def __init__(
self,
location: APIKeyIn,
name: str,
description: str | None,
scheme_name: str | None,
auto_error: bool,
):
self.auto_error = auto_error
self.model: APIKey = APIKey(
**{"in": location}, # ty: ignore[invalid-argument-type]
name=name,
description=description,
)
self.scheme_name = scheme_name or self.__class__.__name__
def make_not_authenticated_error(self) -> HTTPException:
"""
The WWW-Authenticate header is not standardized for API Key authentication but
the HTTP specification requires that an error of 401 "Unauthorized" must
include a WWW-Authenticate header.
Ref: https://datatracker.ietf.org/doc/html/rfc9110#name- | 0 | fastapi/fastapi:fastapi/security/api_key.py | unknown | fastapi/fastapi |
from textual.app import App, ComposeResult
from textual.containers import Grid
from textual.widgets import Footer, Markdown, Placeholder
HELP = """\
## Breakpoints
A demonstration of how to make an app respond to the dimensions of the terminal.
Try resizing the terminal, then have a look at the source to see how it works!
"""
class BreakpointApp(App):
# A breakpoint consists of a width and a class name to set
HORIZONTAL_BREAKPOINTS = [
(0, "-narrow"),
(40, "-normal"),
(80, "-wide"),
(120, "-very-wide"),
]
CSS = """
Screen {
Placeholder { padding: 2; }
Grid { grid-rows: auto; height: auto; }
# Change the styles according to the breakpoint classes
&.-narrow {
Grid { grid-size: 1; }
}
&.-normal {
Grid { grid-size: 2; }
}
&.-wide {
Grid { grid-size: 4; }
}
&.-very-wide {
Grid { grid-size: 6; }
}
}
"""
def compose(self) -> | 1 | Textualize/textual:examples/breakpoints.py | documentation | Textualize/textual |
"""
Graph Traversal Algorithms
Provides DFS and BFS traversal of a graph represented as an adjacency
dictionary.
Complexity:
Time: O(V + E)
Space: O(V)
"""
from __future__ import annotations
from collections import deque
from typing import Any
def dfs_traverse(graph: dict[Any, list[Any]], start: Any) -> set[Any]:
    """Traverse the graph from *start* using iterative DFS.

    Nodes that appear only as neighbors (with no adjacency entry of their
    own) are treated as having no outgoing edges rather than raising
    ``KeyError``, so adjacency lists may omit sink nodes.

    Args:
        graph: Adjacency list.
        start: Starting node.

    Returns:
        Set of visited nodes.

    Examples:
        >>> sorted(dfs_traverse({'a': ['b'], 'b': []}, 'a'))
        ['a', 'b']
    """
    visited: set[Any] = set()
    stack = [start]
    while stack:
        node = stack.pop()
        if node in visited:
            # Already expanded via another path; nothing more to do.
            continue
        visited.add(node)
        # Missing adjacency entries are treated as leaf nodes.
        for next_node in graph.get(node, ()):
            if next_node not in visited:
                stack.append(next_node)
    return visited
def bfs_traverse(graph: dict[Any, list[Any]], start: Any) -> set[Any]:
"""Traverse the graph from *start* using BFS.
Args:
graph: Adjacency list.
| 0 | keon/algorithms:algorithms/graph/traversal.py | unknown | keon/algorithms |
from typing_extensions import override
from comfy_api.latest import ComfyExtension, io
class ColorToRGBInt(io.ComfyNode):
    """Convert a ``#RRGGBB`` color string into one packed RGB integer."""

    @classmethod
    def define_schema(cls) -> io.Schema:
        return io.Schema(
            node_id="ColorToRGBInt",
            display_name="Color to RGB Int",
            category="utils",
            description="Convert a color to a RGB integer value.",
            inputs=[io.Color.Input("color")],
            outputs=[io.Int.Output(display_name="rgb_int")],
        )

    @classmethod
    def execute(
        cls,
        color: str,
    ) -> io.NodeOutput:
        # Only the exact "#RRGGBB" form is accepted.
        if len(color) != 7 or not color.startswith("#"):
            raise ValueError("Color must be in format #RRGGBB")
        red, green, blue = (int(color[i : i + 2], 16) for i in (1, 3, 5))
        # Pack as 0xRRGGBB.
        return io.NodeOutput((red << 16) + (green << 8) + blue)
class ColorExtension(ComfyExtension):
@override
async | 1 | Comfy-Org/ComfyUI:comfy_extras/nodes_color.py | function_simple | Comfy-Org/ComfyUI |
# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022-2026)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, cast
from streamlit.elements.lib.layout_utils import validate_width
from streamlit.proto.Alert_pb2 import Alert as AlertProto
from streamlit.proto.WidthConfig_pb2 import WidthConfig
from streamlit.runtime.metrics_util import gather_metrics
from streamlit.string_util import (
clean_text,
extract_leading_icon,
validate_icon_or_emoji,
)
if TYPE_CHECKING:
from streamlit.delta_generator import DeltaGenerator | 0 | streamlit/streamlit:lib/streamlit/elements/alert.py | unknown | streamlit/streamlit |
"""
EA-compatible analogue to np.putmask
"""
from __future__ import annotations
from typing import (
TYPE_CHECKING,
Any,
)
import numpy as np
from pandas._libs import lib
from pandas.core.dtypes.cast import infer_dtype_from
from pandas.core.dtypes.common import is_list_like
from pandas.core.arrays import ExtensionArray
if TYPE_CHECKING:
from pandas._typing import (
ArrayLike,
npt,
)
from pandas import MultiIndex
def putmask_inplace(values: ArrayLike, mask: npt.NDArray[np.bool_], value: Any) -> None:
"""
ExtensionArray-compatible implementation of np.putmask. The main
difference is we do not handle repeating or truncating like numpy.
Parameters
----------
values: np.ndarray or ExtensionArray
mask : np.ndarray[bool]
We assume extract_bool_array has already been called.
value : Any
"""
if (
not isinstance(values, np.ndarray)
or (values.dtype == object and not lib.is_scalar(value))
# GH#43424: np.putmask raises TypeError if we cannot cast between types with
# rule = "safe", a | 0 | pandas-dev/pandas:pandas/core/array_algos/putmask.py | unknown | pandas-dev/pandas |
import math
import numpy as np
import torch
import genesis as gs
from genesis.repr_base import RBC
from genesis.constants import IMAGE_TYPE
from genesis.utils.misc import qd_to_torch
from .rasterizer_context import SegmentationColorMap
# Optional imports for platform-specific functionality
try:
from gs_madrona.renderer_gs import MadronaBatchRendererAdapter
_MADRONA_AVAILABLE = True
except ImportError:
MadronaBatchRendererAdapter = None
_MADRONA_AVAILABLE = False
def _transform_camera_quat(quat):
# quat for Madrona needs to be transformed to y-forward
w, x, y, z = torch.unbind(quat, dim=-1)
return torch.stack([x + w, x - w, y - z, y + z], dim=-1) / math.sqrt(2.0)
def _make_tensor(data, *, dtype: torch.dtype = torch.float32):
    """Build a tensor of the given dtype on the global Genesis device."""
    target_device = gs.device
    return torch.tensor(data, dtype=dtype, device=target_device)
class GenesisGeomRetriever:
def __init__(self, rigid_solver, seg_level):
self.rigid_solver = rigid_solver
self.seg_color | 1 | Genesis-Embodied-AI/Genesis:genesis/vis/batch_renderer.py | function_complex | Genesis-Embodied-AI/Genesis |
import uuid
from dash import Dash, Input, Output, callback_context, State, MATCH
from dash_test_components import ComponentAsProp
from dash.dcc import Checklist, Dropdown
from dash.html import Button, Div, Span
from flaky import flaky
def opt(u):
    """Build an option dict whose label embeds pattern-matched components keyed by *u*."""
    button = Button(
        "click me", id={"type": "button", "index": u}, className="label-button"
    )
    result_span = Span(id={"type": "text", "index": u}, className="label-result")
    return {"label": [button, result_span], "value": u}
def test_rdcap001_component_as_prop(dash_duo):
app = Dash(__name__)
content = [
ComponentAsProp(
element=Div(
"as-props",
id="as-props",
)
),
ComponentAsProp(
id="clicker-container", element=Button("click-me", id="clicker")
),
ComponentAsProp(
id="nested-output-container",
element=Div(id="nested-output"),
),
Div(
[
Button("click-nested", id="send-nested"),
Div(id="output-from-prop"),
]
),
| 0 | plotly/dash:tests/integration/renderer/test_component_as_prop.py | unknown | plotly/dash |
# Copyright The Lightning AI team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import patch
import pytest
import torch
from lightning.pytorch import Trainer
from lightning.pytorch.callbacks import ModelSummary, ProgressBar, RichModelSummary, RichProgressBar, TQDMProgressBar
from lightning.pytorch.demos.boring_classes import BoringModel
class TestRichIntegration:
@patch("lightning.pytorch.trainer.connectors.callback_connector._RICH_AVAILABLE", False)
def test_no_rich_defaults_tqdm_and_model_summary(self, tmp_path):
trainer = Trainer(default_root_dir=tmp_path, logger=False, enable_checkpointing=False)
assert any(isinstance(cb, TQDMProgressBar) | 1 | Lightning-AI/pytorch-lightning:tests/tests_pytorch/trainer/connectors/test_rich_integration.py | test | Lightning-AI/pytorch-lightning |
#!/usr/bin/env python3
"""Improve a skill description based on eval results.
Takes eval results (from run_eval.py) and generates an improved description
by calling `claude -p` as a subprocess (same auth pattern as run_eval.py —
uses the session's Claude Code auth, no separate ANTHROPIC_API_KEY needed).
"""
import argparse
import json
import os
import re
import subprocess
import sys
from pathlib import Path
from scripts.utils import parse_skill_md
def _call_claude(prompt: str, model: str | None, timeout: int = 300) -> str:
"""Run `claude -p` with the prompt on stdin and return the text response.
Prompt goes over stdin (not argv) because it embeds the full SKILL.md
body and can easily exceed comfortable argv length.
"""
cmd = ["claude", "-p", "--output-format", "text"]
if model:
cmd.extend(["--model", model])
# Remove CLAUDECODE env var to allow nesting claude -p inside a
# Claude Code session. The guard is for interactive terminal conflicts;
# programmatic subprocess usage is safe. | 0 | davila7/claude-code-templates:cli-tool/components/skills/development/skill-creator/scripts/improve_description.py | unknown | davila7/claude-code-templates |
#!/usr/bin/env python3
"""
archivebox crawl <action> [args...] [--filters]
Manage Crawl records.
Actions:
create - Create Crawl jobs from URLs
list - List Crawls as JSONL (with optional filters)
update - Update Crawls from stdin JSONL
delete - Delete Crawls from stdin JSONL
Examples:
# Create
archivebox crawl create https://example.com https://foo.com --depth=1
archivebox crawl create --tag=news https://example.com
# List with filters
archivebox crawl list --status=queued
archivebox crawl list --urls__icontains=example.com
# Update
archivebox crawl list --status=started | archivebox crawl update --status=queued
# Delete
archivebox crawl list --urls__icontains=spam.com | archivebox crawl delete --yes
# Full pipeline
archivebox crawl create https://example.com | archivebox snapshot create | archivebox run
"""
__package__ = 'archivebox.cli'
__command__ = 'archivebox crawl'
import sys
from typing import Optional, Iterable
import rich_click as click
from rich import print as r | 1 | ArchiveBox/ArchiveBox:archivebox/cli/archivebox_crawl.py | function_complex | ArchiveBox/ArchiveBox |
"""Code copied from Django Software Foundation (https://djangoproject.com/) which is licensed under the BSD 3-Clause.
Original code: https://github.com/django/django/blob/001c2f546b4053acb04f16d6b704f7b4fbca1c45/django/core/handlers/asgi.py
Modifications: we added a fix for a memory leak
(https://code.djangoproject.com/ticket/36700).
Copyright (c) Django Software Foundation and individual contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of Django nor the names of its contributors may be used
to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED | 1 | saleor/saleor:saleor/asgi/asgi_handler.py | function_complex | saleor/saleor |
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import weakref
import pytest
from tests.entrypoints.openai.chat_completion.test_vision import TEST_IMAGE_ASSETS
from vllm import LLM
from vllm.distributed import cleanup_dist_env_and_memory
from vllm.sampling_params import SamplingParams
@pytest.fixture(scope="function")
def text_llm():
    """Yield a weakref proxy to a small instruct model; tear down after the test."""
    # pytest caches fixture values, so hand out a weakref.proxy and drop
    # our own reference at teardown to let the engine be garbage collected.
    engine = LLM(model="meta-llama/Llama-3.2-1B-Instruct", enforce_eager=True, seed=0)
    yield weakref.proxy(engine)
    del engine
    cleanup_dist_env_and_memory()
@pytest.fixture(scope="function")
def llm_for_failure_test():
"""
Fixture for testing issue #26081.
Uses a small max_model_len to easily trigger length errors.
"""
# pytest caches the fixture so we use weakref.proxy to
# enable garbage collection
llm = LLM(
model="meta-llama/Llama-3.2-1B-Instruct",
| 0 | vllm-project/vllm:tests/entrypoints/llm/test_chat.py | unknown | vllm-project/vllm |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.