repo
stringclasses 44
values | path
stringlengths 6
100
| size
int64 146
125k
| copies
int64 0
0
| license
stringclasses 1
value | content
stringlengths 146
125k
| token_count
int64 51
32.1k
| hash
stringlengths 64
64
| line_mean
float64 12.5
80.7
| line_max
int64 30
924
| alpha_frac
float64 0.36
0.8
| ratio
float64 1.55
5.96
| autogenerated
bool 1
class | config_or_test
bool 2
classes | has_no_keywords
bool 2
classes | has_few_assignments
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
crestalnetwork/intentkit
|
skills/allora/__init__.py
| 2,059
| 0
|
MIT License
|
"""Allora skill module."""
import logging
from typing import NotRequired, TypedDict
from abstracts.skill import SkillStoreABC
from skills.allora.base import AlloraBaseTool
from skills.allora.price import AlloraGetPrice
from skills.base import SkillConfig, SkillState
# Cache skills at the system level, because they are stateless
_cache: dict[str, AlloraBaseTool] = {}

logger = logging.getLogger(__name__)
class SkillStates(TypedDict):
    # State for each Allora skill name; values are "disabled", "public",
    # or "private" (see get_skills filtering below).
    get_price_prediction: SkillState
class Config(SkillConfig):
    """Configuration for Allora skills."""

    # Per-skill enablement states.
    states: SkillStates
    # Optional Allora API key; may be absent (NotRequired).
    api_key: NotRequired[str]
async def get_skills(
    config: "Config",
    is_private: bool,
    store: SkillStoreABC,
    **_,
) -> list[AlloraBaseTool]:
    """Get all Allora skills.

    Args:
        config: The configuration for Allora skills.
        is_private: Whether to include private skills.
        store: The skill store for persisting data.

    Returns:
        A list of Allora skills.
    """
    # A skill is included when public, or private while private access is allowed;
    # "disabled" (or any other state) is filtered out.
    enabled_names = [
        skill_name
        for skill_name, state in config["states"].items()
        if state == "public" or (state == "private" and is_private)
    ]
    # Resolve each name through the cached getter, dropping unknown skills.
    resolved = (get_allora_skill(skill_name, store) for skill_name in enabled_names)
    return [skill for skill in resolved if skill]
def get_allora_skill(
    name: str,
    store: SkillStoreABC,
) -> AlloraBaseTool | None:
    """Get an Allora skill by name.

    Args:
        name: The name of the skill to get
        store: The skill store for persisting data

    Returns:
        The requested Allora skill, or None when the name is unknown.
    """
    if name == "get_price_prediction":
        # Skills are stateless, so a single shared instance per name is
        # cached for the lifetime of the process.
        if name not in _cache:
            _cache[name] = AlloraGetPrice(
                skill_store=store,
            )
        return _cache[name]
    # Unknown skill names are logged (lazy %-args) and reported as None,
    # matching the Optional return annotation above (previously the
    # annotation claimed a non-optional AlloraBaseTool).
    logger.warning("Unknown Allora skill: %s", name)
    return None
| 547
|
c8f767634a1da118872662344f775f97a5819481ad9691ae3885297785e36aca
| 23.807229
| 70
| 0.641088
| 3.764168
| false
| true
| false
| false
|
pydantic/pydantic-ai
|
tests/models/test_cohere.py
| 12,440
| 0
|
MIT License
|
from __future__ import annotations as _annotations
import json
from collections.abc import Sequence
from dataclasses import dataclass
from datetime import timezone
from typing import Any, Union, cast
import pytest
from inline_snapshot import snapshot
from pydantic_ai import Agent, ModelHTTPError, ModelRetry
from pydantic_ai.messages import (
ImageUrl,
ModelRequest,
ModelResponse,
RetryPromptPart,
SystemPromptPart,
TextPart,
ToolCallPart,
ToolReturnPart,
UserPromptPart,
)
from pydantic_ai.usage import Usage
from ..conftest import IsNow, raise_if_exception, try_import
with try_import() as imports_successful:
import cohere
from cohere import (
AssistantMessageResponse,
AsyncClientV2,
ChatResponse,
TextAssistantMessageResponseContentItem,
ToolCallV2,
ToolCallV2Function,
)
from cohere.core.api_error import ApiError
from pydantic_ai.models.cohere import CohereModel
from pydantic_ai.providers.cohere import CohereProvider
# note: we use Union here for compatibility with Python 3.9
MockChatResponse = Union[ChatResponse, Exception]

# Skip the whole module when cohere isn't installed; run async tests under anyio.
pytestmark = [
    pytest.mark.skipif(not imports_successful(), reason='cohere not installed'),
    pytest.mark.anyio,
]
def test_init():
    """The model exposes its name, system identifier and default base URL."""
    model = CohereModel('command-r7b-12-2024', provider=CohereProvider(api_key='foobar'))
    assert model.model_name == 'command-r7b-12-2024'
    assert model.system == 'cohere'
    assert model.base_url == 'https://api.cohere.com'
@dataclass
class MockAsyncClientV2:
    """Minimal stand-in for cohere.AsyncClientV2 that replays canned responses."""

    # A single canned response/exception, or a sequence consumed in order.
    completions: MockChatResponse | Sequence[MockChatResponse] | None = None
    # Cursor into `completions` when it is a sequence; class-level default,
    # shadowed by an instance attribute on first increment.
    index = 0

    @classmethod
    def create_mock(cls, completions: MockChatResponse | Sequence[MockChatResponse]) -> AsyncClientV2:
        """Build a mock and cast it for use where AsyncClientV2 is expected."""
        return cast(AsyncClientV2, cls(completions=completions))

    async def chat(  # pragma: no cover
        self, *_args: Any, **_kwargs: Any
    ) -> ChatResponse:
        assert self.completions is not None
        if isinstance(self.completions, Sequence):
            # If the canned entry is an exception, raise it; otherwise return it.
            raise_if_exception(self.completions[self.index])
            response = cast(ChatResponse, self.completions[self.index])
        else:
            raise_if_exception(self.completions)
            response = cast(ChatResponse, self.completions)
        self.index += 1
        return response
def completion_message(message: AssistantMessageResponse, *, usage: cohere.Usage | None = None) -> ChatResponse:
    """Wrap *message* in a ChatResponse with a fixed id and COMPLETE finish reason."""
    response_fields = {
        'id': '123',
        'finish_reason': 'COMPLETE',
        'message': message,
        'usage': usage,
    }
    return ChatResponse(**response_fields)
async def test_request_simple_success(allow_model_requests: None):
    """A plain text completion round-trips through the agent, twice."""
    c = completion_message(
        AssistantMessageResponse(
            content=[
                TextAssistantMessageResponseContentItem(text='world'),
            ],
        )
    )
    mock_client = MockAsyncClientV2.create_mock(c)
    m = CohereModel('command-r7b-12-2024', provider=CohereProvider(cohere_client=mock_client))
    agent = Agent(m)
    result = await agent.run('hello')
    assert result.data == 'world'
    assert result.usage() == snapshot(Usage(requests=1))
    # reset the index so we get the same response again
    mock_client.index = 0  # type: ignore
    result = await agent.run('hello', message_history=result.new_messages())
    assert result.data == 'world'
    assert result.usage() == snapshot(Usage(requests=1))
    # Both runs and their responses appear in the accumulated history.
    assert result.all_messages() == snapshot(
        [
            ModelRequest(parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))]),
            ModelResponse(
                parts=[TextPart(content='world')], model_name='command-r7b-12-2024', timestamp=IsNow(tz=timezone.utc)
            ),
            ModelRequest(parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))]),
            ModelResponse(
                parts=[TextPart(content='world')], model_name='command-r7b-12-2024', timestamp=IsNow(tz=timezone.utc)
            ),
        ]
    )
async def test_request_simple_usage(allow_model_requests: None):
    """Token and billed-unit counts from the API surface in Usage and its details."""
    c = completion_message(
        AssistantMessageResponse(
            content=[TextAssistantMessageResponseContentItem(text='world')],
            role='assistant',
        ),
        usage=cohere.Usage(
            tokens=cohere.UsageTokens(input_tokens=1, output_tokens=1),
            billed_units=cohere.UsageBilledUnits(input_tokens=1, output_tokens=1),
        ),
    )
    mock_client = MockAsyncClientV2.create_mock(c)
    m = CohereModel('command-r7b-12-2024', provider=CohereProvider(cohere_client=mock_client))
    agent = Agent(m)
    result = await agent.run('Hello')
    assert result.data == 'world'
    # Billed units land in `details`; raw token counts in the top-level fields.
    assert result.usage() == snapshot(
        Usage(
            requests=1,
            request_tokens=1,
            response_tokens=1,
            total_tokens=2,
            details={
                'input_tokens': 1,
                'output_tokens': 1,
            },
        )
    )
async def test_request_structured_response(allow_model_requests: None):
    """A final_result tool call is parsed into the typed result (list[int])."""
    c = completion_message(
        AssistantMessageResponse(
            content=None,
            role='assistant',
            tool_calls=[
                ToolCallV2(
                    id='123',
                    function=ToolCallV2Function(arguments='{"response": [1, 2, 123]}', name='final_result'),
                    type='function',
                )
            ],
        )
    )
    mock_client = MockAsyncClientV2.create_mock(c)
    m = CohereModel('command-r7b-12-2024', provider=CohereProvider(cohere_client=mock_client))
    agent = Agent(m, result_type=list[int])
    result = await agent.run('Hello')
    assert result.data == [1, 2, 123]
    # History records the tool call and the synthetic tool-return acknowledgement.
    assert result.all_messages() == snapshot(
        [
            ModelRequest(parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))]),
            ModelResponse(
                parts=[
                    ToolCallPart(
                        tool_name='final_result',
                        args='{"response": [1, 2, 123]}',
                        tool_call_id='123',
                    )
                ],
                model_name='command-r7b-12-2024',
                timestamp=IsNow(tz=timezone.utc),
            ),
            ModelRequest(
                parts=[
                    ToolReturnPart(
                        tool_name='final_result',
                        content='Final result processed.',
                        tool_call_id='123',
                        timestamp=IsNow(tz=timezone.utc),
                    )
                ]
            ),
        ]
    )
async def test_request_tool_call(allow_model_requests: None):
    """Two tool calls (one retried via ModelRetry) then a final text answer; usage sums."""
    responses = [
        completion_message(
            AssistantMessageResponse(
                content=None,
                role='assistant',
                tool_calls=[
                    ToolCallV2(
                        id='1',
                        function=ToolCallV2Function(arguments='{"loc_name": "San Fransisco"}', name='get_location'),
                        type='function',
                    )
                ],
            ),
            usage=cohere.Usage(),
        ),
        completion_message(
            AssistantMessageResponse(
                content=None,
                role='assistant',
                tool_calls=[
                    ToolCallV2(
                        id='2',
                        function=ToolCallV2Function(arguments='{"loc_name": "London"}', name='get_location'),
                        type='function',
                    )
                ],
            ),
            usage=cohere.Usage(
                tokens=cohere.UsageTokens(input_tokens=5, output_tokens=3),
                billed_units=cohere.UsageBilledUnits(input_tokens=4, output_tokens=2),
            ),
        ),
        completion_message(
            AssistantMessageResponse(
                content=[TextAssistantMessageResponseContentItem(text='final response')],
                role='assistant',
            )
        ),
    ]
    mock_client = MockAsyncClientV2.create_mock(responses)
    m = CohereModel('command-r7b-12-2024', provider=CohereProvider(cohere_client=mock_client))
    agent = Agent(m, system_prompt='this is the system prompt')

    @agent.tool_plain
    async def get_location(loc_name: str) -> str:
        # The first canned call uses a misspelled city, triggering a retry prompt.
        if loc_name == 'London':
            return json.dumps({'lat': 51, 'lng': 0})
        else:
            raise ModelRetry('Wrong location, please try again')

    result = await agent.run('Hello')
    assert result.data == 'final response'
    assert result.all_messages() == snapshot(
        [
            ModelRequest(
                parts=[
                    SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)),
                    UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)),
                ]
            ),
            ModelResponse(
                parts=[
                    ToolCallPart(
                        tool_name='get_location',
                        args='{"loc_name": "San Fransisco"}',
                        tool_call_id='1',
                    )
                ],
                model_name='command-r7b-12-2024',
                timestamp=IsNow(tz=timezone.utc),
            ),
            ModelRequest(
                parts=[
                    RetryPromptPart(
                        content='Wrong location, please try again',
                        tool_name='get_location',
                        tool_call_id='1',
                        timestamp=IsNow(tz=timezone.utc),
                    )
                ]
            ),
            ModelResponse(
                parts=[
                    ToolCallPart(
                        tool_name='get_location',
                        args='{"loc_name": "London"}',
                        tool_call_id='2',
                    )
                ],
                model_name='command-r7b-12-2024',
                timestamp=IsNow(tz=timezone.utc),
            ),
            ModelRequest(
                parts=[
                    ToolReturnPart(
                        tool_name='get_location',
                        content='{"lat": 51, "lng": 0}',
                        tool_call_id='2',
                        timestamp=IsNow(tz=timezone.utc),
                    )
                ]
            ),
            ModelResponse(
                parts=[TextPart(content='final response')],
                model_name='command-r7b-12-2024',
                timestamp=IsNow(tz=timezone.utc),
            ),
        ]
    )
    # Only the second canned response reported tokens; totals reflect just those.
    assert result.usage() == snapshot(
        Usage(
            requests=3,
            request_tokens=5,
            response_tokens=3,
            total_tokens=8,
            details={'input_tokens': 4, 'output_tokens': 2},
        )
    )
async def test_multimodal(allow_model_requests: None):
    """Image inputs are rejected: Cohere has no multi-modal support yet."""
    c = completion_message(AssistantMessageResponse(content=[TextAssistantMessageResponseContentItem(text='world')]))
    mock_client = MockAsyncClientV2.create_mock(c)
    m = CohereModel('command-r7b-12-2024', provider=CohereProvider(cohere_client=mock_client))
    agent = Agent(m)
    with pytest.raises(RuntimeError, match='Cohere does not yet support multi-modal inputs.'):
        await agent.run(
            [
                'hello',
                ImageUrl(
                    url='https://t3.ftcdn.net/jpg/00/85/79/92/360_F_85799278_0BBGV9OAdQDTLnKwAPBCcg1J7QtiieJY.jpg'
                ),
            ]
        )
def test_model_status_error(allow_model_requests: None) -> None:
    """A cohere ApiError surfaces as ModelHTTPError carrying status code and body."""
    mock_client = MockAsyncClientV2.create_mock(
        ApiError(
            status_code=500,
            body={'error': 'test error'},
        )
    )
    m = CohereModel('command-r', provider=CohereProvider(cohere_client=mock_client))
    agent = Agent(m)
    with pytest.raises(ModelHTTPError) as exc_info:
        agent.run_sync('hello')
    assert str(exc_info.value) == snapshot("status_code: 500, model_name: command-r, body: {'error': 'test error'}")
@pytest.mark.vcr()
async def test_request_simple_success_with_vcr(allow_model_requests: None, co_api_key: str):
    """Request against the real Cohere API, replayed from a VCR cassette."""
    m = CohereModel('command-r7b-12-2024', provider=CohereProvider(api_key=co_api_key))
    agent = Agent(m)
    result = await agent.run('hello')
    assert result.data == snapshot('Hello! How can I assist you today?')
| 2,992
|
12d11bb6596db25ba3dcaeb857c4eeccd5026878e60f7a7ebd783c844c83de16
| 33.269972
| 117
| 0.552814
| 4.157754
| false
| false
| false
| false
|
HKUDS/LightRAG
|
examples/get_all_edges_nx.py
| 1,253
| 0
|
MIT License
|
import networkx as nx

# Load the chunk/entity relation graph from a GraphML export.
G = nx.read_graphml("./dickensTestEmbedcall/graph_chunk_entity_relation.graphml")
def get_all_edges_and_nodes(G):
    """Collect every edge of *G* with its data plus both endpoint node properties."""
    return [
        {
            "start": u,
            "end": v,
            # Assuming 'label' is used for edge type
            "label": data.get("label", ""),
            "properties": data,
            "start_node_properties": G.nodes[u],
            "end_node_properties": G.nodes[v],
        }
        for u, v, data in G.edges(data=True)
    ]
# Example usage
if __name__ == "__main__":
    # Assume G is your NetworkX graph loaded from Neo4j
    all_edges = get_all_edges_and_nodes(G)
    # Print all edges and node properties
    for edge in all_edges:
        print(f"Edge Label: {edge['label']}")
        print(f"Edge Properties: {edge['properties']}")
        print(f"Start Node: {edge['start']}")
        print(f"Start Node Properties: {edge['start_node_properties']}")
        print(f"End Node: {edge['end']}")
        print(f"End Node Properties: {edge['end_node_properties']}")
        print("---")
| 337
|
5e1df27c5f96f08f2ceec7e1dedd734bb25c5cd1f01b37011304b4b500b7314a
| 30.325
| 81
| 0.548284
| 3.718101
| false
| false
| false
| false
|
meta-llama/llama-stack
|
llama_stack/models/llama/llama3_1/prompts.py
| 12,244
| 0
|
MIT License
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# top-level folder for each specific model found within the models/ directory at
# the top-level of this source tree.
import textwrap
from typing import List
from llama_stack.models.llama.datatypes import (
BuiltinTool,
RawMessage,
StopReason,
ToolCall,
ToolPromptFormat,
)
from llama_stack.models.llama.prompt_format import (
# llama3_1_e2e_tool_call_dialog,
TextCompletionContent,
UseCase,
llama3_1_builtin_tool_call_dialog,
llama3_1_custom_tool_call_dialog,
)
def wolfram_alpha_response():
    """Return a canned Wolfram Alpha JSON reply (100th decimal of pi) for the examples."""
    return textwrap.dedent(
        """
        {
            "queryresult": {
                "success": true,
                "inputstring": "100th decimal of pi",
                "pods": [
                    {
                        "title": "Input interpretation",
                        "subpods": [
                            {
                                "title": "",
                                "plaintext": "100th digit | \u03c0"
                            }
                        ]
                    },
                    {
                        "title": "Nearby digits",
                        "subpods": [
                            {
                                "title": "",
                                "plaintext": "...86208998628034825342117067982148086513282306647093..."
                            }
                        ]
                    },
                    {
                        "title": "Result",
                        "primary": true,
                        "subpods": [
                            {
                                "title": "",
                                "plaintext": "7"
                            }
                        ]
                    }
                ]
            }
        }
        """
    )
def usecases() -> List[UseCase | str]:
    """Return the ordered Llama 3.1 prompt-format guide: markdown strings
    interleaved with UseCase examples, rendered in sequence."""
    return [
        textwrap.dedent(
            """
            # Llama 3.1 - Prompt Formats
            ## Tokens
            Here is a list of special tokens that are supported by Llama 3.1:
            - `<|begin_of_text|>`: Specifies the start of the prompt
            - `<|end_of_text|>`: Model will cease to generate more tokens. This token is generated only by the base models.
            - `<|finetune_right_pad_id|>`: This token is used for padding text sequences to the same length in a batch.
            - `<|start_header_id|>` and `<|end_header_id|>`: These tokens enclose the role for a particular message. The possible roles are: [system, user, assistant and tool]
            - `<|eom_id|>`: End of message. A message represents a possible stopping point for execution where the model can inform the executor that a tool call needs to be made. This is used for multi-step interactions between the model and any available tools. This token is emitted by the model when the Environment: ipython instruction is used in the system prompt, or if the model calls for a built-in tool.
            - `<|eot_id|>`: End of turn. Represents when the model has determined that it has finished interacting with the user message that initiated its response. This is used in two scenarios:
            - at the end of a direct interaction between the model and the user
            - at the end of multiple interactions between the model and any available tools
            This token signals to the executor that the model has finished generating a response.
            - `<|python_tag|>`: Is a special tag used in the model's response to signify a tool call.
            """
        ),
        textwrap.dedent(
            """
            There are 4 different roles that are supported by Llama 3.1
            - `system`: Sets the context in which to interact with the AI model. It typically includes rules, guidelines, or necessary information that helps the model respond effectively.
            - `user`: Represents the human interacting with the model. It includes the inputs, commands, and questions to the model.
            - `tool`: A new role introduced in Llama 3.1. This role is used to mark messages with the output of a tool call when sent back to the model from the executor. (The actual token used by the model for this role is "ipython".)
            - `assistant`: Represents the response generated by the AI model based on the context provided in the `system`, `tool` and `user` prompts.
            """
        ),
        UseCase(
            title="Llama 3.1 Base Model",
            description="Text completion for Llama 3.1 base model uses this format.",
            dialogs=[TextCompletionContent(content="Color of sky is blue but sometimes can also be")],
            notes="Note start special tag",
        ),
        "## Llama 3.1 Instruct Model",
        UseCase(
            title="User and assistant conversation",
            description="Here is a regular multi-turn user assistant conversation and how its formatted.",
            dialogs=[
                [
                    RawMessage(role="system", content="You are a helpful assistant"),
                    RawMessage(
                        role="user",
                        content="Answer who are you in the form of jeopardy?",
                    ),
                ]
            ],
            notes="",
        ),
        "## Tool Calling Formats",
        textwrap.dedent(
            """
            The three built-in tools (brave_search, wolfram_alpha, and code interpreter) can be turned on using the system prompt:
            - Brave Search: Tool call to perform web searches.
            - Wolfram Alpha: Tool call to perform complex mathematical calculations.
            - Code Interpreter: Enables the model to output python code.
            """
        ),
        UseCase(
            title="Builtin Tool Calling",
            description=textwrap.dedent(
                """
                Here is an example of a conversation using brave search
                """
            ),
            dialogs=[llama3_1_builtin_tool_call_dialog()],
            notes=textwrap.dedent(
                """
                - Just including Environment: ipython turns on code interpreter; therefore, you don't need to specify code interpretation on the Tools: line. The model can generate python code which is interpreted by the executor, with the result provided back to the model.
                - The message body of the assistant response starts with a special tag <|python_tag|>
                - As alluded to above, in such an environment, the model can generate <|eom_id|> instead of just the standard <|eot_id|> . The latter indicates the turn is finished, while the former indicates continued multi-step reasoning. That is, the model is expecting a continuation message with the output of the tool call.
                - The model tool call response is of the form `tool.call(query="...")` wher tool is `brave_search` or `wolfram_alpha`
                """
            ),
        ),
        UseCase(
            title="Builtin Code Interpreter",
            description="Here is an actual example of model responding with code",
            dialogs=[
                [
                    RawMessage(role="system", content="Environment: ipython"),
                    RawMessage(
                        role="user",
                        content="Write code to check if number is prime, use that to see if the number 7 is prime",
                    ),
                ],
            ],
            notes=textwrap.dedent(
                """
                - Model starts with <|python_tag|> and continues writing python code that it needs to be executed
                - No explicit mention of code_interpreter in system prompt. `Environment: ipython` implicitly enables it.
                """
            ),
        ),
        UseCase(
            title="Built-in tools full interaction",
            description="Here is a full interaction with the built-in tools including the tool response and the final assistant response.",
            dialogs=[
                [
                    RawMessage(
                        role="system",
                        content="Environment: ipython\nTools: brave_search, wolfram_alpha\n",
                    ),
                    RawMessage(role="user", content="What is the 100th decimal of pi?"),
                    RawMessage(
                        role="assistant",
                        content="",
                        stop_reason=StopReason.end_of_message,
                        tool_calls=[
                            ToolCall(
                                call_id="tool_call_id",
                                tool_name=BuiltinTool.wolfram_alpha,
                                arguments={"query": "100th decimal of pi"},
                            )
                        ],
                    ),
                    RawMessage(
                        role="tool",
                        content=wolfram_alpha_response(),
                    ),
                ],
            ],
            notes=textwrap.dedent(
                """
                - Note the `<|python_tag|>` in the assistant response.
                - Role is `tool` for the wolfram alpha response that is passed back to the model.
                - Final message from assistant has <|eot_id|> tag.
                """
            ),
        ),
        "## Zero shot tool calling",
        UseCase(
            title="JSON based tool calling",
            description=textwrap.dedent(
                """
                Llama models can now output custom tool calls from a single message to allow easier tool calling.
                The following prompts provide an example of how custom tools can be called from the output of the model.
                It's important to note that the model itself does not execute the calls; it provides structured output to facilitate calling by an executor.
                """
            ),
            dialogs=[llama3_1_custom_tool_call_dialog()],
            notes=textwrap.dedent(
                """
                - JSON format for providing tools needs name, description and parameters
                - Model responds with `<|python_tag|>` and `<|eom_id|>` as `Environment: ipython` was in the system prompt
                - Instructions for tools added as a user message
                - Only single tool calls are supported as of now
                """
            ),
        ),
        # FIXME: This is not working yet as expected
        # UseCase(
        #     title="E2E tool call example",
        #     description=textwrap.dedent(
        #         """
        #         Here is an example showing the whole multi-step turn by taking custom tool outputs and passing back to the model.
        #         """
        #     ),
        #     dialogs=[
        #         llama3_1_e2e_tool_call_dialog(
        #             tool_prompt_format=ToolPromptFormat.function_tag
        #         )
        #     ],
        #     notes="",
        # ),
        "## Example of a user defined tool calling",
        UseCase(
            title="`<function>` based tool calling",
            description=textwrap.dedent(
                """
                Here is an example of how you could also write custom instructions for model to do zero shot tool calling.
                In this example, we define a custom tool calling format using the `<function>` tag.
                """
            ),
            dialogs=[llama3_1_custom_tool_call_dialog(ToolPromptFormat.function_tag)],
            notes=textwrap.dedent(
                """
                - In this case, model does NOT respond with `<|python_tag|>` and ends with `<|eot_id|>`
                - Instructions for tools added as a user message
                """
            ),
        ),
    ]
| 2,524
|
7e69e6e476cd1ff344a20cf4195b03f5316a1ee747b7cca8277c7eef362a3c64
| 46.457364
| 413
| 0.527932
| 4.85103
| false
| false
| false
| false
|
crestalnetwork/intentkit
|
app/services/twitter/oauth2_callback.py
| 4,968
| 0
|
MIT License
|
"""Twitter OAuth2 callback handler."""
from datetime import datetime, timezone
from typing import Optional
from urllib.parse import parse_qs, urlencode, urlparse
import tweepy
from fastapi import APIRouter, HTTPException
from starlette.responses import JSONResponse, RedirectResponse
from app.config.config import config
from app.services.twitter.oauth2 import oauth2_user_handler
from models.agent import Agent, AgentData
# All Twitter OAuth2 callback routes are mounted under /callback/auth.
router = APIRouter(prefix="/callback/auth", tags=["Callback"])
def is_valid_url(url: str) -> bool:
    """Check if a URL is valid.

    Args:
        url: URL to validate

    Returns:
        bool: True if URL is valid, False otherwise
    """
    try:
        parsed = urlparse(url)
    except (ValueError, AttributeError, TypeError):
        # Malformed or non-string input counts as invalid.
        return False
    # Valid only when both a scheme and a network location are present.
    return bool(parsed.scheme) and bool(parsed.netloc)
def _auth_redirect(redirect_uri: str, params: dict) -> RedirectResponse:
    """Redirect back to the caller with *params* appended to its query string."""
    separator = "&" if "?" in redirect_uri else "?"
    return RedirectResponse(url=f"{redirect_uri}{separator}{urlencode(params)}")


@router.get("/twitter")
async def twitter_oauth_callback(
    state: str,
    code: Optional[str] = None,
    error: Optional[str] = None,
):
    """Handle Twitter OAuth2 callback.

    This endpoint is called by Twitter after the user authorizes the application.
    It exchanges the authorization code for access and refresh tokens, then stores
    them in the database.

    **Query Parameters:**
    * `state` - URL-encoded state containing agent_id and redirect_uri
    * `code` - Authorization code from Twitter
    * `error` - Error message from Twitter (optional)

    **Returns:**
    * JSONResponse or RedirectResponse depending on redirect_uri
    """
    if not state:
        raise HTTPException(status_code=400, detail="Missing state parameter")
    # Initialized before the try-block so the except handlers below can always
    # reference it — previously it was only bound inside the try, so a failure
    # while parsing `state` would raise NameError and mask the real error.
    redirect_uri = ""
    try:
        # Parse state parameter
        state_params = parse_qs(state)
        agent_id = state_params.get("agent_id", [""])[0]
        redirect_uri = state_params.get("redirect_uri", [""])[0]
        if error:
            raise HTTPException(status_code=400, detail=error)
        if not code:
            raise HTTPException(status_code=400, detail="Missing code parameter")
        if not agent_id:
            raise HTTPException(
                status_code=400, detail="Missing agent_id in state parameter"
            )
        agent = await Agent.get(agent_id)
        if not agent:
            raise HTTPException(status_code=404, detail=f"Agent {agent_id} not found")
        agent_data = await AgentData.get(agent_id)
        if not agent_data:
            agent_data = AgentData(id=agent_id)
        # Exchange code for tokens
        authorization_response = (
            f"{config.twitter_oauth2_redirect_uri}?state={state}&code={code}"
        )
        token = oauth2_user_handler.get_token(authorization_response)
        # Store tokens in database
        agent_data.twitter_access_token = token["access_token"]
        agent_data.twitter_refresh_token = token["refresh_token"]
        agent_data.twitter_access_token_expires_at = datetime.fromtimestamp(
            token["expires_at"], tz=timezone.utc
        )
        # Get user info using the freshly issued token
        client = tweepy.Client(bearer_token=token["access_token"], return_type=dict)
        me = client.get_me(user_auth=False)
        username = None
        if me and "data" in me:
            agent_data.twitter_id = me.get("data").get("id")
            username = me.get("data").get("username")
            agent_data.twitter_username = username
            agent_data.twitter_name = me.get("data").get("name")
        # Commit changes
        await agent_data.save()
        # Handle response based on redirect_uri
        if redirect_uri and is_valid_url(redirect_uri):
            return _auth_redirect(
                redirect_uri, {"twitter_auth": "success", "username": username}
            )
        return JSONResponse(
            content={
                "message": "Authentication successful, you can close this window",
                "username": username,
            },
            status_code=200,
        )
    except HTTPException as http_exc:
        # Send the caller back with the failure reason when possible
        if redirect_uri and is_valid_url(redirect_uri):
            return _auth_redirect(
                redirect_uri, {"twitter_auth": "failed", "error": str(http_exc.detail)}
            )
        # Re-raise HTTP exceptions to preserve their status codes
        raise http_exc
    except Exception as e:
        # Handle error response for unexpected errors
        if redirect_uri and is_valid_url(redirect_uri):
            return _auth_redirect(
                redirect_uri, {"twitter_auth": "failed", "error": str(e)}
            )
        # For unexpected errors, use 500 and keep the original cause chained
        raise HTTPException(status_code=500, detail=str(e)) from e
| 1,184
|
f3097720036c5038918e113b07983f57e6afac2e25e38a2e6457797fbae03ac6
| 35.262774
| 100
| 0.625403
| 4.195946
| false
| false
| false
| false
|
virattt/ai-hedge-fund
|
src/data/models.py
| 3,736
| 0
|
MIT License
|
from pydantic import BaseModel
class Price(BaseModel):
    """Single OHLCV price bar."""

    open: float
    close: float
    high: float
    low: float
    volume: int
    # Timestamp of the bar — presumably ISO-formatted; verify against the data source.
    time: str
class PriceResponse(BaseModel):
    """Price history for one ticker."""

    ticker: str
    prices: list[Price]
class FinancialMetrics(BaseModel):
    """Fundamental metrics for one ticker/report period; any metric may be
    missing (None) when the provider does not supply it."""

    ticker: str
    report_period: str
    period: str
    currency: str
    # Valuation
    market_cap: float | None
    enterprise_value: float | None
    price_to_earnings_ratio: float | None
    price_to_book_ratio: float | None
    price_to_sales_ratio: float | None
    enterprise_value_to_ebitda_ratio: float | None
    enterprise_value_to_revenue_ratio: float | None
    free_cash_flow_yield: float | None
    peg_ratio: float | None
    # Profitability
    gross_margin: float | None
    operating_margin: float | None
    net_margin: float | None
    return_on_equity: float | None
    return_on_assets: float | None
    return_on_invested_capital: float | None
    # Efficiency
    asset_turnover: float | None
    inventory_turnover: float | None
    receivables_turnover: float | None
    days_sales_outstanding: float | None
    operating_cycle: float | None
    working_capital_turnover: float | None
    # Liquidity
    current_ratio: float | None
    quick_ratio: float | None
    cash_ratio: float | None
    operating_cash_flow_ratio: float | None
    # Leverage
    debt_to_equity: float | None
    debt_to_assets: float | None
    interest_coverage: float | None
    # Growth
    revenue_growth: float | None
    earnings_growth: float | None
    book_value_growth: float | None
    earnings_per_share_growth: float | None
    free_cash_flow_growth: float | None
    operating_income_growth: float | None
    ebitda_growth: float | None
    # Per-share / payout
    payout_ratio: float | None
    earnings_per_share: float | None
    book_value_per_share: float | None
    free_cash_flow_per_share: float | None
class FinancialMetricsResponse(BaseModel):
    """List of financial metrics returned by the metrics endpoint."""

    financial_metrics: list[FinancialMetrics]
class LineItem(BaseModel):
    """Financial statement line item; extra fields are accepted dynamically."""

    ticker: str
    report_period: str
    period: str
    currency: str
    # Allow additional fields dynamically
    model_config = {"extra": "allow"}
class LineItemResponse(BaseModel):
    """Search results for line-item queries."""

    search_results: list[LineItem]
class InsiderTrade(BaseModel):
    """Single insider transaction filing; most attributes are optional."""

    ticker: str
    issuer: str | None
    name: str | None
    title: str | None
    is_board_director: bool | None
    transaction_date: str | None
    transaction_shares: float | None
    transaction_price_per_share: float | None
    transaction_value: float | None
    shares_owned_before_transaction: float | None
    shares_owned_after_transaction: float | None
    security_title: str | None
    # Filing date is the only always-present date.
    filing_date: str
class InsiderTradeResponse(BaseModel):
    """List of insider trades for a query."""

    insider_trades: list[InsiderTrade]
class CompanyNews(BaseModel):
    """Single news article about a ticker."""

    ticker: str
    title: str
    author: str
    source: str
    date: str
    url: str
    # Optional sentiment tag — value vocabulary depends on the provider.
    sentiment: str | None = None
class CompanyNewsResponse(BaseModel):
    """List of news articles for a query."""

    news: list[CompanyNews]
class Position(BaseModel):
    """Holding in a single ticker: cash allocated plus shares held."""

    cash: float = 0.0
    shares: int = 0
    ticker: str
class Portfolio(BaseModel):
    """All positions plus uninvested cash."""

    positions: dict[str, Position]  # ticker -> Position mapping
    total_cash: float = 0.0
class AnalystSignal(BaseModel):
    """One agent's trading signal; all fields optional so partial signals validate."""

    signal: str | None = None
    confidence: float | None = None
    reasoning: dict | str | None = None
    max_position_size: float | None = None  # For risk management signals
class TickerAnalysis(BaseModel):
    """All analyst signals collected for a single ticker."""

    ticker: str
    analyst_signals: dict[str, AnalystSignal]  # agent_name -> signal mapping
class AgentStateData(BaseModel):
    """Snapshot of the hedge-fund run: universe, portfolio, window, analyses."""

    tickers: list[str]
    portfolio: Portfolio
    start_date: str
    end_date: str
    ticker_analyses: dict[str, TickerAnalysis]  # ticker -> analysis mapping
class AgentStateMetadata(BaseModel):
    """Run metadata; extra fields are allowed for forward compatibility."""

    show_reasoning: bool = False
    model_config = {"extra": "allow"}
| 1,023
|
d0955b70c6fd1fe552c85c4b576a1d4c6e26e16ccf67d3288ec98fcf52439cf0
| 24.073826
| 77
| 0.680675
| 3.652004
| false
| false
| false
| false
|
meta-llama/llama-stack
|
llama_stack/apis/common/deployment_types.py
| 753
| 0
|
MIT License
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from enum import Enum
from typing import Any, Dict, Optional
from pydantic import BaseModel
from llama_stack.apis.common.content_types import URL
from llama_stack.schema_utils import json_schema_type
@json_schema_type
class RestAPIMethod(Enum):
    """HTTP method used when executing a REST API call."""

    GET = "GET"
    POST = "POST"
    PUT = "PUT"
    DELETE = "DELETE"
@json_schema_type
class RestAPIExecutionConfig(BaseModel):
    """Everything needed to issue a REST call: target URL, method, and
    optional query params, headers, and request body."""

    url: URL
    method: RestAPIMethod
    params: Optional[Dict[str, Any]] = None
    headers: Optional[Dict[str, Any]] = None
    body: Optional[Dict[str, Any]] = None
| 216
|
f074cd9f2c3f20aeda5fd5ceba097b16cdd8e67741a7e2a350279a6d2bb7792a
| 24.1
| 79
| 0.722444
| 3.486111
| false
| false
| false
| false
|
meta-llama/llama-stack
|
llama_stack/models/llama/llama4/ffn.py
| 2,196
| 0
|
MIT License
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# top-level folder for each specific model found within the models/ directory at
# the top-level of this source tree.
from typing import Any, Dict, List
from fairscale.nn.model_parallel.layers import ColumnParallelLinear, RowParallelLinear
from fairscale.nn.model_parallel.mappings import reduce_from_model_parallel_region
from torch import nn
from torch.nn import functional as F
class FeedForward(nn.Module):
    """Model-parallel SwiGLU-style feed-forward block: w2(silu(w1·x) * w3·x)."""

    def __init__(
        self,
        dim: int,
        hidden_dim: int,
        do_reduce: bool = True,
    ):
        super().__init__()
        # When True, forward() sums the partial row-parallel outputs across
        # model-parallel ranks.
        self.do_reduce = do_reduce
        # w1/w3: column-parallel up-projections (each rank holds a shard of
        # hidden_dim); w2: row-parallel down-projection back to `dim`.
        self.w1 = ColumnParallelLinear(dim, hidden_dim, bias=False, gather_output=False, init_method=lambda x: x)
        self.w2 = RowParallelLinear(hidden_dim, dim, bias=False, input_is_parallel=True, init_method=lambda x: x)
        self.w3 = ColumnParallelLinear(dim, hidden_dim, bias=False, gather_output=False, init_method=lambda x: x)
        self._register_load_state_dict_pre_hook(self.load_hook)

    def load_hook(
        self,
        state_dict: Dict[str, Any],
        prefix: str,
        local_metadata: Dict[str, Any],
        strict: bool,
        missing_keys: List[str],
        unexpected_keys: List[str],
        error_msgs: List[str],
    ) -> None:
        """Remap fused checkpoint keys (mlp.fc1_weight / mlp.fc2_weight) onto w1/w2/w3."""
        if prefix + "mlp.fc1_weight" in state_dict:
            # fc1 stores w1 and w3 concatenated along the output dimension.
            w1, w3 = state_dict.pop(prefix + "mlp.fc1_weight").chunk(2, dim=0)
            state_dict[prefix + "w1.weight"] = w1
            state_dict[prefix + "w3.weight"] = w3
            state_dict[prefix + "w2.weight"] = state_dict.pop(prefix + "mlp.fc2_weight")

    def forward(self, x):
        # Gated activation: silu(x @ w1.T) elementwise-times (x @ w3.T).
        x = F.silu(F.linear(x, self.w1.weight)) * F.linear(x, self.w3.weight)
        out = F.linear(x, self.w2.weight)
        if self.do_reduce:
            # Sum partial results across model-parallel ranks.
            return reduce_from_model_parallel_region(out)
        return out
| 642
|
9149f23fe08c0db08c6b5b29f4926d13cab37ba4c8f79a2942186afe8f43e39d
| 36.862069
| 113
| 0.651639
| 3.420561
| false
| false
| false
| false
|
MadcowD/ell
|
examples/future/limbo.py
| 1,455
| 0
|
MIT License
|
from typing import List
import ell
from ell.types.message import Message
ell.init(verbose=True, store='./logdir', autocommit=True)
@ell.tool()
def order_t_shirt(size : str, color : str, address : str):
    """Orders a t-shirt of the given size and color, shipped to the given address."""
    # ....\
    pass
@ell.tool()
def get_order_arrival_date(order_id: str):
    """Gets the arrival date of a t-shirt order"""
    # ...
    # NOTE(review): stub — implicitly returns None until implemented.
@ell.complex(model="gpt-4o", temperature=0.1, tools=[order_t_shirt, get_order_arrival_date])
def limbo_chat_bot(message_history: List[Message]) -> List[Message]:
    # Prepend the persona system prompt to the running conversation; ell
    # sends the combined message list (plus the registered tools) to the
    # model on every call.
    return [
        ell.system("You are a chatbot mimicing the popstar limbo. She is an alien cat girl from outerspace that writes in all lwoer case kawaii! You interact with all her fans and can help them do various things and are always game to hangout and just chat.."),
    ] + message_history
if __name__ == "__main__":
    # Simple REPL: alternate user input and model responses, executing any
    # tool calls the model requests and feeding the results back once.
    message_history = []
    while True:
        user_message = input("You: ")
        message_history.append(ell.user(user_message))
        response = limbo_chat_bot(message_history)
        print(response)
        # print("Limbo: ", response[-1].content)
        message_history.append(response)
        if response.tool_calls:
            # Run the requested tools, record the results, and get the
            # model's follow-up response to those results.
            tool_results = response.call_tools_and_collect_as_message()
            print("Tool results: ", tool_results)
            message_history.append(tool_results)
            response = limbo_chat_bot(message_history)
            message_history.append(response)
| 421
|
bc5531025ccf18b168f4a8ba117b0dfe3f0dad44e073b04b60137883626cf182
| 28.693878
| 262
| 0.646048
| 3.456057
| false
| false
| false
| false
|
crestalnetwork/intentkit
|
app/admin/api.py
| 29,057
| 0
|
MIT License
|
import asyncio
import importlib
import json
import logging
from typing import TypedDict
from aiogram import Bot
from aiogram.exceptions import TelegramConflictError, TelegramUnauthorizedError
from aiogram.utils.token import TokenValidationError
from cdp import Wallet
from cdp.cdp import Cdp
from fastapi import (
APIRouter,
Body,
Depends,
File,
HTTPException,
Path,
Response,
UploadFile,
)
from fastapi.responses import PlainTextResponse
from pydantic import BaseModel, Field, ValidationError
from sqlalchemy import select
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm.exc import NoResultFound
from yaml import safe_load
from app.config.config import config
from app.core.engine import clean_agent_memory
from clients.twitter import unlink_twitter
from models.agent import (
Agent,
AgentCreate,
AgentData,
AgentDataTable,
AgentResponse,
AgentTable,
AgentUpdate,
)
from models.db import get_db
from skills import __all__ as skill_categories
from utils.middleware import create_jwt_middleware
from utils.slack_alert import send_slack_message
# Separate routers so read-only endpoints can be mounted/secured
# independently of the mutating admin endpoints.
admin_router_readonly = APIRouter()
admin_router = APIRouter()
# Create JWT middleware with admin config
verify_jwt = create_jwt_middleware(config.admin_auth_enabled, config.admin_jwt_secret)
logger = logging.getLogger(__name__)
async def _process_agent(
    agent: AgentCreate, subject: str | None = None, slack_message: str | None = None
) -> tuple[Agent, AgentData]:
    """Shared function to process agent creation or update.

    Args:
        agent: Agent configuration to process
        subject: Optional subject from JWT token; when set it overrides the owner
        slack_message: Optional custom message for Slack notification

    Returns:
        tuple[Agent, AgentData]: Tuple of (processed agent, agent data)
    """
    logger.info(f"Processing agent: {agent}")
    if subject:
        agent.owner = subject
    # Get the latest agent from create_or_update
    latest_agent, is_new = await agent.create_or_update()
    # Process common post-creation/update steps
    agent_data = await _process_agent_post_actions(latest_agent, is_new, slack_message)
    return latest_agent, agent_data
async def _process_agent_post_actions(
    agent: Agent, is_new: bool = True, slack_message: str | None = None
) -> AgentData:
    """Process common actions after agent creation or update.

    Ensures the agent has a CDP wallet (creating one if missing), unlinks
    Twitter when no twitter entrypoint/skill remains enabled, schedules a
    background memory cleanup for updated agents, and sends a Slack
    notification.

    Args:
        agent: The agent that was created or updated
        is_new: Whether the agent is newly created
        slack_message: Optional custom message for Slack notification

    Returns:
        AgentData: The processed agent data
    """
    has_wallet = False
    agent_data = None
    if not is_new:
        # Get agent data
        agent_data = await AgentData.get(agent.id)
        if agent_data and agent_data.cdp_wallet_data:
            has_wallet = True
            wallet_data = json.loads(agent_data.cdp_wallet_data)
        # Check if twitter need unlink, it will change agent data, so must update agent data
        if agent.twitter_entrypoint_enabled:
            pass
        elif (
            agent.skills
            and agent.skills.get("twitter")
            and agent.skills["twitter"].get("enabled")
        ):
            pass
        else:
            # Neither the entrypoint nor the twitter skill is enabled:
            # drop any existing twitter link.
            if agent_data and agent_data.twitter_username:
                agent_data = await unlink_twitter(agent.id)
        # Run clean_agent_memory in background
        asyncio.create_task(clean_agent_memory(agent.id, clean_agent=True))
    if not has_wallet:
        # create the wallet
        Cdp.configure(
            api_key_name=config.cdp_api_key_name,
            private_key=config.cdp_api_key_private_key.replace("\\n", "\n"),
        )
        network_id = agent.network_id or agent.cdp_network_id
        wallet = Wallet.create(network_id=network_id)
        wallet_data = wallet.export_data().to_dict()
        wallet_data["default_address_id"] = wallet.default_address.address_id
        if not agent_data:
            agent_data = AgentData(id=agent.id, cdp_wallet_data=json.dumps(wallet_data))
        else:
            agent_data.cdp_wallet_data = json.dumps(wallet_data)
        await agent_data.save()
        logger.info(
            "Wallet created for agent %s: %s",
            agent.id,
            wallet_data["default_address_id"],
        )
    # Send Slack notification
    slack_message = slack_message or ("Agent Created" if is_new else "Agent Updated")
    try:
        _send_agent_notification(agent, agent_data, wallet_data, slack_message)
    except Exception as e:
        # Notification failure must not fail the request.
        logger.error("Failed to send Slack notification: %s", e)
    return agent_data
async def _process_telegram_config(
    agent: AgentUpdate, agent_data: AgentData
) -> AgentData:
    """Process telegram configuration for an agent.

    Fetches the bot identity from Telegram using the configured token and
    stores the bot id/username/name on the agent data record. A no-op when
    the telegram entrypoint is disabled or no token is configured.

    Args:
        agent: The agent with telegram configuration
        agent_data: The agent data to update

    Returns:
        AgentData: The updated agent data

    Raises:
        HTTPException: 400 if the telegram token is invalid or unauthorized
        Exception: For any other error while contacting Telegram
    """
    changes = agent.model_dump(exclude_unset=True)
    if not changes.get("telegram_entrypoint_enabled"):
        return agent_data
    # Look the token up once instead of re-traversing the dict.
    telegram_config = changes.get("telegram_config") or {}
    tg_bot_token = telegram_config.get("token")
    if not tg_bot_token:
        return agent_data
    bot = None
    try:
        bot = Bot(token=tg_bot_token)
        bot_info = await bot.get_me()
        agent_data.telegram_id = str(bot_info.id)
        agent_data.telegram_username = bot_info.username
        agent_data.telegram_name = bot_info.first_name
        if bot_info.last_name:
            agent_data.telegram_name = f"{bot_info.first_name} {bot_info.last_name}"
        await agent_data.save()
        return agent_data
    except (
        TelegramUnauthorizedError,
        TelegramConflictError,
        TokenValidationError,
    ) as req_err:
        raise HTTPException(
            status_code=400,
            detail=f"Unauthorized err getting telegram bot username with token {tg_bot_token}: {req_err}",
        ) from req_err
    except Exception as e:
        raise Exception(
            f"Error getting telegram bot username with token {tg_bot_token}: {e}"
        ) from e
    finally:
        # Best-effort close even when get_me()/save() fails; previously the
        # HTTP session was only released on the success path.
        if bot is not None:
            try:
                await bot.close()
            except Exception:
                pass
async def _validate_telegram_config(token: str) -> None:
    """Validate telegram configuration for an agent.

    Args:
        token: The telegram bot token

    Raises:
        HTTPException: 400 if the token is invalid or unauthorized
        Exception: For any other error while contacting Telegram
    """
    bot = None
    try:
        bot = Bot(token=token)
        await bot.get_me()
    except (
        TelegramUnauthorizedError,
        TelegramConflictError,
        TokenValidationError,
    ) as req_err:
        raise HTTPException(
            status_code=400,
            detail=f"Unauthorized err getting telegram bot username with token {token}: {req_err}",
        ) from req_err
    except Exception as e:
        raise Exception(
            f"Error getting telegram bot username with token {token}: {e}"
        ) from e
    finally:
        # Best-effort close on every path; previously a failing get_me()
        # leaked the bot's HTTP session.
        if bot is not None:
            try:
                await bot.close()
            except Exception:
                pass
def _send_agent_notification(
    agent: Agent, agent_data: AgentData, wallet_data: dict, message: str
) -> None:
    """Send a notification about agent creation or update.

    Builds human-readable summaries of the agent's autonomous schedules and
    enabled skills, then posts a Slack message with the key agent fields.

    Args:
        agent: The agent that was created or updated
        agent_data: The agent data to update
        wallet_data: The agent's wallet data
        message: The notification message
    """
    # Format autonomous configurations - show only enabled ones with their id, name, and schedule
    autonomous_formatted = ""
    if agent.autonomous:
        enabled_autonomous = [auto for auto in agent.autonomous if auto.enabled]
        if enabled_autonomous:
            autonomous_items = []
            for auto in enabled_autonomous:
                schedule = (
                    f"cron: {auto.cron}" if auto.cron else f"minutes: {auto.minutes}"
                )
                autonomous_items.append(
                    f"• {auto.id}: {auto.name or 'Unnamed'} ({schedule})"
                )
            autonomous_formatted = "\n".join(autonomous_items)
        else:
            autonomous_formatted = "No enabled autonomous configurations"
    else:
        autonomous_formatted = "None"
    # Format skills - find categories with enabled: true and list skills in public/private states
    skills_formatted = ""
    if agent.skills:
        enabled_categories = []
        for category, skill_config in agent.skills.items():
            if skill_config and skill_config.get("enabled") is True:
                skills_list = []
                states = skill_config.get("states", {})
                public_skills = [
                    skill for skill, state in states.items() if state == "public"
                ]
                private_skills = [
                    skill for skill, state in states.items() if state == "private"
                ]
                if public_skills:
                    skills_list.append(f"  Public: {', '.join(public_skills)}")
                if private_skills:
                    skills_list.append(f"  Private: {', '.join(private_skills)}")
                if skills_list:
                    # chr(10) is "\n": f-strings cannot contain backslashes
                    # in older Python versions.
                    enabled_categories.append(
                        f"• {category}:\n{chr(10).join(skills_list)}"
                    )
        if enabled_categories:
            skills_formatted = "\n".join(enabled_categories)
        else:
            skills_formatted = "No enabled skills"
    else:
        skills_formatted = "None"
    send_slack_message(
        message,
        attachments=[
            {
                "color": "good",
                "fields": [
                    {"title": "ENV", "short": True, "value": config.env},
                    {"title": "Number", "short": True, "value": agent.number},
                    {"title": "ID", "short": True, "value": agent.id},
                    {"title": "Name", "short": True, "value": agent.name},
                    {"title": "Model", "short": True, "value": agent.model},
                    {
                        "title": "GOAT Enabled",
                        "short": True,
                        "value": str(agent.goat_enabled),
                    },
                    {
                        "title": "Twitter Username",
                        "short": True,
                        "value": agent_data.twitter_username,
                    },
                    {
                        "title": "Telegram Enabled",
                        "short": True,
                        "value": str(agent.telegram_entrypoint_enabled),
                    },
                    {
                        "title": "Telegram Username",
                        "short": True,
                        "value": agent_data.telegram_username,
                    },
                    {
                        "title": "Wallet Provider",
                        "short": True,
                        "value": agent.wallet_provider,
                    },
                    {
                        "title": "Network",
                        "short": True,
                        "value": agent.network_id or agent.cdp_network_id or "Default",
                    },
                    {
                        "title": "Wallet Address",
                        "value": wallet_data.get("default_address_id"),
                    },
                    {
                        "title": "Autonomous",
                        "value": autonomous_formatted,
                    },
                    {
                        "title": "Skills",
                        "value": skills_formatted,
                    },
                ],
            }
        ],
    )
@admin_router.post(
    "/agents",
    tags=["Agent"],
    status_code=201,
    operation_id="post_agent_deprecated",
    deprecated=True,
)
async def create_or_update_agent(
    agent: AgentCreate = Body(AgentCreate, description="Agent configuration"),
    subject: str = Depends(verify_jwt),
) -> Response:
    """Create or update an agent.

    THIS ENDPOINT IS DEPRECATED. Please use POST /agents/v2 for creating new agents.

    This endpoint:
    1. Validates agent ID format
    2. Creates or updates agent configuration
    3. Reinitializes agent if already in cache
    4. Masks sensitive data in response

    **Request Body:**
    * `agent` - Agent configuration

    **Returns:**
    * `AgentResponse` - Updated agent configuration with additional processed data

    **Raises:**
    * `HTTPException`:
        - 400: Invalid agent ID format
        - 500: Database error
    """
    latest_agent, agent_data = await _process_agent(agent, subject)
    agent_response = AgentResponse.from_agent(latest_agent, agent_data)
    # Return Response with ETag header
    return Response(
        content=agent_response.model_dump_json(),
        media_type="application/json",
        headers={"ETag": agent_response.etag()},
    )
@admin_router_readonly.post(
    "/agent/validate",
    tags=["Agent"],
    status_code=204,
    operation_id="validate_agent",
)
async def validate_agent(
    input: AgentUpdate = Body(AgentUpdate, description="Agent configuration"),
) -> Response:
    """Validate agent configuration.

    **Request Body:**
    * `agent` - Agent configuration

    **Returns:**
    * `204 No Content` - Agent configuration is valid

    **Raises:**
    * `HTTPException`:
        - 400: Invalid agent configuration
        - 500: Server error
    """
    input.validate_autonomous_schedule()
    changes = input.model_dump(exclude_unset=True)
    # When the telegram entrypoint is enabled and a token is supplied,
    # verify the token against the Telegram API.
    telegram_config = changes.get("telegram_config") or {}
    token = telegram_config.get("token")
    if changes.get("telegram_entrypoint_enabled") and token:
        await _validate_telegram_config(token)
    return Response(status_code=204)
@admin_router.post(
    "/agents/v2",
    tags=["Agent"],
    status_code=201,
    operation_id="create_agent",
    response_model=AgentResponse,
)
async def create_agent(
    input: AgentUpdate = Body(AgentUpdate, description="Agent configuration"),
    subject: str = Depends(verify_jwt),
) -> Response:
    """Create a new agent.

    This endpoint:
    1. Validates agent ID format
    2. Creates a new agent configuration (returns 400 error if agent ID already exists)
    3. Masks sensitive data in response

    **Request Body:**
    * `agent` - Agent configuration

    **Returns:**
    * `AgentResponse` - Created agent configuration with additional processed data

    **Raises:**
    * `HTTPException`:
        - 400: Invalid agent ID format or agent ID already exists
        - 500: Database error
    """
    agent = AgentCreate.model_validate(input)
    if subject:
        agent.owner = subject
    # Create new agent
    await agent.check_upstream_id()
    latest_agent = await agent.create()
    # Process common post-creation actions
    agent_data = await _process_agent_post_actions(latest_agent, True, "Agent Created")
    agent_data = await _process_telegram_config(input, agent_data)
    agent_response = AgentResponse.from_agent(latest_agent, agent_data)
    # Return Response with ETag header
    return Response(
        content=agent_response.model_dump_json(),
        media_type="application/json",
        headers={"ETag": agent_response.etag()},
    )
@admin_router.patch(
    "/agents/{agent_id}", tags=["Agent"], status_code=200, operation_id="update_agent"
)
async def update_agent(
    agent_id: str = Path(..., description="ID of the agent to update"),
    agent: AgentUpdate = Body(AgentUpdate, description="Agent update configuration"),
    subject: str = Depends(verify_jwt),
) -> Response:
    """Update an existing agent.

    This endpoint:
    1. Validates agent ID format
    2. Updates the agent configuration if it exists
    3. Reinitializes agent if already in cache
    4. Masks sensitive data in response

    **Path Parameters:**
    * `agent_id` - ID of the agent to update

    **Request Body:**
    * `agent` - Agent update configuration

    **Returns:**
    * `AgentResponse` - Updated agent configuration with additional processed data

    **Raises:**
    * `HTTPException`:
        - 400: Invalid agent ID format
        - 404: Agent not found
        - 403: Permission denied (if owner mismatch)
        - 500: Database error
    """
    if subject:
        agent.owner = subject
    # Update agent
    latest_agent = await agent.update(agent_id)
    # Process common post-update actions
    agent_data = await _process_agent_post_actions(latest_agent, False, "Agent Updated")
    agent_data = await _process_telegram_config(agent, agent_data)
    agent_response = AgentResponse.from_agent(latest_agent, agent_data)
    # Return Response with ETag header
    return Response(
        content=agent_response.model_dump_json(),
        media_type="application/json",
        headers={"ETag": agent_response.etag()},
    )
@admin_router_readonly.get(
    "/agents",
    tags=["Agent"],
    dependencies=[Depends(verify_jwt)],
    operation_id="get_agents",
)
async def get_agents(db: AsyncSession = Depends(get_db)) -> list[AgentResponse]:
    """Get all agents with their quota information.

    **Returns:**
    * `list[AgentResponse]` - List of agents with their quota information and additional processed data
    """
    # Load every agent row.
    agent_rows = (await db.scalars(select(AgentTable))).all()
    # Fetch all matching AgentData rows with a single batched query and
    # index them by agent id.
    ids = [row.id for row in agent_rows]
    data_rows = await db.scalars(
        select(AgentDataTable).where(AgentDataTable.id.in_(ids))
    )
    data_by_id = {row.id: row for row in data_rows}
    # Pair each agent with its (optional) data record.
    responses: list[AgentResponse] = []
    for row in agent_rows:
        data_row = data_by_id.get(row.id)
        extra = AgentData.model_validate(data_row) if data_row is not None else None
        responses.append(AgentResponse.from_agent(Agent.model_validate(row), extra))
    return responses
@admin_router_readonly.get(
    "/agents/{agent_id}",
    tags=["Agent"],
    dependencies=[Depends(verify_jwt)],
    operation_id="get_agent",
)
async def get_agent(
    agent_id: str = Path(..., description="ID of the agent to retrieve"),
) -> Response:
    """Get a single agent by ID.

    **Path Parameters:**
    * `agent_id` - ID of the agent to retrieve

    **Returns:**
    * `AgentResponse` - Agent configuration with additional processed data

    **Raises:**
    * `HTTPException`:
        - 404: Agent not found
    """
    found = await Agent.get(agent_id)
    if found is None:
        raise HTTPException(status_code=404, detail="Agent not found")
    # Get agent data
    extra = await AgentData.get(agent_id)
    agent_response = AgentResponse.from_agent(found, extra)
    # Serve with an ETag so clients can cache the representation.
    return Response(
        content=agent_response.model_dump_json(),
        headers={"ETag": agent_response.etag()},
        media_type="application/json",
    )
class MemCleanRequest(BaseModel):
    """Request model for agent memory cleanup endpoint.

    Attributes:
        agent_id (str): Agent ID to clean
        clean_agent_memory (bool): Whether to clean the agent memory
        clean_skills_memory (bool): Whether to clean the skills data
        chat_id (str | None): Optional chat ID to clean; defaults to ""
    """

    agent_id: str
    clean_agent_memory: bool
    clean_skills_memory: bool
    chat_id: str | None = Field("")
@admin_router.post(
    "/agent/clean-memory",
    tags=["Agent"],
    status_code=204,
    dependencies=[Depends(verify_jwt)],
    operation_id="clean_agent_memory",
)
@admin_router.post(
    "/agents/clean-memory",
    tags=["Agent"],
    status_code=201,
    dependencies=[Depends(verify_jwt)],
    operation_id="clean_agent_memory_deprecated",
    deprecated=True,
)
async def clean_memory(
    request: MemCleanRequest = Body(
        MemCleanRequest, description="Agent memory cleanup request"
    ),
):
    """Clear an agent memory.

    **Request Body:**
    * `request` - The execution request containing agent ID, message, and thread ID

    **Returns:**
    * `str` - Formatted response lines from agent memory cleanup

    **Raises:**
    * `HTTPException`:
        - 400: If input parameters are invalid (empty agent_id, thread_id, or message text)
        - 404: If agent not found
        - 500: For other server-side errors
    """
    # Validate input parameters
    if not request.agent_id or not request.agent_id.strip():
        raise HTTPException(status_code=400, detail="Agent ID cannot be empty")
    try:
        agent = await Agent.get(request.agent_id)
        if not agent:
            raise HTTPException(
                status_code=404,
                detail=f"Agent with id {request.agent_id} not found",
            )
        await clean_agent_memory(
            request.agent_id,
            request.chat_id,
            clean_agent=request.clean_agent_memory,
            clean_skill=request.clean_skills_memory,
        )
    except HTTPException:
        # Propagate intentional HTTP errors (e.g. the 404 above) unchanged.
        # Previously they fell through to the generic Exception handler and
        # were converted into a 500 "Server error".
        raise
    except NoResultFound:
        raise HTTPException(
            status_code=404, detail=f"Agent {request.agent_id} not found"
        )
    except SQLAlchemyError as e:
        raise HTTPException(status_code=500, detail=f"Database error: {str(e)}") from e
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e)) from e
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Server error: {str(e)}") from e
@admin_router_readonly.get(
    "/agents/{agent_id}/export",
    tags=["Agent"],
    operation_id="export_agent",
    dependencies=[Depends(verify_jwt)],
)
async def export_agent(
    agent_id: str = Path(..., description="ID of the agent to export"),
) -> Response:
    """Export agent configuration as YAML.

    Before serializing, fills in every known skill category with defaults
    (enabled flag, per-skill states, and zero-values for required config
    fields) so the exported YAML is a complete editable template.

    **Path Parameters:**
    * `agent_id` - ID of the agent to export

    **Returns:**
    * YAML configuration of the agent, served as a file attachment

    **Raises:**
    * `HTTPException`:
        - 404: Agent not found
    """
    agent = await Agent.get(agent_id)
    if not agent:
        raise HTTPException(status_code=404, detail="Agent not found")
    # Ensure agent.skills is initialized
    if agent.skills is None:
        agent.skills = {}
    # Process all skill categories
    for category in skill_categories:
        try:
            # Dynamically import the skill module
            skill_module = importlib.import_module(f"skills.{category}")
            # Check if the module has a Config class and get_skills function
            if hasattr(skill_module, "Config") and hasattr(skill_module, "get_skills"):
                # Get or create the config for this category
                category_config = agent.skills.get(category, {})
                # Ensure 'enabled' field exists (required by SkillConfig)
                if "enabled" not in category_config:
                    category_config["enabled"] = False
                # Ensure states dict exists
                if "states" not in category_config:
                    category_config["states"] = {}
                # Get all available skill states from the module
                available_skills = []
                if hasattr(skill_module, "SkillStates") and hasattr(
                    skill_module.SkillStates, "__annotations__"
                ):
                    available_skills = list(
                        skill_module.SkillStates.__annotations__.keys()
                    )
                # Add missing skills with disabled state
                for skill_name in available_skills:
                    if skill_name not in category_config["states"]:
                        category_config["states"][skill_name] = "disabled"
                # Get all required fields from Config class and its base classes
                config_class = skill_module.Config
                # Get all base classes of Config
                all_bases = [config_class]
                for base in config_class.__mro__[1:]:
                    if base is TypedDict or base is dict or base is object:
                        continue
                    all_bases.append(base)
                # Collect all required fields from Config and its base classes
                for base in all_bases:
                    if hasattr(base, "__annotations__"):
                        for field_name, field_type in base.__annotations__.items():
                            # Skip fields already set or marked as NotRequired
                            if field_name in category_config or "NotRequired" in str(
                                field_type
                            ):
                                continue
                            # Add default value based on type
                            if field_name != "states":  # states already handled above
                                if "str" in str(field_type):
                                    category_config[field_name] = ""
                                elif "bool" in str(field_type):
                                    category_config[field_name] = False
                                elif "int" in str(field_type):
                                    category_config[field_name] = 0
                                elif "float" in str(field_type):
                                    category_config[field_name] = 0.0
                                elif "list" in str(field_type) or "List" in str(
                                    field_type
                                ):
                                    category_config[field_name] = []
                                elif "dict" in str(field_type) or "Dict" in str(
                                    field_type
                                ):
                                    category_config[field_name] = {}
                # Update the agent's skills config
                agent.skills[category] = category_config
        except (ImportError, AttributeError):
            # Skip if module import fails or doesn't have required components
            pass
    yaml_content = agent.to_yaml()
    return Response(
        content=yaml_content,
        media_type="application/x-yaml",
        headers={"Content-Disposition": f'attachment; filename="{agent_id}.yaml"'},
    )
@admin_router.put(
    "/agents/{agent_id}/import",
    tags=["Agent"],
    operation_id="import_agent",
    response_class=PlainTextResponse,
)
async def import_agent(
    agent_id: str = Path(...),
    file: UploadFile = File(
        ..., description="YAML file containing agent configuration"
    ),
    subject: str = Depends(verify_jwt),
) -> str:
    """Import agent configuration from YAML file.

    Only updates existing agents, will not create new ones.

    **Path Parameters:**
    * `agent_id` - ID of the agent to update

    **Request Body:**
    * `file` - YAML file containing agent configuration

    **Returns:**
    * `str` - Success message

    **Raises:**
    * `HTTPException`:
        - 400: Invalid YAML or agent configuration
        - 404: Agent not found
        - 500: Server error
    """
    # First check if agent exists
    existing_agent = await Agent.get(agent_id)
    if not existing_agent:
        raise HTTPException(status_code=404, detail="Agent not found")
    # Read and parse YAML
    content = await file.read()
    try:
        yaml_data = safe_load(content)
    except Exception as e:
        raise HTTPException(status_code=400, detail=f"Invalid YAML format: {e}")
    # Create Agent instance from YAML
    try:
        agent = AgentUpdate.model_validate(yaml_data)
    except ValidationError as e:
        raise HTTPException(status_code=400, detail=f"Invalid agent configuration: {e}")
    # Get the latest agent from create_or_update
    latest_agent = await agent.update(agent_id)
    # Process common post-creation/update steps
    agent_data = await _process_agent_post_actions(
        latest_agent, False, "Agent Updated via YAML Import"
    )
    await _process_telegram_config(agent, agent_data)
    return "Agent import successful"
@admin_router.put(
    "/agents/{agent_id}/twitter/unlink",
    tags=["Agent"],
    operation_id="unlink_twitter",
    dependencies=[Depends(verify_jwt)],
    response_class=Response,
)
async def unlink_twitter_endpoint(
    agent_id: str = Path(..., description="ID of the agent to unlink from Twitter"),
) -> Response:
    """Unlink Twitter from an agent.

    **Path Parameters:**
    * `agent_id` - ID of the agent to unlink from Twitter

    **Raises:**
    * `HTTPException`:
        - 404: Agent not found
    """
    # The agent must exist before its twitter link can be removed.
    found = await Agent.get(agent_id)
    if not found:
        raise HTTPException(status_code=404, detail="Agent not found")
    # Delegate the actual unlink to clients.twitter, which returns the
    # refreshed agent data.
    refreshed_data = await unlink_twitter(agent_id)
    payload = AgentResponse.from_agent(found, refreshed_data)
    return Response(
        content=payload.model_dump_json(),
        headers={"ETag": payload.etag()},
        media_type="application/json",
    )
| 6,813
|
b7c0c7b4f2116e0390c1f2f1d072ec17465497bfec0a45bc2869078d91f3a458
| 31.86991
| 106
| 0.588808
| 4.264935
| false
| true
| false
| false
|
fudan-generative-vision/hallo2
|
basicsr/data/__init__.py
| 4,254
| 0
|
MIT License
|
import importlib
import numpy as np
import random
import torch
import torch.utils.data
from copy import deepcopy
from functools import partial
from os import path as osp
from basicsr.data.prefetch_dataloader import PrefetchDataLoader
from basicsr.utils import get_root_logger, scandir
from basicsr.utils.dist_util import get_dist_info
from basicsr.utils.registry import DATASET_REGISTRY
__all__ = ['build_dataset', 'build_dataloader']
# automatically scan and import dataset modules for registry
# scan all the files under the data folder with '_dataset' in file names
data_folder = osp.dirname(osp.abspath(__file__))
dataset_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(data_folder) if v.endswith('_dataset.py')]
# import all the dataset modules
# (importing each module registers its datasets in DATASET_REGISTRY as a side effect)
_dataset_modules = [importlib.import_module(f'basicsr.data.{file_name}') for file_name in dataset_filenames]
def build_dataset(dataset_opt):
    """Build dataset from options.

    Args:
        dataset_opt (dict): Configuration for dataset. It must contain:
            name (str): Dataset name.
            type (str): Dataset type (key into DATASET_REGISTRY).

    Returns:
        torch.utils.data.Dataset: The constructed dataset instance.
    """
    # Deep-copy so the registry constructor cannot mutate the caller's opts.
    dataset_opt = deepcopy(dataset_opt)
    dataset = DATASET_REGISTRY.get(dataset_opt['type'])(dataset_opt)
    logger = get_root_logger()
    logger.info(f'Dataset [{dataset.__class__.__name__}] - {dataset_opt["name"]} ' 'is built.')
    return dataset
def build_dataloader(dataset, dataset_opt, num_gpu=1, dist=False, sampler=None, seed=None):
    """Build dataloader.

    Args:
        dataset (torch.utils.data.Dataset): Dataset.
        dataset_opt (dict): Dataset options. It contains the following keys:
            phase (str): 'train' or 'val'.
            num_worker_per_gpu (int): Number of workers for each GPU.
            batch_size_per_gpu (int): Training batch size for each GPU.
        num_gpu (int): Number of GPUs. Used only in the train phase.
            Default: 1.
        dist (bool): Whether in distributed training. Used only in the train
            phase. Default: False.
        sampler (torch.utils.data.sampler): Data sampler. Default: None.
        seed (int | None): Seed. Default: None

    Returns:
        torch.utils.data.DataLoader | PrefetchDataLoader: The dataloader
        (a PrefetchDataLoader when prefetch_mode == 'cpu').

    Raises:
        ValueError: If ``dataset_opt['phase']`` is not one of
            'train', 'val', 'test'.
    """
    phase = dataset_opt['phase']
    rank, _ = get_dist_info()
    if phase == 'train':
        if dist:  # distributed training
            batch_size = dataset_opt['batch_size_per_gpu']
            num_workers = dataset_opt['num_worker_per_gpu']
        else:  # non-distributed training
            # Scale batch size/workers by GPU count (DataParallel-style).
            multiplier = 1 if num_gpu == 0 else num_gpu
            batch_size = dataset_opt['batch_size_per_gpu'] * multiplier
            num_workers = dataset_opt['num_worker_per_gpu'] * multiplier
        dataloader_args = dict(
            dataset=dataset,
            batch_size=batch_size,
            shuffle=False,
            num_workers=num_workers,
            sampler=sampler,
            drop_last=True)
        if sampler is None:
            # No custom sampler: fall back to plain shuffling.
            dataloader_args['shuffle'] = True
        dataloader_args['worker_init_fn'] = partial(
            worker_init_fn, num_workers=num_workers, rank=rank, seed=seed) if seed is not None else None
    elif phase in ['val', 'test']:  # validation
        dataloader_args = dict(dataset=dataset, batch_size=1, shuffle=False, num_workers=0)
    else:
        raise ValueError(f'Wrong dataset phase: {phase}. ' "Supported ones are 'train', 'val' and 'test'.")
    dataloader_args['pin_memory'] = dataset_opt.get('pin_memory', False)
    prefetch_mode = dataset_opt.get('prefetch_mode')
    if prefetch_mode == 'cpu':  # CPUPrefetcher
        num_prefetch_queue = dataset_opt.get('num_prefetch_queue', 1)
        logger = get_root_logger()
        logger.info(f'Use {prefetch_mode} prefetch dataloader: ' f'num_prefetch_queue = {num_prefetch_queue}')
        return PrefetchDataLoader(num_prefetch_queue=num_prefetch_queue, **dataloader_args)
    else:
        # prefetch_mode=None: Normal dataloader
        # prefetch_mode='cuda': dataloader for CUDAPrefetcher
        return torch.utils.data.DataLoader(**dataloader_args)
def worker_init_fn(worker_id, num_workers, rank, seed):
    """Seed a dataloader worker deterministically.

    The per-worker seed is ``num_workers * rank + worker_id + seed`` so
    each (rank, worker) pair draws from its own random stream.
    """
    per_worker_seed = seed + worker_id + rank * num_workers
    np.random.seed(per_worker_seed)
    random.seed(per_worker_seed)
| 1,161
|
3214fde7389be3f63e073e38f206d7448efe6d9b49bab3fd55f2eb1dac16216a
| 41.54
| 113
| 0.657029
| 3.664083
| false
| false
| false
| false
|
meta-llama/llama-stack
|
llama_stack/providers/remote/inference/nvidia/openai_utils.py
| 7,685
| 0
|
MIT License
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import warnings
from typing import Any, AsyncGenerator, Dict, List, Optional
from openai import AsyncStream
from openai.types.chat.chat_completion import (
Choice as OpenAIChoice,
)
from openai.types.completion import Completion as OpenAICompletion
from openai.types.completion_choice import Logprobs as OpenAICompletionLogprobs
from llama_stack.apis.inference import (
ChatCompletionRequest,
CompletionRequest,
CompletionResponse,
CompletionResponseStreamChunk,
GreedySamplingStrategy,
JsonSchemaResponseFormat,
TokenLogProbs,
TopKSamplingStrategy,
TopPSamplingStrategy,
)
from llama_stack.providers.utils.inference.openai_compat import (
_convert_openai_finish_reason,
convert_message_to_openai_dict_new,
convert_tooldef_to_openai_tool,
)
async def convert_chat_completion_request(
    request: ChatCompletionRequest,
    n: int = 1,
) -> dict:
    """
    Convert a ChatCompletionRequest to an OpenAI API-compatible dictionary.

    NVIDIA-specific options (top_k, repetition_penalty, guided_json) are
    placed in the ``nvext`` extra-body dict; standard OpenAI fields go in
    the top-level payload. Raises ValueError for unsupported response
    formats or sampling strategies.
    """
    # model -> model
    # messages -> messages
    # sampling_params  TODO(mattf): review strategy
    #  strategy=greedy -> nvext.top_k = -1, temperature = temperature
    #  strategy=top_p -> nvext.top_k = -1, top_p = top_p
    #  strategy=top_k -> nvext.top_k = top_k
    #  temperature -> temperature
    #  top_p -> top_p
    #  top_k -> nvext.top_k
    #  max_tokens -> max_tokens
    #  repetition_penalty -> nvext.repetition_penalty
    # response_format -> GrammarResponseFormat TODO(mf)
    # response_format -> JsonSchemaResponseFormat: response_format = "json_object" & nvext["guided_json"] = json_schema
    # tools -> tools
    # tool_choice ("auto", "required") -> tool_choice
    # tool_prompt_format -> TBD
    # stream -> stream
    # logprobs -> logprobs
    if request.response_format and not isinstance(request.response_format, JsonSchemaResponseFormat):
        raise ValueError(
            f"Unsupported response format: {request.response_format}. Only JsonSchemaResponseFormat is supported."
        )
    # nvext is shared by reference with the payload; updates below flow into it.
    nvext = {}
    payload: Dict[str, Any] = dict(
        model=request.model,
        messages=[await convert_message_to_openai_dict_new(message) for message in request.messages],
        stream=request.stream,
        n=n,
        extra_body=dict(nvext=nvext),
        extra_headers={
            b"User-Agent": b"llama-stack: nvidia-inference-adapter",
        },
    )
    if request.response_format:
        # server bug - setting guided_json changes the behavior of response_format resulting in an error
        # payload.update(response_format="json_object")
        nvext.update(guided_json=request.response_format.json_schema)
    if request.tools:
        payload.update(tools=[convert_tooldef_to_openai_tool(tool) for tool in request.tools])
        if request.tool_config.tool_choice:
            payload.update(
                tool_choice=request.tool_config.tool_choice.value
            )  # we cannot include tool_choice w/o tools, server will complain
    if request.logprobs:
        payload.update(logprobs=True)
        payload.update(top_logprobs=request.logprobs.top_k)
    if request.sampling_params:
        nvext.update(repetition_penalty=request.sampling_params.repetition_penalty)
        if request.sampling_params.max_tokens:
            payload.update(max_tokens=request.sampling_params.max_tokens)
        # Dispatch on the typed sampling strategy object.
        strategy = request.sampling_params.strategy
        if isinstance(strategy, TopPSamplingStrategy):
            nvext.update(top_k=-1)
            payload.update(top_p=strategy.top_p)
            payload.update(temperature=strategy.temperature)
        elif isinstance(strategy, TopKSamplingStrategy):
            if strategy.top_k != -1 and strategy.top_k < 1:
                warnings.warn("top_k must be -1 or >= 1", stacklevel=2)
            nvext.update(top_k=strategy.top_k)
        elif isinstance(strategy, GreedySamplingStrategy):
            nvext.update(top_k=-1)
        else:
            raise ValueError(f"Unsupported sampling strategy: {strategy}")
    return payload
def convert_completion_request(
    request: CompletionRequest,
    n: int = 1,
) -> dict:
    """
    Convert a CompletionRequest to an OpenAI API-compatible dictionary.

    Mapping:
     model -> model
     prompt -> prompt
     sampling_params
      strategy=greedy -> nvext.top_k = -1
      strategy=top_p -> nvext.top_k = -1, top_p = top_p, temperature = temperature
      strategy=top_k -> nvext.top_k = top_k
      max_tokens -> max_tokens
      repetition_penalty -> nvext.repetition_penalty
     response_format -> nvext.guided_json
     stream -> stream
     logprobs.top_k -> logprobs
    """
    nvext = {}
    payload: Dict[str, Any] = dict(
        model=request.model,
        prompt=request.content,
        stream=request.stream,
        extra_body=dict(nvext=nvext),
        extra_headers={
            b"User-Agent": b"llama-stack: nvidia-inference-adapter",
        },
        n=n,
    )
    if request.response_format:
        # guided_json is a NIM extension, not an OpenAI-compliant field
        nvext.update(guided_json=request.response_format.json_schema)
    if request.logprobs:
        payload.update(logprobs=request.logprobs.top_k)
    if request.sampling_params:
        nvext.update(repetition_penalty=request.sampling_params.repetition_penalty)
        if request.sampling_params.max_tokens:
            payload.update(max_tokens=request.sampling_params.max_tokens)
        # Dispatch on the strategy *object*, mirroring convert_chat_completion_request
        # above. The previous string comparisons (strategy == "top_p" etc.) could not
        # match, because SamplingParams.strategy is a strategy instance, not a string,
        # and top_p/top_k/temperature live on the strategy object.
        strategy = request.sampling_params.strategy
        if isinstance(strategy, TopPSamplingStrategy):
            nvext.update(top_k=-1)  # top_k=-1 disables top-k so only top-p applies
            payload.update(top_p=strategy.top_p)
            payload.update(temperature=strategy.temperature)
        elif isinstance(strategy, TopKSamplingStrategy):
            if strategy.top_k != -1 and strategy.top_k < 1:
                warnings.warn("top_k must be -1 or >= 1", stacklevel=2)
            nvext.update(top_k=strategy.top_k)
        elif isinstance(strategy, GreedySamplingStrategy):
            nvext.update(top_k=-1)
        else:
            raise ValueError(f"Unsupported sampling strategy: {strategy}")
    return payload
def _convert_openai_completion_logprobs(
logprobs: Optional[OpenAICompletionLogprobs],
) -> Optional[List[TokenLogProbs]]:
"""
Convert an OpenAI CompletionLogprobs into a list of TokenLogProbs.
"""
if not logprobs:
return None
return [TokenLogProbs(logprobs_by_token=logprobs) for logprobs in logprobs.top_logprobs]
def convert_openai_completion_choice(
    choice: OpenAIChoice,
) -> CompletionResponse:
    """
    Convert an OpenAI Completion Choice into a CompletionResponse.
    """
    # Name the two converted pieces before assembling the response.
    stop_reason = _convert_openai_finish_reason(choice.finish_reason)
    token_logprobs = _convert_openai_completion_logprobs(choice.logprobs)
    return CompletionResponse(
        content=choice.text,
        stop_reason=stop_reason,
        logprobs=token_logprobs,
    )
async def convert_openai_completion_stream(
    stream: AsyncStream[OpenAICompletion],
) -> AsyncGenerator[CompletionResponseStreamChunk, None]:
    """
    Convert a stream of OpenAI Completions into a stream
    of CompletionResponseStreamChunks.
    """
    async for chunk in stream:
        # Only choices[0] is consumed from each streamed chunk.
        choice = chunk.choices[0]
        yield CompletionResponseStreamChunk(
            delta=choice.text,
            stop_reason=_convert_openai_finish_reason(choice.finish_reason),
            logprobs=_convert_openai_completion_logprobs(choice.logprobs),
        )
| 2,104
|
f82873fb13c906f185bfab605e378ce0141d4c304d83760fab397b7ef7f0d0ea
| 34.578704
| 119
| 0.668705
| 3.652567
| false
| false
| false
| false
|
trycua/cua
|
libs/agent/agent/providers/anthropic/types.py
| 421
| 0
|
MIT License
|
from enum import StrEnum
class LLMProvider(StrEnum):
    """Enum for supported API providers.

    As a StrEnum, each member compares equal to (and serializes as) its
    string value, so members can be used directly where a provider name
    string is expected.
    """
    ANTHROPIC = "anthropic"
    BEDROCK = "bedrock"
    VERTEX = "vertex"
# Default model identifier used for each provider when the caller does not
# specify one. Bedrock/Vertex use their own platform-specific model naming.
PROVIDER_TO_DEFAULT_MODEL_NAME: dict[LLMProvider, str] = {
    LLMProvider.ANTHROPIC: "claude-3-7-sonnet-20250219",
    LLMProvider.BEDROCK: "anthropic.claude-3-7-sonnet-20250219-v2:0",
    LLMProvider.VERTEX: "claude-3-5-sonnet-v2@20241022",
}
| 161
|
9f0fefaa638a906a2dee096912b6fa6069b5ff916d47297342eb3b0f1cabe484
| 25.3125
| 69
| 0.700713
| 2.614907
| false
| false
| false
| false
|
meta-llama/llama-stack
|
llama_stack/providers/utils/telemetry/sqlite_trace_store.py
| 6,946
| 0
|
MIT License
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import json
from datetime import datetime
from typing import Dict, List, Optional, Protocol
import aiosqlite
from llama_stack.apis.telemetry import QueryCondition, Span, SpanWithStatus, Trace
class TraceStore(Protocol):
    """Structural interface for telemetry trace storage backends."""
    # Return traces whose spans satisfy all attribute_filters, paginated
    # by limit/offset and ordered by the given trace columns.
    async def query_traces(
        self,
        attribute_filters: Optional[List[QueryCondition]] = None,
        limit: Optional[int] = 100,
        offset: Optional[int] = 0,
        order_by: Optional[List[str]] = None,
    ) -> List[Trace]: ...
    # Return the span identified by span_id together with its descendants,
    # keyed by span_id, optionally limited to max_depth levels.
    async def get_span_tree(
        self,
        span_id: str,
        attributes_to_return: Optional[List[str]] = None,
        max_depth: Optional[int] = None,
    ) -> Dict[str, SpanWithStatus]: ...
class SQLiteTraceStore(TraceStore):
    """SQLite-backed implementation of the TraceStore protocol.

    A fresh aiosqlite connection is opened per call; ``conn_string`` is the
    database path/URI handed to ``aiosqlite.connect``.
    """
    def __init__(self, conn_string: str):
        self.conn_string = conn_string
    async def query_traces(
        self,
        attribute_filters: Optional[List[QueryCondition]] = None,
        limit: Optional[int] = 100,
        offset: Optional[int] = 0,
        order_by: Optional[List[str]] = None,
    ) -> List[Trace]:
        """Return traces whose spans match all attribute_filters.

        ``order_by`` entries are trace column names; a leading ``-`` means
        descending order.
        """
        def build_where_clause() -> tuple[str, list]:
            # Filter *values* are bound as SQL parameters; the attribute keys
            # and operators are interpolated into the query text.
            # NOTE(review): condition.key is embedded unescaped in the
            # json_extract path — assumes keys come from trusted callers.
            if not attribute_filters:
                return "", []
            ops_map = {"eq": "=", "ne": "!=", "gt": ">", "lt": "<"}
            conditions = [
                f"json_extract(s.attributes, '$.{condition.key}') {ops_map[condition.op.value]} ?"
                for condition in attribute_filters
            ]
            params = [condition.value for condition in attribute_filters]
            where_clause = " WHERE " + " AND ".join(conditions)
            return where_clause, params
        def build_order_clause() -> str:
            # "-field" sorts descending; "field" ascending.
            if not order_by:
                return ""
            order_clauses = []
            for field in order_by:
                desc = field.startswith("-")
                clean_field = field[1:] if desc else field
                order_clauses.append(f"t.{clean_field} {'DESC' if desc else 'ASC'}")
            return " ORDER BY " + ", ".join(order_clauses)
        # Build the main query
        # First CTE finds trace ids with at least one matching span; second
        # re-joins to traces (and spans, for ordering) before pagination.
        base_query = """
            WITH matching_traces AS (
                SELECT DISTINCT t.trace_id
                FROM traces t
                JOIN spans s ON t.trace_id = s.trace_id
                {where_clause}
            ),
            filtered_traces AS (
                SELECT t.trace_id, t.root_span_id, t.start_time, t.end_time
                FROM matching_traces mt
                JOIN traces t ON mt.trace_id = t.trace_id
                LEFT JOIN spans s ON t.trace_id = s.trace_id
                {order_clause}
            )
            SELECT DISTINCT trace_id, root_span_id, start_time, end_time
            FROM filtered_traces
            LIMIT {limit} OFFSET {offset}
        """
        # limit/offset are formatted into the SQL text rather than bound;
        # they are ints per the signature, so no injection surface here.
        where_clause, params = build_where_clause()
        query = base_query.format(
            where_clause=where_clause,
            order_clause=build_order_clause(),
            limit=limit,
            offset=offset,
        )
        # Execute query and return results
        async with aiosqlite.connect(self.conn_string) as conn:
            conn.row_factory = aiosqlite.Row
            async with conn.execute(query, params) as cursor:
                rows = await cursor.fetchall()
                return [
                    Trace(
                        trace_id=row["trace_id"],
                        root_span_id=row["root_span_id"],
                        start_time=datetime.fromisoformat(row["start_time"]),
                        end_time=datetime.fromisoformat(row["end_time"]),
                    )
                    for row in rows
                ]
    async def get_span_tree(
        self,
        span_id: str,
        attributes_to_return: Optional[List[str]] = None,
        max_depth: Optional[int] = None,
    ) -> Dict[str, SpanWithStatus]:
        """Return span_id and its descendants (to max_depth), keyed by span id.

        When attributes_to_return is given, each span's attributes are reduced
        to just those keys via json_object/json_extract.
        """
        # Build the attributes selection
        # NOTE(review): keys are interpolated into the SQL unescaped — assumes
        # trusted callers, same as query_traces.
        attributes_select = "s.attributes"
        if attributes_to_return:
            json_object = ", ".join(f"'{key}', json_extract(s.attributes, '$.{key}')" for key in attributes_to_return)
            attributes_select = f"json_object({json_object})"
        # SQLite CTE query with filtered attributes
        # Recursive CTE: seed with the requested span, then repeatedly join
        # children (parent_span_id -> span_id) while depth < max_depth.
        query = f"""
            WITH RECURSIVE span_tree AS (
                SELECT s.*, 1 as depth, {attributes_select} as filtered_attributes
                FROM spans s
                WHERE s.span_id = ?
                UNION ALL
                SELECT s.*, st.depth + 1, {attributes_select} as filtered_attributes
                FROM spans s
                JOIN span_tree st ON s.parent_span_id = st.span_id
                WHERE (? IS NULL OR st.depth < ?)
            )
            SELECT *
            FROM span_tree
            ORDER BY depth, start_time
        """
        spans_by_id = {}
        async with aiosqlite.connect(self.conn_string) as conn:
            conn.row_factory = aiosqlite.Row
            # max_depth is bound twice: once for the NULL check, once for the
            # depth comparison.
            async with conn.execute(query, (span_id, max_depth, max_depth)) as cursor:
                rows = await cursor.fetchall()
                if not rows:
                    raise ValueError(f"Span {span_id} not found")
                for row in rows:
                    span = SpanWithStatus(
                        span_id=row["span_id"],
                        trace_id=row["trace_id"],
                        parent_span_id=row["parent_span_id"],
                        name=row["name"],
                        start_time=datetime.fromisoformat(row["start_time"]),
                        end_time=datetime.fromisoformat(row["end_time"]),
                        attributes=json.loads(row["filtered_attributes"]),
                        status=row["status"].lower(),
                    )
                    spans_by_id[span.span_id] = span
                return spans_by_id
    async def get_trace(self, trace_id: str) -> Trace:
        """Fetch a single trace row by id; raises ValueError if absent."""
        query = "SELECT * FROM traces WHERE trace_id = ?"
        async with aiosqlite.connect(self.conn_string) as conn:
            conn.row_factory = aiosqlite.Row
            async with conn.execute(query, (trace_id,)) as cursor:
                row = await cursor.fetchone()
                if row is None:
                    raise ValueError(f"Trace {trace_id} not found")
                # NOTE(review): raw column values (e.g. ISO timestamp strings)
                # are passed straight to Trace, unlike query_traces which
                # parses them with datetime.fromisoformat — presumably the
                # model coerces them; confirm.
                return Trace(**row)
    async def get_span(self, trace_id: str, span_id: str) -> Span:
        """Fetch a single span row by (trace_id, span_id); raises ValueError if absent."""
        query = "SELECT * FROM spans WHERE trace_id = ? AND span_id = ?"
        async with aiosqlite.connect(self.conn_string) as conn:
            conn.row_factory = aiosqlite.Row
            async with conn.execute(query, (trace_id, span_id)) as cursor:
                row = await cursor.fetchone()
                if row is None:
                    raise ValueError(f"Span {span_id} not found")
                # NOTE(review): same raw-row construction caveat as get_trace.
                return Span(**row)
| 1,672
|
3ad99d3123bf995e22334dc4d1869ca72bdd343bb4bebfd340936dfbbb38071f
| 36.344086
| 118
| 0.530089
| 4.154306
| false
| false
| false
| false
|
autoscrape-labs/pydoll
|
pydoll/browser/page.py
| 25,924
| 0
|
MIT License
|
import asyncio
import json
import logging
from contextlib import asynccontextmanager
from functools import partial
from pathlib import Path
from typing import List, Optional, Tuple, Union
import aiofiles
from pydoll.commands import (
DomCommands,
FetchCommands,
NetworkCommands,
PageCommands,
RuntimeCommands,
StorageCommands,
)
from pydoll.connection.connection import ConnectionHandler
from pydoll.constants import By
from pydoll.element import WebElement
from pydoll.events import PageEvents
from pydoll.exceptions import InvalidFileExtension
from pydoll.mixins.find_elements import FindElementsMixin
from pydoll.utils import decode_image_to_bytes
logger = logging.getLogger(__name__)
class Page(FindElementsMixin):  # noqa: PLR0904
    """A single browser page/tab driven over the DevTools Protocol.

    Wraps a ConnectionHandler bound to one page id and exposes navigation,
    cookies, screenshots/PDF, network log access, event subscription, script
    execution, and Cloudflare Turnstile captcha helpers.
    """
    def __init__(self, connection_port: int, page_id: str):
        """
        Initializes the Page instance.
        Args:
            connection_port (int): The port number for the connection to the
                browser.
            page_id (str): The ID of the page, obtained via the DevTools
                Protocol.
        """
        self._connection_handler = ConnectionHandler(connection_port, page_id)
        # Event-domain toggles; kept in sync by the enable_*/disable_* methods.
        self._page_events_enabled = False
        self._network_events_enabled = False
        self._fetch_events_enabled = False
        self._dom_events_enabled = False
        self._intercept_file_chooser_dialog_enabled = False
        # Callback id registered by enable_auto_solve_cloudflare_captcha.
        self._cloudflare_captcha_callback_id = None
    @property
    def page_events_enabled(self) -> bool:
        """
        Returns whether page events are enabled or not.
        Returns:
            bool: True if page events are enabled, False otherwise.
        """
        return self._page_events_enabled
    @property
    def network_events_enabled(self) -> bool:
        """
        Returns whether network events are enabled or not.
        Returns:
            bool: True if network events are enabled, False otherwise.
        """
        return self._network_events_enabled
    @property
    def fetch_events_enabled(self) -> bool:
        """
        Returns whether fetch events are enabled or not.
        Returns:
            bool: True if fetch events are enabled, False otherwise.
        """
        return self._fetch_events_enabled
    @property
    def dom_events_enabled(self) -> bool:
        """
        Returns whether DOM events are enabled or not.
        Returns:
            bool: True if DOM events are enabled, False otherwise.
        """
        return self._dom_events_enabled
    @property
    def intercept_file_chooser_dialog_enabled(self) -> bool:
        """
        Returns whether file chooser dialogs are being intercepted or not.
        Returns:
            bool: True if file chooser dialogs are being intercepted,
                False otherwise.
        """
        return self._intercept_file_chooser_dialog_enabled
    @property
    async def current_url(self) -> str:
        """
        Retrieves the current URL of the page.
        Returns:
            str: The current URL of the page.
        """
        response = await self._execute_command(DomCommands.get_current_url())
        return response['result']['result']['value']
    @property
    async def page_source(self) -> str:
        """
        Retrieves the source code of the page.
        Returns:
            str: The source code of the page.
        """
        response = await self._execute_command(
            RuntimeCommands.evaluate_script(
                'document.documentElement.outerHTML'
            )
        )
        return response['result']['result']['value']
    async def close(self):
        """
        Closes the page.
        This method closes the current page in the browser.
        Returns:
            None
        """
        await self._execute_command(PageCommands.close())
    async def get_cookies(self) -> list[dict]:
        """
        Retrieves the cookies of the page.
        Returns:
            list[dict]: A list of dictionaries containing cookie data from
                the current page.
        """
        response = await self._execute_command(
            NetworkCommands.get_all_cookies()
        )
        return response['result']['cookies']
    async def set_cookies(self, cookies: list[dict]):
        """
        Sets cookies for the page.
        Args:
            cookies (list[dict]): A list of dictionaries containing cookie
                data to set for the current page.
        """
        # Set through both the Storage and Network domains.
        await self._execute_command(StorageCommands.set_cookies(cookies))
        await self._execute_command(NetworkCommands.set_cookies(cookies))
    async def delete_all_cookies(self):
        """
        Deletes all cookies from the browser.
        This clears both storage cookies and browser cookies associated with
        the current page.
        Returns:
            None
        """
        await self._execute_command(StorageCommands.clear_cookies())
        await self._execute_command(NetworkCommands.clear_browser_cookies())
    async def has_dialog(self) -> bool:
        """
        Checks if a dialog is present on the page.
        Returns:
            bool: True if a dialog is present, False otherwise.
        """
        if self._connection_handler.dialog:
            return True
        return False
    async def get_dialog_message(self) -> str:
        """
        Retrieves the message of the dialog on the page.
        Returns:
            str: The message of the dialog.
        Raises:
            LookupError: If no dialog is present on the page.
        """
        if not await self.has_dialog():
            raise LookupError('No dialog present on the page')
        return self._connection_handler.dialog['params']['message']
    async def accept_dialog(self):
        """
        Accepts the dialog on the page.
        Raises:
            LookupError: If no dialog is present on the page.
        """
        if not await self.has_dialog():
            raise LookupError('No dialog present on the page')
        await self._execute_command(PageCommands.handle_dialog(True))
    async def go_to(self, url: str, timeout=300):
        """
        Navigates to a URL in the page.
        Args:
            url (str): The URL to navigate to.
            timeout (int): Maximum time in seconds to wait for page to load.
                Defaults to 300 seconds.
        Raises:
            TimeoutError: If the page fails to load within the specified
                timeout.
        """
        # Navigating to the current URL is treated as a refresh.
        if await self._refresh_if_url_not_changed(url):
            return
        await self._execute_command(PageCommands.go_to(url))
        try:
            await self._wait_page_load(timeout=timeout)
        except asyncio.TimeoutError:
            raise TimeoutError('Page load timed out')
    async def refresh(self):
        """
        Refreshes the page.
        This method reloads the current page and waits for it to finish
        loading.
        Raises:
            TimeoutError: If the page does not finish loading within the
                default timeout period (300 seconds).
        Returns:
            None
        """
        await self._execute_command(PageCommands.refresh())
        try:
            await self._wait_page_load()
        except asyncio.TimeoutError:
            raise TimeoutError('Page load timed out')
    async def get_screenshot(self, path: str):
        """
        Captures a screenshot of the page.
        Args:
            path (str): The file path to save the screenshot to. The file
                extension determines the image format (jpeg/jpg/png).
        Raises:
            InvalidFileExtension: If the extension is not jpeg, jpg or png.
        Returns:
            None
        """
        fmt = path.split('.')[-1]
        if fmt not in {'jpeg', 'jpg', 'png'}:
            raise InvalidFileExtension(f'{fmt} extension is not supported.')
        response = await self._execute_command(
            PageCommands.screenshot(fmt=fmt)
        )
        # The protocol returns base64-encoded image data.
        screenshot_b64 = response['result']['data'].encode('utf-8')
        screenshot_bytes = decode_image_to_bytes(screenshot_b64)
        async with aiofiles.open(path, 'wb') as file:
            await file.write(screenshot_bytes)
    async def get_screenshot_base64(self):
        """
        Retrieves the screenshot of the page as a base64 encoded string.
        Returns:
            str: The base64 encoded screenshot.
        # TODO: remove the duplicated logic
        """
        response = await self._execute_command(PageCommands.screenshot())
        return response['result']['data']
    async def set_download_path(self, path: str):
        """
        Sets the download path for the page.
        Args:
            path (str): The path where the downloaded files should be saved.
        """
        await self._execute_command(PageCommands.set_download_path(path))
    async def get_pdf_base64(self):
        """
        Retrieves the PDF data of the page.
        Returns:
            str: The PDF data of the page.
        """
        response = await self._execute_command(PageCommands.print_to_pdf())
        return response['result']['data']
    async def print_to_pdf(self, path: str):
        """
        Prints the page to a PDF file.
        Args:
            path (str): The file path to save the PDF file to.
        """
        response = await self._execute_command(PageCommands.print_to_pdf())
        pdf_b64 = response['result']['data'].encode('utf-8')
        # NOTE(review): decode_image_to_bytes is reused here as a generic
        # base64 -> bytes decoder for the PDF payload, despite its name.
        pdf_bytes = decode_image_to_bytes(pdf_b64)
        async with aiofiles.open(path, 'wb') as file:
            await file.write(pdf_bytes)
    async def get_network_logs(self, matches: list[str] = []):
        """
        Retrieves network logs from the page.
        Args:
            matches (list[str]): A list of URL patterns to match network logs
                against. If empty, all logs are returned.
        Returns:
            list: A list of network logs that match the specified patterns.
        Raises:
            LookupError: If no network logs match the specified patterns.
        """
        # NOTE(review): mutable default argument ([]); harmless here because
        # it is only iterated, but prefer `matches=None` + default inside.
        # NOTE(review): with an empty `matches`, the inner loop never appends,
        # so this raises LookupError rather than returning all logs as the
        # docstring claims — confirm intended behavior.
        network_logs = self._connection_handler.network_logs
        logs_matched = []
        for log in network_logs:
            if not log.get('params', {}).get('request', {}).get('url'):
                continue
            for match in matches:
                if match in log['params']['request']['url']:
                    logs_matched.append(log)
                    break
        if not logs_matched:
            raise LookupError('No network logs matched the specified pattern')
        return logs_matched
    async def get_network_response_bodies(self, matches: list[str] = []):
        """
        Retrieves the response bodies of network requests that match the
        specified pattern.
        Args:
            matches (list): The URL patterns to match network requests against.
        Returns:
            list: A list of response bodies from network requests that match
                the specified patterns.
        """
        # NOTE(review): mutable default argument ([]); only passed through,
        # never mutated — still prefer `matches=None`.
        logs_matched = await self.get_network_logs(matches)
        responses = []
        for log in logs_matched:
            try:
                body, base64encoded = await self.get_network_response_body(
                    log['params']['requestId']
                )
            except KeyError:
                # Log entries without a requestId are skipped.
                continue
            response = json.loads(body) if not base64encoded else body
            responses.append(response)
        return responses
    async def get_network_response_body(self, request_id: str):
        """
        Retrieves the response body of a network request.
        Args:
            request_id (str): The ID of the network request.
        Returns:
            tuple: A tuple containing:
                - str: The response body content
                - bool: Flag indicating if the body is base64 encoded
        """
        response = await self._execute_command(
            NetworkCommands.get_response_body(request_id)
        )
        return (
            response['result']['body'],
            response['result']['base64Encoded'],
        )
    async def enable_page_events(self):
        """
        Enables page events for the page.
        This allows listening for page-related events such as load, navigate,
        and content change events. These events can be captured with the `on`
        method.
        Returns:
            None
        """
        await self._execute_command(PageCommands.enable_page())
        self._page_events_enabled = True
    async def enable_network_events(self):
        """
        Enables network events for the page.
        This allows listening for network-related events such as request and
        response events. These events can be captured with the `on` method.
        Returns:
            None
        """
        await self._execute_command(NetworkCommands.enable_network_events())
        self._network_events_enabled = True
    async def enable_fetch_events(
        self, handle_auth: bool = False, resource_type: str = 'Document'
    ):
        """
        Enables fetch events for the page.
        This allows interception of network requests before they are sent.
        Args:
            handle_auth (bool): Whether to handle authentication requests.
                Defaults to False.
            resource_type (str): The type of resource to intercept.
                Defaults to 'Document'.
        Returns:
            None
        """
        await self._execute_command(
            FetchCommands.enable_fetch_events(handle_auth, resource_type)
        )
        self._fetch_events_enabled = True
    async def enable_dom_events(self):
        """
        Enables DOM events for the page.
        This allows listening for DOM-related events such as node creation,
        attribute modification, and node removal events. These events can be
        captured with the `on` method.
        Returns:
            None
        """
        await self._execute_command(DomCommands.enable_dom_events())
        self._dom_events_enabled = True
    async def enable_intercept_file_chooser_dialog(self):
        """
        Enable intercepting file chooser dialogs.
        When file chooser interception is enabled, native file chooser dialog
        is not shown. Instead, a protocol event Page.fileChooserOpened is
        emitted.
        Returns:
            None
        """
        await self._execute_command(
            PageCommands.set_intercept_file_chooser_dialog(True)
        )
        self._intercept_file_chooser_dialog_enabled = True
    async def disable_fetch_events(self):
        """
        Disables fetch events for the page.
        This stops the interception of network requests that was previously
        enabled with enable_fetch_events().
        Returns:
            None
        """
        await self._execute_command(FetchCommands.disable_fetch_events())
        self._fetch_events_enabled = False
    async def disable_page_events(self):
        """
        Disables page events for the page.
        This stops listening for page-related events that were previously
        enabled with enable_page_events().
        Returns:
            None
        """
        await self._execute_command(PageCommands.disable_page())
        self._page_events_enabled = False
    async def disable_intercept_file_chooser_dialog(self):
        """
        Disable intercepting file chooser dialogs.
        When file chooser interception is disabled, native file chooser
        dialog is shown.
        Returns:
            None
        """
        await self._execute_command(
            PageCommands.set_intercept_file_chooser_dialog(False)
        )
        self._intercept_file_chooser_dialog_enabled = False
    async def on(
        self, event_name: str, callback: callable, temporary: bool = False
    ):
        """
        Registers an event listener for the page.
        Args:
            event_name (str): The event name to listen for.
            callback (callable): The callback function to execute when the
                event is triggered.
            temporary (bool): If True, the callback will be removed after it's
                triggered once. Defaults to False.
        Returns:
            int: The ID of the registered callback, which can be used to
                remove the listener later.
        """
        # Coroutine callbacks are scheduled as tasks so a slow handler does
        # not block event dispatch; sync callbacks are registered directly.
        async def callback_wrapper(event):
            asyncio.create_task(callback(event))
        if asyncio.iscoroutinefunction(callback):
            function_to_register = callback_wrapper
        else:
            function_to_register = callback
        return await self._connection_handler.register_callback(
            event_name, function_to_register, temporary
        )
    async def execute_script(self, script: str, element: WebElement = None):
        """
        Executes a JavaScript script in the page.
        If an element is provided, the script will be executed in the context
        of that element. To provide the element context, use the 'argument'
        keyword in the script.
        Examples:
        ```python
        await page.execute_script('argument.click()', element)
        await page.execute_script('argument.value = "Hello, World!"', element)
        ```
        Args:
            script (str): The JavaScript script to execute.
            element (WebElement, optional): The element to execute the script
                on. Use 'argument' in your script to refer to this element.
                Defaults to None.
        Returns:
            dict: The result of the script execution from the browser.
        """
        if element:
            # 'argument' placeholders become 'this', and the script is wrapped
            # in a function invoked on the element's remote object.
            script = script.replace('argument', 'this')
            script = f'function(){{ {script} }}'
            object_id = element._object_id
            command = RuntimeCommands.call_function_on(
                object_id, script, return_by_value=True
            )
        else:
            command = RuntimeCommands.evaluate_script(script)
        return await self._execute_command(command)
    async def _refresh_if_url_not_changed(self, url: str):
        """
        Refreshes the page if the URL has not changed.
        Args:
            url (str): The URL to compare against.
        Returns:
            bool: True if a refresh was performed, False otherwise.
        """
        current_url = await self.current_url
        if current_url == url:
            await self.refresh()
            return True
        return False
    async def _wait_page_load(self, timeout: int = 300):
        """
        Waits for the page to finish loading.
        Polls document.readyState every 0.5s until it is 'complete'.
        Args:
            timeout (int): Maximum time in seconds to wait for the page
                to load. Defaults to 300 seconds.
        Raises:
            asyncio.TimeoutError: If the page does not finish loading within
                the specified timeout.
        """
        start_time = asyncio.get_event_loop().time()
        while True:
            response = await self._execute_command(
                RuntimeCommands.evaluate_script('document.readyState')
            )
            if response['result']['result']['value'] == 'complete':
                break
            if asyncio.get_event_loop().time() - start_time > timeout:
                raise asyncio.TimeoutError('Page load timed out')
            await asyncio.sleep(0.5)
    @asynccontextmanager
    async def expect_file_chooser(
        self, files: Union[str, Path, List[Union[str, Path]]]
    ):
        """
        Provide a context manager that expects a file chooser dialog to be
        opened and handles the file upload. When a file selection signal
        is captured, the file is uploaded.
        Args:
            files (Union[str, Path, List[Union[str, Path]]]): The files to be
                uploaded.
        Returns:
        """
        async def event_handler(event):
            await self._execute_command(
                DomCommands.upload_files(
                    files=files,
                    backend_node_id=event['params']['backendNodeId'],
                )
            )
        # Temporarily enable page events / interception, restoring the
        # previous page-events state after the block exits.
        if self.page_events_enabled is False:
            _before_page_events_enabled = False
            await self.enable_page_events()
        else:
            _before_page_events_enabled = True
        if self.intercept_file_chooser_dialog_enabled is False:
            await self.enable_intercept_file_chooser_dialog()
            await self.on(
                PageEvents.FILE_CHOOSER_OPENED, event_handler, temporary=True
            )
        yield
        if self.intercept_file_chooser_dialog_enabled is True:
            await self.disable_intercept_file_chooser_dialog()
        if _before_page_events_enabled is False:
            await self.disable_page_events()
    async def _bypass_cloudflare(
        self,
        event: dict,
        custom_selector: Optional[Tuple[By, str]] = None,
        time_before_click: int = 2,
        time_to_wait_captcha: int = 5,
    ):
        """
        Attempt to bypass Cloudflare Turnstile captcha when detected.
        Args:
            event (dict): The event payload (unused)
            custom_selector (Optional[Tuple[By, str]]): Custom selector
                to locate the captcha element. Defaults to
                (By.CLASS_NAME, 'cf-turnstile').
            time_before_click (int): Time to wait before clicking the captcha
                element in seconds. Defaults to 2 seconds.
            time_to_wait_captcha (int): Timeout for the captcha element to be
                found in seconds. Defaults to 5 seconds.
        """
        try:
            selector = custom_selector or (By.CLASS_NAME, 'cf-turnstile')
            if element := await self.wait_element(
                *selector, timeout=time_to_wait_captcha, raise_exc=False
            ):
                # adjust the div size to shadow root size
                await self.execute_script(
                    'argument.style="width: 300px"', element
                )
                await asyncio.sleep(time_before_click)
                await element.click()
        except Exception as exc:
            # Best-effort: failures are logged, never raised to the caller.
            logger.error(f'Error in cloudflare bypass: {exc}')
    @asynccontextmanager
    async def expect_and_bypass_cloudflare_captcha(
        self,
        custom_selector: Optional[Tuple[By, str]] = None,
        time_before_click: int = 2,
        time_to_wait_captcha: Optional[int] = 5,
    ):
        """
        Context manager to handle Cloudflare Turnstile captcha.
        This method sets up a callback that will automatically attempt to
        bypass the Cloudflare captcha when the page loads. The main code
        will wait until the captcha handling is complete before continuing.
        It creates an event to coordinate between the callback and the main
        code.
        Args:
            custom_selector (Optional[Tuple[By, str]]): Custom selector
                to locate the captcha element. Defaults to
                (By.CLASS_NAME, 'cf-turnstile').
            time_before_click (int): Time to wait before clicking the captcha
                element in seconds. Defaults to 2 seconds.
            time_to_wait_captcha (Optional[int]): Timeout for the captcha
                element to be found in seconds. Defaults to 5 seconds.
        Returns:
            None
        """
        captcha_processed = asyncio.Event()
        async def bypass_cloudflare(_: dict):
            try:
                await self._bypass_cloudflare(
                    _,
                    custom_selector,
                    time_before_click,
                    time_to_wait_captcha,
                )
            finally:
                # Always unblock the waiting body, even if the bypass failed.
                captcha_processed.set()
        _before_page_events_enabled = self.page_events_enabled
        if not _before_page_events_enabled:
            await self.enable_page_events()
        callback_id = await self.on(PageEvents.PAGE_LOADED, bypass_cloudflare)
        try:
            yield
            await captcha_processed.wait()
        finally:
            await self._connection_handler.remove_callback(callback_id)
            if not _before_page_events_enabled:
                await self.disable_page_events()
    async def enable_auto_solve_cloudflare_captcha(
        self,
        custom_selector: Optional[Tuple[By, str]] = None,
        time_before_click: int = 2,
        time_to_wait_captcha: int = 5,
    ):
        """
        Enables automatic solving of Cloudflare Turnstile captcha.
        This method sets up a callback that will automatically attempt to
        bypass the Cloudflare captcha when the page loads. Unlike the
        context manager version, this keeps the callback active until
        explicitly disabled.
        Args:
            custom_selector (Optional[Tuple[By, str]]): Custom selector
                to locate the captcha element. Defaults to
                (By.CLASS_NAME, 'cf-turnstile').
            time_before_click (int): Time to wait before clicking the captcha
                element in seconds. Defaults to 2 seconds.
            time_to_wait_captcha (int): Timeout for the captcha element to be
                found in seconds. Defaults to 5 seconds.
        Returns:
            int: Callback ID that can be used to disable the auto-solving
        """
        if not self.page_events_enabled:
            await self.enable_page_events()
        callback = partial(
            self._bypass_cloudflare,
            custom_selector=custom_selector,
            time_before_click=time_before_click,
            time_to_wait_captcha=time_to_wait_captcha,
        )
        self._cloudflare_captcha_callback_id = await self.on(
            PageEvents.PAGE_LOADED, callback
        )
    async def disable_auto_solve_cloudflare_captcha(self):
        """
        Disables automatic solving of Cloudflare Turnstile captcha.
        Returns:
            None
        """
        await self._connection_handler.remove_callback(
            self._cloudflare_captcha_callback_id
        )
        self._cloudflare_captcha_callback_id = None
| 5,539
|
f3a968ce0028c85f9d309a9d22ea60f7d3fcd2a98c7d49687251526a25d2701c
| 31.486216
| 79
| 0.586638
| 4.680267
| false
| false
| false
| false
|
openai/openai-agents-python
|
src/agents/extensions/handoff_filters.py
| 1,977
| 0
|
MIT License
|
from __future__ import annotations
from ..handoffs import HandoffInputData
from ..items import (
HandoffCallItem,
HandoffOutputItem,
RunItem,
ToolCallItem,
ToolCallOutputItem,
TResponseInputItem,
)
"""Contains common handoff input filters, for convenience. """
def remove_all_tools(handoff_input_data: HandoffInputData) -> HandoffInputData:
    """Filters out all tool items: file search, web search and function calls+output."""
    history = handoff_input_data.input_history
    # Only a concrete tuple history can be filtered; other forms pass through.
    if isinstance(history, tuple):
        filtered_history = _remove_tool_types_from_input(history)
    else:
        filtered_history = history
    return HandoffInputData(
        input_history=filtered_history,
        pre_handoff_items=_remove_tools_from_items(handoff_input_data.pre_handoff_items),
        new_items=_remove_tools_from_items(handoff_input_data.new_items),
    )
def _remove_tools_from_items(items: tuple[RunItem, ...]) -> tuple[RunItem, ...]:
    """Return *items* with handoff and tool call/output items removed, order preserved."""
    # isinstance accepts a tuple of types — one call replaces the or-chain.
    tool_item_types = (HandoffCallItem, HandoffOutputItem, ToolCallItem, ToolCallOutputItem)
    return tuple(item for item in items if not isinstance(item, tool_item_types))
def _remove_tool_types_from_input(
items: tuple[TResponseInputItem, ...],
) -> tuple[TResponseInputItem, ...]:
tool_types = [
"function_call",
"function_call_output",
"computer_call",
"computer_call_output",
"file_search_call",
"web_search_call",
]
filtered_items: list[TResponseInputItem] = []
for item in items:
itype = item.get("type")
if itype in tool_types:
continue
filtered_items.append(item)
return tuple(filtered_items)
| 533
|
7da5d4e08f0d80b8d44816665c6bc70024922afb10e68b599c44ce255823ce07
| 28.507463
| 95
| 0.650986
| 3.709193
| false
| false
| false
| false
|
browser-use/browser-use
|
examples/use-cases/google_sheets.py
| 7,576
| 0
|
MIT License
|
import os
import sys
from browser_use.browser.context import BrowserContext
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import asyncio
import pyperclip
from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
from browser_use import ActionResult, Agent, Controller
from browser_use.browser.browser import Browser, BrowserConfig
# Launch a locally installed Chrome instead of the bundled browser.
# NOTE(review): this path is macOS-specific; adjust for other platforms.
browser = Browser(
    config=BrowserConfig(
        browser_binary_path='/Applications/Google Chrome.app/Contents/MacOS/Google Chrome',
    ),
)
# Load environment variables
load_dotenv()
# Fail fast if the key required by ChatOpenAI is missing.
if not os.getenv('OPENAI_API_KEY'):
    raise ValueError('OPENAI_API_KEY is not set. Please add it to your environment variables.')
# Registry for the custom Google Sheets actions defined below.
controller = Controller()
def is_google_sheet(page) -> bool:
    """Return True when *page* is currently on a Google Sheets URL."""
    sheets_prefix = 'https://docs.google.com/spreadsheets/'
    return page.url.startswith(sheets_prefix)
@controller.registry.action('Google Sheets: Open a specific Google Sheet')
async def open_google_sheet(browser: BrowserContext, google_sheet_url: str):
    """Navigate to the given sheet URL (skipping navigation if already there)."""
    page = await browser.get_current_page()
    if page.url != google_sheet_url:
        await page.goto(google_sheet_url)
        await page.wait_for_load_state()
    # Success only if we actually landed on a Sheets URL (permissions, redirects).
    if is_google_sheet(page):
        return ActionResult(extracted_content=f'Opened Google Sheet {google_sheet_url}', include_in_memory=False)
    return ActionResult(error='Failed to open Google Sheet, are you sure you have permissions to access this sheet?')
@controller.registry.action('Google Sheets: Get the contents of the entire sheet', page_filter=is_google_sheet)
async def get_sheet_contents(browser: BrowserContext):
    """Copy the entire sheet to the clipboard and return it as TSV text."""
    page = await browser.get_current_page()
    # Enter+Escape first commits/cancels any in-progress cell edit so Ctrl+A
    # selects cells rather than text within a cell.
    # select all cells
    await page.keyboard.press('Enter')
    await page.keyboard.press('Escape')
    await page.keyboard.press('ControlOrMeta+A')
    await page.keyboard.press('ControlOrMeta+C')
    # Reads via the OS clipboard -- assumes the browser shares the host
    # clipboard (TODO confirm behavior in headless/remote setups).
    extracted_tsv = pyperclip.paste()
    return ActionResult(extracted_content=extracted_tsv, include_in_memory=True)
@controller.registry.action('Google Sheets: Select a specific cell or range of cells', page_filter=is_google_sheet)
async def select_cell_or_range(browser: BrowserContext, cell_or_range: str):
    """Jump to *cell_or_range* (e.g. 'A1' or 'A1:F10') via the Ctrl+G goto-range popup.

    The keypress/sleep sequence below is order-sensitive: edits are committed,
    focus is reset to the sheet origin, then the goto popup is opened and fed
    the target range.
    """
    page = await browser.get_current_page()
    await page.keyboard.press('Enter') # make sure we dont delete current cell contents if we were last editing
    await page.keyboard.press('Escape') # to clear current focus (otherwise select range popup is additive)
    await asyncio.sleep(0.1)
    await page.keyboard.press('Home') # move cursor to the top left of the sheet first
    await page.keyboard.press('ArrowUp')
    await asyncio.sleep(0.1)
    await page.keyboard.press('Control+G') # open the goto range popup
    await asyncio.sleep(0.2)
    # NOTE(review): Playwright's keyboard `delay` is in milliseconds, so 0.05
    # is effectively zero -- confirm whether 50 was intended.
    await page.keyboard.type(cell_or_range, delay=0.05)
    await asyncio.sleep(0.2)
    await page.keyboard.press('Enter')
    await asyncio.sleep(0.2)
    await page.keyboard.press('Escape') # to make sure the popup still closes in the case where the jump failed
    return ActionResult(extracted_content=f'Selected cell {cell_or_range}', include_in_memory=False)
@controller.registry.action('Google Sheets: Get the contents of a specific cell or range of cells', page_filter=is_google_sheet)
async def get_range_contents(browser: BrowserContext, cell_or_range: str):
    """Select *cell_or_range*, copy it, and return the clipboard contents as TSV."""
    page = await browser.get_current_page()
    await select_cell_or_range(browser, cell_or_range)
    await page.keyboard.press('ControlOrMeta+C')
    # Brief pause so the copy lands in the OS clipboard before we read it.
    await asyncio.sleep(0.1)
    extracted_tsv = pyperclip.paste()
    return ActionResult(extracted_content=extracted_tsv, include_in_memory=True)
@controller.registry.action('Google Sheets: Clear the currently selected cells', page_filter=is_google_sheet)
async def clear_selected_range(browser: BrowserContext):
    """Delete the contents of whatever cells are currently selected."""
    page = await browser.get_current_page()
    await page.keyboard.press('Backspace')
    return ActionResult(extracted_content='Cleared selected range', include_in_memory=False)
@controller.registry.action('Google Sheets: Input text into the currently selected cell', page_filter=is_google_sheet)
async def input_selected_cell_text(browser: BrowserContext, text: str):
    """Type *text* into the selected cell, commit it, and move back up one row."""
    page = await browser.get_current_page()
    # NOTE(review): `delay` is in milliseconds, so 0.1 is effectively instant --
    # confirm whether a human-like delay was intended.
    await page.keyboard.type(text, delay=0.1)
    await page.keyboard.press('Enter') # make sure to commit the input so it doesn't get overwritten by the next action
    await page.keyboard.press('ArrowUp')
    return ActionResult(extracted_content=f'Inputted text {text}', include_in_memory=False)
@controller.registry.action('Google Sheets: Batch update a range of cells', page_filter=is_google_sheet)
async def update_range_contents(browser: BrowserContext, range: str, new_contents_tsv: str):
    """Select *range* and paste *new_contents_tsv* into it via a synthetic paste event.

    Args:
        range: Target range, e.g. 'A1:F10'. (Name kept for registry/caller
            compatibility even though it shadows the builtin.)
        new_contents_tsv: Tab-separated values to paste into the range.
    """
    page = await browser.get_current_page()
    await select_cell_or_range(browser, range)
    # Pass the TSV through evaluate()'s argument channel instead of interpolating
    # it into the script source: backticks, `${`, or quotes in the data would
    # otherwise break the template literal (a script-injection hazard).
    await page.evaluate(
        """(tsv) => {
            const clipboardData = new DataTransfer();
            clipboardData.setData('text/plain', tsv);
            document.activeElement.dispatchEvent(new ClipboardEvent('paste', {clipboardData}));
        }""",
        new_contents_tsv,
    )
    return ActionResult(extracted_content=f'Updated cell {range} with {new_contents_tsv}', include_in_memory=False)
# many more snippets for keyboard-shortcut based Google Sheets automation can be found here, see:
# - https://github.com/philc/sheetkeys/blob/master/content_scripts/sheet_actions.js
# - https://github.com/philc/sheetkeys/blob/master/content_scripts/commands.js
# - https://support.google.com/docs/answer/181110?hl=en&co=GENIE.Platform%3DDesktop#zippy=%2Cmac-shortcuts
# Tip: LLM is bad at spatial reasoning, don't make it navigate with arrow keys relative to current cell
# if given arrow keys, it will try to jump from G1 to A2 by pressing Down, without realizing it needs to go Down+Left,Left,Left,Left
async def main():
    """Run four agents sequentially against one shared browser context:
    wipe the sheet, populate it, extend it, then fact-check it."""
    # A single context is reused so all agents share the same session/cookies.
    async with await browser.new_context() as context:
        model = ChatOpenAI(model='gpt-4o')
        # Stage 1: clear columns A-F so the researcher starts from a clean sheet.
        eraser = Agent(
            task="""
            Clear all the existing values in columns A through F in this Google Sheet:
            https://docs.google.com/spreadsheets/d/1INaIcfpYXlMRWO__de61SHFCaqt1lfHlcvtXZPItlpI/edit
            """,
            llm=model,
            browser_context=context,
            controller=controller,
        )
        await eraser.run()
        # Stage 2: research and fill columns A-E for the top Fortune-100 CEOs.
        researcher = Agent(
            task="""
            Google to find the full name, nationality, and date of birth of the CEO of the top 10 Fortune 100 companies.
            For each company, append a row to this existing Google Sheet: https://docs.google.com/spreadsheets/d/1INaIcfpYXlMRWO__de61SHFCaqt1lfHlcvtXZPItlpI/edit
            Make sure column headers are present and all existing values in the sheet are formatted correctly.
            Columns:
                A: Company Name
                B: CEO Full Name
                C: CEO Country of Birth
                D: CEO Date of Birth (YYYY-MM-DD)
                E: Source URL where the information was found
            """,
            llm=model,
            browser_context=context,
            controller=controller,
        )
        await researcher.run()
        # Stage 3: continue the existing pattern with three more rows.
        improvised_continuer = Agent(
            task="""
            Read the Google Sheet https://docs.google.com/spreadsheets/d/1INaIcfpYXlMRWO__de61SHFCaqt1lfHlcvtXZPItlpI/edit
            Add 3 more rows to the bottom continuing the existing pattern, make sure any data you add is sourced correctly.
            """,
            llm=model,
            browser_context=context,
            controller=controller,
        )
        await improvised_continuer.run()
        # Stage 4: verify every row and record findings in a new column F.
        final_fact_checker = Agent(
            task="""
            Read the Google Sheet https://docs.google.com/spreadsheets/d/1INaIcfpYXlMRWO__de61SHFCaqt1lfHlcvtXZPItlpI/edit
            Fact-check every entry, add a new column F with your findings for each row.
            Make sure to check the source URL for each row, and make sure the information is correct.
            """,
            llm=model,
            browser_context=context,
            controller=controller,
        )
        await final_fact_checker.run()
if __name__ == '__main__':
    asyncio.run(main())
| 2,298
|
623f6d876c280782cedee4a319aa635d451ac4dbde667498850d02757688e115
| 38.253886
| 154
| 0.757128
| 3.29678
| false
| false
| false
| false
|
HKUDS/AutoAgent
|
autoagent/workflows/math_solver_workflow_flow.py
| 8,795
| 0
|
MIT License
|
import asyncio
import json
import argparse
from openai import AsyncOpenAI
from openai.types.chat import ChatCompletionMessageToolCall
from autoagent.flow import default_drive, EventInput, ReturnBehavior
from autoagent.flow.dynamic import goto_events, abort_this
import re
from autoagent import MetaChain
from autoagent.types import Response
from autoagent.registry import register_workflow
def extract_answer(response: str, key: str):
    """Extract the text wrapped in ``<key>...</key>`` tags from an LLM response.

    Args:
        response: Raw model output that may contain ``<key>...</key>`` blocks.
        key: Tag name to look for. Escaped with ``re.escape`` so keys containing
            regex metacharacters cannot corrupt the pattern.

    Returns:
        The contents of the first matching tag, or ``None`` if no tag is found.
    """
    pattern = f"<{re.escape(key)}>(.*?)</{re.escape(key)}>"
    # re.DOTALL lets the captured answer span multiple lines, which LLM output
    # frequently does; the original pattern silently failed on multiline answers.
    match = re.search(pattern, response, re.DOTALL)
    return match.group(1) if match else None
from autoagent.agents import get_math_solver_agent
from autoagent.agents import get_vote_aggregator_agent
@default_drive.make_event
async def on_start(event: EventInput, global_ctx):
    """Entry event: announce that the math_solver_workflow run has begun."""
    workflow_name = 'math_solver_workflow'
    print(f"start the workflow:{workflow_name}")
@default_drive.listen_group([on_start])
async def solve_with_gpt4(event: EventInput, global_ctx):
    """Solver event: ask the GPT-4 math-solver agent for a solution.

    Consumes 'math_problem' from global_ctx and writes the answer extracted
    from the agent's <gpt4_solution> tags (or the raw reply as a fallback)
    to global_ctx['gpt4_solution'].
    NOTE: this body is generated boilerplate shared with the sibling solver events.
    """
    # Declarative description of the context keys this event consumes.
    inputs = [{'key': 'math_problem', 'description': 'The math problem that needs to be solved.'}]
    input_dict = dict()
    for inp in inputs:
        input_dict[inp["key"]] = global_ctx.get(inp["key"], None)
    # The conversation history is shared across events via global_ctx.
    messages = global_ctx.get('messages', [])
    task = 'Solve the math problem using systematic approach and show detailed steps.'
    outputs = [{'key': 'gpt4_solution', 'description': 'The solution generated by GPT-4 model.', 'condition': None, 'action': {'type': 'RESULT', 'value': None}}]
    agent = get_math_solver_agent('gpt-4o-2024-08-06')
    # Render the inputs into a natural-language prompt for the agent.
    input_str = []
    for key, value in input_dict.items():
        input_str.append(f"The {key.replace('_', ' ')} is {value}")
    input_str = "\n".join(input_str) + "\n"
    query = input_str + '.\nThe task is: ' + task + '.\n'
    messages.append({
        "role": "user",
        "content": query
    })
    client = MetaChain()
    response: Response = await client.run_async(agent = agent, messages = messages, context_variables = global_ctx, debug = True)
    result = response.messages[-1]["content"]
    # Persist the updated conversation so later events see this exchange.
    messages.extend(response.messages)
    global_ctx["messages"] = messages
    # Route on each declared output's action: store the tagged answer, abort
    # this event, or jump to other events.
    for output in outputs:
        ans = extract_answer(result, output["key"])
        if ans:
            if output["action"]["type"] == "RESULT":
                global_ctx[output["key"]] = ans
                return ans
            elif output["action"]["type"] == "ABORT":
                return abort_this()
            elif output["action"]["type"] == "GO_TO":
                return goto_events([output["action"]["value"]])
        # Fallback: with a single declared output, accept the untagged raw reply.
        elif len(outputs) == 1:
            global_ctx[output["key"]] = result
            return result
    raise Exception("No valid answer found")
@default_drive.listen_group([on_start])
async def solve_with_claude(event: EventInput, global_ctx):
    """Solver event: ask the Claude math-solver agent for a solution.

    Consumes 'math_problem' from global_ctx and writes the answer extracted
    from the agent's <claude_solution> tags (or the raw reply as a fallback)
    to global_ctx['claude_solution'].
    NOTE: this body is generated boilerplate shared with the sibling solver events.
    """
    # Declarative description of the context keys this event consumes.
    inputs = [{'key': 'math_problem', 'description': 'The math problem that needs to be solved.'}]
    input_dict = dict()
    for inp in inputs:
        input_dict[inp["key"]] = global_ctx.get(inp["key"], None)
    # The conversation history is shared across events via global_ctx.
    messages = global_ctx.get('messages', [])
    task = 'Solve the math problem using systematic approach and show detailed steps.'
    outputs = [{'key': 'claude_solution', 'description': 'The solution generated by Claude model.', 'condition': None, 'action': {'type': 'RESULT', 'value': None}}]
    agent = get_math_solver_agent('claude-3-5-sonnet-20241022')
    # Render the inputs into a natural-language prompt for the agent.
    input_str = []
    for key, value in input_dict.items():
        input_str.append(f"The {key.replace('_', ' ')} is {value}")
    input_str = "\n".join(input_str) + "\n"
    query = input_str + '.\nThe task is: ' + task + '.\n'
    messages.append({
        "role": "user",
        "content": query
    })
    client = MetaChain()
    response: Response = await client.run_async(agent = agent, messages = messages, context_variables = global_ctx, debug = True)
    result = response.messages[-1]["content"]
    # Persist the updated conversation so later events see this exchange.
    messages.extend(response.messages)
    global_ctx["messages"] = messages
    # Route on each declared output's action: store the tagged answer, abort
    # this event, or jump to other events.
    for output in outputs:
        ans = extract_answer(result, output["key"])
        if ans:
            if output["action"]["type"] == "RESULT":
                global_ctx[output["key"]] = ans
                return ans
            elif output["action"]["type"] == "ABORT":
                return abort_this()
            elif output["action"]["type"] == "GO_TO":
                return goto_events([output["action"]["value"]])
        # Fallback: with a single declared output, accept the untagged raw reply.
        elif len(outputs) == 1:
            global_ctx[output["key"]] = result
            return result
    raise Exception("No valid answer found")
@default_drive.listen_group([on_start])
async def solve_with_deepseek(event: EventInput, global_ctx):
    """Solver event: ask the Deepseek math-solver agent for a solution.

    Consumes 'math_problem' from global_ctx and writes the answer extracted
    from the agent's <deepseek_solution> tags (or the raw reply as a fallback)
    to global_ctx['deepseek_solution'].
    NOTE: this body is generated boilerplate shared with the sibling solver events.
    """
    # Declarative description of the context keys this event consumes.
    inputs = [{'key': 'math_problem', 'description': 'The math problem that needs to be solved.'}]
    input_dict = dict()
    for inp in inputs:
        input_dict[inp["key"]] = global_ctx.get(inp["key"], None)
    # The conversation history is shared across events via global_ctx.
    messages = global_ctx.get('messages', [])
    task = 'Solve the math problem using systematic approach and show detailed steps.'
    outputs = [{'key': 'deepseek_solution', 'description': 'The solution generated by Deepseek model.', 'condition': None, 'action': {'type': 'RESULT', 'value': None}}]
    agent = get_math_solver_agent('deepseek/deepseek-chat')
    # Render the inputs into a natural-language prompt for the agent.
    input_str = []
    for key, value in input_dict.items():
        input_str.append(f"The {key.replace('_', ' ')} is {value}")
    input_str = "\n".join(input_str) + "\n"
    query = input_str + '.\nThe task is: ' + task + '.\n'
    messages.append({
        "role": "user",
        "content": query
    })
    client = MetaChain()
    response: Response = await client.run_async(agent = agent, messages = messages, context_variables = global_ctx, debug = True)
    result = response.messages[-1]["content"]
    # Persist the updated conversation so later events see this exchange.
    messages.extend(response.messages)
    global_ctx["messages"] = messages
    # Route on each declared output's action: store the tagged answer, abort
    # this event, or jump to other events.
    for output in outputs:
        ans = extract_answer(result, output["key"])
        if ans:
            if output["action"]["type"] == "RESULT":
                global_ctx[output["key"]] = ans
                return ans
            elif output["action"]["type"] == "ABORT":
                return abort_this()
            elif output["action"]["type"] == "GO_TO":
                return goto_events([output["action"]["value"]])
        # Fallback: with a single declared output, accept the untagged raw reply.
        elif len(outputs) == 1:
            global_ctx[output["key"]] = result
            return result
    raise Exception("No valid answer found")
@default_drive.listen_group([solve_with_gpt4, solve_with_claude, solve_with_deepseek])
async def aggregate_solutions(event: EventInput, global_ctx):
    """Aggregator event: runs after all three solver events have fired.

    Consumes the three per-model solutions from global_ctx, asks the vote
    aggregator agent to majority-vote them, and writes the result extracted
    from <final_solution> tags to global_ctx['final_solution'].
    NOTE: this body is generated boilerplate parallel to the solver events.
    """
    # Declarative description of the context keys this event consumes.
    inputs = [{'key': 'gpt4_solution', 'description': 'The solution generated by GPT-4 model.'}, {'key': 'claude_solution', 'description': 'The solution generated by Claude model.'}, {'key': 'deepseek_solution', 'description': 'The solution generated by Deepseek model.'}]
    input_dict = dict()
    for inp in inputs:
        input_dict[inp["key"]] = global_ctx.get(inp["key"], None)
    # The conversation history is shared across events via global_ctx.
    messages = global_ctx.get('messages', [])
    task = 'Compare all solutions and determine the final answer through majority voting.'
    outputs = [{'key': 'final_solution', 'description': 'The final agreed-upon solution after majority voting.', 'condition': None, 'action': {'type': 'RESULT', 'value': None}}]
    agent = get_vote_aggregator_agent('gpt-4o-2024-08-06')
    # Render the candidate solutions into a natural-language prompt.
    input_str = []
    for key, value in input_dict.items():
        input_str.append(f"The {key.replace('_', ' ')} is {value}")
    input_str = "\n".join(input_str) + "\n"
    query = input_str + '.\nThe task is: ' + task + '.\n'
    messages.append({
        "role": "user",
        "content": query
    })
    client = MetaChain()
    response: Response = await client.run_async(agent = agent, messages = messages, context_variables = global_ctx, debug = True)
    result = response.messages[-1]["content"]
    # Persist the updated conversation so later events see this exchange.
    messages.extend(response.messages)
    global_ctx["messages"] = messages
    # Route on each declared output's action: store the tagged answer, abort
    # this event, or jump to other events.
    for output in outputs:
        ans = extract_answer(result, output["key"])
        if ans:
            if output["action"]["type"] == "RESULT":
                global_ctx[output["key"]] = ans
                return ans
            elif output["action"]["type"] == "ABORT":
                return abort_this()
            elif output["action"]["type"] == "GO_TO":
                return goto_events([output["action"]["value"]])
        # Fallback: with a single declared output, accept the untagged raw reply.
        elif len(outputs) == 1:
            global_ctx[output["key"]] = result
            return result
    raise Exception("No valid answer found")
@register_workflow(name = 'majority_voting')
async def majority_voting(system_input: str):
    """Run the math-solver majority-voting workflow on *system_input*.

    Seeds the workflow context with the math problem, fires the start event
    (which fans out to the solver events and the aggregator), and returns the
    aggregated 'final_solution' from the context, or None if it was never set.
    """
    workflow_ctx = {'math_problem': system_input}
    await default_drive.invoke_event(
        on_start,
        global_ctx=workflow_ctx,
    )
    return workflow_ctx.get('final_solution', None)
| 2,320
|
c86700e0fd59c6b80869c02761a41ded8e8ea73a9979cb1a6bacfc6807888ce7
| 42.756219
| 272
| 0.612166
| 3.790948
| false
| false
| false
| false
|
MadcowD/ell
|
examples/claude.py
| 387
| 0
|
MIT License
|
import ell # type: ignore
@ell.simple(model="claude-3-5-sonnet-20241022", max_tokens=100)
def hello_from_claude():
"""You are an AI assistant. Your task is to respond to the user's message with a friendly greeting."""
return "Say hello to the world!!!"
if __name__ == "__main__":
ell.init(verbose=True, store="./logdir", autocommit=True)
print(hello_from_claude())
| 124
|
e4c04ea149b61475c86a19b1b3cec0ffaf179b02e6cef871cd068c354a327e47
| 31.25
| 106
| 0.666667
| 3.120968
| false
| false
| false
| false
|
fudan-generative-vision/hallo
|
scripts/train_stage2.py
| 37,347
| 0
|
MIT License
|
# pylint: disable=E1101,C0415,W0718,R0801
# scripts/train_stage2.py
"""
This is the main training script for stage 2 of the project.
It imports necessary packages, defines necessary classes and functions, and trains the model using the provided configuration.
The script includes the following classes and functions:
1. Net: A PyTorch model that takes noisy latents, timesteps, reference image latents, face embeddings,
and face masks as input and returns the denoised latents.
2. get_attention_mask: A function that rearranges the mask tensors to the required format.
3. get_noise_scheduler: A function that creates and returns the noise schedulers for training and validation.
4. process_audio_emb: A function that processes the audio embeddings to concatenate with other tensors.
5. log_validation: A function that logs the validation information using the given VAE, image encoder,
network, scheduler, accelerator, width, height, and configuration.
6. train_stage2_process: A function that processes the training stage 2 using the given configuration.
7. load_config: A function that loads the configuration file from the given path.
The script also includes the necessary imports and a brief description of the purpose of the file.
"""
import argparse
import copy
import logging
import math
import os
import random
import time
import warnings
from datetime import datetime
from typing import List, Tuple
import diffusers
import mlflow
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import DistributedDataParallelKwargs
from diffusers import AutoencoderKL, DDIMScheduler
from diffusers.optimization import get_scheduler
from diffusers.utils import check_min_version
from diffusers.utils.import_utils import is_xformers_available
from einops import rearrange, repeat
from omegaconf import OmegaConf
from torch import nn
from tqdm.auto import tqdm
from hallo.animate.face_animate import FaceAnimatePipeline
from hallo.datasets.audio_processor import AudioProcessor
from hallo.datasets.image_processor import ImageProcessor
from hallo.datasets.talk_video import TalkingVideoDataset
from hallo.models.audio_proj import AudioProjModel
from hallo.models.face_locator import FaceLocator
from hallo.models.image_proj import ImageProjModel
from hallo.models.mutual_self_attention import ReferenceAttentionControl
from hallo.models.unet_2d_condition import UNet2DConditionModel
from hallo.models.unet_3d import UNet3DConditionModel
from hallo.utils.util import (compute_snr, delete_additional_ckpt,
import_filename, init_output_dir,
load_checkpoint, save_checkpoint,
seed_everything, tensor_to_video)
warnings.filterwarnings("ignore")
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
check_min_version("0.10.0.dev0")
logger = get_logger(__name__, log_level="INFO")
class Net(nn.Module):
    """
    The Net class defines a neural network model that combines a reference UNet2DConditionModel,
    a denoising UNet3DConditionModel, a face locator, and other components to animate a face in a static image.
    Args:
        reference_unet (UNet2DConditionModel): The reference UNet2DConditionModel used for face animation.
        denoising_unet (UNet3DConditionModel): The denoising UNet3DConditionModel used for face animation.
        face_locator (FaceLocator): The face locator model used for face animation.
        reference_control_writer: The reference control writer component.
        reference_control_reader: The reference control reader component.
        imageproj: The image projection model.
        audioproj: The audio projection model.
    Forward method:
        noisy_latents (torch.Tensor): The noisy latents tensor.
        timesteps (torch.Tensor): The timesteps tensor.
        ref_image_latents (torch.Tensor): The reference image latents tensor.
        face_emb (torch.Tensor): The face embeddings tensor.
        audio_emb (torch.Tensor): The audio embeddings tensor.
        mask (torch.Tensor): Hard face mask for face locator.
        full_mask (torch.Tensor): Pose Mask.
        face_mask (torch.Tensor): Face Mask
        lip_mask (torch.Tensor): Lip Mask
        uncond_img_fwd (bool): A flag indicating whether to perform reference image unconditional forward pass.
        uncond_audio_fwd (bool): A flag indicating whether to perform audio unconditional forward pass.
    Returns:
        torch.Tensor: The output tensor of the neural network model.
    """
    def __init__(
        self,
        reference_unet: UNet2DConditionModel,
        denoising_unet: UNet3DConditionModel,
        face_locator: FaceLocator,
        reference_control_writer,
        reference_control_reader,
        imageproj,
        audioproj,
    ):
        super().__init__()
        self.reference_unet = reference_unet
        self.denoising_unet = denoising_unet
        self.face_locator = face_locator
        self.reference_control_writer = reference_control_writer
        self.reference_control_reader = reference_control_reader
        self.imageproj = imageproj
        self.audioproj = audioproj
    def forward(
        self,
        noisy_latents: torch.Tensor,
        timesteps: torch.Tensor,
        ref_image_latents: torch.Tensor,
        face_emb: torch.Tensor,
        audio_emb: torch.Tensor,
        mask: torch.Tensor,
        full_mask: torch.Tensor,
        face_mask: torch.Tensor,
        lip_mask: torch.Tensor,
        uncond_img_fwd: bool = False,
        uncond_audio_fwd: bool = False,
    ):
        """
        simple docstring to prevent pylint error
        """
        # Project the raw face embedding into cross-attention token space.
        face_emb = self.imageproj(face_emb)
        # NOTE(review): device is hard-coded to "cuda" here -- confirm this is
        # safe for multi-device / CPU validation runs.
        mask = mask.to(device="cuda")
        mask_feature = self.face_locator(mask)
        audio_emb = audio_emb.to(
            device=self.audioproj.device, dtype=self.audioproj.dtype)
        audio_emb = self.audioproj(audio_emb)
        # condition forward
        if not uncond_img_fwd:
            # Run the reference UNet at timestep 0 so the control writer records
            # its attention states, then copy them into the denoising UNet via
            # the control reader.
            ref_timesteps = torch.zeros_like(timesteps)
            ref_timesteps = repeat(
                ref_timesteps,
                "b -> (repeat b)",
                repeat=ref_image_latents.size(0) // ref_timesteps.size(0),
            )
            self.reference_unet(
                ref_image_latents,
                ref_timesteps,
                encoder_hidden_states=face_emb,
                return_dict=False,
            )
            self.reference_control_reader.update(self.reference_control_writer)
        # Audio-unconditional branch: replace the audio embedding with zeros.
        if uncond_audio_fwd:
            audio_emb = torch.zeros_like(audio_emb).to(
                device=audio_emb.device, dtype=audio_emb.dtype
            )
        model_pred = self.denoising_unet(
            noisy_latents,
            timesteps,
            mask_cond_fea=mask_feature,
            encoder_hidden_states=face_emb,
            audio_embedding=audio_emb,
            full_mask=full_mask,
            face_mask=face_mask,
            lip_mask=lip_mask
        ).sample
        return model_pred
def get_attention_mask(mask: torch.Tensor, weight_dtype: torch.dtype) -> torch.Tensor:
    """
    Rearrange the mask tensors to the required format.

    Args:
        mask (torch.Tensor): The input mask tensor (shape ``(b, f, 1, h, w)``),
            or a list of such tensors.
        weight_dtype (torch.dtype): The data type for the mask tensor.

    Returns:
        torch.Tensor: The mask rearranged to ``((b f), (h w))`` and cast to
        ``weight_dtype`` -- or a list of such tensors when a list was passed in.
    """
    # `isinstance` against `typing.List` is deprecated; check the builtin
    # `list` instead, and build the result with a comprehension.
    if isinstance(mask, list):
        return [
            rearrange(m, "b f 1 h w -> (b f) (h w)").to(weight_dtype)
            for m in mask
        ]
    return rearrange(mask, "b f 1 h w -> (b f) (h w)").to(weight_dtype)
def get_noise_scheduler(cfg: argparse.Namespace) -> Tuple[DDIMScheduler, DDIMScheduler]:
    """
    Create noise scheduler for training.
    Args:
        cfg (argparse.Namespace): Configuration object.
    Returns:
        Tuple[DDIMScheduler, DDIMScheduler]: Train noise scheduler and validation noise scheduler.
    """
    sched_kwargs = OmegaConf.to_container(cfg.noise_scheduler_kwargs)
    if cfg.enable_zero_snr:
        # Zero-terminal-SNR settings apply to BOTH schedulers, since
        # sched_kwargs is mutated before either scheduler is constructed.
        sched_kwargs.update(
            rescale_betas_zero_snr=True,
            timestep_spacing="trailing",
            prediction_type="v_prediction",
        )
    # The validation scheduler keeps the configured beta schedule ...
    val_noise_scheduler = DDIMScheduler(**sched_kwargs)
    # ... while training overrides it to "scaled_linear". Order matters: this
    # mutation happens after the validation scheduler was built, so it only
    # affects the training scheduler.
    sched_kwargs.update({"beta_schedule": "scaled_linear"})
    train_noise_scheduler = DDIMScheduler(**sched_kwargs)
    return train_noise_scheduler, val_noise_scheduler
def process_audio_emb(audio_emb: torch.Tensor) -> torch.Tensor:
    """
    Build a sliding temporal window around every audio frame.

    For each frame i, stacks the embeddings at offsets -2..+2 (with indices
    clamped to the valid range), so every output row carries local temporal
    context for concatenation with other conditioning tensors.

    Parameters:
        audio_emb (torch.Tensor): Per-frame audio embeddings; dim 0 is time.

    Returns:
        torch.Tensor: Tensor of shape ``(T, 5, ...)`` with windowed embeddings.
    """
    num_frames = audio_emb.shape[0]
    windows = []
    for center in range(num_frames):
        # Clamp each neighbor index into [0, num_frames - 1] so edge frames
        # repeat their nearest valid neighbor.
        neighbor_indices = [
            min(max(center + offset, 0), num_frames - 1) for offset in range(-2, 3)
        ]
        window = torch.stack([audio_emb[idx] for idx in neighbor_indices], dim=0)
        windows.append(window)
    return torch.stack(windows, dim=0)
def log_validation(
    accelerator: Accelerator,
    vae: AutoencoderKL,
    net: Net,
    scheduler: DDIMScheduler,
    width: int,
    height: int,
    clip_length: int = 24,
    generator: torch.Generator = None,
    cfg: dict = None,
    save_dir: str = None,
    global_step: int = 0,
    times: int = None,
    face_analysis_model_path: str = "",
) -> None:
    """
    Log validation video during the training process.
    Args:
        accelerator (Accelerator): The accelerator for distributed training.
        vae (AutoencoderKL): The autoencoder model.
        net (Net): The main neural network model.
        scheduler (DDIMScheduler): The scheduler for noise.
        width (int): The width of the input images.
        height (int): The height of the input images.
        clip_length (int): The length of the video clips. Defaults to 24.
        generator (torch.Generator): The random number generator. Defaults to None.
        cfg (dict): The configuration dictionary. Defaults to None.
        save_dir (str): The directory to save validation results. Defaults to None.
        global_step (int): The current global step in training. Defaults to 0.
        times (int): The number of inference times. Defaults to None.
        face_analysis_model_path (str): The path to the face analysis model. Defaults to "".
    Returns:
        torch.Tensor: The tensor result of the validation.
    """
    # Unwrap the DDP/accelerate wrapper to reach the underlying sub-modules.
    ori_net = accelerator.unwrap_model(net)
    reference_unet = ori_net.reference_unet
    denoising_unet = ori_net.denoising_unet
    face_locator = ori_net.face_locator
    imageproj = ori_net.imageproj
    audioproj = ori_net.audioproj
    # NOTE(review): the caller-supplied `generator` and `times` parameters are
    # overwritten below and never used -- confirm whether they can be dropped.
    generator = torch.manual_seed(42)
    # Deep-copy the denoising UNet so running the inference pipeline cannot
    # perturb the training weights.
    tmp_denoising_unet = copy.deepcopy(denoising_unet)
    pipeline = FaceAnimatePipeline(
        vae=vae,
        reference_unet=reference_unet,
        denoising_unet=tmp_denoising_unet,
        face_locator=face_locator,
        image_proj=imageproj,
        scheduler=scheduler,
    )
    pipeline = pipeline.to("cuda")
    image_processor = ImageProcessor((width, height), face_analysis_model_path)
    audio_processor = AudioProcessor(
        cfg.data.sample_rate,
        cfg.data.fps,
        cfg.wav2vec_config.model_path,
        cfg.wav2vec_config.features == "last",
        os.path.dirname(cfg.audio_separator.model_path),
        os.path.basename(cfg.audio_separator.model_path),
        os.path.join(save_dir, '.cache', "audio_preprocess")
    )
    # One validation video per (reference image, audio clip) pair; the two
    # config lists are matched by index.
    for idx, ref_img_path in enumerate(cfg.ref_img_path):
        audio_path = cfg.audio_path[idx]
        source_image_pixels, \
        source_image_face_region, \
        source_image_face_emb, \
        source_image_full_mask, \
        source_image_face_mask, \
        source_image_lip_mask = image_processor.preprocess(
            ref_img_path, os.path.join(save_dir, '.cache'), cfg.face_expand_ratio)
        audio_emb, audio_length = audio_processor.preprocess(
            audio_path, clip_length)
        audio_emb = process_audio_emb(audio_emb)
        # Add batch dims and tile the masks across the clip's frames.
        source_image_pixels = source_image_pixels.unsqueeze(0)
        source_image_face_region = source_image_face_region.unsqueeze(0)
        source_image_face_emb = source_image_face_emb.reshape(1, -1)
        source_image_face_emb = torch.tensor(source_image_face_emb)
        source_image_full_mask = [
            (mask.repeat(clip_length, 1))
            for mask in source_image_full_mask
        ]
        source_image_face_mask = [
            (mask.repeat(clip_length, 1))
            for mask in source_image_face_mask
        ]
        source_image_lip_mask = [
            (mask.repeat(clip_length, 1))
            for mask in source_image_lip_mask
        ]
        times = audio_emb.shape[0] // clip_length
        tensor_result = []
        generator = torch.manual_seed(42)
        # Generate the video clip-by-clip; each clip after the first is
        # conditioned on the tail frames of the previous clip for continuity.
        for t in range(times):
            print(f"[{t+1}/{times}]")
            if len(tensor_result) == 0:
                # The first iteration
                motion_zeros = source_image_pixels.repeat(
                    cfg.data.n_motion_frames, 1, 1, 1)
                motion_zeros = motion_zeros.to(
                    dtype=source_image_pixels.dtype, device=source_image_pixels.device)
                pixel_values_ref_img = torch.cat(
                    [source_image_pixels, motion_zeros], dim=0)  # concat the ref image and the first motion frames
            else:
                motion_frames = tensor_result[-1][0]
                motion_frames = motion_frames.permute(1, 0, 2, 3)
                # Keep only the last n_motion_frames of the previous clip.
                motion_frames = motion_frames[0 - cfg.data.n_motion_frames:]
                # Rescale frames (presumably [0, 1] -> [-1, 1]) -- confirm the
                # pipeline's output range convention.
                motion_frames = motion_frames * 2.0 - 1.0
                motion_frames = motion_frames.to(
                    dtype=source_image_pixels.dtype, device=source_image_pixels.device)
                pixel_values_ref_img = torch.cat(
                    [source_image_pixels, motion_frames], dim=0)  # concat the ref image and the motion frames
            pixel_values_ref_img = pixel_values_ref_img.unsqueeze(0)
            # Slice this clip's audio window and project it into conditioning space.
            audio_tensor = audio_emb[
                t * clip_length: min((t + 1) * clip_length, audio_emb.shape[0])
            ]
            audio_tensor = audio_tensor.unsqueeze(0)
            audio_tensor = audio_tensor.to(
                device=audioproj.device, dtype=audioproj.dtype)
            audio_tensor = audioproj(audio_tensor)
            pipeline_output = pipeline(
                ref_image=pixel_values_ref_img,
                audio_tensor=audio_tensor,
                face_emb=source_image_face_emb,
                face_mask=source_image_face_region,
                pixel_values_full_mask=source_image_full_mask,
                pixel_values_face_mask=source_image_face_mask,
                pixel_values_lip_mask=source_image_lip_mask,
                width=cfg.data.train_width,
                height=cfg.data.train_height,
                video_length=clip_length,
                num_inference_steps=cfg.inference_steps,
                guidance_scale=cfg.cfg_scale,
                generator=generator,
            )
            tensor_result.append(pipeline_output.videos)
        # Stitch the clips along the frame axis and trim to the audio length.
        tensor_result = torch.cat(tensor_result, dim=2)
        tensor_result = tensor_result.squeeze(0)
        tensor_result = tensor_result[:, :audio_length]
        audio_name = os.path.basename(audio_path).split('.')[0]
        ref_name = os.path.basename(ref_img_path).split('.')[0]
        output_file = os.path.join(save_dir,f"{global_step}_{ref_name}_{audio_name}.mp4")
        # save the result after all iteration
        tensor_to_video(tensor_result, output_file, audio_path)
    # clean up
    del tmp_denoising_unet
    del pipeline
    del image_processor
    del audio_processor
    torch.cuda.empty_cache()
    return tensor_result
def train_stage2_process(cfg: argparse.Namespace) -> None:
"""
Trains the model using the given configuration (cfg).
Args:
cfg (dict): The configuration dictionary containing the parameters for training.
Notes:
- This function trains the model using the given configuration.
- It initializes the necessary components for training, such as the pipeline, optimizer, and scheduler.
- The training progress is logged and tracked using the accelerator.
- The trained model is saved after the training is completed.
"""
kwargs = DistributedDataParallelKwargs(find_unused_parameters=False)
accelerator = Accelerator(
gradient_accumulation_steps=cfg.solver.gradient_accumulation_steps,
mixed_precision=cfg.solver.mixed_precision,
log_with="mlflow",
project_dir="./mlruns",
kwargs_handlers=[kwargs],
)
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state, main_process_only=False)
if accelerator.is_local_main_process:
transformers.utils.logging.set_verbosity_warning()
diffusers.utils.logging.set_verbosity_info()
else:
transformers.utils.logging.set_verbosity_error()
diffusers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if cfg.seed is not None:
seed_everything(cfg.seed)
# create output dir for training
exp_name = cfg.exp_name
save_dir = f"{cfg.output_dir}/{exp_name}"
checkpoint_dir = os.path.join(save_dir, "checkpoints")
module_dir = os.path.join(save_dir, "modules")
validation_dir = os.path.join(save_dir, "validation")
if accelerator.is_main_process:
init_output_dir([save_dir, checkpoint_dir, module_dir, validation_dir])
accelerator.wait_for_everyone()
if cfg.weight_dtype == "fp16":
weight_dtype = torch.float16
elif cfg.weight_dtype == "bf16":
weight_dtype = torch.bfloat16
elif cfg.weight_dtype == "fp32":
weight_dtype = torch.float32
else:
raise ValueError(
f"Do not support weight dtype: {cfg.weight_dtype} during training"
)
# Create Models
vae = AutoencoderKL.from_pretrained(cfg.vae_model_path).to(
"cuda", dtype=weight_dtype
)
reference_unet = UNet2DConditionModel.from_pretrained(
cfg.base_model_path,
subfolder="unet",
).to(device="cuda", dtype=weight_dtype)
denoising_unet = UNet3DConditionModel.from_pretrained_2d(
cfg.base_model_path,
cfg.mm_path,
subfolder="unet",
unet_additional_kwargs=OmegaConf.to_container(
cfg.unet_additional_kwargs),
use_landmark=False
).to(device="cuda", dtype=weight_dtype)
imageproj = ImageProjModel(
cross_attention_dim=denoising_unet.config.cross_attention_dim,
clip_embeddings_dim=512,
clip_extra_context_tokens=4,
).to(device="cuda", dtype=weight_dtype)
face_locator = FaceLocator(
conditioning_embedding_channels=320,
).to(device="cuda", dtype=weight_dtype)
audioproj = AudioProjModel(
seq_len=5,
blocks=12,
channels=768,
intermediate_dim=512,
output_dim=768,
context_tokens=32,
).to(device="cuda", dtype=weight_dtype)
# load module weight from stage 1
stage1_ckpt_dir = cfg.stage1_ckpt_dir
denoising_unet.load_state_dict(
torch.load(
os.path.join(stage1_ckpt_dir, "denoising_unet.pth"),
map_location="cpu",
),
strict=False,
)
reference_unet.load_state_dict(
torch.load(
os.path.join(stage1_ckpt_dir, "reference_unet.pth"),
map_location="cpu",
),
strict=False,
)
face_locator.load_state_dict(
torch.load(
os.path.join(stage1_ckpt_dir, "face_locator.pth"),
map_location="cpu",
),
strict=False,
)
imageproj.load_state_dict(
torch.load(
os.path.join(stage1_ckpt_dir, "imageproj.pth"),
map_location="cpu",
),
strict=False,
)
# Freeze
vae.requires_grad_(False)
imageproj.requires_grad_(False)
reference_unet.requires_grad_(False)
denoising_unet.requires_grad_(False)
face_locator.requires_grad_(False)
audioproj.requires_grad_(True)
# Set motion module learnable
trainable_modules = cfg.trainable_para
for name, module in denoising_unet.named_modules():
if any(trainable_mod in name for trainable_mod in trainable_modules):
for params in module.parameters():
params.requires_grad_(True)
reference_control_writer = ReferenceAttentionControl(
reference_unet,
do_classifier_free_guidance=False,
mode="write",
fusion_blocks="full",
)
reference_control_reader = ReferenceAttentionControl(
denoising_unet,
do_classifier_free_guidance=False,
mode="read",
fusion_blocks="full",
)
net = Net(
reference_unet,
denoising_unet,
face_locator,
reference_control_writer,
reference_control_reader,
imageproj,
audioproj,
).to(dtype=weight_dtype)
# get noise scheduler
train_noise_scheduler, val_noise_scheduler = get_noise_scheduler(cfg)
if cfg.solver.enable_xformers_memory_efficient_attention:
if is_xformers_available():
reference_unet.enable_xformers_memory_efficient_attention()
denoising_unet.enable_xformers_memory_efficient_attention()
else:
raise ValueError(
"xformers is not available. Make sure it is installed correctly"
)
if cfg.solver.gradient_checkpointing:
reference_unet.enable_gradient_checkpointing()
denoising_unet.enable_gradient_checkpointing()
if cfg.solver.scale_lr:
learning_rate = (
cfg.solver.learning_rate
* cfg.solver.gradient_accumulation_steps
* cfg.data.train_bs
* accelerator.num_processes
)
else:
learning_rate = cfg.solver.learning_rate
# Initialize the optimizer
if cfg.solver.use_8bit_adam:
try:
import bitsandbytes as bnb
except ImportError as exc:
raise ImportError(
"Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`"
) from exc
optimizer_cls = bnb.optim.AdamW8bit
else:
optimizer_cls = torch.optim.AdamW
trainable_params = list(
filter(lambda p: p.requires_grad, net.parameters()))
logger.info(f"Total trainable params {len(trainable_params)}")
optimizer = optimizer_cls(
trainable_params,
lr=learning_rate,
betas=(cfg.solver.adam_beta1, cfg.solver.adam_beta2),
weight_decay=cfg.solver.adam_weight_decay,
eps=cfg.solver.adam_epsilon,
)
# Scheduler
lr_scheduler = get_scheduler(
cfg.solver.lr_scheduler,
optimizer=optimizer,
num_warmup_steps=cfg.solver.lr_warmup_steps
* cfg.solver.gradient_accumulation_steps,
num_training_steps=cfg.solver.max_train_steps
* cfg.solver.gradient_accumulation_steps,
)
# get data loader
train_dataset = TalkingVideoDataset(
img_size=(cfg.data.train_width, cfg.data.train_height),
sample_rate=cfg.data.sample_rate,
n_sample_frames=cfg.data.n_sample_frames,
n_motion_frames=cfg.data.n_motion_frames,
audio_margin=cfg.data.audio_margin,
data_meta_paths=cfg.data.train_meta_paths,
wav2vec_cfg=cfg.wav2vec_config,
)
train_dataloader = torch.utils.data.DataLoader(
train_dataset, batch_size=cfg.data.train_bs, shuffle=True, num_workers=16
)
# Prepare everything with our `accelerator`.
(
net,
optimizer,
train_dataloader,
lr_scheduler,
) = accelerator.prepare(
net,
optimizer,
train_dataloader,
lr_scheduler,
)
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
num_update_steps_per_epoch = math.ceil(
len(train_dataloader) / cfg.solver.gradient_accumulation_steps
)
# Afterwards we recalculate our number of training epochs
num_train_epochs = math.ceil(
cfg.solver.max_train_steps / num_update_steps_per_epoch
)
# We need to initialize the trackers we use, and also store our configuration.
# The trackers initializes automatically on the main process.
if accelerator.is_main_process:
run_time = datetime.now().strftime("%Y%m%d-%H%M")
accelerator.init_trackers(
exp_name,
init_kwargs={"mlflow": {"run_name": run_time}},
)
# dump config file
mlflow.log_dict(
OmegaConf.to_container(
cfg), "config.yaml"
)
logger.info(f"save config to {save_dir}")
OmegaConf.save(
cfg, os.path.join(save_dir, "config.yaml")
)
# Train!
total_batch_size = (
cfg.data.train_bs
* accelerator.num_processes
* cfg.solver.gradient_accumulation_steps
)
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {cfg.data.train_bs}")
logger.info(
f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}"
)
logger.info(
f" Gradient Accumulation steps = {cfg.solver.gradient_accumulation_steps}"
)
logger.info(f" Total optimization steps = {cfg.solver.max_train_steps}")
global_step = 0
first_epoch = 0
# # Potentially load in the weights and states from a previous save
if cfg.resume_from_checkpoint:
logger.info(f"Loading checkpoint from {checkpoint_dir}")
global_step = load_checkpoint(cfg, checkpoint_dir, accelerator)
first_epoch = global_step // num_update_steps_per_epoch
# Only show the progress bar once on each machine.
progress_bar = tqdm(
range(global_step, cfg.solver.max_train_steps),
disable=not accelerator.is_local_main_process,
)
progress_bar.set_description("Steps")
for _ in range(first_epoch, num_train_epochs):
train_loss = 0.0
t_data_start = time.time()
for _, batch in enumerate(train_dataloader):
t_data = time.time() - t_data_start
with accelerator.accumulate(net):
# Convert videos to latent space
pixel_values_vid = batch["pixel_values_vid"].to(weight_dtype)
pixel_values_face_mask = batch["pixel_values_face_mask"]
pixel_values_face_mask = get_attention_mask(
pixel_values_face_mask, weight_dtype
)
pixel_values_lip_mask = batch["pixel_values_lip_mask"]
pixel_values_lip_mask = get_attention_mask(
pixel_values_lip_mask, weight_dtype
)
pixel_values_full_mask = batch["pixel_values_full_mask"]
pixel_values_full_mask = get_attention_mask(
pixel_values_full_mask, weight_dtype
)
with torch.no_grad():
video_length = pixel_values_vid.shape[1]
pixel_values_vid = rearrange(
pixel_values_vid, "b f c h w -> (b f) c h w"
)
latents = vae.encode(pixel_values_vid).latent_dist.sample()
latents = rearrange(
latents, "(b f) c h w -> b c f h w", f=video_length
)
latents = latents * 0.18215
noise = torch.randn_like(latents)
if cfg.noise_offset > 0:
noise += cfg.noise_offset * torch.randn(
(latents.shape[0], latents.shape[1], 1, 1, 1),
device=latents.device,
)
bsz = latents.shape[0]
# Sample a random timestep for each video
timesteps = torch.randint(
0,
train_noise_scheduler.num_train_timesteps,
(bsz,),
device=latents.device,
)
timesteps = timesteps.long()
# mask for face locator
pixel_values_mask = (
batch["pixel_values_mask"].unsqueeze(
1).to(dtype=weight_dtype)
)
pixel_values_mask = repeat(
pixel_values_mask,
"b f c h w -> b (repeat f) c h w",
repeat=video_length,
)
pixel_values_mask = pixel_values_mask.transpose(
1, 2)
uncond_img_fwd = random.random() < cfg.uncond_img_ratio
uncond_audio_fwd = random.random() < cfg.uncond_audio_ratio
start_frame = random.random() < cfg.start_ratio
pixel_values_ref_img = batch["pixel_values_ref_img"].to(
dtype=weight_dtype
)
# initialize the motion frames as zero maps
if start_frame:
pixel_values_ref_img[:, 1:] = 0.0
ref_img_and_motion = rearrange(
pixel_values_ref_img, "b f c h w -> (b f) c h w"
)
with torch.no_grad():
ref_image_latents = vae.encode(
ref_img_and_motion
).latent_dist.sample()
ref_image_latents = ref_image_latents * 0.18215
image_prompt_embeds = batch["face_emb"].to(
dtype=imageproj.dtype, device=imageproj.device
)
# add noise
noisy_latents = train_noise_scheduler.add_noise(
latents, noise, timesteps
)
# Get the target for loss depending on the prediction type
if train_noise_scheduler.prediction_type == "epsilon":
target = noise
elif train_noise_scheduler.prediction_type == "v_prediction":
target = train_noise_scheduler.get_velocity(
latents, noise, timesteps
)
else:
raise ValueError(
f"Unknown prediction type {train_noise_scheduler.prediction_type}"
)
# ---- Forward!!! -----
model_pred = net(
noisy_latents=noisy_latents,
timesteps=timesteps,
ref_image_latents=ref_image_latents,
face_emb=image_prompt_embeds,
mask=pixel_values_mask,
full_mask=pixel_values_full_mask,
face_mask=pixel_values_face_mask,
lip_mask=pixel_values_lip_mask,
audio_emb=batch["audio_tensor"].to(
dtype=weight_dtype),
uncond_img_fwd=uncond_img_fwd,
uncond_audio_fwd=uncond_audio_fwd,
)
if cfg.snr_gamma == 0:
loss = F.mse_loss(
model_pred.float(),
target.float(),
reduction="mean",
)
else:
snr = compute_snr(train_noise_scheduler, timesteps)
if train_noise_scheduler.config.prediction_type == "v_prediction":
# Velocity objective requires that we add one to SNR values before we divide by them.
snr = snr + 1
mse_loss_weights = (
torch.stack(
[snr, cfg.snr_gamma * torch.ones_like(timesteps)], dim=1
).min(dim=1)[0]
/ snr
)
loss = F.mse_loss(
model_pred.float(),
target.float(),
reduction="mean",
)
loss = (
loss.mean(dim=list(range(1, len(loss.shape))))
* mse_loss_weights
).mean()
# Gather the losses across all processes for logging (if we use distributed training).
avg_loss = accelerator.gather(
loss.repeat(cfg.data.train_bs)).mean()
train_loss += avg_loss.item() / cfg.solver.gradient_accumulation_steps
# Backpropagate
accelerator.backward(loss)
if accelerator.sync_gradients:
accelerator.clip_grad_norm_(
trainable_params,
cfg.solver.max_grad_norm,
)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
if accelerator.sync_gradients:
reference_control_reader.clear()
reference_control_writer.clear()
progress_bar.update(1)
global_step += 1
accelerator.log({"train_loss": train_loss}, step=global_step)
train_loss = 0.0
if global_step % cfg.val.validation_steps == 0 or global_step==1:
if accelerator.is_main_process:
generator = torch.Generator(device=accelerator.device)
generator.manual_seed(cfg.seed)
log_validation(
accelerator=accelerator,
vae=vae,
net=net,
scheduler=val_noise_scheduler,
width=cfg.data.train_width,
height=cfg.data.train_height,
clip_length=cfg.data.n_sample_frames,
cfg=cfg,
save_dir=validation_dir,
global_step=global_step,
times=cfg.single_inference_times if cfg.single_inference_times is not None else None,
face_analysis_model_path=cfg.face_analysis_model_path
)
logs = {
"step_loss": loss.detach().item(),
"lr": lr_scheduler.get_last_lr()[0],
"td": f"{t_data:.2f}s",
}
t_data_start = time.time()
progress_bar.set_postfix(**logs)
if (
global_step % cfg.checkpointing_steps == 0
or global_step == cfg.solver.max_train_steps
):
# save model
save_path = os.path.join(
checkpoint_dir, f"checkpoint-{global_step}")
if accelerator.is_main_process:
delete_additional_ckpt(checkpoint_dir, 30)
accelerator.wait_for_everyone()
accelerator.save_state(save_path)
# save model weight
unwrap_net = accelerator.unwrap_model(net)
if accelerator.is_main_process:
save_checkpoint(
unwrap_net,
module_dir,
"net",
global_step,
total_limit=30,
)
if global_step >= cfg.solver.max_train_steps:
break
# Create the pipeline using the trained modules and save it.
accelerator.wait_for_everyone()
accelerator.end_training()
def load_config(config_path: str) -> dict:
    """Load a training configuration from disk.

    Supports OmegaConf YAML files (``.yaml``) and Python config modules
    (``.py`` files exposing a module-level ``cfg`` attribute).

    Args:
        config_path (str): Path to the configuration file.

    Returns:
        dict: The configuration object.

    Raises:
        ValueError: If the path has an unsupported extension.
    """
    if config_path.endswith(".yaml"):
        return OmegaConf.load(config_path)
    elif config_path.endswith(".py"):
        return import_filename(config_path).cfg
    else:
        raise ValueError("Unsupported format for config file")
# Script entry point: parse the config path and launch stage-2 training.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--config", type=str, default="./configs/train/stage2.yaml"
    )
    args = parser.parse_args()
    try:
        config = load_config(args.config)
        train_stage2_process(config)
    except Exception as e:
        # Top-level boundary: log instead of dumping a raw traceback.
        # NOTE(review): the process still exits with status 0 on failure —
        # confirm whether CI/schedulers need a non-zero exit code here.
        logging.error("Failed to execute the training process: %s", e)
| 9,334
|
4d47c87601970edb30a6004ea1f343f7e1b3fffcbef50adb7d2729a39fc672af
| 36.686176
| 126
| 0.590891
| 4.001178
| false
| false
| false
| false
|
microsoft/TRELLIS
|
dataset_toolkits/datasets/HSSD.py
| 3,654
| 0
|
MIT License
|
import os
import re
import argparse
import tarfile
from concurrent.futures import ThreadPoolExecutor
from tqdm import tqdm
import pandas as pd
import huggingface_hub
from utils import get_file_hash
def add_args(parser: argparse.ArgumentParser):
    """Register dataset-specific CLI arguments (the HSSD toolkit needs none)."""
    pass
def get_metadata(**kwargs):
    """Fetch the TRELLIS-500K metadata table for HSSD from the Hugging Face Hub.

    Returns:
        pd.DataFrame: One row per object, read from the remote CSV.
    """
    return pd.read_csv("hf://datasets/JeffreyXiang/TRELLIS-500K/HSSD.csv")
def download(metadata, output_dir, **kwargs):
    """Download the HSSD model files listed in *metadata* from the Hugging Face Hub.

    Args:
        metadata (pd.DataFrame): Rows with ``file_identifier`` and expected ``sha256``.
        output_dir (str): Destination directory; files land under ``<output_dir>/raw``.

    Returns:
        pd.DataFrame: Columns ``sha256`` and ``local_path`` for every file whose
        downloaded hash matched the expected one.
    """
    os.makedirs(os.path.join(output_dir, 'raw'), exist_ok=True)

    # check login
    try:
        huggingface_hub.whoami()
    except Exception:  # was a bare except: don't swallow KeyboardInterrupt/SystemExit
        print("\033[93m")
        print("Haven't logged in to the Hugging Face Hub.")
        print("Visit https://huggingface.co/settings/tokens to get a token.")
        print("\033[0m")
        huggingface_hub.login()

    # Probe a known file to check access to the gated HSSD dataset.
    # Failures only warn; the per-file downloads below will fail individually.
    try:
        huggingface_hub.hf_hub_download(repo_id="hssd/hssd-models", filename="README.md", repo_type="dataset")
    except Exception:  # was a bare except
        print("\033[93m")
        print("Error downloading HSSD dataset.")
        print("Check if you have access to the HSSD dataset.")
        print("Visit https://huggingface.co/datasets/hssd/hssd-models for more information")
        print("\033[0m")

    downloaded = {}
    metadata = metadata.set_index("file_identifier")
    with ThreadPoolExecutor(max_workers=os.cpu_count()) as executor, \
        tqdm(total=len(metadata), desc="Downloading") as pbar:
        def worker(instance: str) -> str:
            """Download one file and return its sha256, or None on failure."""
            try:
                huggingface_hub.hf_hub_download(repo_id="hssd/hssd-models", filename=instance, repo_type="dataset", local_dir=os.path.join(output_dir, 'raw'))
                sha256 = get_file_hash(os.path.join(output_dir, 'raw', instance))
                pbar.update()
                return sha256
            except Exception as e:
                pbar.update()
                print(f"Error extracting for {instance}: {e}")
                return None

        sha256s = executor.map(worker, metadata.index)
        executor.shutdown(wait=True)

    # Keep only files whose computed hash matches the expected metadata hash.
    for k, sha256 in zip(metadata.index, sha256s):
        if sha256 is not None:
            if sha256 == metadata.loc[k, "sha256"]:
                downloaded[sha256] = os.path.join('raw', k)
            else:
                print(f"Error downloading {k}: sha256s do not match")

    return pd.DataFrame(downloaded.items(), columns=['sha256', 'local_path'])
def foreach_instance(metadata, output_dir, func, max_workers=None, desc='Processing objects') -> pd.DataFrame:
    """Apply ``func(file_path, sha256)`` to every object in *metadata* in parallel.

    Args:
        metadata (pd.DataFrame): Rows with ``local_path`` and ``sha256`` columns.
        output_dir (str): Root directory that ``local_path`` entries are relative to.
        func (callable): Invoked per object; non-None return values are collected.
        max_workers (int, optional): Thread count; defaults to ``os.cpu_count()``.
        desc (str): Progress-bar label.

    Returns:
        pd.DataFrame: One record per non-None result from ``func``.
    """
    # load metadata
    metadata = metadata.to_dict('records')

    # processing objects
    records = []
    max_workers = max_workers or os.cpu_count()
    try:
        with ThreadPoolExecutor(max_workers=max_workers) as executor, \
            tqdm(total=len(metadata), desc=desc) as pbar:
            def worker(metadatum):
                # Read sha256 before the try so the except clause can reference
                # it even if the lookup itself is what failed (the original
                # could hit NameError on sha256 inside the handler).
                sha256 = metadatum.get('sha256')
                try:
                    local_path = metadatum['local_path']
                    file = os.path.join(output_dir, local_path)
                    record = func(file, sha256)
                    if record is not None:
                        # list.append is atomic under the GIL, safe from threads
                        records.append(record)
                    pbar.update()
                except Exception as e:
                    print(f"Error processing object {sha256}: {e}")
                    pbar.update()

            executor.map(worker, metadata)
            executor.shutdown(wait=True)
    except Exception as e:  # was a bare except that also hid the error details
        print(f"Error happened during processing: {e}")

    return pd.DataFrame.from_records(records)
| 941
|
2941f745453ea7f0e0e270ce24debab54a8e2721fc0a4b92b0a5a14fbb63bf49
| 34.475728
| 158
| 0.594964
| 3.883103
| false
| false
| false
| false
|
crestalnetwork/intentkit
|
models/chat.py
| 13,110
| 0
|
MIT License
|
from datetime import datetime, timezone
from enum import Enum
from typing import Annotated, List, NotRequired, Optional, TypedDict
from epyxid import XID
from pydantic import BaseModel, ConfigDict, Field
from sqlalchemy import (
Column,
DateTime,
Index,
Integer,
String,
desc,
func,
select,
update,
)
from sqlalchemy.dialects.postgresql import JSONB
from models.base import Base
from models.db import get_session
class ChatMessageAttachmentType(str, Enum):
    """Type of chat message attachment.

    Inherits from ``str`` so members serialize as their plain string values.
    """

    LINK = "link"    # a plain URL reference
    IMAGE = "image"  # an image resource
    FILE = "file"    # a generic file resource
class AuthorType(str, Enum):
    """Type of message author.

    Inherits from ``str`` so members serialize as their plain string values.
    """

    AGENT = "agent"        # the agent itself
    TRIGGER = "trigger"    # an automated trigger
    SKILL = "skill"        # a skill invocation
    TELEGRAM = "telegram"  # user via Telegram
    TWITTER = "twitter"    # user via Twitter
    WEB = "web"            # user via the web UI
    SYSTEM = "system"      # system-generated
class ChatMessageAttachment(TypedDict):
    """Chat message attachment model.

    An attachment can be a link, image, or file that is associated with a chat message.

    Note: as a TypedDict this carries no runtime validation of its own; the
    ``Field`` metadata is used for schema generation.
    """

    type: Annotated[
        ChatMessageAttachmentType,
        Field(
            ...,
            description="Type of the attachment (link, image, or file)",
            examples=["link"],
        ),
    ]
    url: Annotated[
        str,
        Field(
            ...,
            description="URL of the attachment",
            examples=["https://example.com/image.jpg"],
        ),
    ]
class ChatMessageSkillCall(TypedDict):
    """Record of one skill invocation attached to a chat message.

    ``response`` and ``error_message`` are NotRequired, so consumers must use
    ``.get()`` rather than indexing when reading them.
    """

    name: str        # skill name
    parameters: dict  # arguments the skill was called with
    success: bool    # whether the call succeeded
    response: NotRequired[
        str
    ]  # Optional response from the skill call, trimmed to 100 characters
    error_message: NotRequired[str]  # Optional error message from the skill call
class ChatMessageRequest(BaseModel):
    """Request model for chat messages.

    This model represents the request body for creating a new chat message.
    It contains the necessary fields to identify the chat context, user,
    and message content, along with optional attachments.
    """

    chat_id: Annotated[
        str,
        Field(
            ...,
            description="Unique identifier for the chat thread",
            examples=["chat-123"],
            min_length=1,
        ),
    ]
    user_id: Annotated[
        str,
        Field(
            ...,
            description="Unique identifier of the user sending the message",
            examples=["user-456"],
            min_length=1,
        ),
    ]
    message: Annotated[
        str,
        Field(
            ...,
            description="Content of the message",
            examples=["Hello, how can you help me today?"],
            min_length=1,
            max_length=65535,
        ),
    ]
    attachments: Annotated[
        Optional[List[ChatMessageAttachment]],
        Field(
            None,
            description="Optional list of attachments (links, images, or files)",
            examples=[[{"type": "link", "url": "https://example.com"}]],
        ),
    ]

    # use_enum_values makes enum fields serialize as plain strings;
    # the example below feeds generated API documentation.
    model_config = ConfigDict(
        use_enum_values=True,
        json_schema_extra={
            "example": {
                "chat_id": "chat-123",
                "user_id": "user-456",
                "message": "Hello, how can you help me today?",
                "attachments": [
                    {
                        "type": "link",
                        "url": "https://example.com",
                    }
                ],
            }
        },
    )
class ChatMessageTable(Base):
    """Chat message database table model.

    One row per message; messages are looked up by ``chat_id`` (indexed).
    """

    __tablename__ = "chat_messages"
    __table_args__ = (Index("ix_chat_messages_chat_id", "chat_id"),)

    # Application-assigned id (an XID string; see ChatMessageCreate.id).
    id = Column(
        String,
        primary_key=True,
    )
    agent_id = Column(
        String,
        nullable=False,
    )
    chat_id = Column(
        String,
        nullable=False,
    )
    # Nullable: not every message is tied to a user (e.g. system messages).
    user_id = Column(
        String,
        nullable=True,
    )
    author_id = Column(
        String,
        nullable=False,
    )
    # Stored as a plain string; values come from the AuthorType enum.
    author_type = Column(
        String,
        nullable=False,
    )
    thread_type = Column(
        String,
        nullable=True,
    )
    # Id of the message this one replies to, if any.
    reply_to = Column(
        String,
        nullable=True,
    )
    message = Column(
        String,
        nullable=False,
    )
    # JSONB lists; see ChatMessageAttachment / ChatMessageSkillCall for shape.
    attachments = Column(
        JSONB,
        nullable=True,
    )
    skill_calls = Column(
        JSONB,
        nullable=True,
    )
    input_tokens = Column(
        Integer,
        default=0,
    )
    output_tokens = Column(
        Integer,
        default=0,
    )
    # NOTE(review): the Pydantic models declare time_cost / cold_start_cost as
    # float (seconds), but these columns are Integer — fractional seconds are
    # truncated on write. Confirm whether Float columns were intended.
    time_cost = Column(
        Integer,
        default=0,
    )
    cold_start_cost = Column(
        Integer,
        default=0,
    )
    # Set by the database server on insert.
    created_at = Column(
        DateTime(timezone=True),
        nullable=False,
        server_default=func.now(),
    )
class ChatMessageCreate(BaseModel):
    """Base model for creating chat messages with fields needed for creation.

    Server-generated fields (``created_at``) live on the ChatMessage subclass.
    """

    model_config = ConfigDict(
        use_enum_values=True,
        from_attributes=True,
    )

    id: Annotated[
        str,
        Field(
            default_factory=lambda: str(XID()),  # sortable unique id per message
            description="Unique identifier for the chat message",
        ),
    ]
    agent_id: Annotated[
        str, Field(description="ID of the agent this message belongs to")
    ]
    chat_id: Annotated[str, Field(description="ID of the chat this message belongs to")]
    user_id: Annotated[
        Optional[str],
        Field(description="ID of the user this message belongs to or reply to"),
    ]
    author_id: Annotated[str, Field(description="ID of the message author")]
    author_type: Annotated[AuthorType, Field(description="Type of the message author")]
    thread_type: Annotated[
        Optional[AuthorType],
        Field(None, description="Author Type of the message thread start"),
    ]
    reply_to: Annotated[
        Optional[str],
        Field(None, description="ID of the message this message is a reply to"),
    ]
    message: Annotated[str, Field(description="Content of the message")]
    attachments: Annotated[
        Optional[List[ChatMessageAttachment]],
        Field(None, description="List of attachments in the message"),
    ]
    skill_calls: Annotated[
        Optional[List[ChatMessageSkillCall]],
        Field(None, description="Skill call details"),
    ]
    input_tokens: Annotated[
        int, Field(0, description="Number of tokens in the input message")
    ]
    output_tokens: Annotated[
        int, Field(0, description="Number of tokens in the output message")
    ]
    # NOTE(review): declared float here but the table columns are Integer —
    # fractional seconds are lost at the DB layer. Confirm intended precision.
    time_cost: Annotated[
        float, Field(0.0, description="Time cost for the message in seconds")
    ]
    cold_start_cost: Annotated[
        float,
        Field(0.0, description="Cost for the cold start of the message in seconds"),
    ]

    async def save(self) -> "ChatMessage":
        """Save the chat message to the database.

        Returns:
            ChatMessage: The saved chat message with all fields populated
            (including the server-generated ``created_at``).
        """
        message_record = ChatMessageTable(**self.model_dump())
        async with get_session() as db:
            db.add(message_record)
            await db.commit()
            # Refresh to pick up server defaults before validating.
            await db.refresh(message_record)
            # Create and return a full ChatMessage instance
            return ChatMessage.model_validate(message_record)
class ChatMessage(ChatMessageCreate):
    """Chat message model with all fields including server-generated ones."""

    model_config = ConfigDict(
        use_enum_values=True,
        json_encoders={datetime: lambda v: v.isoformat(timespec="milliseconds")},
        from_attributes=True,
    )

    created_at: Annotated[
        datetime, Field(description="Timestamp when this message was created")
    ]

    def __str__(self):
        """Render skill-call summaries (if any) followed by the message text."""
        resp = ""
        if self.skill_calls:
            for call in self.skill_calls:
                # 'response' and 'error_message' are NotRequired keys in
                # ChatMessageSkillCall; use .get() so a missing key renders as
                # an empty string instead of raising KeyError (the original
                # indexed them directly).
                outcome = (
                    call.get("response", "")
                    if call["success"]
                    else call.get("error_message", "")
                )
                resp += f"{call['name']} {call['parameters']}: {outcome}\n"
            resp += "\n"
        resp += self.message
        return resp
class ChatTable(Base):
    """Chat database table model.

    One row per chat thread; threads are looked up by (agent_id, user_id).
    """

    __tablename__ = "chats"
    __table_args__ = (Index("ix_chats_agent_user", "agent_id", "user_id"),)

    # Application-assigned id (an XID string; see ChatCreate.id).
    id = Column(
        String,
        primary_key=True,
    )
    agent_id = Column(
        String,
        nullable=False,
    )
    user_id = Column(
        String,
        nullable=False,
    )
    # Human-readable summary maintained via Chat.update_summary().
    summary = Column(
        String,
        default="",
    )
    # Incremented per exchange via Chat.add_round().
    rounds = Column(
        Integer,
        default=0,
    )
    created_at = Column(
        DateTime(timezone=True),
        nullable=False,
        server_default=func.now(),
    )
    # server_default covers insert; onupdate refreshes the value client-side
    # on every ORM update.
    updated_at = Column(
        DateTime(timezone=True),
        nullable=False,
        server_default=func.now(),
        onupdate=lambda: datetime.now(timezone.utc),
    )
class ChatCreate(BaseModel):
    """Base model for creating chats with fields needed for creation.

    Server-generated fields (``created_at``/``updated_at``) live on Chat.
    """

    model_config = ConfigDict(from_attributes=True)

    id: Annotated[
        str,
        Field(
            default_factory=lambda: str(XID()),  # sortable unique id per chat
            description="Unique identifier for the chat",
        ),
    ]
    agent_id: Annotated[str, Field(description="ID of the agent this chat belongs to")]
    user_id: Annotated[str, Field(description="User ID of the chat")]
    summary: Annotated[str, Field("", description="Summary of the chat")]
    rounds: Annotated[int, Field(0, description="Number of rounds in the chat")]

    async def save(self) -> "Chat":
        """Create a new chat in the database.

        Returns:
            Chat: The saved chat with all fields populated
        """
        # Timestamps are filled in by the database server defaults.
        chat_record = ChatTable(**self.model_dump(exclude_unset=True))
        async with get_session() as db:
            db.add(chat_record)
            await db.commit()
            # Refresh to pick up server-generated timestamps before validating.
            await db.refresh(chat_record)
            # Create and return a full Chat instance
            return Chat.model_validate(chat_record)
class Chat(ChatCreate):
    """Chat model with all fields including server-generated ones."""

    model_config = ConfigDict(
        from_attributes=True,
        json_encoders={datetime: lambda v: v.isoformat(timespec="milliseconds")},
    )

    created_at: Annotated[
        datetime, Field(description="Timestamp when this chat was created")
    ]
    updated_at: Annotated[
        datetime, Field(description="Timestamp when this chat was updated")
    ]

    @classmethod
    async def get(cls, id: str) -> Optional["Chat"]:
        """Get a chat by its ID.

        Args:
            id: ID of the chat to get

        Returns:
            Chat if found, None otherwise
        """
        async with get_session() as db:
            chat_record = await db.get(ChatTable, id)
            if chat_record:
                return cls.model_validate(chat_record)
            return None

    async def delete(self):
        """Delete the chat from the database.

        Silently does nothing if the row no longer exists.
        """
        async with get_session() as db:
            chat_record = await db.get(ChatTable, self.id)
            if chat_record:
                await db.delete(chat_record)
                await db.commit()

    async def add_round(self):
        """Increment the number of rounds in the chat on the database server.

        Uses a direct SQL UPDATE statement to increment the rounds counter
        on the server side, avoiding potential race conditions.
        """
        async with get_session() as db:
            stmt = (
                update(ChatTable)
                .where(ChatTable.id == self.id)
                .values(rounds=ChatTable.rounds + 1)
            )
            await db.execute(stmt)
            await db.commit()
            # Update local object (may lag the server if another writer raced us)
            self.rounds += 1

    async def update_summary(self, summary: str) -> "Chat":
        """Update the chat summary in the database.

        Uses a direct SQL UPDATE statement to set the summary field.

        Args:
            summary: New summary text for the chat

        Returns:
            Chat: The updated chat instance (self, for chaining)
        """
        async with get_session() as db:
            stmt = (
                update(ChatTable).where(ChatTable.id == self.id).values(summary=summary)
            )
            await db.execute(stmt)
            await db.commit()
            # Update local object
            self.summary = summary
            return self

    @classmethod
    async def get_by_agent_user(cls, agent_id: str, user_id: str) -> List["Chat"]:
        """Get the most recent chats for a specific agent and user.

        Returns at most 10 chats, ordered by ``updated_at`` descending
        (the query applies both ``limit(10)`` and the descending order).

        Args:
            agent_id: ID of the agent
            user_id: ID of the user

        Returns:
            List of up to 10 chats, newest first
        """
        async with get_session() as db:
            results = await db.scalars(
                select(ChatTable)
                .order_by(desc(ChatTable.updated_at))
                .limit(10)
                .where(ChatTable.agent_id == agent_id, ChatTable.user_id == user_id)
            )
            return [cls.model_validate(chat) for chat in results]
| 3,097
|
cb04f18b0eb11daf597ef026763242849b49b234c6b258405f98bea0f105c1dd
| 26.086777
| 130
| 0.567963
| 4.233129
| false
| false
| false
| false
|
HKUDS/LightRAG
|
examples/test_faiss.py
| 3,131
| 0
|
MIT License
|
import os
import logging
import asyncio
import numpy as np
from dotenv import load_dotenv
from sentence_transformers import SentenceTransformer
from openai import AzureOpenAI
from lightrag import LightRAG, QueryParam
from lightrag.utils import EmbeddingFunc
from lightrag.kg.shared_storage import initialize_pipeline_status
WORKING_DIR = "./dickens"
# Configure Logging
logging.basicConfig(level=logging.INFO)
# Load environment variables from .env file
load_dotenv()
AZURE_OPENAI_API_VERSION = os.getenv("AZURE_OPENAI_API_VERSION")
AZURE_OPENAI_DEPLOYMENT = os.getenv("AZURE_OPENAI_DEPLOYMENT")
AZURE_OPENAI_API_KEY = os.getenv("AZURE_OPENAI_API_KEY")
AZURE_OPENAI_ENDPOINT = os.getenv("AZURE_OPENAI_ENDPOINT")
async def llm_model_func(
    prompt, system_prompt=None, history_messages=None, keyword_extraction=False, **kwargs
) -> str:
    """Call the Azure OpenAI chat completion endpoint and return the reply text.

    Args:
        prompt: The user prompt for this turn.
        system_prompt: Optional system message prepended to the conversation.
        history_messages: Optional list of prior message dicts. Defaults to no
            history. (The original used a mutable default ``[]``, which is
            shared across calls; ``None`` avoids that while behaving the same.)
        keyword_extraction: Accepted for interface compatibility; unused here.
        **kwargs: Sampling overrides — ``temperature``, ``top_p``, ``n``.

    Returns:
        str: Content of the first completion choice.
    """
    # Create a client for AzureOpenAI
    client = AzureOpenAI(
        api_key=AZURE_OPENAI_API_KEY,
        api_version=AZURE_OPENAI_API_VERSION,
        azure_endpoint=AZURE_OPENAI_ENDPOINT,
    )

    # Build the messages list for the conversation
    messages = []
    if system_prompt:
        messages.append({"role": "system", "content": system_prompt})
    if history_messages:
        messages.extend(history_messages)
    messages.append({"role": "user", "content": prompt})

    # Call the LLM
    chat_completion = client.chat.completions.create(
        model=AZURE_OPENAI_DEPLOYMENT,
        messages=messages,
        temperature=kwargs.get("temperature", 0),
        top_p=kwargs.get("top_p", 1),
        n=kwargs.get("n", 1),
    )
    return chat_completion.choices[0].message.content
async def embedding_func(texts: list[str]) -> np.ndarray:
    """Embed *texts* with all-MiniLM-L6-v2 and return a numpy array.

    The SentenceTransformer model is expensive to construct; the original
    rebuilt it on every call. We lazily build it once and cache it on the
    function object — results are unchanged, repeated calls are much faster.
    """
    model = getattr(embedding_func, "_model", None)
    if model is None:
        model = SentenceTransformer("all-MiniLM-L6-v2")
        embedding_func._model = model
    return model.encode(texts, convert_to_numpy=True)
async def initialize_rag():
    """Construct and initialize a LightRAG instance backed by Faiss storage.

    Wires in the Azure OpenAI chat model (llm_model_func) and the 384-dim
    MiniLM embeddings (embedding_func), then initializes storages and the
    shared pipeline status before returning the ready instance.
    """
    rag = LightRAG(
        working_dir=WORKING_DIR,
        llm_model_func=llm_model_func,
        embedding_func=EmbeddingFunc(
            embedding_dim=384,  # all-MiniLM-L6-v2 output dimension
            max_token_size=8192,
            func=embedding_func,
        ),
        vector_storage="FaissVectorDBStorage",
        vector_db_storage_cls_kwargs={
            "cosine_better_than_threshold": 0.2  # Your desired threshold
        },
    )

    await rag.initialize_storages()
    await initialize_pipeline_status()

    return rag
def main():
    """Index the two sample books with LightRAG and run the four query modes."""
    # Initialize RAG instance
    rag = asyncio.run(initialize_rag())

    # Insert the custom chunks into LightRAG.
    # `with` closes both file handles (the original opened them and never closed).
    with open("./book_1.txt", encoding="utf-8") as book1, open(
        "./book_2.txt", encoding="utf-8"
    ) as book2:
        rag.insert([book1.read(), book2.read()])

    query_text = "What are the main themes?"

    print("Result (Naive):")
    print(rag.query(query_text, param=QueryParam(mode="naive")))

    print("\nResult (Local):")
    print(rag.query(query_text, param=QueryParam(mode="local")))

    print("\nResult (Global):")
    print(rag.query(query_text, param=QueryParam(mode="global")))

    print("\nResult (Hybrid):")
    print(rag.query(query_text, param=QueryParam(mode="hybrid")))
# Script entry point: run only when executed directly, not on import.
if __name__ == "__main__":
    main()
| 891
|
f9aa4ec3013a075aa647c538d48f23174dfc25344700f11a0d1e1fa15d13a51d
| 27.990741
| 87
| 0.672629
| 3.514029
| false
| false
| false
| false
|
meta-llama/llama-stack
|
llama_stack/providers/inline/agents/meta_reference/agent_instance.py
| 39,347
| 0
|
MIT License
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import copy
import json
import re
import secrets
import string
import uuid
from datetime import datetime, timezone
from typing import AsyncGenerator, List, Optional, Union
import httpx
from llama_stack.apis.agents import (
AgentConfig,
AgentToolGroup,
AgentToolGroupWithArgs,
AgentTurnCreateRequest,
AgentTurnResponseEvent,
AgentTurnResponseEventType,
AgentTurnResponseStepCompletePayload,
AgentTurnResponseStepProgressPayload,
AgentTurnResponseStepStartPayload,
AgentTurnResponseStreamChunk,
AgentTurnResponseTurnAwaitingInputPayload,
AgentTurnResponseTurnCompletePayload,
AgentTurnResumeRequest,
Attachment,
Document,
InferenceStep,
ShieldCallStep,
StepType,
ToolExecutionStep,
Turn,
)
from llama_stack.apis.common.content_types import (
URL,
TextContentItem,
ToolCallDelta,
ToolCallParseStatus,
)
from llama_stack.apis.inference import (
ChatCompletionResponseEventType,
CompletionMessage,
Inference,
Message,
SamplingParams,
StopReason,
SystemMessage,
ToolDefinition,
ToolParamDefinition,
ToolResponse,
ToolResponseMessage,
UserMessage,
)
from llama_stack.apis.safety import Safety
from llama_stack.apis.tools import ToolGroups, ToolInvocationResult, ToolRuntime
from llama_stack.apis.vector_io import VectorIO
from llama_stack.log import get_logger
from llama_stack.models.llama.datatypes import (
BuiltinTool,
ToolCall,
)
from llama_stack.providers.utils.kvstore import KVStore
from llama_stack.providers.utils.telemetry import tracing
from .persistence import AgentPersistence
from .safety import SafetyException, ShieldRunnerMixin
def make_random_string(length: int = 8):
    """Return a cryptographically random alphanumeric string of *length* chars."""
    alphabet = string.ascii_letters + string.digits
    chars = [secrets.choice(alphabet) for _ in range(length)]
    return "".join(chars)
# Matches tool-attachment payloads embedded in text, e.g. "__tools_attachment__={...}"
TOOLS_ATTACHMENT_KEY_REGEX = re.compile(r"__tools_attachment__=(\{.*?\})")

# Well-known tool names / tool-group ids referenced throughout this module.
MEMORY_QUERY_TOOL = "knowledge_search"
WEB_SEARCH_TOOL = "web_search"
RAG_TOOL_GROUP = "builtin::rag"

logger = get_logger(name=__name__, category="agents")
class ChatAgent(ShieldRunnerMixin):
    def __init__(
        self,
        agent_id: str,
        agent_config: AgentConfig,
        tempdir: str,
        inference_api: Inference,
        safety_api: Safety,
        tool_runtime_api: ToolRuntime,
        tool_groups_api: ToolGroups,
        vector_io_api: VectorIO,
        persistence_store: KVStore,
    ):
        """Bind the agent's config and provider APIs, and set up persistence.

        Stores each dependency on the instance, wraps the persistence KVStore
        in an AgentPersistence scoped to this agent, and initializes the
        ShieldRunnerMixin with the configured input/output shields.
        """
        self.agent_id = agent_id
        self.agent_config = agent_config
        self.tempdir = tempdir
        self.inference_api = inference_api
        self.safety_api = safety_api
        self.vector_io_api = vector_io_api
        self.storage = AgentPersistence(agent_id, persistence_store)
        self.tool_runtime_api = tool_runtime_api
        self.tool_groups_api = tool_groups_api

        ShieldRunnerMixin.__init__(
            self,
            safety_api,
            input_shields=agent_config.input_shields,
            output_shields=agent_config.output_shields,
        )
    def turn_to_messages(self, turn: Turn) -> List[Message]:
        """Reconstruct the flat message list for one completed turn.

        Emits the turn's input messages (minus RAG context and minus tool
        responses that are duplicated inside tool_execution steps), then the
        per-step outputs: inference responses, tool responses, and shield
        violation messages.
        """
        messages = []

        # NOTE: if a toolcall response is in a step, we do not add it when processing the input messages
        tool_call_ids = set()
        for step in turn.steps:
            if step.step_type == StepType.tool_execution.value:
                for response in step.tool_responses:
                    tool_call_ids.add(response.call_id)

        for m in turn.input_messages:
            msg = m.model_copy()
            # We do not want to keep adding RAG context to the input messages
            # May be this should be a parameter of the agentic instance
            # that can define its behavior in a custom way
            if isinstance(msg, UserMessage):
                msg.context = None

            if isinstance(msg, ToolResponseMessage):
                if msg.call_id in tool_call_ids:
                    # NOTE: do not add ToolResponseMessage here, we'll add them in tool_execution steps
                    continue

            messages.append(msg)

        for step in turn.steps:
            if step.step_type == StepType.inference.value:
                messages.append(step.model_response)
            elif step.step_type == StepType.tool_execution.value:
                for response in step.tool_responses:
                    messages.append(
                        ToolResponseMessage(
                            call_id=response.call_id,
                            content=response.content,
                        )
                    )
            elif step.step_type == StepType.shield_call.value:
                if step.violation:
                    # CompletionMessage itself in the ShieldResponse
                    messages.append(
                        CompletionMessage(
                            content=step.violation.user_message,
                            stop_reason=StopReason.end_of_turn,
                        )
                    )
        return messages
    async def create_session(self, name: str) -> str:
        """Create a new persisted session with the given name and return its id."""
        return await self.storage.create_session(name)
async def get_messages_from_turns(self, turns: List[Turn]) -> List[Message]:
messages = []
if self.agent_config.instructions != "":
messages.append(SystemMessage(content=self.agent_config.instructions))
for turn in turns:
messages.extend(self.turn_to_messages(turn))
return messages
async def create_and_execute_turn(self, request: AgentTurnCreateRequest) -> AsyncGenerator:
span = tracing.get_current_span()
if span:
span.set_attribute("session_id", request.session_id)
span.set_attribute("agent_id", self.agent_id)
span.set_attribute("request", request.model_dump_json())
turn_id = str(uuid.uuid4())
span.set_attribute("turn_id", turn_id)
await self._initialize_tools(request.toolgroups)
async for chunk in self._run_turn(request, turn_id):
yield chunk
    async def resume_turn(self, request: AgentTurnResumeRequest) -> AsyncGenerator:
        """Resume an existing turn that is awaiting client tool responses.

        Annotates the current tracing span (if any), re-initializes tools with
        no additional toolgroups, then delegates streaming to ``_run_turn``.
        """
        span = tracing.get_current_span()
        if span:
            span.set_attribute("agent_id", self.agent_id)
            span.set_attribute("session_id", request.session_id)
            span.set_attribute("request", request.model_dump_json())
            span.set_attribute("turn_id", request.turn_id)

        await self._initialize_tools()
        async for chunk in self._run_turn(request):
            yield chunk
async def _run_turn(
    self,
    request: Union[AgentTurnCreateRequest, AgentTurnResumeRequest],
    turn_id: Optional[str] = None,
) -> AsyncGenerator:
    """Drive a single turn (new or resumed) and stream its response chunks.

    For a resumed turn, the client's tool responses are folded back into the
    message history and recorded as a completed tool_execution step before
    inference continues. On completion the full Turn is persisted and a
    final turn-complete (or awaiting-input) chunk is yielded.
    """
    assert request.stream is True, "Non-streaming not supported"

    is_resume = isinstance(request, AgentTurnResumeRequest)
    session_info = await self.storage.get_session_info(request.session_id)
    if session_info is None:
        raise ValueError(f"Session {request.session_id} not found")

    turns = await self.storage.get_session_turns(request.session_id)
    if is_resume and len(turns) == 0:
        raise ValueError("No turns found for session")

    steps = []
    messages = await self.get_messages_from_turns(turns)
    if is_resume:
        # Fold the client's tool responses into the conversation history.
        tool_response_messages = [
            ToolResponseMessage(call_id=x.call_id, content=x.content) for x in request.tool_responses
        ]
        messages.extend(tool_response_messages)
        last_turn = turns[-1]
        last_turn_messages = self.turn_to_messages(last_turn)
        last_turn_messages = [
            x for x in last_turn_messages if isinstance(x, UserMessage) or isinstance(x, ToolResponseMessage)
        ]
        last_turn_messages.extend(tool_response_messages)
        # NOTE(review): last_turn_messages is built but never read below —
        # confirm whether it is dead code or should feed input_messages.

        # get steps from the turn
        steps = last_turn.steps

        # mark tool execution step as complete
        # if there's no tool execution in progress step (due to storage, or tool call parsing on client),
        # we'll create a new tool execution step with current time
        in_progress_tool_call_step = await self.storage.get_in_progress_tool_call_step(
            request.session_id, request.turn_id
        )
        now = datetime.now(timezone.utc).isoformat()
        tool_execution_step = ToolExecutionStep(
            step_id=(in_progress_tool_call_step.step_id if in_progress_tool_call_step else str(uuid.uuid4())),
            turn_id=request.turn_id,
            tool_calls=(in_progress_tool_call_step.tool_calls if in_progress_tool_call_step else []),
            tool_responses=request.tool_responses,
            completed_at=now,
            started_at=(in_progress_tool_call_step.started_at if in_progress_tool_call_step else now),
        )
        steps.append(tool_execution_step)
        yield AgentTurnResponseStreamChunk(
            event=AgentTurnResponseEvent(
                payload=AgentTurnResponseStepCompletePayload(
                    step_type=StepType.tool_execution.value,
                    step_id=tool_execution_step.step_id,
                    step_details=tool_execution_step,
                )
            )
        )

        # A resumed turn keeps the original turn's id, inputs and start time.
        input_messages = last_turn.input_messages
        turn_id = request.turn_id
        start_time = last_turn.started_at
    else:
        messages.extend(request.messages)
        start_time = datetime.now(timezone.utc).isoformat()
        input_messages = request.messages

    output_message = None
    async for chunk in self.run(
        session_id=request.session_id,
        turn_id=turn_id,
        input_messages=messages,
        sampling_params=self.agent_config.sampling_params,
        stream=request.stream,
        # Documents only apply to newly created turns, not resumed ones.
        documents=request.documents if not is_resume else None,
    ):
        if isinstance(chunk, CompletionMessage):
            # run() yields the final CompletionMessage last; capture it and
            # keep consuming (there should be nothing after it).
            output_message = chunk
            continue

        assert isinstance(chunk, AgentTurnResponseStreamChunk), f"Unexpected type {type(chunk)}"
        event = chunk.event
        if event.payload.event_type == AgentTurnResponseEventType.step_complete.value:
            steps.append(event.payload.step_details)

        yield chunk

    assert output_message is not None

    turn = Turn(
        turn_id=turn_id,
        session_id=request.session_id,
        input_messages=input_messages,
        output_message=output_message,
        started_at=start_time,
        completed_at=datetime.now(timezone.utc).isoformat(),
        steps=steps,
    )
    await self.storage.add_turn_to_session(request.session_id, turn)
    if output_message.tool_calls:
        # Pending client tool calls: the turn pauses awaiting input.
        chunk = AgentTurnResponseStreamChunk(
            event=AgentTurnResponseEvent(
                payload=AgentTurnResponseTurnAwaitingInputPayload(
                    turn=turn,
                )
            )
        )
    else:
        chunk = AgentTurnResponseStreamChunk(
            event=AgentTurnResponseEvent(
                payload=AgentTurnResponseTurnCompletePayload(
                    turn=turn,
                )
            )
        )

    yield chunk
async def run(
    self,
    session_id: str,
    turn_id: str,
    input_messages: List[Message],
    sampling_params: SamplingParams,
    stream: bool = False,
    documents: Optional[List[Document]] = None,
) -> AsyncGenerator:
    """Run one turn end-to-end: input shields -> inference/tool loop -> output shields.

    Yields response stream chunks; the final CompletionMessage is yielded
    last (the caller treats it as the turn's output message).
    """
    # Doing async generators makes downstream code much simpler and everything amenable to
    # streaming. However, it also makes things complicated here because AsyncGenerators cannot
    # return a "final value" for the `yield from` statement. we simulate that by yielding a
    # final boolean (to see whether an exception happened) and then explicitly testing for it.
    if len(self.input_shields) > 0:
        async for res in self.run_multiple_shields_wrapper(
            turn_id, input_messages, self.input_shields, "user-input"
        ):
            if isinstance(res, bool):
                # Boolean sentinel => a shield violation occurred; abort the turn.
                return
            else:
                yield res

    # BUG FIX: initialize before the loop so that if _run finishes without
    # ever yielding a CompletionMessage we fail the assert below instead of
    # raising an UnboundLocalError.
    final_response = None
    async for res in self._run(
        session_id,
        turn_id,
        input_messages,
        sampling_params,
        stream,
        documents,
    ):
        if isinstance(res, bool):
            return
        elif isinstance(res, CompletionMessage):
            final_response = res
            break
        else:
            yield res

    assert final_response is not None
    # for output shields run on the full input and output combination
    messages = input_messages + [final_response]

    if len(self.output_shields) > 0:
        async for res in self.run_multiple_shields_wrapper(
            turn_id, messages, self.output_shields, "assistant-output"
        ):
            if isinstance(res, bool):
                return
            else:
                yield res

    yield final_response
async def run_multiple_shields_wrapper(
    self,
    turn_id: str,
    messages: List[Message],
    shields: List[str],
    touchpoint: str,
) -> AsyncGenerator:
    """Run the given shields over messages, streaming shield_call step events.

    On a violation, yields the violation step-complete event, a
    CompletionMessage describing it, then a boolean False sentinel (the
    caller stops consuming on the sentinel).

    Args:
        turn_id: Id of the turn the shield calls belong to.
        messages: Messages to run the shields over.
        shields: Shield identifiers to apply; no-op when empty.
        touchpoint: Label for where in the turn the shields run
            (e.g. "user-input" / "assistant-output"), recorded in step metadata.
    """
    async with tracing.span("run_shields") as span:
        span.set_attribute("input", [m.model_dump_json() for m in messages])
        if len(shields) == 0:
            span.set_attribute("output", "no shields")
            return

        step_id = str(uuid.uuid4())
        shield_call_start_time = datetime.now(timezone.utc).isoformat()
        try:
            yield AgentTurnResponseStreamChunk(
                event=AgentTurnResponseEvent(
                    payload=AgentTurnResponseStepStartPayload(
                        step_type=StepType.shield_call.value,
                        step_id=step_id,
                        metadata=dict(touchpoint=touchpoint),
                    )
                )
            )
            await self.run_multiple_shields(messages, shields)
        except SafetyException as e:
            yield AgentTurnResponseStreamChunk(
                event=AgentTurnResponseEvent(
                    payload=AgentTurnResponseStepCompletePayload(
                        step_type=StepType.shield_call.value,
                        step_id=step_id,
                        step_details=ShieldCallStep(
                            step_id=step_id,
                            turn_id=turn_id,
                            violation=e.violation,
                            started_at=shield_call_start_time,
                            completed_at=datetime.now(timezone.utc).isoformat(),
                        ),
                    )
                )
            )
            span.set_attribute("output", e.violation.model_dump_json())

            yield CompletionMessage(
                content=str(e),
                stop_reason=StopReason.end_of_turn,
            )
            yield False
            # BUG FIX: stop here on a violation. Previously control fell
            # through and — if the generator kept being driven — also emitted
            # the "no violation" step-complete event below.
            return

        yield AgentTurnResponseStreamChunk(
            event=AgentTurnResponseEvent(
                payload=AgentTurnResponseStepCompletePayload(
                    step_type=StepType.shield_call.value,
                    step_id=step_id,
                    step_details=ShieldCallStep(
                        step_id=step_id,
                        turn_id=turn_id,
                        violation=None,
                        started_at=shield_call_start_time,
                        completed_at=datetime.now(timezone.utc).isoformat(),
                    ),
                )
            )
        )
        span.set_attribute("output", "no violations")
async def _run(
    self,
    session_id: str,
    turn_id: str,
    input_messages: List[Message],
    sampling_params: SamplingParams,
    stream: bool = False,
    documents: Optional[List[Document]] = None,
) -> AsyncGenerator:
    """Core inference/tool-execution loop for a turn.

    Repeatedly calls the model, streams inference and tool_execution step
    events, executes server-side tool calls, and finally yields a
    CompletionMessage (which run() intercepts as the turn output). When
    client-side tool calls are produced, the pending step is persisted and a
    message with stop_reason end_of_message is yielded instead.
    """
    # if document is passed in a turn, we parse the raw text of the document
    # and sent it as a user message
    if documents:
        contexts = []
        for document in documents:
            raw_document_text = await get_raw_document_text(document)
            contexts.append(raw_document_text)

        # Append extracted document text onto the last input message,
        # normalizing whatever content shape it currently has.
        attached_context = "\n".join(contexts)
        if isinstance(input_messages[-1].content, str):
            input_messages[-1].content += attached_context
        elif isinstance(input_messages[-1].content, list):
            input_messages[-1].content.append(TextContentItem(text=attached_context))
        else:
            input_messages[-1].content = [
                input_messages[-1].content,
                TextContentItem(text=attached_context),
            ]

    session_info = await self.storage.get_session_info(session_id)
    # if the session has a memory bank id, let the memory tool use it
    if session_info and session_info.vector_db_id:
        for tool_name in self.tool_name_to_args.keys():
            if tool_name == MEMORY_QUERY_TOOL:
                if "vector_db_ids" not in self.tool_name_to_args[tool_name]:
                    self.tool_name_to_args[tool_name]["vector_db_ids"] = [session_info.vector_db_id]
                else:
                    self.tool_name_to_args[tool_name]["vector_db_ids"].append(session_info.vector_db_id)

    output_attachments = []

    # Iteration count survives turn resumption via storage.
    n_iter = await self.storage.get_num_infer_iters_in_turn(session_id, turn_id) or 0

    # Build a map of custom tools to their definitions for faster lookup
    client_tools = {}
    for tool in self.agent_config.client_tools:
        client_tools[tool.name] = tool
    while True:
        step_id = str(uuid.uuid4())
        inference_start_time = datetime.now(timezone.utc).isoformat()
        yield AgentTurnResponseStreamChunk(
            event=AgentTurnResponseEvent(
                payload=AgentTurnResponseStepStartPayload(
                    step_type=StepType.inference.value,
                    step_id=step_id,
                )
            )
        )

        # Accumulators for this inference step.
        tool_calls = []
        content = ""
        stop_reason = None

        async with tracing.span("inference") as span:
            async for chunk in await self.inference_api.chat_completion(
                self.agent_config.model,
                input_messages,
                tools=self.tool_defs,
                tool_prompt_format=self.agent_config.tool_config.tool_prompt_format,
                response_format=self.agent_config.response_format,
                stream=True,
                sampling_params=sampling_params,
                tool_config=self.agent_config.tool_config,
            ):
                event = chunk.event
                if event.event_type == ChatCompletionResponseEventType.start:
                    continue
                elif event.event_type == ChatCompletionResponseEventType.complete:
                    stop_reason = StopReason.end_of_turn
                    continue

                delta = event.delta
                if delta.type == "tool_call":
                    if delta.parse_status == ToolCallParseStatus.succeeded:
                        tool_calls.append(delta.tool_call)
                    elif delta.parse_status == ToolCallParseStatus.failed:
                        # If we cannot parse the tools, set the content to the unparsed raw text
                        content = delta.tool_call
                    if stream:
                        yield AgentTurnResponseStreamChunk(
                            event=AgentTurnResponseEvent(
                                payload=AgentTurnResponseStepProgressPayload(
                                    step_type=StepType.inference.value,
                                    step_id=step_id,
                                    delta=delta,
                                )
                            )
                        )

                elif delta.type == "text":
                    content += delta.text
                    if stream and event.stop_reason is None:
                        yield AgentTurnResponseStreamChunk(
                            event=AgentTurnResponseEvent(
                                payload=AgentTurnResponseStepProgressPayload(
                                    step_type=StepType.inference.value,
                                    step_id=step_id,
                                    delta=delta,
                                )
                            )
                        )
                else:
                    raise ValueError(f"Unexpected delta type {type(delta)}")

                if event.stop_reason is not None:
                    stop_reason = event.stop_reason
            span.set_attribute("stop_reason", stop_reason)
            span.set_attribute(
                "input",
                json.dumps([json.loads(m.model_dump_json()) for m in input_messages]),
            )
            output_attr = json.dumps(
                {
                    "content": content,
                    "tool_calls": [json.loads(t.model_dump_json()) for t in tool_calls],
                }
            )
            span.set_attribute("output", output_attr)

        n_iter += 1
        await self.storage.set_num_infer_iters_in_turn(session_id, turn_id, n_iter)

        # No explicit stop reason from the stream means we ran out of tokens.
        stop_reason = stop_reason or StopReason.out_of_tokens

        # If tool calls are parsed successfully,
        # if content is not made null the tool call str will also be in the content
        # and tokens will have tool call syntax included twice
        if tool_calls:
            content = ""

        message = CompletionMessage(
            content=content,
            stop_reason=stop_reason,
            tool_calls=tool_calls,
        )

        yield AgentTurnResponseStreamChunk(
            event=AgentTurnResponseEvent(
                payload=AgentTurnResponseStepCompletePayload(
                    step_type=StepType.inference.value,
                    step_id=step_id,
                    step_details=InferenceStep(
                        # somewhere deep, we are re-assigning message or closing over some
                        # variable which causes message to mutate later on. fix with a
                        # `deepcopy` for now, but this is symptomatic of a deeper issue.
                        step_id=step_id,
                        turn_id=turn_id,
                        model_response=copy.deepcopy(message),
                        started_at=inference_start_time,
                        completed_at=datetime.now(timezone.utc).isoformat(),
                    ),
                )
            )
        )

        if n_iter >= self.agent_config.max_infer_iters:
            logger.info(f"done with MAX iterations ({n_iter}), exiting.")
            # NOTE: mark end_of_turn to indicate to client that we are done with the turn
            # Do not continue the tool call loop after this point
            message.stop_reason = StopReason.end_of_turn
            yield message
            break

        if stop_reason == StopReason.out_of_tokens:
            logger.info("out of token budget, exiting.")
            yield message
            break

        if len(message.tool_calls) == 0:
            if stop_reason == StopReason.end_of_turn:
                # TODO: UPDATE RETURN TYPE TO SEND A TUPLE OF (MESSAGE, ATTACHMENTS)
                if len(output_attachments) > 0:
                    if isinstance(message.content, list):
                        message.content += output_attachments
                    else:
                        message.content = [message.content] + output_attachments
                # NOTE(review): no explicit `break` after this yield — the caller
                # (run()) stops consuming once it receives a CompletionMessage;
                # confirm the loop cannot spin again if driven further.
                yield message
            else:
                logger.debug(f"completion message with EOM (iter: {n_iter}): {str(message)}")
                input_messages = input_messages + [message]
        else:
            input_messages = input_messages + [message]

            # Process tool calls in the message
            client_tool_calls = []
            non_client_tool_calls = []

            # Separate client and non-client tool calls
            for tool_call in message.tool_calls:
                if tool_call.tool_name in client_tools:
                    client_tool_calls.append(tool_call)
                else:
                    non_client_tool_calls.append(tool_call)

            # Process non-client tool calls first
            for tool_call in non_client_tool_calls:
                step_id = str(uuid.uuid4())
                yield AgentTurnResponseStreamChunk(
                    event=AgentTurnResponseEvent(
                        payload=AgentTurnResponseStepStartPayload(
                            step_type=StepType.tool_execution.value,
                            step_id=step_id,
                        )
                    )
                )

                yield AgentTurnResponseStreamChunk(
                    event=AgentTurnResponseEvent(
                        payload=AgentTurnResponseStepProgressPayload(
                            step_type=StepType.tool_execution.value,
                            step_id=step_id,
                            delta=ToolCallDelta(
                                parse_status=ToolCallParseStatus.in_progress,
                                tool_call=tool_call,
                            ),
                        )
                    )
                )

                # Execute the tool call
                async with tracing.span(
                    "tool_execution",
                    {
                        "tool_name": tool_call.tool_name,
                        "input": message.model_dump_json(),
                    },
                ) as span:
                    tool_execution_start_time = datetime.now(timezone.utc).isoformat()
                    tool_result = await self.execute_tool_call_maybe(
                        session_id,
                        tool_call,
                    )
                    if tool_result.content is None:
                        raise ValueError(
                            f"Tool call result (id: {tool_call.call_id}, name: {tool_call.tool_name}) does not have any content"
                        )
                    result_message = ToolResponseMessage(
                        call_id=tool_call.call_id,
                        content=tool_result.content,
                    )
                    span.set_attribute("output", result_message.model_dump_json())

                # Store tool execution step
                tool_execution_step = ToolExecutionStep(
                    step_id=step_id,
                    turn_id=turn_id,
                    tool_calls=[tool_call],
                    tool_responses=[
                        ToolResponse(
                            call_id=tool_call.call_id,
                            tool_name=tool_call.tool_name,
                            content=tool_result.content,
                            metadata=tool_result.metadata,
                        )
                    ],
                    started_at=tool_execution_start_time,
                    completed_at=datetime.now(timezone.utc).isoformat(),
                )

                # Yield the step completion event
                yield AgentTurnResponseStreamChunk(
                    event=AgentTurnResponseEvent(
                        payload=AgentTurnResponseStepCompletePayload(
                            step_type=StepType.tool_execution.value,
                            step_id=step_id,
                            step_details=tool_execution_step,
                        )
                    )
                )

                # Add the result message to input_messages for the next iteration
                input_messages.append(result_message)

                # TODO: add tool-input touchpoint and a "start" event for this step also
                # but that needs a lot more refactoring of Tool code potentially
                if (type(result_message.content) is str) and (
                    out_attachment := _interpret_content_as_attachment(result_message.content)
                ):
                    # NOTE: when we push this message back to the model, the model may ignore the
                    # attached file path etc. since the model is trained to only provide a user message
                    # with the summary. We keep all generated attachments and then attach them to final message
                    output_attachments.append(out_attachment)

            # If there are client tool calls, yield a message with only those tool calls
            if client_tool_calls:
                await self.storage.set_in_progress_tool_call_step(
                    session_id,
                    turn_id,
                    ToolExecutionStep(
                        step_id=step_id,
                        turn_id=turn_id,
                        tool_calls=client_tool_calls,
                        tool_responses=[],
                        started_at=datetime.now(timezone.utc).isoformat(),
                    ),
                )

                # Create a copy of the message with only client tool calls
                client_message = message.model_copy(deep=True)
                client_message.tool_calls = client_tool_calls
                # NOTE: mark end_of_message to indicate to client that it may
                # call the tool and continue the conversation with the tool's response.
                client_message.stop_reason = StopReason.end_of_message

                # Yield the message with client tool calls
                yield client_message
                return
async def _initialize_tools(
    self,
    toolgroups_for_turn: Optional[List[AgentToolGroup]] = None,
) -> None:
    """Resolve the agent's (and this turn's) toolgroups into tool definitions.

    Populates self.tool_defs (definitions handed to inference) and
    self.tool_name_to_args (per-tool argument overrides from toolgroup args).

    Raises:
        ValueError: On duplicate tool names, unknown toolgroups, or a
            requested tool missing from its toolgroup.
    """
    # Collect per-toolgroup argument overrides declared with the toolgroup.
    toolgroup_to_args = {}
    for toolgroup in (self.agent_config.toolgroups or []) + (toolgroups_for_turn or []):
        if isinstance(toolgroup, AgentToolGroupWithArgs):
            tool_group_name, _ = self._parse_toolgroup_name(toolgroup.name)
            toolgroup_to_args[tool_group_name] = toolgroup.args

    # Determine which tools to include
    # Per-turn toolgroups take precedence over the agent-config ones.
    tool_groups_to_include = toolgroups_for_turn or self.agent_config.toolgroups or []
    agent_config_toolgroups = []
    for toolgroup in tool_groups_to_include:
        name = toolgroup.name if isinstance(toolgroup, AgentToolGroupWithArgs) else toolgroup
        if name not in agent_config_toolgroups:
            agent_config_toolgroups.append(name)

    toolgroup_to_args = toolgroup_to_args or {}

    tool_name_to_def = {}
    tool_name_to_args = {}

    # Client-declared tools are registered first and must be unique.
    for tool_def in self.agent_config.client_tools:
        if tool_name_to_def.get(tool_def.name, None):
            raise ValueError(f"Tool {tool_def.name} already exists")
        tool_name_to_def[tool_def.name] = ToolDefinition(
            tool_name=tool_def.name,
            description=tool_def.description,
            parameters={
                param.name: ToolParamDefinition(
                    param_type=param.parameter_type,
                    description=param.description,
                    required=param.required,
                    default=param.default,
                )
                for param in tool_def.parameters
            },
        )
    for toolgroup_name_with_maybe_tool_name in agent_config_toolgroups:
        toolgroup_name, input_tool_name = self._parse_toolgroup_name(toolgroup_name_with_maybe_tool_name)
        tools = await self.tool_groups_api.list_tools(toolgroup_id=toolgroup_name)
        if not tools.data:
            available_tool_groups = ", ".join(
                [t.identifier for t in (await self.tool_groups_api.list_tool_groups()).data]
            )
            raise ValueError(f"Toolgroup {toolgroup_name} not found, available toolgroups: {available_tool_groups}")
        if input_tool_name is not None and not any(tool.identifier == input_tool_name for tool in tools.data):
            raise ValueError(
                f"Tool {input_tool_name} not found in toolgroup {toolgroup_name}. Available tools: {', '.join([tool.identifier for tool in tools.data])}"
            )

        for tool_def in tools.data:
            if toolgroup_name.startswith("builtin") and toolgroup_name != RAG_TOOL_GROUP:
                # Built-in toolgroups (except RAG) map onto BuiltinTool enums;
                # "web_search" is aliased to brave_search.
                identifier: str | BuiltinTool | None = tool_def.identifier
                if identifier == "web_search":
                    identifier = BuiltinTool.brave_search
                else:
                    identifier = BuiltinTool(identifier)
            else:
                # add if tool_name is unspecified or the tool_def identifier is the same as the tool_name
                if input_tool_name in (None, tool_def.identifier):
                    identifier = tool_def.identifier
                else:
                    identifier = None

            if tool_name_to_def.get(identifier, None):
                raise ValueError(f"Tool {identifier} already exists")
            if identifier:
                tool_name_to_def[tool_def.identifier] = ToolDefinition(
                    tool_name=identifier,
                    description=tool_def.description,
                    parameters={
                        param.name: ToolParamDefinition(
                            param_type=param.parameter_type,
                            description=param.description,
                            required=param.required,
                            default=param.default,
                        )
                        for param in tool_def.parameters
                    },
                )
                tool_name_to_args[tool_def.identifier] = toolgroup_to_args.get(toolgroup_name, {})

    self.tool_defs, self.tool_name_to_args = (
        list(tool_name_to_def.values()),
        tool_name_to_args,
    )
def _parse_toolgroup_name(self, toolgroup_name_with_maybe_tool_name: str) -> tuple[str, Optional[str]]:
    """Parse a toolgroup name into its components.

    Args:
        toolgroup_name_with_maybe_tool_name: The toolgroup name to parse,
            either bare (e.g. "builtin::rag") or with a specific tool
            appended (e.g. "builtin::rag/knowledge_search").

    Returns:
        A tuple of (tool_group, tool_name); tool_name is None when no
        specific tool was requested.
    """
    # DOC FIX: the previous docstring documented a non-existent parameter
    # name and claimed a 3-tuple return; the examples were also swapped.
    split_names = toolgroup_name_with_maybe_tool_name.split("/")
    if len(split_names) == 2:
        # e.g. "builtin::rag/knowledge_search"
        tool_group, tool_name = split_names
    else:
        # e.g. "builtin::rag"
        tool_group, tool_name = split_names[0], None
    return tool_group, tool_name
async def execute_tool_call_maybe(
    self,
    session_id: str,
    tool_call: ToolCall,
) -> ToolInvocationResult:
    """Resolve a model-issued tool call to a registered runtime tool and invoke it.

    Args:
        session_id: Session the call belongs to (forwarded to the tool runtime).
        tool_call: The tool call produced by the model.

    Returns:
        The tool runtime's invocation result.

    Raises:
        ValueError: If the tool is not among the registered tool definitions.
    """
    tool_name = tool_call.tool_name
    registered_tool_names = [tool_def.tool_name for tool_def in self.tool_defs]
    if tool_name not in registered_tool_names:
        raise ValueError(
            f"Tool {tool_name} not found in provided tools, registered tools: {', '.join([str(x) for x in registered_tool_names])}"
        )
    if isinstance(tool_name, BuiltinTool):
        if tool_name == BuiltinTool.brave_search:
            # brave_search is exposed to the runtime under the generic web-search name.
            tool_name_str = WEB_SEARCH_TOOL
        else:
            tool_name_str = tool_name.value
    else:
        tool_name_str = tool_name

    logger.info(f"executing tool call: {tool_name_str} with args: {tool_call.arguments}")
    result = await self.tool_runtime_api.invoke_tool(
        tool_name=tool_name_str,
        kwargs={
            "session_id": session_id,
            # get the arguments generated by the model and augment with toolgroup arg overrides for the agent
            **tool_call.arguments,
            **self.tool_name_to_args.get(tool_name_str, {}),
        },
    )
    logger.debug(f"tool call {tool_name_str} completed with result: {result}")
    return result
async def load_data_from_url(url: str) -> str:
    """Fetch the body of an http(s) URL as text.

    Args:
        url: The URL to fetch; only URLs starting with "http" are supported.

    Returns:
        The response body decoded as text.

    Raises:
        ValueError: If the URL does not use a supported scheme.
    """
    if url.startswith("http"):
        async with httpx.AsyncClient() as client:
            r = await client.get(url)
            # Surface HTTP errors (4xx/5xx) rather than silently returning an
            # error page's body as document text.
            r.raise_for_status()
            return r.text
    # BUG FIX: the original message interpolated type(url) — always
    # <class 'str'> — instead of the offending URL itself.
    raise ValueError(f"Unexpected URL: {url}")
async def get_raw_document_text(document: Document) -> str:
    """Return the plain-text payload of a text/* document, fetching URL content if needed.

    Raises:
        ValueError: On a non-text mime type or an unrecognized content type.
    """
    if not document.mime_type.startswith("text/"):
        raise ValueError(f"Unexpected document mime type: {document.mime_type}")

    content = document.content
    if isinstance(content, URL):
        return await load_data_from_url(content.uri)
    if isinstance(content, str):
        return content
    if isinstance(content, TextContentItem):
        return content.text
    raise ValueError(f"Unexpected document content type: {type(content)}")
def _interpret_content_as_attachment(
    content: str,
) -> Optional[Attachment]:
    """Scan tool output for an embedded attachment marker and decode it.

    Returns an Attachment built from the marker's JSON payload ("filepath",
    "mimetype"), or None when no marker is present.
    """
    found = re.search(TOOLS_ATTACHMENT_KEY_REGEX, content)
    if found is None:
        return None
    payload = json.loads(found.group(1))
    return Attachment(
        url=URL(uri="file://" + payload["filepath"]),
        mime_type=payload["mimetype"],
    )
| 8,299
|
58a40e03fad2e8c63aac99a8d2fbae5d83eb98f5aeb87ead175fc0e4e7d2fa90
| 41.308602
| 157
| 0.528401
| 4.741174
| false
| false
| false
| false
|
xinnan-tech/xiaozhi-esp32-server
|
main/xiaozhi-server/core/providers/asr/tencent.py
| 8,954
| 0
|
MIT License
|
import base64
import hashlib
import hmac
import json
import time
from datetime import datetime, timezone
import os
import uuid
from typing import Optional, Tuple, List
import wave
import opuslib_next
import requests
from core.providers.asr.base import ASRProviderBase
from config.logger import setup_logging
# Tag used to attribute log records to this module.
TAG = __name__
logger = setup_logging()
class ASRProvider(ASRProviderBase):
    """Tencent Cloud "sentence recognition" ASR provider.

    Decodes Opus audio to 16 kHz mono PCM, base64-encodes it, and sends it to
    the SentenceRecognition API, signing each request with Tencent Cloud's
    TC3-HMAC-SHA256 scheme.
    """

    API_URL = "https://asr.tencentcloudapi.com"
    API_VERSION = "2019-06-14"
    FORMAT = "pcm"  # supported audio formats: pcm, wav, mp3

    def __init__(self, config: dict, delete_audio_file: bool = True):
        # NOTE(review): delete_audio_file is accepted for interface parity with
        # other providers but is not used here — confirm against the base class.
        self.secret_id = config.get("secret_id")
        self.secret_key = config.get("secret_key")
        self.output_dir = config.get("output_dir")
        # Make sure the output directory exists before any file is written.
        os.makedirs(self.output_dir, exist_ok=True)

    def save_audio_to_file(self, opus_data: List[bytes], session_id: str) -> str:
        """Decode Opus packets and save them as a 16 kHz mono 16-bit WAV file.

        Returns:
            The path of the WAV file that was written.
        """
        file_name = f"tencent_asr_{session_id}_{uuid.uuid4()}.wav"
        file_path = os.path.join(self.output_dir, file_name)

        decoder = opuslib_next.Decoder(16000, 1)  # 16 kHz, mono
        pcm_data = []

        for opus_packet in opus_data:
            try:
                pcm_frame = decoder.decode(opus_packet, 960)  # 960 samples = 60ms
                pcm_data.append(pcm_frame)
            except opuslib_next.OpusError as e:
                # Skip undecodable packets but keep the rest of the audio.
                logger.bind(tag=TAG).error(f"Opus解码错误: {e}", exc_info=True)

        with wave.open(file_path, "wb") as wf:
            wf.setnchannels(1)
            wf.setsampwidth(2)  # 2 bytes = 16-bit
            wf.setframerate(16000)
            wf.writeframes(b"".join(pcm_data))

        return file_path

    @staticmethod
    def decode_opus(opus_data: List[bytes]) -> bytes:
        """Decode a list of Opus packets into one raw PCM byte string."""
        # NOTE: the redundant function-local `import opuslib_next` was removed;
        # the module already imports it at the top level.
        decoder = opuslib_next.Decoder(16000, 1)  # 16 kHz, mono
        pcm_data = []

        for opus_packet in opus_data:
            try:
                pcm_frame = decoder.decode(opus_packet, 960)  # 960 samples = 60ms
                pcm_data.append(pcm_frame)
            except opuslib_next.OpusError as e:
                logger.bind(tag=TAG).error(f"Opus解码错误: {e}", exc_info=True)

        return b"".join(pcm_data)

    async def speech_to_text(self, opus_data: List[bytes], session_id: str) -> Tuple[Optional[str], Optional[str]]:
        """Convert Opus audio data into recognized text.

        Returns:
            A (text, file_path) tuple. file_path is always None for this
            provider; text is None when recognition could not be performed.
        """
        if not opus_data:
            logger.bind(tag=TAG).warn("音频数据为空!")
            return None, None

        try:
            # Bail out early when credentials were never configured.
            if not self.secret_id or not self.secret_key:
                logger.bind(tag=TAG).error("腾讯云语音识别配置未设置,无法进行识别")
                return None, None

            # Decode Opus to PCM, then base64-encode for the JSON request body.
            pcm_data = self.decode_opus(opus_data)
            base64_audio = base64.b64encode(pcm_data).decode('utf-8')

            request_body = self._build_request_body(base64_audio)
            timestamp, authorization = self._get_auth_headers(request_body)

            start_time = time.time()
            result = self._send_request(request_body, timestamp, authorization)
            if result:
                logger.bind(tag=TAG).debug(f"腾讯云语音识别耗时: {time.time() - start_time:.3f}s | 结果: {result}")
            # BUG FIX: the original returned only when `result` was truthy; an
            # empty transcription ("") fell through and the coroutine returned
            # bare None instead of a 2-tuple, breaking callers that unpack
            # (text, file_path).
            return result, None

        except Exception as e:
            logger.bind(tag=TAG).error(f"处理音频时发生错误!{e}", exc_info=True)
            return None, None

    def _build_request_body(self, base64_audio: str) -> str:
        """Build the JSON request body for the SentenceRecognition API."""
        request_map = {
            "ProjectId": 0,
            "SubServiceType": 2,  # one-sentence recognition
            "EngSerViceType": "16k_zh",  # 16 kHz Mandarin general model
            "SourceType": 1,  # audio supplied inline (not via URL)
            "VoiceFormat": self.FORMAT,  # audio format
            "Data": base64_audio,  # base64-encoded audio payload
            "DataLen": len(base64_audio)  # payload length
        }
        return json.dumps(request_map)

    def _get_auth_headers(self, request_body: str) -> Tuple[str, str]:
        """Compute the TC3-HMAC-SHA256 timestamp and Authorization header.

        Returns:
            A (timestamp, authorization) pair for the X-TC-Timestamp and
            Authorization request headers.

        Raises:
            RuntimeError: If signing fails for any reason.
        """
        try:
            # Current UTC time: unix timestamp for the header, date for the scope.
            now = datetime.now(timezone.utc)
            timestamp = str(int(now.timestamp()))
            date = now.strftime("%Y-%m-%d")

            # The service name must be "asr".
            service = "asr"
            credential_scope = f"{date}/{service}/tc3_request"
            algorithm = "TC3-HMAC-SHA256"

            # ---- Step 1: canonical request ----
            http_request_method = "POST"
            canonical_uri = "/"
            canonical_query_string = ""
            # Headers must be sorted in ASCII order with lowercase keys/values;
            # content-type and host are mandatory, x-tc-action must match the call.
            content_type = "application/json; charset=utf-8"
            host = "asr.tencentcloudapi.com"
            action = "SentenceRecognition"  # API action name
            canonical_headers = f"content-type:{content_type.lower()}\n" + \
                f"host:{host.lower()}\n" + \
                f"x-tc-action:{action.lower()}\n"
            signed_headers = "content-type;host;x-tc-action"

            payload_hash = self._sha256_hex(request_body)
            canonical_request = f"{http_request_method}\n" + \
                f"{canonical_uri}\n" + \
                f"{canonical_query_string}\n" + \
                f"{canonical_headers}\n" + \
                f"{signed_headers}\n" + \
                f"{payload_hash}"

            # ---- Step 2: string to sign ----
            hashed_canonical_request = self._sha256_hex(canonical_request)
            string_to_sign = f"{algorithm}\n" + \
                f"{timestamp}\n" + \
                f"{credential_scope}\n" + \
                f"{hashed_canonical_request}"

            # ---- Step 3: derive the signing key and sign ----
            secret_date = self._hmac_sha256(f"TC3{self.secret_key}", date)
            secret_service = self._hmac_sha256(secret_date, service)
            secret_signing = self._hmac_sha256(secret_service, "tc3_request")
            signature = self._bytes_to_hex(self._hmac_sha256(secret_signing, string_to_sign))

            # ---- Step 4: assemble the Authorization header ----
            authorization = f"{algorithm} " + \
                f"Credential={self.secret_id}/{credential_scope}, " + \
                f"SignedHeaders={signed_headers}, " + \
                f"Signature={signature}"

            return timestamp, authorization
        except Exception as e:
            logger.bind(tag=TAG).error(f"生成认证头失败: {e}", exc_info=True)
            raise RuntimeError(f"生成认证头失败: {e}")

    def _send_request(self, request_body: str, timestamp: str, authorization: str) -> Optional[str]:
        """POST the signed request to the Tencent Cloud ASR API.

        Returns:
            The recognized text, "" when the response carries no result, or
            None when the request failed.
        """
        headers = {
            "Content-Type": "application/json; charset=utf-8",
            "Host": "asr.tencentcloudapi.com",
            "Authorization": authorization,
            "X-TC-Action": "SentenceRecognition",
            "X-TC-Version": self.API_VERSION,
            "X-TC-Timestamp": timestamp,
            "X-TC-Region": "ap-shanghai"
        }

        try:
            response = requests.post(self.API_URL, headers=headers, data=request_body)
            if not response.ok:
                raise IOError(f"请求失败: {response.status_code} {response.reason}")

            response_json = response.json()

            # API-level errors come back inside the Response envelope.
            if "Response" in response_json and "Error" in response_json["Response"]:
                error = response_json["Response"]["Error"]
                error_code = error["Code"]
                error_message = error["Message"]
                raise IOError(f"API返回错误: {error_code}: {error_message}")

            # Extract the recognition result.
            if "Response" in response_json and "Result" in response_json["Response"]:
                return response_json["Response"]["Result"]
            else:
                logger.bind(tag=TAG).warn(f"响应中没有识别结果: {response_json}")
                return ""
        except Exception as e:
            logger.bind(tag=TAG).error(f"发送请求失败: {e}", exc_info=True)
            return None

    def _sha256_hex(self, data: str) -> str:
        """Return the SHA-256 digest of a UTF-8 string as lowercase hex."""
        digest = hashlib.sha256(data.encode('utf-8')).digest()
        return self._bytes_to_hex(digest)

    def _hmac_sha256(self, key, data: str) -> bytes:
        """Return the raw HMAC-SHA256 of `data` keyed by `key` (str or bytes)."""
        if isinstance(key, str):
            key = key.encode('utf-8')
        return hmac.new(key, data.encode('utf-8'), hashlib.sha256).digest()

    def _bytes_to_hex(self, bytes_data: bytes) -> str:
        """Convert a byte string to its lowercase hex representation."""
        return ''.join(f"{b:02x}" for b in bytes_data)
| 2,921
|
faace4123bc1aeb555bc9667aa91f08c1a9c642aa83e618651f119e3bfaf5b5a
| 34.677291
| 115
| 0.528032
| 3.065389
| false
| false
| false
| false
|
browser-use/browser-use
|
browser_use/browser/utils/screen_resolution.py
| 1,284
| 0
|
MIT License
|
import sys
def get_screen_resolution():
    """Return the primary display's size as a {'width': int, 'height': int} dict.

    Probes AppKit on macOS and screeninfo elsewhere; falls back to a sensible
    platform default when the probe is unavailable or fails.
    """
    if sys.platform == 'darwin':  # macOS
        try:
            from AppKit import NSScreen

            frame = NSScreen.mainScreen().frame()
            return {'width': int(frame.size.width), 'height': int(frame.size.height)}
        except ImportError:
            print('AppKit is not available. Make sure you are running this on macOS with pyobjc installed.')
        except Exception as e:
            print(f'Error retrieving macOS screen resolution: {e}')
        return {'width': 2560, 'height': 1664}

    # Windows & Linux
    try:
        from screeninfo import get_monitors

        detected = get_monitors()
        if not detected:
            raise Exception('No monitors detected.')
        primary = detected[0]
        return {'width': primary.width, 'height': primary.height}
    except ImportError:
        print("screeninfo package not found. Install it using 'pip install screeninfo'.")
    except Exception as e:
        print(f'Error retrieving screen resolution: {e}')
    return {'width': 1920, 'height': 1080}
def get_window_adjustments():
    """Returns recommended x, y offsets for window positioning"""
    offsets_by_platform = {
        'darwin': (-4, 24),  # macOS has a small title bar, no border
        'win32': (-8, 0),  # Windows has a border on the left
    }
    # Linux and anything else needs no adjustment.
    return offsets_by_platform.get(sys.platform, (0, 0))
| 374
|
3a765131be43abbcf5407364ce053cdff4dff10a22b0b9c2728fd75e64fd3da0
| 30.317073
| 99
| 0.692368
| 3.433155
| false
| false
| false
| false
|
circlemind-ai/fast-graphrag
|
fast_graphrag/_llm/_base.py
| 3,829
| 0
|
MIT License
|
"""LLM Services module."""
from dataclasses import dataclass, field
from typing import Any, Optional, Tuple, Type, TypeVar, Union
import numpy as np
from pydantic import BaseModel
from fast_graphrag._models import BaseModelAlias
from fast_graphrag._prompt import PROMPTS
T_model = TypeVar("T_model", bound=Union[BaseModel, BaseModelAlias])
async def format_and_send_prompt(
    prompt_key: str,
    llm: "BaseLLMService",
    format_kwargs: dict[str, Any],
    response_model: Type[T_model],
    **args: Any,
) -> Tuple[T_model, list[dict[str, str]]]:
    """Look up a prompt template, interpolate it, and dispatch it to the LLM.

    Args:
        prompt_key (str): The key for the prompt in the PROMPTS dictionary.
        llm (BaseLLMService): The LLM service to use for sending the message.
        format_kwargs (dict[str, Any]): Dictionary of arguments to format the prompt.
        response_model (Type[T_model]): The expected response model.
        **args (Any): Additional keyword arguments to pass to the LLM.

    Returns:
        Tuple[T_model, list[dict[str, str]]]: The parsed response and message history.
    """
    template = PROMPTS[prompt_key]
    return await llm.send_message(
        prompt=template.format(**format_kwargs),
        response_model=response_model,
        **args,
    )
@dataclass
class BaseLLMService:
    """Base class for Language Model implementations."""

    # Default model identifier used for requests; None lets the client decide.
    model: Optional[str] = field(default=None)
    # Custom API endpoint; None means the provider's default.
    base_url: Optional[str] = field(default=None)
    # API credential; None falls back to ambient configuration (e.g. env vars).
    api_key: Optional[str] = field(default=None)
    # Async client instance created by concrete subclasses (not a constructor arg).
    llm_async_client: Any = field(init=False, default=None)

    async def send_message(
        self,
        prompt: str,
        model: str | None = None,
        system_prompt: str | None = None,
        history_messages: list[dict[str, str]] | None = None,
        response_model: Type[T_model] | None = None,
        **kwargs: Any,
    ) -> Tuple[T_model, list[dict[str, str]]]:
        """Send a message to the language model and receive a response.

        Args:
            prompt (str): The input message to send to the language model.
            model (str, optional): The name of the model to use. Defaults to None (service default).
            system_prompt (str, optional): The system prompt to set the context for the conversation. Defaults to None.
            history_messages (list, optional): A list of previous messages in the conversation. Defaults to empty.
            response_model (Type[T], optional): The Pydantic model to parse the response. Defaults to None.
            **kwargs: Additional keyword arguments that may be required by specific LLM implementations.

        Returns:
            Tuple[T_model, list[dict[str, str]]]: The parsed response and the message history.

        Raises:
            NotImplementedError: Always; concrete subclasses must override.
        """
        raise NotImplementedError
@dataclass
class BaseEmbeddingService:
    """Base class for text embedding service implementations."""
    # Dimensionality of the produced embedding vectors.
    embedding_dim: int = field(default=1536)
    # Default embedding model name.
    model: Optional[str] = field(default="text-embedding-3-small")
    # Optional endpoint override (e.g. proxy or self-hosted deployment).
    base_url: Optional[str] = field(default=None)
    # Provider credential; None means rely on ambient configuration.
    api_key: Optional[str] = field(default=None)
    # Provider-specific async client; excluded from __init__, set by subclasses.
    embedding_async_client: Any = field(init=False, default=None)
    async def encode(
        self, texts: list[str], model: Optional[str] = None
    ) -> np.ndarray[Any, np.dtype[np.float32]]:
        """Get the embedding representation of the input texts.

        Args:
            texts (list[str]): The input texts to embed.
            model (str, optional): Model name override. Defaults to None.

        Returns:
            np.ndarray: Float32 array of embedding vectors (per annotation).

        Raises:
            NotImplementedError: Always; subclasses must override.
        """
        raise NotImplementedError
| 962
|
b047af1d2b65078aa3e40258aa3678f411741ee177c9acaefdbe945e1952a1af
| 36.174757
| 119
| 0.66153
| 3.980249
| false
| false
| false
| false
|
docling-project/docling
|
tests/test_code_formula.py
| 2,071
| 0
|
MIT License
|
from pathlib import Path
from docling_core.types.doc import CodeItem, TextItem
from docling_core.types.doc.labels import CodeLanguageLabel, DocItemLabel
from docling.backend.docling_parse_backend import DoclingParseDocumentBackend
from docling.datamodel.base_models import InputFormat
from docling.datamodel.document import ConversionResult
from docling.datamodel.pipeline_options import PdfPipelineOptions
from docling.document_converter import DocumentConverter, PdfFormatOption
from docling.pipeline.standard_pdf_pipeline import StandardPdfPipeline
def get_converter():
    """Build a DocumentConverter configured for code and formula enrichment.

    OCR and table-structure detection are switched off; page images are kept
    so the enrichment models can crop the relevant regions.
    """
    opts = PdfPipelineOptions()
    opts.generate_page_images = True
    opts.do_ocr = False
    opts.do_table_structure = False
    opts.do_code_enrichment = True
    opts.do_formula_enrichment = True
    pdf_option = PdfFormatOption(
        pipeline_cls=StandardPdfPipeline,
        pipeline_options=opts,
    )
    return DocumentConverter(format_options={InputFormat.PDF: pdf_option})
def test_code_and_formula_conversion():
    """End-to-end check that code and formula enrichment recover the expected
    items from tests/data/pdf/code_and_formula.pdf."""
    pdf_path = Path("tests/data/pdf/code_and_formula.pdf")
    converter = get_converter()
    print(f"converting {pdf_path}")
    doc_result: ConversionResult = converter.convert(pdf_path)
    results = doc_result.document.texts
    # Exactly one code block, recognized as JavaScript with exact text.
    code_blocks = [el for el in results if isinstance(el, CodeItem)]
    assert len(code_blocks) == 1
    gt = "function add(a, b) {\n return a + b;\n}\nconsole.log(add(3, 5));"
    predicted = code_blocks[0].text.strip()
    assert predicted == gt, f"mismatch in text {predicted=}, {gt=}"
    assert code_blocks[0].code_language == CodeLanguageLabel.JAVASCRIPT
    # Exactly one formula, emitted as spaced LaTeX tokens.
    formula_blocks = [
        el
        for el in results
        if isinstance(el, TextItem) and el.label == DocItemLabel.FORMULA
    ]
    assert len(formula_blocks) == 1
    gt = "a ^ { 2 } + 8 = 1 2"
    predicted = formula_blocks[0].text
    assert predicted == gt, f"mismatch in text {predicted=}, {gt=}"
| 562
|
ff9026cda4c757cecca7e85aeab6cfa133cc55fb1245f8318cda98e784abe2db
| 31.359375
| 78
| 0.703042
| 3.685053
| false
| false
| false
| false
|
BrainBlend-AI/atomic-agents
|
atomic-examples/orchestration-agent/orchestration_agent/orchestrator.py
| 7,052
| 0
|
MIT License
|
from typing import Union
import openai
from pydantic import Field
from atomic_agents.agents.base_agent import BaseAgent, BaseAgentConfig
from atomic_agents.lib.base.base_io_schema import BaseIOSchema
from atomic_agents.lib.components.agent_memory import AgentMemory
from atomic_agents.lib.components.system_prompt_generator import SystemPromptGenerator, SystemPromptContextProviderBase
from orchestration_agent.tools.searxng_search import (
SearxNGSearchTool,
SearxNGSearchToolConfig,
SearxNGSearchToolInputSchema,
SearxNGSearchToolOutputSchema,
)
from orchestration_agent.tools.calculator import (
CalculatorTool,
CalculatorToolConfig,
CalculatorToolInputSchema,
CalculatorToolOutputSchema,
)
import instructor
from datetime import datetime
########################
# INPUT/OUTPUT SCHEMAS #
########################
class OrchestratorInputSchema(BaseIOSchema):
    """Input schema for the Orchestrator Agent. Contains the user's message to be processed."""
    # NOTE(review): in atomic-agents the class docstring doubles as the schema
    # description shown to the LLM, so it is deliberately left unchanged.
    chat_message: str = Field(..., description="The user's input message to be analyzed and responded to.")
class OrchestratorOutputSchema(BaseIOSchema):
    """Combined output schema for the Orchestrator Agent. Contains the tool to use and its parameters."""
    # NOTE(review): docstring doubles as the runtime schema description; unchanged.
    tool: str = Field(..., description="The tool to use: 'search' or 'calculator'")
    # Union lets the LLM emit whichever tool's input schema matches its choice.
    tool_parameters: Union[SearxNGSearchToolInputSchema, CalculatorToolInputSchema] = Field(
        ..., description="The parameters for the selected tool"
    )
class FinalAnswerSchema(BaseIOSchema):
    """Schema for the final answer generated by the Orchestrator Agent."""
    # Used when the agent's output_schema is temporarily swapped (see __main__).
    final_answer: str = Field(..., description="The final answer generated based on the tool output and user query.")
#######################
# AGENT CONFIGURATION #
#######################
class OrchestratorAgentConfig(BaseAgentConfig):
    """Configuration for the Orchestrator Agent."""
    # Configs for the two tools the orchestrator can dispatch to.
    searxng_config: SearxNGSearchToolConfig
    calculator_config: CalculatorToolConfig
#####################
# CONTEXT PROVIDERS #
#####################
class CurrentDateProvider(SystemPromptContextProviderBase):
    """Context provider that injects today's date into the system prompt."""
    def __init__(self, title):
        super().__init__(title)
        # Snapshot the date once, at construction time (YYYY-MM-DD).
        self.date = datetime.now().strftime("%Y-%m-%d")
    def get_info(self) -> str:
        return "Current date in format YYYY-MM-DD: {}".format(self.date)
######################
# ORCHESTRATOR AGENT #
######################
# Shared orchestrator instance: routes each user message to either the search
# tool or the calculator tool. NOTE(review): openai.OpenAI() is instantiated at
# import time, so importing this module requires OpenAI credentials/config.
orchestrator_agent = BaseAgent(
    BaseAgentConfig(
        client=instructor.from_openai(openai.OpenAI()),
        model="gpt-4o-mini",
        system_prompt_generator=SystemPromptGenerator(
            background=[
                "You are an Orchestrator Agent that decides between using a search tool or a calculator tool based on user input.",
                "Use the search tool for queries requiring factual information, current events, or specific data.",
                "Use the calculator tool for mathematical calculations and expressions.",
            ],
            output_instructions=[
                "Analyze the input to determine whether it requires a web search or a calculation.",
                "For search queries, use the 'search' tool and provide 1-3 relevant search queries.",
                "For calculations, use the 'calculator' tool and provide the mathematical expression to evaluate.",
                "When uncertain, prefer using the search tool.",
                "Format the output using the appropriate schema.",
            ],
        ),
        input_schema=OrchestratorInputSchema,
        output_schema=OrchestratorOutputSchema,
    )
)
# Register the current date provider so prompts always carry today's date.
orchestrator_agent.register_context_provider("current_date", CurrentDateProvider("Current Date"))
def execute_tool(
    searxng_tool: SearxNGSearchTool, calculator_tool: CalculatorTool, orchestrator_output: OrchestratorOutputSchema
) -> Union[SearxNGSearchToolOutputSchema, CalculatorToolOutputSchema]:
    """Dispatch the orchestrator's tool choice to the matching tool and run it.

    Raises:
        ValueError: if the orchestrator named a tool other than
            'search' or 'calculator'.
    """
    dispatch = {
        "search": searxng_tool,
        "calculator": calculator_tool,
    }
    selected = dispatch.get(orchestrator_output.tool)
    if selected is None:
        raise ValueError(f"Unknown tool: {orchestrator_output.tool}")
    return selected.run(orchestrator_output.tool_parameters)
#################
# EXAMPLE USAGE #
#################
# Demo driver: for each example input, show the generated schemas, let the
# orchestrator pick a tool, run it, then ask the agent for a final answer.
if __name__ == "__main__":
    import os
    from dotenv import load_dotenv
    from rich.console import Console
    from rich.panel import Panel
    from rich.syntax import Syntax
    load_dotenv()
    # Set up the OpenAI client
    # NOTE(review): this local `client` appears unused below — the module-level
    # agent was already built with its own client. Confirm before removing.
    client = instructor.from_openai(openai.OpenAI(api_key=os.getenv("OPENAI_API_KEY")))
    # Initialize the tools
    searxng_tool = SearxNGSearchTool(SearxNGSearchToolConfig(base_url="http://localhost:8080", max_results=5))
    calculator_tool = CalculatorTool(CalculatorToolConfig())
    # Initialize Rich console
    console = Console()
    # Print the full system prompt
    console.print(Panel(orchestrator_agent.system_prompt_generator.generate_prompt(), title="System Prompt", expand=False))
    console.print("\n")
    # Example inputs
    inputs = [
        "Who won the Nobel Prize in Physics in 2024?",
        "Please calculate the sine of pi/3 to the third power",
    ]
    for user_input in inputs:
        console.print(Panel(f"[bold cyan]User Input:[/bold cyan] {user_input}", expand=False))
        # Create the input schema
        input_schema = OrchestratorInputSchema(chat_message=user_input)
        # Print the input schema
        console.print("\n[bold yellow]Generated Input Schema:[/bold yellow]")
        input_syntax = Syntax(str(input_schema.model_dump_json(indent=2)), "json", theme="monokai", line_numbers=True)
        console.print(input_syntax)
        # Run the orchestrator to get the tool selection and input
        orchestrator_output = orchestrator_agent.run(input_schema)
        # Print the orchestrator output
        console.print("\n[bold magenta]Orchestrator Output:[/bold magenta]")
        orchestrator_syntax = Syntax(
            str(orchestrator_output.model_dump_json(indent=2)), "json", theme="monokai", line_numbers=True
        )
        console.print(orchestrator_syntax)
        # Run the selected tool
        response = execute_tool(searxng_tool, calculator_tool, orchestrator_output)
        # Print the tool output
        console.print("\n[bold green]Tool Output:[/bold green]")
        output_syntax = Syntax(str(response.model_dump_json(indent=2)), "json", theme="monokai", line_numbers=True)
        console.print(output_syntax)
        console.print("\n" + "-" * 80 + "\n")
        # Temporarily swap the output schema so the agent produces a final
        # natural-language answer from the tool result, then swap back.
        orchestrator_agent.output_schema = FinalAnswerSchema
        orchestrator_agent.memory.add_message("system", response)
        final_answer = orchestrator_agent.run(input_schema)
        console.print(f"\n[bold blue]Final Answer:[/bold blue] {final_answer.final_answer}")
        orchestrator_agent.output_schema = OrchestratorOutputSchema
        # Reset the memory after each response
        orchestrator_agent.memory = AgentMemory()
| 1,777
|
a5a1ea4bb55d4cb16242092454a35173e9a7cffa35926069bb9f1bb2d6f78e39
| 37.535519
| 131
| 0.682218
| 3.968486
| false
| true
| false
| false
|
HKUDS/LightRAG
|
examples/lightrag_zhipu_demo.py
| 2,013
| 0
|
MIT License
|
import os
import logging
import asyncio
from lightrag import LightRAG, QueryParam
from lightrag.llm.zhipu import zhipu_complete, zhipu_embedding
from lightrag.utils import EmbeddingFunc
from lightrag.kg.shared_storage import initialize_pipeline_status
WORKING_DIR = "./dickens"
logging.basicConfig(format="%(levelname)s:%(message)s", level=logging.INFO)
# Create the working directory on first run.
if not os.path.exists(WORKING_DIR):
    os.mkdir(WORKING_DIR)
api_key = os.environ.get("ZHIPUAI_API_KEY")
if api_key is None:
    # Bug fix: the message previously named "ZHIPU_API_KEY", but the variable
    # actually read above is ZHIPUAI_API_KEY.
    raise Exception("Please set ZHIPUAI_API_KEY in your environment")
async def initialize_rag():
    """Create and initialize a LightRAG instance backed by Zhipu models.

    Returns:
        LightRAG: instance with storages and pipeline status initialized.
    """
    rag = LightRAG(
        working_dir=WORKING_DIR,
        llm_model_func=zhipu_complete,
        llm_model_name="glm-4-flashx",  # Using the most cost/performance balance model, but you can change it here.
        llm_model_max_async=4,
        llm_model_max_token_size=32768,
        embedding_func=EmbeddingFunc(
            embedding_dim=2048,  # Zhipu embedding-3 dimension
            max_token_size=8192,
            func=lambda texts: zhipu_embedding(texts),
        ),
    )
    await rag.initialize_storages()
    await initialize_pipeline_status()
    return rag
def main():
    """Build the RAG instance, ingest ./book.txt, and query it in every mode."""
    # Initialize RAG instance
    rag = asyncio.run(initialize_rag())
    with open("./book.txt", "r", encoding="utf-8") as f:
        rag.insert(f.read())
    # Ask the same question once per retrieval mode, in the original order.
    for mode in ("naive", "local", "global", "hybrid"):
        print(
            rag.query(
                "What are the top themes in this story?", param=QueryParam(mode=mode)
            )
        )
if __name__ == "__main__":
    main()
| 561
|
ca2ae1c1805a6a8cc533daf5430cbfedf2dfa590b73749df9df5f9ea1fa1f4fa
| 24.1625
| 116
| 0.630402
| 3.588235
| false
| false
| false
| false
|
abus-aikorea/voice-pro
|
src/demucs/train.py
| 8,068
| 0
|
MIT License
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Main training script entry point"""
import logging
import os
from pathlib import Path
import sys
from dora import hydra_main
import hydra
from hydra.core.global_hydra import GlobalHydra
from omegaconf import OmegaConf
import torch
from torch import nn
import torchaudio
from torch.utils.data import ConcatDataset
from . import distrib
from .wav import get_wav_datasets, get_musdb_wav_datasets
from .demucs import Demucs
from .hdemucs import HDemucs
from .htdemucs import HTDemucs
from .repitch import RepitchedWrapper
from .solver import Solver
from .states import capture_init
from .utils import random_subset
logger = logging.getLogger(__name__)
class TorchHDemucsWrapper(nn.Module):
    """Wrapper around torchaudio HDemucs implementation to provide the proper metadata
    for model evaluation.
    See https://pytorch.org/audio/stable/tutorials/hybrid_demucs_tutorial.html"""
    @capture_init
    def __init__(self, **kwargs):
        super().__init__()
        # Imported lazily so the rest of the package works on older torchaudio.
        try:
            from torchaudio.models import HDemucs as TorchHDemucs
        except ImportError:
            raise ImportError("Please upgrade torchaudio for using its implementation of HDemucs")
        # Metadata used by evaluation code; popped so the remaining kwargs can
        # be forwarded to torchaudio's HDemucs. Note `sources` is read but NOT
        # popped: torchaudio's constructor also expects it.
        self.samplerate = kwargs.pop('samplerate')
        self.segment = kwargs.pop('segment')
        self.sources = kwargs['sources']
        self.torch_hdemucs = TorchHDemucs(**kwargs)
    def forward(self, mix):
        # Pure delegation to the torchaudio implementation.
        return self.torch_hdemucs.forward(mix)
def get_model(args):
    """Instantiate the model selected by ``args.model``.

    Shared metadata (sources, channels, samplerate, segment) comes from the
    dataset config; model-specific kwargs come from the config section named
    after the model (e.g. ``args.htdemucs`` for ``model='htdemucs'``).
    """
    extra = {
        'sources': list(args.dset.sources),
        'audio_channels': args.dset.channels,
        'samplerate': args.dset.samplerate,
        # Explicit override wins; otherwise 4x the training segment length.
        'segment': args.model_segment or 4 * args.dset.segment,
    }
    klass = {
        'demucs': Demucs,
        'hdemucs': HDemucs,
        'htdemucs': HTDemucs,
        'torch_hdemucs': TorchHDemucsWrapper,
    }[args.model]
    kw = OmegaConf.to_container(getattr(args, args.model), resolve=True)
    model = klass(**extra, **kw)
    return model
def get_optimizer(model, args):
    """Build the optimizer for ``model``.

    Modules that define ``make_optim_group()`` contribute their own parameter
    group; every remaining parameter goes into a default group inserted first.

    Args:
        model: the model whose parameters are optimized.
        args: config with an ``optim`` section providing ``optim`` (name),
            ``lr``, ``momentum``, ``beta2`` and ``weight_decay``.

    Returns:
        torch.optim.Optimizer: Adam or AdamW depending on ``args.optim.optim``.

    Raises:
        ValueError: if ``args.optim.optim`` names an unknown optimizer.
    """
    seen_params = set()
    other_params = []
    groups = []
    for n, module in model.named_modules():
        if hasattr(module, "make_optim_group"):
            group = module.make_optim_group()
            params = set(group["params"])
            # Each parameter must belong to exactly one group.
            assert params.isdisjoint(seen_params)
            seen_params |= set(params)
            groups.append(group)
    for param in model.parameters():
        if param not in seen_params:
            other_params.append(param)
    # Default group first so module-specific groups keep their own settings.
    groups.insert(0, {"params": other_params})
    parameters = groups
    if args.optim.optim == "adam":
        return torch.optim.Adam(
            parameters,
            lr=args.optim.lr,
            betas=(args.optim.momentum, args.optim.beta2),
            weight_decay=args.optim.weight_decay,
        )
    elif args.optim.optim == "adamw":
        return torch.optim.AdamW(
            parameters,
            lr=args.optim.lr,
            betas=(args.optim.momentum, args.optim.beta2),
            weight_decay=args.optim.weight_decay,
        )
    else:
        # Bug fix: the old raise referenced the nonexistent
        # `args.optim.optimizer` attribute (an AttributeError in practice)
        # and passed "%s" without ever formatting it.
        raise ValueError(f"Invalid optimizer {args.optim.optim}")
def get_datasets(args):
    """Assemble the train/valid datasets from MUSDB and optional extra wavs.

    Returns:
        tuple: (train_set, valid_set), both non-empty (asserted).
    """
    if args.dset.backend:
        torchaudio.set_audio_backend(args.dset.backend)
    if args.dset.use_musdb:
        train_set, valid_set = get_musdb_wav_datasets(args.dset)
    else:
        train_set, valid_set = [], []
    if args.dset.wav:
        extra_train_set, extra_valid_set = get_wav_datasets(args.dset)
        # With the standard 4 sources the extra data is mixed in; with more
        # sources it replaces MUSDB entirely (MUSDB only has 4 stems).
        if len(args.dset.sources) <= 4:
            train_set = ConcatDataset([train_set, extra_train_set])
            valid_set = ConcatDataset([valid_set, extra_valid_set])
        else:
            train_set = extra_train_set
            valid_set = extra_valid_set
    if args.dset.wav2:
        extra_train_set, extra_valid_set = get_wav_datasets(args.dset, "wav2")
        weight = args.dset.wav2_weight
        if weight is not None:
            # Repeat the base set so wav2 accounts for roughly `weight` of
            # the combined samples.
            b = len(train_set)
            e = len(extra_train_set)
            reps = max(1, round(e / b * (1 / weight - 1)))
        else:
            reps = 1
        train_set = ConcatDataset([train_set] * reps + [extra_train_set])
        if args.dset.wav2_valid:
            if weight is not None:
                # Subsample wav2 validation to preserve the same weighting.
                b = len(valid_set)
                n_kept = int(round(weight * b / (1 - weight)))
                valid_set = ConcatDataset(
                    [valid_set, random_subset(extra_valid_set, n_kept)]
                )
            else:
                valid_set = ConcatDataset([valid_set, extra_valid_set])
    if args.dset.valid_samples is not None:
        valid_set = random_subset(valid_set, args.dset.valid_samples)
    assert len(train_set)
    assert len(valid_set)
    return train_set, valid_set
def get_solver(args, model_only=False):
    """Build the training Solver: model, optimizer and (optionally) loaders.

    Args:
        args: full experiment config.
        model_only: when True, skip dataset/loader construction and return a
            Solver with no loaders (used for evaluation/export).
    """
    distrib.init()
    torch.manual_seed(args.seed)
    model = get_model(args)
    if args.misc.show:
        # Debug mode: print the model and its size, then exit.
        logger.info(model)
        mb = sum(p.numel() for p in model.parameters()) * 4 / 2**20
        logger.info('Size: %.1f MB', mb)
        if hasattr(model, 'valid_length'):
            field = model.valid_length(1)
            logger.info('Field: %.1f ms', field / args.dset.samplerate * 1000)
        sys.exit(0)
    # torch also initialize cuda seed if available
    if torch.cuda.is_available():
        model.cuda()
    # optimizer
    optimizer = get_optimizer(model, args)
    # The configured batch size is global; split it across workers.
    assert args.batch_size % distrib.world_size == 0
    args.batch_size //= distrib.world_size
    if model_only:
        return Solver(None, model, optimizer, args)
    train_set, valid_set = get_datasets(args)
    if args.augment.repitch.proba:
        vocals = []
        if 'vocals' in args.dset.sources:
            vocals.append(args.dset.sources.index('vocals'))
        else:
            logger.warning('No vocal source found')
        # NOTE(review): this inner condition duplicates the check above and
        # looks redundant — confirm before simplifying.
        if args.augment.repitch.proba:
            train_set = RepitchedWrapper(train_set, vocals=vocals, **args.augment.repitch)
    logger.info("train/valid set size: %d %d", len(train_set), len(valid_set))
    train_loader = distrib.loader(
        train_set, batch_size=args.batch_size, shuffle=True,
        num_workers=args.misc.num_workers, drop_last=True)
    if args.dset.full_cv:
        # Full-length validation tracks: batch size 1, keep every sample.
        valid_loader = distrib.loader(
            valid_set, batch_size=1, shuffle=False,
            num_workers=args.misc.num_workers)
    else:
        valid_loader = distrib.loader(
            valid_set, batch_size=args.batch_size, shuffle=False,
            num_workers=args.misc.num_workers, drop_last=True)
    loaders = {"train": train_loader, "valid": valid_loader}
    # Construct Solver
    return Solver(loaders, model, optimizer, args)
def get_solver_from_sig(sig, model_only=False):
    """Rebuild the Solver for a past experiment identified by its Dora signature.

    Temporarily clears any initialized GlobalHydra instance (restoring it
    afterwards) so the experiment's own config can be loaded.
    """
    inst = GlobalHydra.instance()
    hyd = None
    if inst.is_initialized():
        hyd = inst.hydra
        inst.clear()
    xp = main.get_xp_from_sig(sig)
    if hyd is not None:
        inst.clear()
        inst.initialize(hyd)
    with xp.enter(stack=True):
        return get_solver(xp.cfg, model_only)
@hydra_main(config_path="../conf", config_name="config", version_base="1.1")
def main(args):
    """Hydra entry point: resolve paths, configure env/logging, and train."""
    global __file__
    # Hydra changes the CWD; make __file__ absolute so relative lookups work.
    __file__ = hydra.utils.to_absolute_path(__file__)
    # Dataset paths in the config are relative to the original CWD.
    for attr in ["musdb", "wav", "metadata"]:
        val = getattr(args.dset, attr)
        if val is not None:
            setattr(args.dset, attr, hydra.utils.to_absolute_path(val))
    # Avoid thread oversubscription from BLAS/OMP inside data workers.
    os.environ["OMP_NUM_THREADS"] = "1"
    os.environ["MKL_NUM_THREADS"] = "1"
    if args.misc.verbose:
        logger.setLevel(logging.DEBUG)
    logger.info("For logs, checkpoints and samples check %s", os.getcwd())
    logger.debug(args)
    from dora import get_xp
    logger.debug(get_xp().cfg)
    solver = get_solver(args)
    solver.train()
# Test hook: lets the test-suite redirect Dora's output directory.
if '_DORA_TEST_PATH' in os.environ:
    main.dora.dir = Path(os.environ['_DORA_TEST_PATH'])
if __name__ == "__main__":
    main()
| 2,338
|
13d2629975155efa37788395a489b7afced2ae31142770e96d81707f3bcd99a7
| 31.143426
| 98
| 0.620476
| 3.450813
| false
| false
| false
| false
|
HKUDS/AutoAgent
|
autoagent/agents/meta_agent/workflow_creator.py
| 3,359
| 0
|
MIT License
|
from autoagent.registry import register_agent
from autoagent.tools.meta.edit_agents import list_agents, create_agent, delete_agent, run_agent, read_agent, create_orchestrator_agent
from autoagent.tools.meta.edit_workflow import list_workflows, create_workflow, run_workflow
from autoagent.tools.terminal_tools import execute_command, terminal_page_down, terminal_page_up, terminal_page_to
from autoagent.types import Agent
from autoagent.io_utils import read_file
@register_agent(name = "Workflow Creator Agent", func_name="get_workflow_creator_agent")
def get_workflow_creator_agent(model: str) -> Agent:
    """Build the Workflow Creator agent for the MetaChain framework.

    Args:
        model: name of the LLM backing the agent.

    Returns:
        Agent: configured "Workflow Creator Agent" with workflow/agent
        editing and terminal tools attached.
    """
    def instructions(context_variables):
        # System prompt for the agent. It is a runtime string, kept verbatim.
        return f"""\
You are a Workflow Creator specialized in the MetaChain framework. Your primary responsibility is to create and manage workflows based on XML-formatted workflow forms.
CORE RESPONSIBILITIES:
1. Parse and implement workflow forms
2. Create necessary agents if specified in the workflow
3. Create and manage workflows
4. Execute workflows as needed
AVAILABLE FUNCTIONS:
1. Workflow Management:
   - `create_workflow`: Create new workflows based on the workflow form
   - `run_workflow`: Execute the created workflow
   - `list_workflows`: Display all available workflows
2. Agent Management (when needed):
   - `create_agent`: Create new agents if specified in the workflow form. If no tools are explicitly specified, use empty tool list ([])
   - `read_agent`: Retrieve existing agent definitions before updates
   - `list_agents`: Display all available agents
3. System Tools:
   - `execute_command`: Handle system dependencies
   - `terminal_page_down`, `terminal_page_up`, `terminal_page_to`: Navigate terminal output
WORKFLOW CREATION PROCESS:
1. Parse Workflow Form:
   - Analyze the workflow form carefully
   - Identify any new agents that need to be created
   - Understand the workflow structure and requirements
2. Create Required Agents:
   - For each new agent in the workflow form:
     * Use `create_agent` with appropriate parameters
     * If no tools specified, use empty tool list ([])
     * Verify agent creation success
3. Create Workflow:
   - Use `create_workflow` to generate the workflow
   - Ensure all required agents exist
   - Validate workflow structure
4. Execute Workflow:
   - Use `run_workflow` to execute the created workflow
   - Monitor execution progress
   - Handle any errors appropriately
BEST PRACTICES:
1. Always check if required agents exist before creating new ones
2. Use empty tool list ([]) when no specific tools are mentioned
3. Validate workflow creation before execution
4. Follow the exact specifications from the workflow form XML
5. Handle errors and dependencies appropriately
Remember: Your primary goal is to create and execute workflows according to the provided workflow forms, creating any necessary agents along the way.
"""
    # Tools exposed to the agent: agent editing, terminal and workflow management.
    tool_list = [list_agents, create_agent, execute_command, read_agent, terminal_page_down, terminal_page_up, terminal_page_to, list_workflows, create_workflow, run_workflow]
    return Agent(
        name="Workflow Creator Agent",
        model=model,
        instructions=instructions,
        functions=tool_list,
        tool_choice = "required",
        parallel_tool_calls = False
    )
| 797
|
ebc3e05594c6ce2eb1bb890d4e6ec0c712b969b681fc59cf377285bd093c6494
| 40.469136
| 175
| 0.7532
| 4.214555
| false
| false
| false
| false
|
autoscrape-labs/pydoll
|
pydoll/commands/runtime.py
| 3,010
| 0
|
MIT License
|
import copy
class RuntimeCommands:
    """Factories for Chrome DevTools Protocol ``Runtime.*`` commands.

    Every classmethod returns a plain dict ready to be sent over a CDP
    connection: evaluating JavaScript, calling functions on remote objects,
    and listing an object's properties.

    Attributes:
        EVALUATE_TEMPLATE (dict): Template for the Runtime.evaluate command.
        CALL_FUNCTION_ON_TEMPLATE (dict): Template for the
            Runtime.callFunctionOn command.
        GET_PROPERTIES (dict): Template for the Runtime.getProperties command.
    """
    EVALUATE_TEMPLATE = {'method': 'Runtime.evaluate', 'params': {}}
    CALL_FUNCTION_ON_TEMPLATE = {
        'method': 'Runtime.callFunctionOn',
        'params': {},
    }
    GET_PROPERTIES = {
        'method': 'Runtime.getProperties',
        'params': {},
    }
    @classmethod
    def get_properties(cls, object_id: str) -> dict:
        """Build a command listing the own properties of a remote object.

        Args:
            object_id (str): The object ID of the JavaScript object.

        Returns:
            dict: The CDP command to retrieve the object's properties.
        """
        # Deep-copy the template so callers can mutate the result freely.
        cmd = copy.deepcopy(cls.GET_PROPERTIES)
        cmd['params'].update(objectId=object_id, ownProperties=True)
        return cmd
    @classmethod
    def call_function_on(
        cls,
        object_id: str,
        function_declaration: str,
        return_by_value: bool = False,
    ) -> dict:
        """Build a command invoking a function on a remote object.

        Args:
            object_id (str): Target object's ID.
            function_declaration (str): JavaScript function to execute.
            return_by_value (bool, optional): Return the result by value
                instead of as a remote object reference. Defaults to False.

        Returns:
            dict: The CDP command to call the function on the object.
        """
        cmd = copy.deepcopy(cls.CALL_FUNCTION_ON_TEMPLATE)
        cmd['params'].update(
            objectId=object_id,
            functionDeclaration=function_declaration,
            returnByValue=return_by_value,
        )
        return cmd
    @classmethod
    def evaluate_script(cls, expression: str) -> dict:
        """Build a command evaluating a JavaScript expression.

        Args:
            expression (str): The JavaScript expression to evaluate.

        Returns:
            dict: The CDP command to evaluate the expression.
        """
        cmd = copy.deepcopy(cls.EVALUATE_TEMPLATE)
        cmd['params'] = {
            'expression': expression,
            'returnByValue': False,
        }
        return cmd
| 622
|
fc8866232d07b2e5894bec3cde87098a1d69cb8f027907c0adcccb9eeab44d3b
| 31.717391
| 79
| 0.609635
| 4.839228
| false
| false
| false
| false
|
xinnan-tech/xiaozhi-esp32-server
|
main/xiaozhi-server/plugins_func/functions/play_music.py
| 7,114
| 0
|
MIT License
|
from config.logger import setup_logging
import os
import re
import time
import random
import asyncio
import difflib
import traceback
from pathlib import Path
from core.utils import p3
from core.handle.sendAudioHandle import send_stt_message
from plugins_func.register import register_function,ToolType, ActionResponse, Action
TAG = __name__
logger = setup_logging()
# Module-level cache for plugin config and the scanned music file list;
# populated lazily by initialize_music_handler().
MUSIC_CACHE = {}
# OpenAI-style tool schema registered with the LLM. The (Chinese, runtime)
# description tells the model to pass 'random' when no song name is given.
play_music_function_desc = {
    "type": "function",
    "function": {
        "name": "play_music",
        "description": "唱歌、听歌、播放音乐的方法。",
        "parameters": {
            "type": "object",
            "properties": {
                "song_name": {
                    "type": "string",
                    "description": "歌曲名称,如果用户没有指定具体歌名则为'random', 明确指定的时返回音乐的名字 示例: ```用户:播放两只老虎\n参数:两只老虎``` ```用户:播放音乐 \n参数:random ```"
                }
            },
            "required": ["song_name"]
        }
    }
}
@register_function('play_music', play_music_function_desc, ToolType.SYSTEM_CTL)
def play_music(conn, song_name: str):
    """Tool entry point: schedule music playback on the connection's loop.

    Args:
        conn: active client connection; provides the asyncio event loop.
        song_name: requested song, or "random" for a random pick.

    Returns:
        ActionResponse: immediate acknowledgement; playback runs asynchronously.
    """
    try:
        music_intent = f"播放音乐 {song_name}" if song_name != "random" else "随机播放音乐"
        # Make sure the connection's event loop is still running.
        if not conn.loop.is_running():
            logger.bind(tag=TAG).error("事件循环未运行,无法提交任务")
            return ActionResponse(action=Action.RESPONSE, result="系统繁忙", response="请稍后再试")
        # Submit the async playback task from this (non-loop) thread.
        future = asyncio.run_coroutine_threadsafe(
            handle_music_command(conn, music_intent),
            conn.loop
        )
        # Non-blocking completion callback: log success or failure.
        def handle_done(f):
            try:
                f.result()  # re-raises any playback exception
                logger.bind(tag=TAG).info("播放完成")
            except Exception as e:
                logger.bind(tag=TAG).error(f"播放失败: {e}")
        future.add_done_callback(handle_done)
        return ActionResponse(action=Action.RESPONSE, result="指令已接收", response="正在为您播放音乐")
    except Exception as e:
        logger.bind(tag=TAG).error(f"处理音乐意图错误: {e}")
        return ActionResponse(action=Action.RESPONSE, result=str(e), response="播放音乐时出错了")
def _extract_song_name(text):
"""从用户输入中提取歌名"""
for keyword in ["播放音乐"]:
if keyword in text:
parts = text.split(keyword)
if len(parts) > 1:
return parts[1].strip()
return None
def _find_best_match(potential_song, music_files):
"""查找最匹配的歌曲"""
best_match = None
highest_ratio = 0
for music_file in music_files:
song_name = os.path.splitext(music_file)[0]
ratio = difflib.SequenceMatcher(None, potential_song, song_name).ratio()
if ratio > highest_ratio and ratio > 0.4:
highest_ratio = ratio
best_match = music_file
return best_match
def get_music_files(music_dir, music_ext):
    """Recursively scan ``music_dir`` for audio files.

    Args:
        music_dir: directory to scan (str or Path).
        music_ext: accepted lowercase extensions (e.g. ".mp3", ".wav").

    Returns:
        tuple[list[str], list[str]]: relative file paths and the same paths
        without their extensions, in scan order.
    """
    root = Path(music_dir)
    music_files = []
    music_file_names = []
    for entry in root.rglob("*"):
        if not entry.is_file():
            continue
        # Filter by (lower-cased) extension.
        if entry.suffix.lower() not in music_ext:
            continue
        rel = str(entry.relative_to(root))
        music_files.append(rel)
        music_file_names.append(os.path.splitext(rel)[0])
    return music_files, music_file_names
def initialize_music_handler(conn):
    """Lazily populate MUSIC_CACHE from the connection's plugin config.

    Reads music directory, accepted extensions and rescan interval (falling
    back to defaults), then scans the directory once. Subsequent calls are
    no-ops while the cache is non-empty.
    """
    global MUSIC_CACHE
    if MUSIC_CACHE == {}:
        if "play_music" in conn.config["plugins"]:
            MUSIC_CACHE["music_config"] = conn.config["plugins"]["play_music"]
            MUSIC_CACHE["music_dir"] = os.path.abspath(
                MUSIC_CACHE["music_config"].get("music_dir", "./music")  # default path
            )
            MUSIC_CACHE["music_ext"] = MUSIC_CACHE["music_config"].get("music_ext", (".mp3", ".wav", ".p3"))
            MUSIC_CACHE["refresh_time"] = MUSIC_CACHE["music_config"].get("refresh_time", 60)
        else:
            # No plugin config: fall back to built-in defaults.
            MUSIC_CACHE["music_dir"] = os.path.abspath("./music")
            MUSIC_CACHE["music_ext"] = (".mp3", ".wav", ".p3")
            MUSIC_CACHE["refresh_time"] = 60
        # Scan the music directory and remember when we did it.
        MUSIC_CACHE["music_files"], MUSIC_CACHE["music_file_names"] = get_music_files(MUSIC_CACHE["music_dir"],
                                                                                      MUSIC_CACHE["music_ext"])
        MUSIC_CACHE["scan_time"] = time.time()
    return MUSIC_CACHE
async def handle_music_command(conn, text):
    """Handle a play-music intent: match a named song or play at random.

    Always returns True; falls back to random playback when no specific
    song can be matched.
    """
    initialize_music_handler(conn)
    global MUSIC_CACHE
    """处理音乐播放指令"""
    # Strip punctuation before keyword matching.
    clean_text = re.sub(r'[^\w\s]', '', text).strip()
    logger.bind(tag=TAG).debug(f"检查是否是音乐命令: {clean_text}")
    # Try to match a specific song name.
    if os.path.exists(MUSIC_CACHE["music_dir"]):
        if time.time() - MUSIC_CACHE["scan_time"] > MUSIC_CACHE["refresh_time"]:
            # Refresh the cached music file list.
            MUSIC_CACHE["music_files"], MUSIC_CACHE["music_file_names"] = get_music_files(MUSIC_CACHE["music_dir"],
                                                                                          MUSIC_CACHE["music_ext"])
            MUSIC_CACHE["scan_time"] = time.time()
        potential_song = _extract_song_name(clean_text)
        if potential_song:
            best_match = _find_best_match(potential_song, MUSIC_CACHE["music_files"])
            if best_match:
                logger.bind(tag=TAG).info(f"找到最匹配的歌曲: {best_match}")
                await play_local_music(conn, specific_file=best_match)
                return True
    # Generic "play music" command: pick a random track.
    await play_local_music(conn)
    return True
async def play_local_music(conn, specific_file=None):
    """Play a local music file and queue its audio on the connection.

    Args:
        conn: active client connection (TTS codec + audio queue).
        specific_file: relative path of the track to play; None picks a
            random file from the cache.
    """
    global MUSIC_CACHE
    """播放本地音乐文件"""
    try:
        if not os.path.exists(MUSIC_CACHE["music_dir"]):
            logger.bind(tag=TAG).error(f"音乐目录不存在: " + MUSIC_CACHE["music_dir"])
            return
        # Resolve which file to play.
        if specific_file:
            selected_music = specific_file
            music_path = os.path.join(MUSIC_CACHE["music_dir"], specific_file)
        else:
            if not MUSIC_CACHE["music_files"]:
                logger.bind(tag=TAG).error("未找到MP3音乐文件")
                return
            selected_music = random.choice(MUSIC_CACHE["music_files"])
            music_path = os.path.join(MUSIC_CACHE["music_dir"], selected_music)
        if not os.path.exists(music_path):
            logger.bind(tag=TAG).error(f"选定的音乐文件不存在: {music_path}")
            return
        text = f"正在播放{selected_music}"
        await send_stt_message(conn, text)
        # Reset TTS bookkeeping before streaming music audio.
        conn.tts_first_text_index = 0
        conn.tts_last_text_index = 0
        conn.llm_finish_task = True
        # .p3 files are pre-encoded opus; everything else goes through TTS codec.
        if music_path.endswith(".p3"):
            opus_packets, duration = p3.decode_opus_from_file(music_path)
        else:
            opus_packets, duration = conn.tts.audio_to_opus_data(music_path)
        conn.audio_play_queue.put((opus_packets, selected_music, 0))
    except Exception as e:
        logger.bind(tag=TAG).error(f"播放音乐失败: {str(e)}")
        logger.bind(tag=TAG).error(f"详细错误: {traceback.format_exc()}")
| 2,444
|
ceea6b3d7735760141878e0a75f18e6bc4ca9447411f68596e80e2454cf8a76d
| 35.295918
| 147
| 0.553556
| 2.910802
| false
| false
| false
| false
|
hkust-nlp/simpleRL-reason
|
train/openrlhf/trainer/ppo_utils/math_equal_file.py
| 9,637
| 0
|
MIT License
|
import re
import regex
import multiprocessing
from math import isclose
from typing import Union
from sympy import simplify, N
from sympy.parsing.sympy_parser import parse_expr
from sympy.parsing.latex import parse_latex
from latex2sympy2 import latex2sympy
from timeout_decorator import timeout, TimeoutError
def parse_digits(num):
    """Parse ``num`` into a float, handling thousands separators and percents.

    Commas are stripped ("1,234" -> 1234.0). When plain parsing fails, a
    trailing '%' and a stray escaping '\\' are stripped and the value is
    divided by 100 ("50%" -> 0.5).

    Returns:
        float | None: the parsed value, or None if ``num`` is not numeric.
    """
    # str.replace suffices here; the third-party `regex` module was overkill
    # for a literal comma substitution.
    num = str(num).replace(',', '')
    try:
        return float(num)
    except (TypeError, ValueError):
        # Narrowed from bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        if num.endswith('%'):
            num = num[:-1]
        if num.endswith('\\'):
            num = num[:-1]
        try:
            return float(num) / 100
        except (TypeError, ValueError):
            pass
    return None
def is_digit(num):
    """True when ``num`` is parseable by its counterpart parse_digits."""
    return parse_digits(num) is not None
def str_to_pmatrix(input_str):
    """Convert brace-delimited matrices like "{1,2}" to LaTeX pmatrix form.

    Every "{...}" group containing a comma becomes
    "\\begin{pmatrix}...\\end{pmatrix}" with commas turned into row
    separators; multiple groups are joined with ", ". Groups without a
    comma are ignored, yielding an empty string.
    """
    stripped = input_str.strip()
    converted = []
    for group in re.findall(r'\{.*,.*\}', stripped):
        body = group.strip('{}').replace(',', '\\')
        converted.append(r'\begin{pmatrix}' + body + r'\end{pmatrix}')
    return ', '.join(converted)
def extract_inside_str(input_str):
    """Strip common math-answer wrappers from a string.

    Peels one layer of \\( \\), \\[ \\], ( ), [ ], \\text{ } or a trailing
    ``` fence, returning the inner text. Non-strings and unmatched inputs
    pass through unchanged.

    NOTE(review): the `.split(open)[1].split(close)[-2]` pattern only keeps
    the segment between the first opener and the last closer; with nested or
    repeated delimiters the result may drop content — confirm intended.
    """
    if type(input_str) == str and input_str.strip().startswith("\\(") and input_str.strip().endswith("\\)") :
        try:
            input_str = input_str.strip().split("\\(")[1].split("\\)")[-2]
        except:
            pass
    if type(input_str) == str and input_str.strip().startswith("\\[") and input_str.strip().endswith("\\]"):
        try:
            input_str = input_str.strip().split("\\[")[1].split("\\]")[-2]
        except:
            pass
    if type(input_str) == str and input_str.strip().startswith("(") and input_str.strip().endswith(")"):
        try:
            input_str = input_str.strip().split("(")[1].split(")")[-2]
        except:
            pass
    if type(input_str) == str and input_str.strip().startswith("[") and input_str.strip().endswith("]"):
        try:
            input_str = input_str.strip().split("[")[1].split("]")[-2]
        except:
            pass
    if type(input_str) == str and input_str.strip().startswith("\\text{"):
        try:
            input_str = input_str.strip().split("\\text{")[1].split("}")[-2]
        except:
            pass
    if type(input_str) == str and input_str.strip().endswith("```"):
        try:
            input_str = input_str[:-len("```")]
        except:
            pass
    return input_str
def math_equal(prediction: Union[bool, float, str],
               reference: Union[float, str],
               include_percentage: bool = True,
               is_close: bool = True,
               use_timeout: bool = False,
               ) -> bool:
    """
    Exact match of math if and only if:
    1. numerical equal: both can convert to float and are equal
    2. symbolic equal: both can convert to sympy expression and are equal

    Args:
        prediction: candidate answer (bool/float/str).
        reference: gold answer.
        include_percentage: also accept reference/100 and reference*100, so
            percent-vs-fraction answers still match.
        is_close: compare numbers with a 1e-3 absolute tolerance (via
            numeric_equal) instead of exact ==.
        use_timeout: when truthy, run the sympy comparison under
            timeout(use_timeout) to guard against sympy hangs.
    """
    # print("Judge:", prediction, reference)
    # Fast path: identical string forms.
    if str(prediction) == str(reference):
        return True

    # Strip one layer of \( \), \[ \], ( ), [ ], \text{ } or ``` wrappers.
    prediction = extract_inside_str(prediction)
    reference = extract_inside_str(reference)

    try: # 1. numerical equal
        if is_digit(prediction) and is_digit(reference):
            prediction = parse_digits(prediction)
            reference = parse_digits(reference)
            # number questions
            if include_percentage:
                gt_result = [reference / 100, reference, reference * 100]
            else:
                gt_result = [reference]
            for item in gt_result:
                try:
                    if is_close:
                        if numeric_equal(prediction, item):
                            return True
                    else:
                        if item == prediction:
                            return True
                except Exception:
                    continue
            return False
    except:
        pass

    # Empty/None predictions (but not 0/False) can never match symbolically.
    if not prediction and prediction not in [0, False]:
        return False

    # print("try math_eval")
    # 2. symbolic equal
    reference = str(reference).strip()
    prediction = str(prediction).strip()

    ## pmatrix (amps)
    # When only the prediction uses pmatrix form, convert a {a,b}-style
    # reference into pmatrix notation so the two are comparable.
    if "pmatrix" in prediction and not 'pmatrix' in reference:
        reference = str_to_pmatrix(reference)

    ## deal with [], (), {}
    # Compare bracket-insensitively: drop surrounding [] / () and all braces.
    pred_str, ref_str = prediction, reference
    if (prediction.startswith("[") and prediction.endswith("]") and not reference.startswith("(")) or \
        (prediction.startswith("(") and prediction.endswith(")") and not reference.startswith("[")):
        pred_str = pred_str.strip("[]()")
        ref_str = ref_str.strip("[]()")
    for s in ['{', "}", "(", ")"]:
        ref_str = ref_str.replace(s, "")
        pred_str = pred_str.replace(s, "")
    if pred_str.lower() == ref_str.lower():
        return True

    ## [a, b] vs. [c, d], return a==c and b==d
    # Element-wise recursive comparison of tuple/interval style answers.
    if regex.match(r'(\(|\[).+(\)|\])', prediction) is not None and regex.match(r'(\(|\[).+(\)|\])', reference) is not None:
        pred_parts = prediction[1:-1].split(",")
        ref_parts = reference[1:-1].split(",")
        if len(pred_parts) == len(ref_parts):
            if all([math_equal(pred_parts[i], ref_parts[i], include_percentage, is_close) for i in range(len(pred_parts))]):
                return True

    # Matrix answers: compare cell by cell, rows split on "\\", columns on "&".
    # (len("\\begin{pmatrix}") == len("\\begin{bmatrix}"), so one slice works.)
    if (prediction.startswith("\\begin{pmatrix}") or prediction.startswith("\\begin{bmatrix}")) and (prediction.endswith("\\end{pmatrix}") or prediction.endswith("\\end{bmatrix}")) and \
        (reference.startswith("\\begin{pmatrix}") or reference.startswith("\\begin{bmatrix}")) and (reference.endswith("\\end{pmatrix}") or reference.endswith("\\end{bmatrix}")):
        pred_lines = [line.strip() for line in prediction[len("\\begin{pmatrix}"): -len("\\end{pmatrix}")].split("\\\\") if line.strip()]
        ref_lines = [line.strip() for line in reference[len("\\begin{pmatrix}"): -len("\\end{pmatrix}")].split("\\\\") if line.strip()]
        matched = True
        if len(pred_lines) == len(ref_lines):
            for pred_line, ref_line in zip(pred_lines, ref_lines):
                pred_parts = pred_line.split("&")
                ref_parts = ref_line.split("&")
                if len(pred_parts) == len(ref_parts):
                    if not all([math_equal(pred_parts[i], ref_parts[i], include_percentage, is_close) for i in range(len(pred_parts))]):
                        matched = False
                        break
                else:
                    matched = False
                if not matched:
                    break
        else:
            matched = False
        if matched:
            return True

    # Equations: "a = b" matches "c = d" when (a-b) equals (c-d) up to sign.
    if prediction.count('=') == 1 and reference.count('=') == 1:
        pred = prediction.split('=')
        pred = f"{pred[0].strip()} - ({pred[1].strip()})"
        ref = reference.split('=')
        ref = f"{ref[0].strip()} - ({ref[1].strip()})"
        if symbolic_equal(pred, ref) or symbolic_equal(f"-({pred})", ref):
            return True
    # "x = value" vs. bare "value": drop a short (<=2 chars) left-hand side.
    elif prediction.count('=') == 1 and len(prediction.split('=')[0].strip()) <= 2 and '=' not in reference:
        if math_equal(prediction.split('=')[1], reference, include_percentage, is_close):
            return True
    elif reference.count('=') == 1 and len(reference.split('=')[0].strip()) <= 2 and '=' not in prediction:
        if math_equal(prediction, reference.split('=')[1], include_percentage, is_close):
            return True

    # Last resort: full sympy comparison, optionally under a timeout.
    if use_timeout:
        try:
            if timeout(use_timeout)(symbolic_equal)(prediction, reference):
                return True
        except TimeoutError:
            print({"type": "timeout", "prediction": prediction, "reference": reference})
            pass
    else:
        if symbolic_equal(prediction, reference):
            return True

    return False
def math_equal_process(param):
    """Multiprocessing-friendly wrapper: compares the last two items of *param*."""
    prediction, reference = param[-2], param[-1]
    return math_equal(prediction, reference)
def numeric_equal(prediction: float, reference: float):
    """Return True when the two values differ by at most 1e-3 (absolute)."""
    # math.isclose is symmetric in its arguments for both abs_tol and the
    # default rel_tol, so the argument order does not matter.
    return isclose(prediction, reference, abs_tol=1e-3)
def symbolic_equal(a, b):
    """Compare two expressions symbolically via sympy.

    Each input is parsed with parse_latex, parse_expr and latex2sympy in
    turn (falling back to the raw string when every parser fails), then the
    parsed forms are compared with several equivalence checks of increasing
    cost. Returns True on the first check that succeeds.
    """
    def _parse(s):
        # Try each parser twice: once with doubled backslashes collapsed
        # ("\\\\frac" -> "\\frac"), once on the raw string.
        for f in [parse_latex, parse_expr, latex2sympy]:
            try:
                return f(s.replace("\\\\", "\\"))
            except:
                try:
                    return f(s)
                except:
                    pass
        return s

    a = _parse(a)
    b = _parse(b)

    # direct equal
    try:
        if str(a) == str(b) or a == b:
            return True
    except:
        pass

    # print("try simplify")
    # simplify equal
    try:
        if a.equals(b) or simplify(a-b) == 0:
            return True
    except:
        pass

    # print("try equation")
    # equation equal: Eq objects match when |lhs - rhs| agree up to sign.
    try:
        if (abs(a.lhs - a.rhs)).equals(abs(b.lhs - b.rhs)):
            return True
    except:
        pass

    # numeric fallback: evaluate both sides and compare within tolerance
    try:
        if numeric_equal(float(N(a)), float(N(b))):
            return True
    except:
        pass
    return False
def call_with_timeout(func, args=(), kwargs=None, timeout_duration=5):
    """Run func(*args, **kwargs) in a spawned worker process with a timeout.

    Args:
        func: picklable callable to execute.
        args: positional arguments for *func*.
        kwargs: keyword arguments for *func* (None means {}).
        timeout_duration: seconds to wait for the result.

    Returns:
        The function's return value, or False on timeout or any error.
    """
    if kwargs is None:
        kwargs = {}
    try:
        # BUG FIX: get_start_method() returns a *string*, which has no .Pool
        # attribute, so the original always raised and returned False.
        # get_context("spawn") returns a context object that exposes Pool.
        with multiprocessing.get_context("spawn").Pool(1) as p:
            result = p.apply_async(func, args, kwargs)
            # AsyncResult.get raises multiprocessing.TimeoutError (not the
            # builtin/timeout_decorator one the original tried to catch).
            return result.get(timeout=timeout_duration)
    except multiprocessing.TimeoutError:
        print("Timeout reached")
        return False
    except Exception as e:
        print(f"Error: {e}")
        return False
def _test_math_equal():
    """Ad-hoc smoke test: an equation should match itself with sides swapped."""
    result = math_equal("a = b", "b = a")
    print(result)
| 2,439
|
337e3da794a21fc80933d900f433ed749e7780a19132741c2312f9267c11d40e
| 32.814035
| 186
| 0.531701
| 3.95121
| false
| false
| false
| false
|
meta-llama/llama-stack
|
tests/integration/post_training/test_post_training.py
| 3,693
| 0
|
MIT License
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from typing import List
import pytest
from llama_stack.apis.common.job_types import JobStatus
from llama_stack.apis.post_training import (
Checkpoint,
DataConfig,
LoraFinetuningConfig,
OptimizerConfig,
PostTrainingJob,
PostTrainingJobArtifactsResponse,
PostTrainingJobStatusResponse,
TrainingConfig,
)
# How to run this test:
#
# pytest llama_stack/providers/tests/post_training/test_post_training.py
# -m "torchtune_post_training_huggingface_datasetio"
# -v -s --tb=short --disable-warnings
@pytest.mark.skip(reason="FIXME FIXME @yanxi0830 this needs to be migrated to use the API")
class TestPostTraining:
    """Integration tests for the post-training provider API.

    Every test consumes the ``post_training_stack`` fixture, which supplies a
    configured post-training implementation. The whole class is currently
    skipped pending migration to the public API (see skip reason).

    NOTE(review): the later tests query job "1234" created by
    test_supervised_fine_tune, so they appear order-dependent — confirm the
    fixture guarantees that ordering.
    """

    @pytest.mark.asyncio
    async def test_supervised_fine_tune(self, post_training_stack):
        """supervised_fine_tune accepts a LoRA config and returns a job handle."""
        # Minimal LoRA setup: adapt attention + output projections only.
        algorithm_config = LoraFinetuningConfig(
            type="LoRA",
            lora_attn_modules=["q_proj", "v_proj", "output_proj"],
            apply_lora_to_mlp=True,
            apply_lora_to_output=False,
            rank=8,
            alpha=16,
        )
        data_config = DataConfig(
            dataset_id="alpaca",
            batch_size=1,
            shuffle=False,
        )
        optimizer_config = OptimizerConfig(
            optimizer_type="adamw",
            lr=3e-4,
            lr_min=3e-5,
            weight_decay=0.1,
            num_warmup_steps=100,
        )
        # One step of one epoch keeps the run cheap.
        training_config = TrainingConfig(
            n_epochs=1,
            data_config=data_config,
            optimizer_config=optimizer_config,
            max_steps_per_epoch=1,
            gradient_accumulation_steps=1,
        )
        post_training_impl = post_training_stack
        response = await post_training_impl.supervised_fine_tune(
            job_uuid="1234",
            model="Llama3.2-3B-Instruct",
            algorithm_config=algorithm_config,
            training_config=training_config,
            hyperparam_search_config={},
            logger_config={},
            checkpoint_dir="null",
        )
        assert isinstance(response, PostTrainingJob)
        assert response.job_uuid == "1234"

    @pytest.mark.asyncio
    async def test_get_training_jobs(self, post_training_stack):
        """The submitted job should be listed by get_training_jobs."""
        post_training_impl = post_training_stack
        jobs_list = await post_training_impl.get_training_jobs()
        assert isinstance(jobs_list, List)
        assert jobs_list[0].job_uuid == "1234"

    @pytest.mark.asyncio
    async def test_get_training_job_status(self, post_training_stack):
        """A finished job reports completed status and at least one checkpoint."""
        post_training_impl = post_training_stack
        job_status = await post_training_impl.get_training_job_status("1234")
        assert isinstance(job_status, PostTrainingJobStatusResponse)
        assert job_status.job_uuid == "1234"
        assert job_status.status == JobStatus.completed
        assert isinstance(job_status.checkpoints[0], Checkpoint)

    @pytest.mark.asyncio
    async def test_get_training_job_artifacts(self, post_training_stack):
        """Artifacts expose the checkpoint identifier, epoch and on-disk path."""
        post_training_impl = post_training_stack
        job_artifacts = await post_training_impl.get_training_job_artifacts("1234")
        assert isinstance(job_artifacts, PostTrainingJobArtifactsResponse)
        assert job_artifacts.job_uuid == "1234"
        assert isinstance(job_artifacts.checkpoints[0], Checkpoint)
        assert job_artifacts.checkpoints[0].identifier == "Llama3.2-3B-Instruct-sft-0"
        assert job_artifacts.checkpoints[0].epoch == 0
        assert "/.llama/checkpoints/Llama3.2-3B-Instruct-sft-0" in job_artifacts.checkpoints[0].path
| 997
|
19dc93260b79a3b5fe78e702dfced43cc2457669864bf462fbd705fd3803745d
| 35.564356
| 100
| 0.650149
| 3.704112
| false
| true
| false
| false
|
meta-llama/llama-stack
|
llama_stack/models/llama/llama3/template_data.py
| 2,960
| 0
|
MIT License
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# top-level folder for each specific model found within the models/ directory at
# the top-level of this source tree.
from ..datatypes import BuiltinTool, StopReason, ToolCall
from .prompt_templates import (
BuiltinToolGenerator,
JsonCustomToolGenerator,
ToolResponseGenerator,
)
# Default system instruction shared by the example system messages below.
INSTRUCTION = "You are a helpful assistant."
def system_message_builtin_tools_only():
    """System-message example: first builtin-tool sample, no custom tools."""
    return dict(
        builtin_tools=BuiltinToolGenerator().data_examples()[0],
        custom_tools=[],
        instruction=INSTRUCTION,
    )
def system_message_builtin_code_only():
    """System-message example: code-interpreter builtin sample, empty instruction."""
    return dict(
        builtin_tools=BuiltinToolGenerator().data_examples()[1],
        custom_tools=[],
        instruction="",
    )
def system_message_custom_tools_only():
    """System-message example: JSON custom-tool sample, no builtin tools."""
    return dict(
        builtin_tools=[],
        custom_tools=JsonCustomToolGenerator().data_examples()[0],
        instruction=INSTRUCTION,
    )
def system_message_builtin_and_custom_tools():
    """System-message example combining builtin and custom tool samples."""
    return dict(
        builtin_tools=BuiltinToolGenerator().data_examples()[0],
        custom_tools=JsonCustomToolGenerator().data_examples()[0],
        instruction=INSTRUCTION,
    )
def system_default():
    """Default system-message example: no tools, stock instruction."""
    return dict(builtin_tools=[], custom_tools=[], instruction=INSTRUCTION)
def tool_success():
    """Tool-response example: the success case."""
    examples = ToolResponseGenerator().data_examples()
    return examples[0]
def tool_failure():
    """Tool-response example: the failure case."""
    examples = ToolResponseGenerator().data_examples()
    return examples[1]
def assistant_builtin_tool_call():
    """Assistant turn that invokes the builtin brave_search tool."""
    call = ToolCall(
        call_id="uuid",
        tool_name=BuiltinTool.brave_search,
        arguments={"query": "Who won NBA in 2024?"},
    )
    return {
        "content": "",
        "tool_call": call,
        "stop_reason": StopReason.end_of_message,
    }
def assistant_custom_tool_call():
    """Assistant turn that invokes the custom trending_songs tool."""
    call = ToolCall(
        call_id="uuid",
        tool_name="trending_songs",
        arguments={"country": "US", "n": 10},
    )
    return {
        "content": "",
        "tool_call": call,
        "stop_reason": StopReason.end_of_turn,
    }
def assistant_default():
    """Plain assistant reply with no tool call."""
    message = {
        "content": "Hi, I am a helpful assistant. What can I help you with today?",
        "tool_call": None,
        "stop_reason": StopReason.end_of_turn,
    }
    return message
def user_default():
    """User turn asking a plain-text question."""
    content = "Please tell me how to plan a trip to New York"
    return {"content": content}
def user_images():
    """User turn carrying two leading image tokens."""
    return dict(content="<|image|><|image|>What do these images depict?")
def user_interleaved_images():
    """User turn with image tokens interleaved between two text segments."""
    text = "<|image|>Describe the image in one sentence.<|image|>Write a haiku about these images"
    return {"content": text}
| 780
|
df75136a7cb5c9b06c0c600012fbf217f5316a43f910dd840b8661622bd8ed37
| 24.517241
| 111
| 0.621959
| 3.794872
| false
| false
| false
| false
|
deepseek-ai/smallpond
|
tests/test_dataframe.py
| 7,075
| 0
|
MIT License
|
from typing import List
import pandas as pd
import pyarrow as pa
import pytest
from smallpond.dataframe import Session
def test_pandas(sp: Session):
    """A pandas DataFrame round-trips through the session unchanged."""
    pandas_df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
    df = sp.from_pandas(pandas_df)
    assert df.to_pandas().equals(pandas_df)
def test_arrow(sp: Session):
    """An Arrow table round-trips through the session unchanged."""
    arrow_table = pa.table({"a": [1, 2, 3], "b": [4, 5, 6]})
    df = sp.from_arrow(arrow_table)
    assert df.to_arrow() == arrow_table
def test_items(sp: Session):
    """from_items wraps scalars under an "item" key and keeps dicts as rows."""
    df = sp.from_items([1, 2, 3])
    assert df.take_all() == [{"item": 1}, {"item": 2}, {"item": 3}]
    df = sp.from_items([{"a": 1, "b": 4}, {"a": 2, "b": 5}, {"a": 3, "b": 6}])
    assert df.take_all() == [{"a": 1, "b": 4}, {"a": 2, "b": 5}, {"a": 3, "b": 6}]
def test_csv(sp: Session):
    """read_csv loads tab-separated fixtures with an explicit schema."""
    df = sp.read_csv(
        "tests/data/mock_urls/*.tsv",
        schema={"urlstr": "varchar", "valstr": "varchar"},
        delim=r"\t",
    )
    assert df.count() == 1000
def test_parquet(sp: Session):
    """read_parquet loads all rows from the parquet fixtures."""
    df = sp.read_parquet("tests/data/mock_urls/*.parquet")
    assert df.count() == 1000
def test_take(sp: Session):
    """take(n) returns the first n rows; take_all returns every row."""
    df = sp.from_pandas(pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}))
    assert df.take(2) == [{"a": 1, "b": 4}, {"a": 2, "b": 5}]
    assert df.take_all() == [{"a": 1, "b": 4}, {"a": 2, "b": 5}, {"a": 3, "b": 6}]
def test_map(sp: Session):
    """map accepts a SQL expression or a per-row callable; schema may be given."""
    df = sp.from_arrow(pa.table({"a": [1, 2, 3], "b": [4, 5, 6]}))
    df1 = df.map("a + b as c")
    assert df1.to_arrow() == pa.table({"c": [5, 7, 9]})
    df2 = df.map(lambda r: {"c": r["a"] + r["b"]})
    assert df2.to_arrow() == pa.table({"c": [5, 7, 9]})
    # user need to specify the schema if can not be inferred from the mapping values
    df3 = df.map(
        lambda r: {"c": None if r["a"] == 1 else r["a"] + r["b"]},
        schema=pa.schema([("c", pa.int64())]),
    )
    assert df3.to_arrow() == pa.table({"c": pa.array([None, 7, 9], type=pa.int64())})
def test_flat_map(sp: Session):
    """flat_map expands each row into multiple rows (callable or SQL unnest)."""
    df = sp.from_arrow(pa.table({"a": [1, 2, 3], "b": [4, 5, 6]}))
    df1 = df.flat_map(lambda r: [{"c": r["a"]}, {"c": r["b"]}])
    assert df1.to_arrow() == pa.table({"c": [1, 4, 2, 5, 3, 6]})
    df2 = df.flat_map("unnest(array[a, b]) as c")
    assert df2.to_arrow() == pa.table({"c": [1, 4, 2, 5, 3, 6]})
    # user need to specify the schema if can not be inferred from the mapping values
    df3 = df.flat_map(lambda r: [{"c": None}], schema=pa.schema([("c", pa.int64())]))
    assert df3.to_arrow() == pa.table({"c": pa.array([None, None, None], type=pa.int64())})
def test_map_batches(sp: Session):
    """map_batches splits 1000 rows into batches of 350 (350, 350, 300)."""
    df = sp.read_parquet("tests/data/mock_urls/*.parquet")
    df = df.map_batches(
        lambda batch: pa.table({"num_rows": [batch.num_rows]}),
        batch_size=350,
    )
    assert df.take_all() == [{"num_rows": 350}, {"num_rows": 350}, {"num_rows": 300}]
def test_filter(sp: Session):
    """filter accepts a SQL predicate or a per-row callable."""
    df = sp.from_arrow(pa.table({"a": [1, 2, 3], "b": [4, 5, 6]}))
    df1 = df.filter("a > 1")
    assert df1.to_arrow() == pa.table({"a": [2, 3], "b": [5, 6]})
    df2 = df.filter(lambda r: r["a"] > 1)
    assert df2.to_arrow() == pa.table({"a": [2, 3], "b": [5, 6]})
def test_random_shuffle(sp: Session):
    """random_shuffle keeps the multiset of rows while actually reordering them."""
    df = sp.from_items(list(range(1000))).repartition(10, by_rows=True)
    df = df.random_shuffle()
    shuffled = [d["item"] for d in df.take_all()]
    assert sorted(shuffled) == list(range(1000))

    def count_inversions(arr: List[int]) -> int:
        # O(n^2) inversion count; acceptable for n=1000 in a test.
        return sum(sum(1 for j in range(i + 1, len(arr)) if arr[i] > arr[j]) for i in range(len(arr)))

    # check the shuffle is random enough
    # the expected number of inversions is n*(n-1)/4 = 249750
    assert 220000 <= count_inversions(shuffled) <= 280000
def test_partition_by(sp: Session):
    """Partitioning by `item % 10` puts exactly one residue class per partition."""
    df = sp.from_items(list(range(1000))).repartition(10, by="item % 10")
    df = df.map("min(item % 10) as min, max(item % 10) as max")
    assert df.take_all() == [{"min": i, "max": i} for i in range(10)]
def test_partition_by_key_out_of_range(sp: Session):
    """A partition key beyond the partition count must raise a descriptive error."""
    df = sp.from_items(list(range(1000))).repartition(10, by="item % 11")
    # pytest.raises replaces the manual try/except/else-assert-False pattern;
    # match= (re.search) still verifies the error message text.
    with pytest.raises(Exception, match="partition key 10 is out of range 0-9"):
        df.to_arrow()
def test_partition_by_hash(sp: Session):
    """Hash partitioning redistributes rows without losing or duplicating any."""
    df = sp.from_items(list(range(1000))).repartition(10, hash_by="item")
    items = [d["item"] for d in df.take_all()]
    assert sorted(items) == list(range(1000))
def test_count(sp: Session):
    """count returns the number of rows."""
    df = sp.from_items([1, 2, 3])
    assert df.count() == 3
def test_limit(sp: Session):
    """limit(n) caps the row count across partitions."""
    df = sp.from_items(list(range(1000))).repartition(10, by_rows=True)
    assert df.limit(2).count() == 2
@pytest.mark.skip(reason="limit can not be pushed down to sql node for now")
@pytest.mark.timeout(10)
def test_limit_large(sp: Session):
    """limit on a huge generated range must not materialize all rows."""
    # limit will be fused with the previous select
    # otherwise, it will be timeout
    df = sp.partial_sql("select * from range(1000000000)")
    assert df.limit(2).count() == 2
def test_partial_sql(sp: Session):
    """partial_sql runs raw SQL, with {n} placeholders bound to dataframes."""
    # no input deps
    df = sp.partial_sql("select * from range(3)")
    assert df.to_arrow() == pa.table({"range": [0, 1, 2]})
    # join
    df1 = sp.from_arrow(pa.table({"id1": [1, 2, 3], "val1": ["a", "b", "c"]}))
    df2 = sp.from_arrow(pa.table({"id2": [1, 2, 3], "val2": ["d", "e", "f"]}))
    joined = sp.partial_sql("select id1, val1, val2 from {0} join {1} on id1 = id2", df1, df2)
    # the engine yields large_string columns, so compare with an explicit schema
    assert joined.to_arrow() == pa.table(
        {"id1": [1, 2, 3], "val1": ["a", "b", "c"], "val2": ["d", "e", "f"]},
        schema=pa.schema(
            [
                ("id1", pa.int64()),
                ("val1", pa.large_string()),
                ("val2", pa.large_string()),
            ]
        ),
    )
def test_error_message(sp: Session):
    """A failing partial_sql query should surface the SQL text in the error."""
    df = sp.from_items([1, 2, 3])
    df = sp.partial_sql("select a,, from {0}", df)
    # pytest.raises replaces the manual try/except/else-assert-False pattern;
    # match= (re.search) checks the query text appears in the message.
    with pytest.raises(Exception, match="select a,, from"):
        df.to_arrow()
def test_unpicklable_task_exception(sp: Session):
    """Capturing an externally imported loguru logger must fail with a clear hint."""
    from loguru import logger

    df = sp.from_items([1, 2, 3])
    try:
        # the lambda closes over the outer `logger`, which cannot be pickled
        df.map(lambda x: logger.info("use outside logger")).to_arrow()
    except Exception as ex:
        assert "Can't pickle task" in str(ex)
        assert "HINT: DO NOT use externally imported loguru logger in your task. Please import it within the task." in str(ex)
    else:
        assert False, "expected exception"
def test_log(sp: Session):
    """Tasks may print and log via stdout/stderr/loguru/logging without error."""
    df = sp.from_items([1, 2, 3])

    def log_record(x):
        # imports stay inside the task so it remains picklable (see
        # test_unpicklable_task_exception above for the failure mode)
        import logging
        import sys

        from loguru import logger

        print("stdout")
        print("stderr", file=sys.stderr)
        logger.info("loguru")
        logging.info("logging")
        return x

    df.map(log_record).to_arrow()
    # TODO: check logs should be see in the log file
    # FIXME: logs in unit test are not written to the log file
    # because we share the same ray instance for all tests
| 2,442
|
58d6267a114d139c2381ba62c6db950cad9d20f3af1a1c26f0233bd1b5aaafbb
| 32.215962
| 126
| 0.561413
| 2.897215
| false
| true
| false
| false
|
MadcowD/ell
|
src/ell/types/lmp.py
| 146
| 0
|
MIT License
|
import enum
class LMPType(str, enum.Enum):
    """Kinds of Language Model Programs.

    The str mixin makes members compare equal to their string values and keeps
    them directly JSON-serializable.
    """
    LM = "LM"
    TOOL = "TOOL"
    LABELER = "LABELER"
    FUNCTION = "FUNCTION"
    OTHER = "OTHER"
| 51
|
b51ce17d5d59adacb616a90c90f349dbaea12d0040393ab5f7727bd23aa0f959
| 15.333333
| 30
| 0.582192
| 2.862745
| false
| false
| false
| false
|
HKUDS/AutoAgent
|
autoagent/memory/rag_memory.py
| 6,592
| 0
|
MIT License
|
import uuid
import os.path
from datetime import datetime
from typing import List, Dict
import chromadb
from chromadb.utils import embedding_functions
from abc import ABC, abstractmethod
from openai import OpenAI
import numpy as np
from chromadb.api.types import QueryResult
chromadb.logger.setLevel(chromadb.logging.ERROR)
class Memory:
    """ChromaDB-backed store for query/response pairs with embedding search."""

    def __init__(
        self,
        project_path: str,
        db_name: str = '.sa',
        platform: str = 'OpenAI',
        api_key: str = None,
        embedding_model: str = "text-embedding-3-small"
    ):
        """
        Memory: memory and external knowledge management.

        Args:
            project_path: the path to store the data.
            db_name: directory name (under project_path) holding the ChromaDB files.
            platform: 'OpenAI' embeds via the OpenAI API; any other value falls
                back to a local SentenceTransformer model (all-MiniLM-L6-v2).
            api_key: OpenAI API key; defaults to the OPENAI_API_KEY env var.
            embedding_model: the embedding model to use, default will use the embedding model from ChromaDB,
                if the OpenAI has been set in the configuration, it will use the OpenAI embedding model
                "text-embedding-ada-002".
        """
        self.db_name = db_name
        self.collection_name = 'memory'
        self.client = chromadb.PersistentClient(path=os.path.join(project_path, self.db_name))
        # ensure the default collection exists up front
        self.client.get_or_create_collection(
            self.collection_name,
        )
        # use the OpenAI embedding function if the openai section is set in the configuration.
        if platform == 'OpenAI':
            openai_client = OpenAI(api_key=api_key or os.environ["OPENAI_API_KEY"])
            # returns one embedding vector per input string
            self.embedder = lambda x: [i.embedding for i in openai_client.embeddings.create(input=x, model=embedding_model).data]
        else:
            # self.embedder = embedding_functions.DefaultEmbeddingFunction()
            self.embedder = embedding_functions.SentenceTransformerEmbeddingFunction(model_name="all-MiniLM-L6-v2")

    def add_query(
        self,
        queries: List[Dict[str, str]],
        collection: str = None,
        idx: List[str] = None
    ):
        """
        add_query: add the queries to the memory.

        Args:
            queries: the queries to add to the memory. Should be in the format of
                {
                    "query": "the query",
                    "response": "the response"
                }
            collection: the name of the collection to add the queries.
            idx: the ids of the queries, should be in the same length as the queries.
                If not provided, the ids will be generated by UUID.

        Return: A list of generated IDs.
        """
        if idx:
            ids = idx
        else:
            ids = [str(uuid.uuid4()) for _ in range(len(queries))]
        if not collection:
            collection = self.collection_name
        query_list = [query['query'] for query in queries]
        embeddings = self.embedder(query_list)
        # responses are stored as per-document metadata with a creation stamp
        added_time = datetime.now().isoformat()
        resp_list = [{'response': query['response'], 'created_at': added_time} for query in queries]
        # insert the record into the database
        self.client.get_or_create_collection(collection).add(
            documents=query_list,
            metadatas=resp_list,
            ids=ids,
            embeddings=embeddings
        )
        return ids

    def query(self, query_texts: List[str], collection: str = None, n_results: int = 5) -> QueryResult:
        """
        query: query the memory.

        Args:
            query_texts: the query texts to search in the memory.
            collection: the name of the collection to search.
            n_results: the number of results to return.

        Returns: QueryResult
            class QueryResult(TypedDict):
                ids: List[IDs]
                embeddings: Optional[
                    Union[
                        List[Embeddings],
                        List[PyEmbeddings],
                        List[NDArray[Union[np.int32, np.float32]]],
                    ]
                ]
                documents: Optional[List[List[Document]]]
                uris: Optional[List[List[URI]]]
                data: Optional[List[Loadable]]
                metadatas: Optional[List[List[Metadata]]]
                distances: Optional[List[List[float]]]
                included: Include
        """
        if not collection:
            collection = self.collection_name
        query_embedding = self.embedder(query_texts)
        return self.client.get_or_create_collection(collection).query(query_embeddings=query_embedding, n_results=n_results)

    def peek(self, collection: str = None, n_results: int = 20):
        """
        peek: peek the memory.

        Args:
            collection: the name of the collection to peek.
            n_results: the number of results to return.

        Returns: the top k results.
        """
        if not collection:
            collection = self.collection_name
        return self.client.get_or_create_collection(collection).peek(limit=n_results)

    def get(self, collection: str = None, record_id: str = None):
        """
        get: get the record by the id.

        Args:
            record_id: the id of the record.
            collection: the name of the collection to get the record.

        Returns: the record.

        NOTE(review): unlike the other methods this uses get_collection, so it
        raises if the collection does not exist — confirm that is intended.
        """
        if not collection:
            collection = self.collection_name
        collection = self.client.get_collection(collection)
        if not record_id:
            return collection.get()
        return collection.get(record_id)

    def delete(self, collection_name=None):
        """
        delete: delete the memory collections.

        Args:
            collection_name: the name of the collection to delete.
        """
        if not collection_name:
            collection_name = self.collection_name
        return self.client.delete_collection(name=collection_name)

    def count(self, collection_name=None):
        """
        count: count the number of records in the memory.

        Args:
            collection_name: the name of the collection to count.
        """
        if not collection_name:
            collection_name = self.collection_name
        return self.client.get_or_create_collection(name=collection_name).count()

    def reset(self):
        """
        reset: reset the memory.

        Notice: You may need to set the environment variable `ALLOW_RESET` to `TRUE` to enable this function.
        """
        self.client.reset()
class Reranker:
    """Abstract base for rerankers that reorder retrieval results for a query."""
    def __init__(self, model: str) -> None:
        # identifier of the underlying reranking model
        self.model = model
    @abstractmethod
    def rerank(self, query_text: str, query_results: List[Dict]) -> List[Dict]:
        # subclasses must return query_results reordered by relevance
        raise NotImplementedError("Reranker is not implemented")
| 1,529
|
0a1ea561dc56cc2f288b5a2b010cfc0f3513b83b5232d4620329f5a9952fed83
| 35.832402
| 129
| 0.590413
| 4.311315
| false
| false
| false
| false
|
deepseek-ai/DeepSeek-VL2
|
inference.py
| 6,418
| 0
|
MIT License
|
# Copyright (c) 2023-2024 DeepSeek.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from argparse import ArgumentParser
from typing import List, Dict
import torch
from transformers import AutoModelForCausalLM
import PIL.Image
from deepseek_vl2.models import DeepseekVLV2ForCausalLM, DeepseekVLV2Processor
from deepseek_vl2.serve.app_modules.utils import parse_ref_bbox
def load_pil_images(conversations: List[Dict[str, str]]) -> List[PIL.Image.Image]:
    """Open every image referenced by the conversation messages.

    Args:
        conversations (List[Dict[str, str]]): the conversations with a list of messages. An example is :
            [
                {
                    "role": "User",
                    "content": "<image>\nExtract all information from this image and convert them into markdown format.",
                    "images": ["./examples/table_datasets.png"]
                },
                {"role": "Assistant", "content": ""},
            ]

    Returns:
        pil_images (List[PIL.Image.Image]): the list of PIL images, RGB-converted,
        in message order.
    """
    pil_images = []
    for message in conversations:
        # messages without an "images" entry contribute nothing
        for image_path in message.get("images", []):
            pil_images.append(PIL.Image.open(image_path).convert("RGB"))
    return pil_images
def main(args):
    """Run a grounded multi-image chat example on a DeepSeek-VL2 checkpoint.

    Args:
        args: parsed CLI namespace with `model_path` (checkpoint id/path) and
            `chunk_size` (-1 disables incremental prefilling).

    Requires a CUDA device; prints the generated answer and, when the answer
    contains grounding boxes, saves a visualization to ./vg.jpg.
    """
    dtype = torch.bfloat16

    # specify the path to the model
    model_path = args.model_path
    vl_chat_processor: DeepseekVLV2Processor = DeepseekVLV2Processor.from_pretrained(model_path)
    tokenizer = vl_chat_processor.tokenizer

    vl_gpt: DeepseekVLV2ForCausalLM = AutoModelForCausalLM.from_pretrained(
        model_path,
        trust_remote_code=True,
        torch_dtype=dtype
    )
    vl_gpt = vl_gpt.cuda().eval()

    # multiple images conversation example
    # Please note that <|grounding|> token is specifically designed for the grounded caption feature. It is not needed for normal conversations.
    conversation = [
        {
            "role": "<|User|>",
            "content": "<image>\n<image>\n<|grounding|>In the first image, an object within the red rectangle is marked. Locate the object of the same category in the second image.",
            "images": [
                "images/incontext_visual_grounding_1.jpeg",
                "images/icl_vg_2.jpeg"
            ],
        },
        {"role": "<|Assistant|>", "content": ""},
    ]

    # load images and prepare for inputs
    pil_images = load_pil_images(conversation)
    print(f"len(pil_images) = {len(pil_images)}")

    prepare_inputs = vl_chat_processor.__call__(
        conversations=conversation,
        images=pil_images,
        force_batchify=True,
        system_prompt=""
    ).to(vl_gpt.device, dtype=dtype)

    with torch.no_grad():
        if args.chunk_size == -1:
            # one-shot prefill of the whole prompt
            inputs_embeds = vl_gpt.prepare_inputs_embeds(**prepare_inputs)
            past_key_values = None
        else:
            # incremental_prefilling when using 40G GPU for vl2-small
            inputs_embeds, past_key_values = vl_gpt.incremental_prefilling(
                input_ids=prepare_inputs.input_ids,
                images=prepare_inputs.images,
                images_seq_mask=prepare_inputs.images_seq_mask,
                images_spatial_crop=prepare_inputs.images_spatial_crop,
                attention_mask=prepare_inputs.attention_mask,
                chunk_size=args.chunk_size
            )

        # run the model to get the response
        outputs = vl_gpt.generate(
            # inputs_embeds=inputs_embeds[:, -1:],
            # input_ids=prepare_inputs.input_ids[:, -1:],
            inputs_embeds=inputs_embeds,
            input_ids=prepare_inputs.input_ids,
            images=prepare_inputs.images,
            images_seq_mask=prepare_inputs.images_seq_mask,
            images_spatial_crop=prepare_inputs.images_spatial_crop,
            attention_mask=prepare_inputs.attention_mask,
            past_key_values=past_key_values,
            pad_token_id=tokenizer.eos_token_id,
            bos_token_id=tokenizer.bos_token_id,
            eos_token_id=tokenizer.eos_token_id,
            max_new_tokens=512,
            # do_sample=False,
            # repetition_penalty=1.1,
            do_sample=True,
            temperature=0.4,
            top_p=0.9,
            repetition_penalty=1.1,
            use_cache=True,
        )

    # decode only the newly generated tokens, keeping special tokens for grounding
    answer = tokenizer.decode(outputs[0][len(prepare_inputs.input_ids[0]):].cpu().tolist(), skip_special_tokens=False)
    print(f"{prepare_inputs['sft_format'][0]}", answer)

    # draw any referenced bounding boxes onto the last input image
    vg_image = parse_ref_bbox(answer, image=pil_images[-1])
    if vg_image is not None:
        vg_image.save("./vg.jpg", format="JPEG", quality=85)
if __name__ == "__main__":
    parser = ArgumentParser()
    # `default` is ignored when required=True, so the redundant default was dropped.
    parser.add_argument("--model_path", type=str, required=True,
                        help="model name or local path to the model")
    # Fixed typo ("prefiiling") and the missing space between the two
    # concatenated help sentences.
    parser.add_argument("--chunk_size", type=int, default=-1,
                        help="chunk size for the model for prefilling. "
                             "When using 40G gpu for vl2-small, set a chunk_size for incremental_prefilling. "
                             "Otherwise, default value is -1, which means we do not use incremental_prefilling.")
    args = parser.parse_args()
    main(args)
| 1,661
|
d0b895db8029e0f75d4dfb8314b7b9af39f7895db1086090d364ad43505e8f00
| 37.431138
| 182
| 0.624338
| 3.863937
| false
| false
| false
| false
|
trycua/cua
|
libs/computer/computer/interface/models.py
| 2,570
| 0
|
MIT License
|
from enum import Enum
from typing import Dict, List, Any, TypedDict, Union, Literal
# Navigation key literals accepted by press_key (PyAutoGUI key names)
NavigationKey = Literal['pagedown', 'pageup', 'home', 'end', 'left', 'right', 'up', 'down']
# Special key literals: enter/escape and editing keys
SpecialKey = Literal['enter', 'esc', 'tab', 'space', 'backspace', 'del']
# Function key literals f1 through f12
FunctionKey = Literal['f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9', 'f10', 'f11', 'f12']
class Key(Enum):
    """Keyboard keys that can be used with press_key.

    These key names map to PyAutoGUI's expected key names. Note that several
    members are aliases sharing one value (RETURN/ENTER, ESCAPE/ESC).
    """
    # Navigation
    PAGE_DOWN = 'pagedown'
    PAGE_UP = 'pageup'
    HOME = 'home'
    END = 'end'
    LEFT = 'left'
    RIGHT = 'right'
    UP = 'up'
    DOWN = 'down'
    # Special keys
    RETURN = 'enter'
    ENTER = 'enter'
    ESCAPE = 'esc'
    ESC = 'esc'
    TAB = 'tab'
    SPACE = 'space'
    BACKSPACE = 'backspace'
    DELETE = 'del'
    # Function keys
    F1 = 'f1'
    F2 = 'f2'
    F3 = 'f3'
    F4 = 'f4'
    F5 = 'f5'
    F6 = 'f6'
    F7 = 'f7'
    F8 = 'f8'
    F9 = 'f9'
    F10 = 'f10'
    F11 = 'f11'
    F12 = 'f12'

    @classmethod
    def from_string(cls, key: str) -> 'Key | str':
        """Convert a string key name to a Key enum value.

        Args:
            key: String key name to convert

        Returns:
            Key enum value if the string matches a known key name or alias;
            otherwise the original string is returned unchanged (covers both
            single-character keys and unrecognized names).
        """
        # Map common alternative names to enum values
        key_mapping = {
            'page_down': cls.PAGE_DOWN,
            'page down': cls.PAGE_DOWN,
            'pagedown': cls.PAGE_DOWN,
            'page_up': cls.PAGE_UP,
            'page up': cls.PAGE_UP,
            'pageup': cls.PAGE_UP,
            'return': cls.RETURN,
            'enter': cls.ENTER,
            'escape': cls.ESCAPE,
            'esc': cls.ESC,
            'delete': cls.DELETE,
            'del': cls.DELETE
        }
        # lookup is case-insensitive and ignores surrounding whitespace
        normalized = key.lower().strip()
        return key_mapping.get(normalized, key)
# Combined key type: a Key member, any literal key name, or a raw character
KeyType = Union[Key, NavigationKey, SpecialKey, FunctionKey, str]
class AccessibilityWindow(TypedDict):
    """Information about a window in the accessibility tree."""
    app_name: str  # display name of the owning application
    pid: int  # process id of the owning application
    frontmost: bool  # whether the application is frontmost
    has_windows: bool  # whether the application has any windows
    windows: List[Dict[str, Any]]  # raw per-window attribute dicts
class AccessibilityTree(TypedDict):
    """Complete accessibility tree information."""
    success: bool  # whether the tree was captured successfully
    frontmost_application: str  # name of the frontmost application
    windows: List[AccessibilityWindow]  # one entry per application
| 752
|
8fe3c72508de4b012496bc474c9c9a60afee8f8065d5e34435b1b44815b3003b
| 25.505155
| 96
| 0.556031
| 3.417553
| false
| false
| false
| false
|
openai/openai-agents-python
|
tests/mcp/test_caching.py
| 1,928
| 0
|
MIT License
|
from unittest.mock import AsyncMock, patch
import pytest
from mcp.types import ListToolsResult, Tool as MCPTool
from agents.mcp import MCPServerStdio
from .helpers import DummyStreamsContextManager, tee
@pytest.mark.asyncio
@patch("mcp.client.stdio.stdio_client", return_value=DummyStreamsContextManager())
@patch("mcp.client.session.ClientSession.initialize", new_callable=AsyncMock, return_value=None)
@patch("mcp.client.session.ClientSession.list_tools")
async def test_server_caching_works(
    mock_list_tools: AsyncMock, mock_initialize: AsyncMock, mock_stdio_client
):
    """Test that if we turn caching on, the list of tools is cached and not fetched from the server
    on each call to `list_tools()`.
    """
    server = MCPServerStdio(
        params={
            "command": tee,
        },
        cache_tools_list=True,
    )

    # Canonical tool list the mocked server returns.
    expected_tools = [
        MCPTool(name="tool1", inputSchema={}),
        MCPTool(name="tool2", inputSchema={}),
    ]
    mock_list_tools.return_value = ListToolsResult(tools=expected_tools)

    async with server:
        # First call should hit the (mocked) server.
        # Bug fix: the original asserted `tools == tools`, which is always
        # true; compare against the list the mock was primed with instead.
        tools = await server.list_tools()
        assert tools == expected_tools
        assert mock_list_tools.call_count == 1, "list_tools() should have been called once"

        # Second call should be served from the cache.
        tools = await server.list_tools()
        assert tools == expected_tools
        assert mock_list_tools.call_count == 1, "list_tools() should not have been called again"

        # Invalidating the cache forces a refetch.
        server.invalidate_tools_cache()
        tools = await server.list_tools()
        assert tools == expected_tools
        assert mock_list_tools.call_count == 2, "list_tools() should be called again"

        # Without invalidating, the cached value is returned once more.
        tools = await server.list_tools()
        assert tools == expected_tools
        assert mock_list_tools.call_count == 2, "cached call must not hit the server"
| 493
|
483f30cf6277cec4b8738e70af0fa115600a1e1b78f8ee8f0f4fa797057ddff1
| 32.824561
| 99
| 0.673237
| 3.910751
| false
| true
| false
| false
|
browser-use/browser-use
|
browser_use/browser/tests/test_clicks.py
| 2,942
| 0
|
MIT License
|
import asyncio
import json
import pytest
from browser_use.browser.browser import Browser, BrowserConfig
from browser_use.dom.views import DOMBaseNode, DOMElementNode, DOMTextNode
from browser_use.utils import time_execution_sync
class ElementTreeSerializer:
    """Serializes a DOM element tree into plain JSON-compatible dicts."""

    @staticmethod
    def dom_element_node_to_json(element_tree: DOMElementNode) -> dict:
        """Recursively convert *element_tree* into a nested dict structure."""

        def serialize(node: DOMBaseNode) -> dict:
            # Text leaves carry only their text content.
            if isinstance(node, DOMTextNode):
                return {'type': 'text', 'text': node.text}
            # Element nodes carry metadata plus their serialized children.
            if isinstance(node, DOMElementNode):
                return {
                    'type': 'element',
                    'tag_name': node.tag_name,
                    'attributes': node.attributes,
                    'highlight_index': node.highlight_index,
                    'children': [serialize(child) for child in node.children],
                }
            # Any other node kind serializes to an empty dict.
            return {}

        return serialize(element_tree)
# run with: pytest browser_use/browser/tests/test_clicks.py
@pytest.mark.asyncio
async def test_highlight_elements():
    """Interactive exploration harness: dump the serialized DOM to a JSON
    file, report duplicate XPaths, and click elements chosen from stdin.

    NOTE(review): this is a manual test, not suitable for CI — it launches a
    headful browser, loops forever, and blocks on input().
    """
    browser = Browser(config=BrowserConfig(headless=False, disable_security=True))
    async with await browser.new_context() as context:
        page = await context.get_current_page()
        # Alternative target pages kept for manual experimentation:
        # await page.goto('https://immobilienscout24.de')
        # await page.goto('https://help.sap.com/docs/sap-ai-core/sap-ai-core-service-guide/service-plans')
        # await page.goto('https://google.com/search?q=elon+musk')
        # await page.goto('https://kayak.com')
        # await page.goto('https://www.w3schools.com/tags/tryit.asp?filename=tryhtml_iframe')
        # await page.goto('https://dictionary.cambridge.org')
        # await page.goto('https://github.com')
        await page.goto('https://huggingface.co/')
        await asyncio.sleep(1)
        while True:
            try:
                # await asyncio.sleep(10)
                state = await context.get_state()
                # Persist the serialized DOM for offline inspection.
                with open('./tmp/page.json', 'w') as f:
                    json.dump(
                        ElementTreeSerializer.dom_element_node_to_json(state.element_tree),
                        f,
                        indent=1,
                    )
                # await time_execution_sync('highlight_selector_map_elements')(
                # browser.highlight_selector_map_elements
                # )(state.selector_map)
                # Find and print duplicate XPaths
                xpath_counts = {}
                if not state.selector_map:
                    # NOTE(review): busy-loops when the page has no selectable
                    # elements — consider sleeping before retrying.
                    continue
                for selector in state.selector_map.values():
                    xpath = selector.xpath
                    if xpath in xpath_counts:
                        xpath_counts[xpath] += 1
                    else:
                        xpath_counts[xpath] = 1
                print('\nDuplicate XPaths found:')
                for xpath, count in xpath_counts.items():
                    if count > 1:
                        print(f'XPath: {xpath}')
                        print(f'Count: {count}\n')
                print(list(state.selector_map.keys()), 'Selector map keys')
                print(state.element_tree.clickable_elements_to_string())
                # Blocks until the user types a highlight index to click.
                action = input('Select next action: ')
                await time_execution_sync('remove_highlight_elements')(context.remove_highlights)()
                node_element = state.selector_map[int(action)]
                # check if index of selector map are the same as index of items in dom_items
                await context._click_element_node(node_element)
            except Exception as e:
                # Best-effort: print and keep the interactive loop alive.
                print(e)
| 915
|
b2bcfeab491a9051ad100449ab0374d187ae00672c83f6f152344f045c1a7cb8
| 30.297872
| 100
| 0.690687
| 3.215301
| false
| true
| false
| false
|
crestalnetwork/intentkit
|
skills/defillama/yields/fetch_pools.py
| 5,191
| 0
|
MIT License
|
"""Tool for fetching pool data via DeFi Llama API."""
from typing import Optional
from langchain.schema.runnable import RunnableConfig
from pydantic import BaseModel, Field
from skills.defillama.api import fetch_pools
from skills.defillama.base import DefiLlamaBaseTool
# Tool description surfaced to the LLM; module-level constant so it can be
# inspected without instantiating the tool.
FETCH_POOLS_PROMPT = """
This tool fetches comprehensive data about yield-generating pools from DeFi Llama.
Returns data including:
- Pool details (chain, project, symbol)
- TVL and APY information
- Statistical metrics (mean, standard deviation)
- Risk assessments and predictions
- Historical performance data
"""
class PredictionData(BaseModel):
    """Model representing prediction data for a pool."""

    # All fields are optional: the API omits predictions for some pools.
    predictedClass: Optional[str] = Field(
        None, description="Predicted direction of APY movement"
    )
    predictedProbability: Optional[float] = Field(
        None, description="Probability of the prediction"
    )
    binnedConfidence: Optional[int] = Field(None, description="Confidence level bucket")
class PoolData(BaseModel):
    """Model representing a single pool's data.

    NOTE: field names are camelCase to mirror the DeFi Llama API payload
    verbatim, so responses can be unpacked directly into this model.
    """

    # Identity
    chain: str = Field(..., description="Blockchain network")
    project: str = Field(..., description="Protocol or project name")
    symbol: str = Field(..., description="Token or pool symbol")
    # Size and yield
    tvlUsd: float = Field(..., description="Total Value Locked in USD")
    apyBase: Optional[float] = Field(None, description="Base APY without rewards")
    apyReward: Optional[float] = Field(None, description="Additional APY from rewards")
    apy: Optional[float] = Field(None, description="Total APY including rewards")
    rewardTokens: Optional[list[str]] = Field(
        None, description="List of reward token addresses"
    )
    pool: Optional[str] = Field(None, description="Pool identifier")
    # APY momentum
    apyPct1D: Optional[float] = Field(None, description="1-day APY percentage change")
    apyPct7D: Optional[float] = Field(None, description="7-day APY percentage change")
    apyPct30D: Optional[float] = Field(None, description="30-day APY percentage change")
    # Risk assessment
    stablecoin: bool = Field(False, description="Whether pool involves stablecoins")
    ilRisk: str = Field("no", description="Impermanent loss risk assessment")
    exposure: str = Field("single", description="Asset exposure type")
    predictions: Optional[PredictionData] = Field(
        None, description="APY movement predictions"
    )
    poolMeta: Optional[str] = Field(None, description="Additional pool metadata")
    # Statistics
    mu: Optional[float] = Field(None, description="Mean APY value")
    sigma: Optional[float] = Field(None, description="APY standard deviation")
    count: Optional[int] = Field(None, description="Number of data points")
    outlier: bool = Field(False, description="Whether pool is an outlier")
    underlyingTokens: Optional[list[str]] = Field(
        None, description="List of underlying token addresses"
    )
    il7d: Optional[float] = Field(None, description="7-day impermanent loss")
    apyBase7d: Optional[float] = Field(None, description="7-day base APY")
    apyMean30d: Optional[float] = Field(None, description="30-day mean APY")
    volumeUsd1d: Optional[float] = Field(None, description="24h volume in USD")
    volumeUsd7d: Optional[float] = Field(None, description="7-day volume in USD")
    apyBaseInception: Optional[float] = Field(
        None, description="Base APY since inception"
    )
class FetchPoolsResponse(BaseModel):
    """Response schema for pool data.

    Errors are reported in-band via `error` rather than by raising.
    """

    status: str = Field("success", description="Response status")
    data: list[PoolData] = Field(default_factory=list, description="List of pool data")
    error: Optional[str] = Field(None, description="Error message if any")
class DefiLlamaFetchPools(DefiLlamaBaseTool):
    """Tool for fetching pool data from DeFi Llama.

    This tool retrieves comprehensive data about yield-generating pools,
    including TVL, APYs, risk metrics, and predictions.

    Example:
        pools_tool = DefiLlamaFetchPools(
            skill_store=store,
            agent_id="agent_123",
            agent_store=agent_store
        )
        result = await pools_tool._arun()
    """

    name: str = "defillama_fetch_pools"
    description: str = FETCH_POOLS_PROMPT
    args_schema: None = None  # No input parameters needed

    async def _arun(self, config: RunnableConfig) -> FetchPoolsResponse:
        """Fetch pool data.

        Args:
            config: Runnable config carrying the skill invocation context.

        Returns:
            FetchPoolsResponse containing pool data or error
        """
        try:
            # Check rate limiting
            context = self.context_from_config(config)
            is_rate_limited, error_msg = await self.check_rate_limit(context)
            if is_rate_limited:
                # Surface the limit as a structured error, not an exception.
                return FetchPoolsResponse(error=error_msg)

            # Fetch pool data from API
            result = await fetch_pools()

            # Check for API errors
            if isinstance(result, dict) and "error" in result:
                return FetchPoolsResponse(error=result["error"])

            # Return the response matching the API structure
            return FetchPoolsResponse(**result)

        except Exception as e:
            # Any unexpected failure is reported in-band so callers always
            # receive a FetchPoolsResponse.
            return FetchPoolsResponse(error=str(e))
| 1,306
|
9f60a384fe6119cf337b889c19aefe6d106f3034bd03f9c281b48e36c5ec8e05
| 40.198413
| 88
| 0.690618
| 3.974732
| false
| false
| false
| false
|
xinnan-tech/xiaozhi-esp32-server
|
main/xiaozhi-server/plugins_func/functions/hass_get_state.py
| 1,766
| 0
|
MIT License
|
from plugins_func.register import register_function, ToolType, ActionResponse, Action
from plugins_func.functions.hass_init import initialize_hass_handler
from config.logger import setup_logging
import asyncio
import requests
TAG = __name__
logger = setup_logging()
# OpenAI-style function (tool) schema describing `hass_get_state` to the LLM.
# The descriptions are user-facing strings sent to the model verbatim.
hass_get_state_function_desc = {
    "type": "function",
    "function": {
        "name": "hass_get_state",
        "description": "获取homeassistant里设备的状态,包括灯光亮度,媒体播放器的音量,设备的暂停、继续操作",
        "parameters": {
            "type": "object",
            "properties": {
                "entity_id": {
                    "type": "string",
                    "description": "需要操作的设备id,homeassistant里的entity_id"
                }
            },
            "required": ["entity_id"]
        }
    }
}
@register_function("hass_get_state", hass_get_state_function_desc, ToolType.SYSTEM_CTL)
def hass_get_state(conn, entity_id=''):
    """Query the state of a Home Assistant entity and hand it to the LLM.

    Schedules the async handler on the connection's event loop from this
    (synchronous) tool callback and blocks until the result is available.

    Args:
        conn: Connection object exposing the running event loop as `conn.loop`.
        entity_id: Home Assistant entity id to query.

    Returns:
        ActionResponse asking the LLM to process the fetched state, or an
        error ActionResponse when the lookup fails.
    """
    try:
        future = asyncio.run_coroutine_threadsafe(
            handle_hass_get_state(conn, entity_id),
            conn.loop
        )
        ha_response = future.result()
        return ActionResponse(action=Action.REQLLM, result="执行成功", response=ha_response)
    except Exception as e:
        logger.bind(tag=TAG).error(f"处理设置属性意图错误: {e}")
        # Bug fix: previously this fell through and implicitly returned None
        # on failure, leaving the caller without an ActionResponse.
        return ActionResponse(action=Action.REQLLM, result="执行失败", response=str(e))
async def handle_hass_get_state(conn, entity_id):
    """Fetch an entity's state from the Home Assistant REST API.

    Args:
        conn: Connection object used to look up the cached HASS config.
        entity_id: Home Assistant entity id to query.

    Returns:
        The entity's `state` string on HTTP 200, otherwise a Chinese error
        message containing the status code.
    """
    HASS_CACHE = initialize_hass_handler(conn)
    api_key = HASS_CACHE['api_key']
    base_url = HASS_CACHE['base_url']
    url = f"{base_url}/api/states/{entity_id}"
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json"
    }
    # Bug fix: requests.get() is blocking and was called directly inside this
    # coroutine, stalling the event loop it runs on. Run it in a worker
    # thread instead, and add a timeout so an unreachable Home Assistant
    # instance cannot hang the call forever.
    response = await asyncio.to_thread(requests.get, url, headers=headers, timeout=10)
    if response.status_code == 200:
        return response.json()['state']
    else:
        return f"切换失败,错误码: {response.status_code}"
| 566
|
85a99d7171e828943a77cfc394e17f982e1b9f0d7414861e747a6ed98005dfa5
| 30.535714
| 88
| 0.607022
| 3.120141
| false
| false
| false
| false
|
MadcowD/ell
|
x/openai_realtime/tests/test_mock.py
| 8,400
| 0
|
MIT License
|
# x/openai_realtime/tests/test_mock.py
import pytest
from unittest.mock import Mock, patch, AsyncMock
import numpy as np
import json
from openai_realtime.client import RealtimeClient
from openai_realtime.utils import RealtimeUtils
@pytest.fixture
def client():
    """Build a RealtimeClient whose transport and conversation are mocked.

    The mocked `realtime` layer lets tests assert on outbound events without
    a network connection; `conversation` is mocked so state clearing can be
    verified.
    """
    client = RealtimeClient()
    client.realtime = Mock()
    client.conversation = Mock()
    # Mock methods within realtime
    client.realtime.connect = AsyncMock(return_value=True)
    client.realtime.send = Mock()
    client.realtime.disconnect = AsyncMock()
    client.realtime.is_connected = Mock(return_value=True)
    # Ensure that send returns a Mock that can have .get called on it
    client.realtime.send.return_value = Mock(get=Mock(return_value=None))
    # Mock methods within conversation
    client.conversation.clear = Mock()
    # Initialize other necessary attributes
    client.input_audio_buffer = np.array([], dtype=np.int16)
    # Ensure session_config is properly initialized
    client._reset_config()
    return client
def test_init(client):
    """A freshly built client is unconnected, tool-less, and on default config."""
    assert isinstance(client, RealtimeClient)
    assert client.session_created == False
    assert client.tools == {}
    assert client.session_config == client.default_session_config
def test_reset(client):
    """reset() clears session/tool state and tears down transport + conversation."""
    client.session_created = True
    client.tools = {'test_tool': {}}
    client.reset()
    assert client.session_created == False
    assert client.tools == {}
    client.realtime.disconnect.assert_called_once()
    client.conversation.clear.assert_called_once()
@pytest.mark.asyncio
async def test_connect(client):
    """connect() opens the transport and pushes the current session config."""
    await client.connect()
    client.realtime.connect.assert_awaited_once()
    expected_session = client.session_config.copy()
    client.realtime.send.assert_called_once_with('session.update', {'session': expected_session})
def test_add_tool(client):
    """add_tool() registers the handler and pushes an updated session config."""
    tool_definition = {'name': 'test_tool', 'description': 'A test tool'}
    tool_handler = Mock()
    client.add_tool(tool_definition, tool_handler)
    assert 'test_tool' in client.tools
    assert client.tools['test_tool']['definition'] == tool_definition
    assert client.tools['test_tool']['handler'] == tool_handler
    # The session update advertises the tool with type 'function' added.
    expected_session = client.session_config.copy()
    expected_session['tools'] = [{
        'name': 'test_tool',
        'description': 'A test tool',
        'type': 'function'
    }]
    client.realtime.send.assert_called_once_with('session.update', {'session': expected_session})
def test_remove_tool(client):
    """remove_tool() drops the tool locally without auto-syncing the session;
    an explicit update_session() then pushes the emptied tool list."""
    # Setup: Add a tool first
    client.tools = {'test_tool': {'definition': {'name': 'test_tool', 'description': 'A test tool'}}}
    # Remove the tool
    client.remove_tool('test_tool')
    # Assertions
    assert 'test_tool' not in client.tools
    # Ensure 'session.update' was NOT called automatically
    client.realtime.send.assert_not_called()
    # If session synchronization is needed, it should be done explicitly
    # For example:
    client.update_session()
    expected_session = client.session_config.copy()
    expected_session['tools'] = []
    client.realtime.send.assert_called_once_with('session.update', {'session': expected_session})
def test_delete_item(client):
    """delete_item() emits a conversation.item.delete event for the given id."""
    client.delete_item('item_id')
    client.realtime.send.assert_called_once_with('conversation.item.delete', {'item_id': 'item_id'})


def test_update_session(client):
    """update_session() merges the change locally and syncs it to the server."""
    client.update_session(modalities=['text'])
    assert client.session_config['modalities'] == ['text']
    expected_session = client.session_config.copy()
    client.realtime.send.assert_called_once_with('session.update', {'session': expected_session})
def test_send_user_message_content(client):
    """Sending user content creates a conversation item and requests a response.

    Exactly two events must go out: `conversation.item.create` carrying the
    user message, followed by `response.create`.
    """
    content = [{'type': 'text', 'text': 'Hello'}]
    client.send_user_message_content(content)

    # Bug fix: removed the dead `expected_calls` list that was built but
    # never compared against anything.
    assert client.realtime.send.call_count == 2
    client.realtime.send.assert_any_call('conversation.item.create', {
        'item': {
            'type': 'message',
            'role': 'user',
            'content': content
        }
    })
    client.realtime.send.assert_any_call('response.create')
def test_append_input_audio(client):
    """append_input_audio() base64-encodes the chunk, sends it, and buffers it."""
    audio_data = np.array([1, 2, 3], dtype=np.int16)
    # Stub the base64 conversion so the exact outbound payload is predictable.
    with patch.object(RealtimeUtils, 'array_buffer_to_base64', return_value='base64audio'):
        client.append_input_audio(audio_data)
    client.realtime.send.assert_called_once_with('input_audio_buffer.append', {
        'audio': 'base64audio'
    })
    np.testing.assert_array_equal(client.input_audio_buffer, audio_data)
def test_create_response(client):
    """create_response() emits exactly one response.create event."""
    client.create_response()
    send = client.realtime.send
    send.assert_called_once_with('response.create')


def test_cancel_response(client):
    """cancel_response() emits exactly one response.cancel event."""
    client.cancel_response()
    send = client.realtime.send
    send.assert_called_once_with('response.cancel')
@pytest.mark.asyncio
async def test_wait_for_session_created(client):
    """wait_for_session_created() polls (via asyncio.sleep) until the flag flips."""
    client.realtime.is_connected.return_value = True
    client.session_created = False
    # Define a side effect that modifies client.session_created and accepts arguments
    def set_session_created(*args, **kwargs):
        client.session_created = True
    # Patch the poll-sleep so the first sleep flips the flag and the loop exits.
    with patch('openai_realtime.client.asyncio.sleep', new_callable=AsyncMock) as mock_sleep:
        mock_sleep.side_effect = set_session_created
        result = await client.wait_for_session_created()
        assert result == True
        mock_sleep.assert_awaited()
@pytest.mark.asyncio
async def test_wait_for_next_item(client):
    """wait_for_next_item() delegates to wait_for_next('conversation.item.appended')."""
    client.wait_for_next = AsyncMock(return_value={'item': {'id': 'test_item'}})
    result = await client.wait_for_next_item()
    assert result == {'item': {'id': 'test_item'}}
    client.wait_for_next.assert_awaited_once_with('conversation.item.appended')


@pytest.mark.asyncio
async def test_wait_for_next_completed_item(client):
    """wait_for_next_completed_item() waits on 'conversation.item.completed'."""
    client.wait_for_next = AsyncMock(return_value={'item': {'id': 'test_item', 'status': 'completed'}})
    result = await client.wait_for_next_completed_item()
    assert result == {'item': {'id': 'test_item', 'status': 'completed'}}
    client.wait_for_next.assert_awaited_once_with('conversation.item.completed')
@pytest.mark.asyncio
async def test_call_tool(client):
    """_call_tool() awaits the handler and sends its output as a
    function_call_output item followed by response.create."""
    tool_name = 'test_tool'
    tool_arguments = '{"arg1": "value1"}'
    tool_result = {'result': 'success'}
    tool_handler_mock = AsyncMock(return_value=tool_result)
    client.tools = {
        tool_name: {
            'handler': tool_handler_mock,
            'definition': {'name': tool_name, 'description': 'A test tool'}
        }
    }
    # Patch JSON round-tripping so the exact outbound payload is predictable.
    with patch('json.loads', return_value={'arg1': 'value1'}), \
        patch('json.dumps', return_value=json.dumps(tool_result)):
        await client._call_tool({'name': tool_name, 'arguments': tool_arguments, 'call_id': 'test_call_id'})
    tool_handler_mock.assert_awaited_once_with({'arg1': 'value1'})
    client.realtime.send.assert_any_call('conversation.item.create', {
        'item': {
            'type': 'function_call_output',
            'call_id': 'test_call_id',
            'output': json.dumps(tool_result)
        }
    })
    client.realtime.send.assert_any_call('response.create')


@pytest.mark.asyncio
async def test_call_tool_error(client):
    """A raising handler still produces a function_call_output item, carrying
    the error message instead of a result."""
    tool_name = 'test_tool'
    tool_arguments = '{"arg1": "value1"}'
    error_message = "Test error"
    tool_handler_mock = AsyncMock(side_effect=Exception(error_message))
    client.tools = {
        tool_name: {
            'handler': tool_handler_mock,
            'definition': {'name': tool_name, 'description': 'A test tool'}
        }
    }
    with patch('json.loads', return_value={'arg1': 'value1'}), \
        patch('json.dumps', return_value='{"error": "Test error"}'):
        await client._call_tool({'name': tool_name, 'arguments': tool_arguments, 'call_id': 'test_call_id'})
    tool_handler_mock.assert_awaited_once_with({'arg1': 'value1'})
    client.realtime.send.assert_any_call('conversation.item.create', {
        'item': {
            'type': 'function_call_output',
            'call_id': 'test_call_id',
            'output': '{"error": "Test error"}'
        }
    })
    client.realtime.send.assert_any_call('response.create')
| 2,325
|
63ccb71d245fd86b3b221884783507eb92cce7f3d3e5fd311237177899d8a269
| 34.597458
| 108
| 0.651429
| 3.612903
| false
| true
| false
| false
|
MadcowD/ell
|
src/ell/stores/migrations/__init__.py
| 3,678
| 0
|
MIT License
|
from alembic import command
from alembic.config import Config
from sqlalchemy import inspect, text
from pathlib import Path
from sqlmodel import Session, SQLModel, create_engine, select
import logging
logger = logging.getLogger(__name__)
def get_alembic_config(engine_url: str) -> Config:
    """Create an Alembic Config programmatically (no alembic.ini needed)."""
    cfg = Config()
    # Migration scripts live next to this module.
    script_location = Path(__file__).parent
    options = {
        "script_location": str(script_location),
        "sqlalchemy.url": str(engine_url),
        "version_table": "ell_alembic_version",
        "timezone": "UTC",
    }
    for option_name, option_value in options.items():
        cfg.set_main_option(option_name, option_value)
    return cfg
def init_or_migrate_database(engine) -> None:
    """Initialize or migrate database with ELL schema

    Handles three cases:
    1. Existing database with our tables but no Alembic -> stamp with initial migration
    2. Database with Alembic -> upgrade to head
    3. New/empty database or database without our tables -> create tables and stamp with head

    Args:
        engine: SQLAlchemy engine connected to the target database.
    """
    inspector = inspect(engine)
    # Check database state
    # Tables that shipped with schema v1 (<= 0.14) and v2 respectively.
    our_tables_v1 = {'serializedlmp', 'invocation', 'invocationcontents',
                    'invocationtrace', 'serializedlmpuses'}
    our_tables_v2 = {'evaluationlabeler', 'evaluationresultdatapoint', 'evaluationrunlabelersummary', 'evaluationlabel'}
    existing_tables = set(inspector.get_table_names())
    has_our_tables = bool(our_tables_v1 & existing_tables)  # Intersection
    has_alembic = 'ell_alembic_version' in existing_tables
    alembic_cfg = get_alembic_config(engine.url.render_as_string(hide_password=False))
    try:
        if has_our_tables and not has_alembic:
            # Case 1: Existing database with our tables but no Alembic
            # This is likely a database from version <= 0.14
            logger.debug("Found existing tables but no Alembic - stamping with initial migration")
            # A v1 database lacks every v2 table; anything newer is stamped
            # straight to head.
            is_v1 = has_our_tables and not bool(our_tables_v2 & existing_tables)
            command.stamp(alembic_cfg, "4524fb60d23e" if is_v1 else "head")
            # Verify table was created
            after_tables = set(inspect(engine).get_table_names())
            logger.debug(f"Tables after stamp: {after_tables}")
            if is_v1:
                # Check if version table has our stamp
                with engine.connect() as connection:
                    version_result = connection.execute(text("SELECT version_num FROM ell_alembic_version")).first()
                    if not version_result or version_result[0] != "4524fb60d23e":
                        raise RuntimeError("Failed to stamp database - version table empty or incorrect version")
                    logger.debug(f"Successfully stamped database with version {version_result[0]}")
            # Fall through to Case 2 so pending migrations run immediately.
            has_alembic = True
        if has_alembic:
            # Case 2: Database has Alembic - run any pending migrations
            logger.debug("Running any pending Alembic migrations")
            command.upgrade(alembic_cfg, "head")
        else:
            # Case 3: New database or database without our tables
            logger.debug("New database detected - creating schema and stamping with latest migration")
            # Create all tables according to current schema
            SQLModel.metadata.create_all(engine)
            # Stamp with latest migration
            command.stamp(alembic_cfg, "head")
    except Exception as e:
        logger.error(f"Failed to initialize/migrate database: {e}")
        raise
| 871
|
82d01a646f706d3e703ce04de2c383acf6de2d0b355c050fe21eb78d08202ec0
| 43.853659
| 120
| 0.65416
| 4.222732
| false
| true
| false
| false
|
MadcowD/ell
|
docs/ramblings/notes_on_adapters.py
| 6,496
| 0
|
MIT License
|
# How do we want to handle model adaptation..
# I don't really want to maintain some base registry and update it every damn time a new model comes out, but it's not clear how we specify providers otherwise.
# e.g.
@ell.simple(model="gpt-4-turbo", temperature=0.1)
def blah():
pass
# Even then Azure is bullshit and doesn't use the same model names as OpenAI, so we can't really even have a registry. I guess we could do best effort and occasionally update the library when new models come out?
@ell.simple(model="gpt-4-turbo", provider=AzureProvider, temperature=0.1)
def blah():
pass
# Do we make each provider implement several types of supported interfaces?
class Provider(abc.ABC):
def __init__
pass
# I mean OAI has basically set the standard for how all providers interact.
class OAILikeProvider(abc.ABC):
# Also do we care about tracking embeddings?
# no not yet lol, but we clearly nee a generic invocation stack.
# We can just focus on text output models for now and revisit later if necessary.
# Wow that was ass
# Am I really going to expect my users to pass around the same 'client' class to all the models.. Also since this is inthe decorartor they'd have to define this client globally. I also want thigns to be mostly static; there's not really a reason to reinstantiate these providers. Except for changing the 'client'
# the only reaosn for the client is to enable switching between different model infra bakcends for oai lol rather than hard coding ours.
# we could also jsut adopt oai's philosophy on this and just use their cliejts as our providers class. but i hate the idea that i have to pass clients around all the time for different mdoe lclasses.
# this is very much a user decision and in fact you might even want to load balance (a la azure not implementing this..)
register('gpt-4-turbo', oai_client)
register('llama-70b-chat', groq_client)
# how to balance this with low barrirer to entry. env vars didnt' work last time.
# some amount of initialization of the library needs to happen at the beginning.
# this is a requirement in that while we could just default to oai models from oai and attempt to use the oai client on first invocation of an lmp
ell.init(
oai_client=...,
)
# okay so by default we will use the standard provider of a model if a model is 'standard' from a provider ie llama can get fucked but 4 turbo we'll go w oai
# we will try to initialize using the env vars for dany defauly provider but essentially we need to prvoide a runtime interface for a user to select which provider they want to use for a class of mdoels.
# im fine using my own fucking client..
# would there ever be a situation when the user wants to switch between clients for different lmps
# rate limits etc.
# i really didnt want to make this my job but now it's my job.
# ew
"""ell.set_provider(
models=ell.providers.OpenAI.models,
provider=ell.providers.Azure
)"""
# or...
# we could go entirely functional
# fuck conflict resolution
""ell.register('gpt-4-turbo', OpenAI.chat)
""
# inherently you just don't want to fuck around with
""blah(api_params=dict(client=my_openai_client))
""
# or even
with ell.use_client(my_openai_client): #<-- well maybe actually i like this
blah()
# or even
with_client(blah, openai)()
# it might be as simple as saying: "Just implement the oai standard and you're good to go.."
# should see how one atualyl invokes mystral etc.
# brb
"""
from mistralai.client import MistralClient
from mistralai.models.chat_completion import ChatMessage
api_key = os.environ["MISTRAL_API_KEY"]
model = "mistral-large-latest"
client = MistralClient(api_key=api_key)
messages = [
ChatMessage(role="user", content="What is the best French cheese?")
]
# No streaming
chat_response = client.chat(
model=model,
messages=messages,
)
"""
"""from openai import OpenAI
client = OpenAI()
response = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Who won the world series in 2020?"},
{"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
{"role": "user", "content": "Where was it played?"}
]
)
"""
# As much as I like the with_client framework, it does't actually relegate a certain model to a certain provider.
# We could get django about htis shit
class ProviderMeta():
from typing import List, Type
from ell.types.message import MessageOrDict
class ProviderMeta(type):
def __init__(cls, name, bases, attrs):
super().__init__(name, bases, attrs)
if hasattr(cls, 'models'):
cls.register_models(cls.models)
@staticmethod
def register_models(models):
for model in models:
print(f"Registering model: {model}")
class OAILikeProvider(Provider):
models = [
"gpt-4-turbo"
]
@staticmethod
def chat_completions(client, model, messages : List[MessageOrDict]):
client.chat.completions.create(
model=model,
messages=messages
)
OAIProvider = OAILikeProvider
# Ah so this is weird: We actually might have providers with different model classes that we want to specify for example, azure doesn't have defualt names for these models and they are in the user namespace... So literally when we want to use an azure provier we have 'isntantiate it'. That's fucking annoying lol.
# For example we'd actually want the user to be able to easily switch to gpt-4-turbo without changing all their lmp code.
AzureProvider(
model_map = {
oai.GPT4Turbo: "ell-production-canada-west-gpt4-turbo"
}
)
# and azure is so fucked that i'm pretty sure you need to specify different clients for different regions..
# :(
# just adopt the oai standard :\ please. this hurts.
# Like the model map is per client. So now we can't even disambiguate providers and model maps.
# Mistral had to be special didn';t it;;
class MistralProvider(Provider):
models = [
"some trash model"
]
def chat(client, model, message):
chat_response = client.chat(
model=model,
messages=messages,
)
# then we handle conflict resolution:
# "Two providers have registered this mdoel, you msut specify a provider."
# Basically we don't handle the client ptoblem but we do handle the code mismatch problem
# So really it'll also be per model.
| 1,813
|
1e68421cc82666da1c88ae3271149faf0c0fd90b538ae5fca36c4e823720a2bb
| 30.386473
| 316
| 0.716441
| 3.583012
| false
| false
| false
| false
|
meta-llama/llama-stack
|
llama_stack/providers/remote/inference/nvidia/config.py
| 2,012
| 0
|
MIT License
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import os
from typing import Any, Dict, Optional
from pydantic import BaseModel, Field, SecretStr
from llama_stack.schema_utils import json_schema_type
@json_schema_type
class NVIDIAConfig(BaseModel):
    """
    Configuration for the NVIDIA NIM inference endpoint.

    Attributes:
        url (str): A base url for accessing the NVIDIA NIM, e.g. http://localhost:8000
        api_key (str): The access key for the hosted NIM endpoints

    There are two ways to access NVIDIA NIMs -
     0. Hosted: Preview APIs hosted at https://integrate.api.nvidia.com
     1. Self-hosted: You can run NVIDIA NIMs on your own infrastructure

    By default the configuration is set to use the hosted APIs. This requires
    an API key which can be obtained from https://ngc.nvidia.com/.

    By default the configuration will attempt to read the NVIDIA_API_KEY environment
    variable to set the api_key. Please do not put your API key in code.

    If you are using a self-hosted NVIDIA NIM, you can set the url to the
    URL of your running NVIDIA NIM and do not need to set the api_key.
    """

    url: str = Field(
        default_factory=lambda: os.getenv("NVIDIA_BASE_URL", "https://integrate.api.nvidia.com"),
        description="A base url for accessing the NVIDIA NIM",
    )
    api_key: Optional[SecretStr] = Field(
        default_factory=lambda: os.getenv("NVIDIA_API_KEY"),
        # Typo fix in user-facing description: "of using" -> "if using".
        description="The NVIDIA API key, only needed if using the hosted service",
    )
    timeout: int = Field(
        default=60,
        description="Timeout for the HTTP requests",
    )

    @classmethod
    def sample_run_config(cls, **kwargs) -> Dict[str, Any]:
        """Return a sample run configuration using env-var placeholders."""
        return {
            "url": "${env.NVIDIA_BASE_URL:https://integrate.api.nvidia.com}",
            "api_key": "${env.NVIDIA_API_KEY:}",
        }
| 550
|
63d8860697cb44619ee4804ebafaca905649a674f55b4dbfa7c0fb9ed1fdd1cd
| 34.928571
| 97
| 0.681412
| 3.658182
| false
| true
| false
| false
|
microsoft/markitdown
|
packages/markitdown-sample-plugin/src/markitdown_sample_plugin/_plugin.py
| 1,824
| 0
|
MIT License
|
import locale
from typing import BinaryIO, Any
from striprtf.striprtf import rtf_to_text
from markitdown import (
MarkItDown,
DocumentConverter,
DocumentConverterResult,
StreamInfo,
)
__plugin_interface_version__ = (
    1  # The version of the plugin interface that this plugin uses
)

# MIME-type prefixes and file extensions this converter will accept.
ACCEPTED_MIME_TYPE_PREFIXES = [
    "text/rtf",
    "application/rtf",
]

ACCEPTED_FILE_EXTENSIONS = [".rtf"]
def register_converters(markitdown: MarkItDown, **kwargs):
    """
    Called during construction of MarkItDown instances to register converters provided by plugins.

    Args:
        markitdown: The MarkItDown instance to register converters on.
        **kwargs: Extra arguments accepted by the plugin interface; unused here.
    """
    # Simply create and attach an RtfConverter instance
    markitdown.register_converter(RtfConverter())
class RtfConverter(DocumentConverter):
    """
    Converts an RTF file to text in the simplest possible way.
    """

    def accepts(
        self,
        file_stream: BinaryIO,
        stream_info: StreamInfo,
        **kwargs: Any,
    ) -> bool:
        """Return True when the stream looks like RTF, judged by file
        extension or by MIME-type prefix."""
        mimetype = (stream_info.mimetype or "").lower()
        extension = (stream_info.extension or "").lower()
        if extension in ACCEPTED_FILE_EXTENSIONS:
            return True
        return any(
            mimetype.startswith(prefix) for prefix in ACCEPTED_MIME_TYPE_PREFIXES
        )

    def convert(
        self,
        file_stream: BinaryIO,
        stream_info: StreamInfo,
        **kwargs: Any,
    ) -> DocumentConverterResult:
        """Decode the stream and strip RTF markup into plain text."""
        # Use the declared charset, falling back to the system default.
        charset = stream_info.charset or locale.getpreferredencoding()
        decoded_text = file_stream.read().decode(charset)
        return DocumentConverterResult(
            title=None,
            markdown=rtf_to_text(decoded_text),
        )
| 439
|
4fd3c8aceacdf8c76cec1c9c2b41da3c8ac8abfada1897760089c7191a8255c6
| 24.690141
| 107
| 0.645833
| 4.154897
| false
| false
| false
| false
|
browser-use/browser-use
|
examples/features/restrict_urls.py
| 954
| 0
|
MIT License
|
import os
import sys
from langchain_openai import ChatOpenAI
from browser_use.browser.context import BrowserContextConfig
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import asyncio
from browser_use import Agent
from browser_use.browser.browser import Browser, BrowserConfig
# LLM driving the agent; temperature 0 for deterministic tool selection.
llm = ChatOpenAI(model='gpt-4o', temperature=0.0)
task = (
    "go to google.com and search for openai.com and click on the first link then extract content and scroll down - what's there?"
)
# Domains the browser context is allowed to navigate to.
# NOTE(review): the task asks the agent to open openai.com, which is NOT in
# this allowlist — presumably intentional, to demonstrate URL restriction.
allowed_domains = ['google.com']
browser = Browser(
    config=BrowserConfig(
        # assumes Chrome at the default macOS install path — TODO confirm
        browser_binary_path='/Applications/Google Chrome.app/Contents/MacOS/Google Chrome',
        new_context_config=BrowserContextConfig(
            allowed_domains=allowed_domains,
        ),
    ),
)
agent = Agent(
    task=task,
    llm=llm,
    browser=browser,
)
async def main():
    """Run the agent, keep the browser open until the user confirms, then close it."""
    await agent.run(max_steps=25)
    # input() blocks the event loop, which is fine here: the agent run has
    # already finished and we only wait for the user before cleanup.
    input('Press Enter to close the browser...')
    await browser.close()


if __name__ == '__main__':
    # Guard the entry point so importing this module does not launch a browser.
    asyncio.run(main())
| 299
|
95283bb8bfd9d1fc02f586b25166b2feae7742836ef26933843abb1382e59dc3
| 20.681818
| 126
| 0.750524
| 3.190635
| false
| true
| false
| false
|
BrainBlend-AI/atomic-agents
|
atomic-examples/deep-research/deep_research/agents/qa_agent.py
| 3,151
| 0
|
MIT License
|
import instructor
import openai
from pydantic import Field
from atomic_agents.agents.base_agent import BaseIOSchema, BaseAgent, BaseAgentConfig
from atomic_agents.lib.components.system_prompt_generator import SystemPromptGenerator
from deep_research.config import ChatConfig
class QuestionAnsweringAgentInputSchema(BaseIOSchema):
    """This is the input schema for the QuestionAnsweringAgent."""
    # NOTE(review): BaseIOSchema docstrings and Field descriptions are
    # presumably surfaced to the LLM as schema documentation — confirm
    # before rewording them.
    question: str = Field(..., description="The question to answer.")
class QuestionAnsweringAgentOutputSchema(BaseIOSchema):
    """This is the output schema for the QuestionAnsweringAgent."""
    # The model fills both fields; the description below steers it toward
    # topic-specific (not generic conversational) follow-up questions.
    answer: str = Field(..., description="The answer to the question.")
    follow_up_questions: list[str] = Field(
        ...,
        description=(
            "Specific questions about the topic that would help the user learn more details about the subject matter. "
            "For example, if discussing a Nobel Prize winner, suggest questions about their research, impact, or "
            "related scientific concepts."
        ),
    )
# Module-level singleton agent for answering research questions. The prompt
# below (background/steps/output_instructions) is runtime data sent to the
# model — treat the strings as behavior, not documentation.
question_answering_agent = BaseAgent(
    BaseAgentConfig(
        client=instructor.from_openai(openai.OpenAI(api_key=ChatConfig.api_key)),
        model=ChatConfig.model,
        system_prompt_generator=SystemPromptGenerator(
            background=[
                "You are an expert question answering agent focused on providing factual information and encouraging deeper topic exploration.",
                "For general greetings or non-research questions, provide relevant questions about the system's capabilities and research functions.",
            ],
            steps=[
                "Analyze the question and identify the core topic",
                "Answer the question using available information",
                "For topic-specific questions, generate follow-up questions that explore deeper aspects of the same topic",
                "For general queries about the system, suggest questions about research capabilities and functionality",
            ],
            output_instructions=[
                "Answer in a direct, informative manner",
                "NEVER generate generic conversational follow-ups like 'How are you?' or 'What would you like to know?'",
                "For topic questions, follow-up questions MUST be about specific aspects of that topic",
                "For system queries, follow-up questions should be about specific research capabilities",
                "Example good follow-ups for a Nobel Prize question:",
                "- What specific discoveries led to their Nobel Prize?",
                "- How has their research influenced their field?",
                "- What other scientists collaborated on this research?",
                "Example good follow-ups for system queries:",
                "- What types of sources do you use for research?",
                "- How do you verify information accuracy?",
                "- What are the limitations of your search capabilities?",
            ],
        ),
        input_schema=QuestionAnsweringAgentInputSchema,
        output_schema=QuestionAnsweringAgentOutputSchema,
    )
)
| 645
|
ed2cb8909ce5045a8ace247a7778fe7d9eb6e53449908e38ef6b3a23b7d4b543
| 49.015873
| 150
| 0.669629
| 4.885271
| false
| true
| false
| false
|
crestalnetwork/intentkit
|
app/services/tg/bot/kind/ai_relayer/router.py
| 6,354
| 0
|
MIT License
|
import inspect
import logging
import telegramify_markdown
from aiogram import Router
from aiogram.filters import Command, CommandStart
from aiogram.types import Message
from epyxid import XID
from app.core.client import execute_agent
from app.services.tg.bot import pool
from app.services.tg.bot.filter.chat_type import GroupOnlyFilter
from app.services.tg.bot.filter.content_type import TextOnlyFilter
from app.services.tg.bot.filter.id import WhitelistedChatIDsFilter
from app.services.tg.bot.filter.no_bot import NoBotFilter
from app.services.tg.utils.cleanup import remove_bot_name
from models.chat import AuthorType, ChatMessageCreate
from utils.slack_alert import send_slack_message
logger = logging.getLogger(__name__)
def cur_func_name():
    """Return the name of the function that called this helper (for log messages)."""
    caller = inspect.stack()[1]
    return caller.function
def cur_mod_name():
    """Return the module name of the caller's frame (for log messages)."""
    caller_frame = inspect.stack()[1].frame
    return inspect.getmodule(caller_frame).__name__
general_router = Router()
@general_router.message(Command("chat_id"), NoBotFilter(), TextOnlyFilter())
async def command_chat_id(message: Message) -> None:
    """Reply with the numeric chat id — handy for building chat whitelists."""
    try:
        chat_id_text = str(message.chat.id)
        await message.answer(text=chat_id_text)
    except Exception as e:
        # Log and swallow: a failed reply should not crash the dispatcher.
        logger.warning(
            f"error processing in function:{cur_func_name()}, token:{message.bot.token} err: {str(e)}"
        )
## group commands and messages
@general_router.message(
    CommandStart(),
    NoBotFilter(),
    WhitelistedChatIDsFilter(),
    GroupOnlyFilter(),
    TextOnlyFilter(),
)
async def gp_command_start(message: Message):
    """Answer /start in a whitelisted group with the agent's group greeting."""
    try:
        bot_entry = pool.bot_by_token(message.bot.token)
        await message.answer(text=bot_entry.greeting_group)
    except Exception as e:
        # Log and swallow: a failed greeting should not crash the dispatcher.
        logger.warning(
            f"error processing in function:{cur_func_name()}, token:{message.bot.token} err: {str(e)}"
        )
@general_router.message(
    WhitelistedChatIDsFilter(), NoBotFilter(), GroupOnlyFilter(), TextOnlyFilter()
)
async def gp_process_message(message: Message) -> None:
    """Relay a group text message to the agent when the bot is addressed.

    The bot only responds when the message is a reply to one of its own
    messages or mentions its username; all other group traffic is ignored.
    """
    bot = await message.bot.get_me()
    if (
        message.reply_to_message
        and message.reply_to_message.from_user.id == message.bot.id
    ) or bot.username in message.text:
        cached_bot_item = pool.bot_by_token(message.bot.token)
        if cached_bot_item is None:
            logger.warning(f"bot with token {message.bot.token} not found in cache.")
            return
        try:
            # remove bot name tag from text
            message_text = remove_bot_name(bot.username, message.text)
            # Oversized messages trigger an alert but are still processed.
            # 65535 presumably mirrors a storage column limit — TODO confirm.
            if len(message_text) > 65535:
                send_slack_message(
                    (
                        "Message too long from telegram.\n"
                        f"length: {len(message_text)}\n"
                        f"chat_id:{message.chat.id}\n"
                        f"agent:{cached_bot_item.agent_id}\n"
                        f"user:{message.from_user.id}\n"
                        f"content:{message_text[:100]}..."
                    )
                )
            # NOTE(review): `input` shadows the builtin; kept as-is in this
            # documentation-only pass.
            input = ChatMessageCreate(
                id=str(XID()),
                agent_id=cached_bot_item.agent_id,
                chat_id=pool.agent_chat_id(
                    cached_bot_item.is_public_memory, message.chat.id
                ),
                user_id=str(message.from_user.id),
                author_id=str(message.from_user.id),
                author_type=AuthorType.TELEGRAM,
                thread_type=AuthorType.TELEGRAM,
                message=message_text,
            )
            response = await execute_agent(input)
            # Reply with the agent's last message, escaped for MarkdownV2.
            await message.answer(
                text=telegramify_markdown.markdownify(
                    response[-1].message if response else "Server Error"
                ),
                parse_mode="MarkdownV2",
                reply_to_message_id=message.message_id,
            )
        except Exception as e:
            logger.warning(
                f"error processing in function:{cur_func_name()}, token:{message.bot.token}, err={str(e)}"
            )
            await message.answer(
                text="Server Error", reply_to_message_id=message.message_id
            )
## direct commands and messages
@general_router.message(
    CommandStart(), NoBotFilter(), WhitelistedChatIDsFilter(), TextOnlyFilter()
)
async def command_start(message: Message) -> None:
    """Answer /start in a direct chat with the agent's per-user greeting."""
    try:
        bot_entry = pool.bot_by_token(message.bot.token)
        await message.answer(text=bot_entry.greeting_user)
    except Exception as e:
        # Log and swallow: a failed greeting should not crash the dispatcher.
        logger.warning(
            f"error processing in function:{cur_func_name()}, token:{message.bot.token} err: {str(e)}"
        )
@general_router.message(
    TextOnlyFilter(),
    NoBotFilter(),
    WhitelistedChatIDsFilter(),
)
async def process_message(message: Message) -> None:
    """Relay a direct (non-group) text message to the agent and reply."""
    cached_bot_item = pool.bot_by_token(message.bot.token)
    if cached_bot_item is None:
        logger.warning(f"bot with token {message.bot.token} not found in cache.")
        return
    # Oversized messages trigger an alert but are still processed.
    # 65535 presumably mirrors a storage column limit — TODO confirm.
    if len(message.text) > 65535:
        send_slack_message(
            (
                "Message too long from telegram.\n"
                f"length: {len(message.text)}\n"
                f"chat_id:{message.chat.id}\n"
                f"agent:{cached_bot_item.agent_id}\n"
                f"user:{message.from_user.id}\n"
                f"content:{message.text[:100]}..."
            )
        )
    try:
        # NOTE(review): `input` shadows the builtin; kept as-is in this
        # documentation-only pass. Direct chats never share memory, hence
        # the hard-coded False for is_public_memory.
        input = ChatMessageCreate(
            id=str(XID()),
            agent_id=cached_bot_item.agent_id,
            chat_id=pool.agent_chat_id(False, message.chat.id),
            user_id=str(message.from_user.id),
            author_id=str(message.from_user.id),
            author_type=AuthorType.TELEGRAM,
            thread_type=AuthorType.TELEGRAM,
            message=message.text,
        )
        response = await execute_agent(input)
        # Reply with the agent's last message, escaped for MarkdownV2.
        await message.answer(
            text=telegramify_markdown.markdownify(
                response[-1].message if response else "Server Error"
            ),
            parse_mode="MarkdownV2",
            reply_to_message_id=message.message_id,
        )
    except Exception as e:
        logger.warning(
            f"error processing in function:{cur_func_name()}, token:{message.bot.token} err:{str(e)}"
        )
        await message.answer(
            text="Server Error", reply_to_message_id=message.message_id
        )
| 1,668
|
92f35730c12e33a0cd5fb428321da99b4a1d4e324ba591fe0fd35fc616be2907
| 33.16129
| 106
| 0.597891
| 3.809353
| false
| false
| false
| false
|
openai/openai-agents-python
|
examples/agent_patterns/output_guardrails.py
| 2,354
| 0
|
MIT License
|
from __future__ import annotations
import asyncio
import json
from pydantic import BaseModel, Field
from agents import (
Agent,
GuardrailFunctionOutput,
OutputGuardrailTripwireTriggered,
RunContextWrapper,
Runner,
output_guardrail,
)
"""
This example shows how to use output guardrails.
Output guardrails are checks that run on the final output of an agent.
They can be used to do things like:
- Check if the output contains sensitive data
- Check if the output is a valid response to the user's message
In this example, we'll use a (contrived) example where we check if the agent's response contains
a phone number.
"""
# The agent's output type
class MessageOutput(BaseModel):
    # NOTE(review): Field descriptions are part of the schema the model sees
    # (via output_type below) — treat them as behavior, not documentation.
    reasoning: str = Field(description="Thoughts on how to respond to the user's message")
    response: str = Field(description="The response to the user's message")
    user_name: str | None = Field(description="The name of the user who sent the message, if known")
@output_guardrail
async def sensitive_data_check(
    context: RunContextWrapper, agent: Agent, output: MessageOutput
) -> GuardrailFunctionOutput:
    """Trip the guardrail when the (contrived) phone-number marker "650" appears
    in either the response or the reasoning of the agent's output."""
    in_response = "650" in output.response
    in_reasoning = "650" in output.reasoning
    return GuardrailFunctionOutput(
        output_info={
            "phone_number_in_response": in_response,
            "phone_number_in_reasoning": in_reasoning,
        },
        tripwire_triggered=in_response or in_reasoning,
    )
# Agent whose structured MessageOutput is checked by the guardrail above.
agent = Agent(
    name="Assistant",
    instructions="You are a helpful assistant.",
    output_type=MessageOutput,
    output_guardrails=[sensitive_data_check],
)
async def main():
    """Demonstrate the guardrail: one benign run, one that should trip it."""
    # This should be ok
    await Runner.run(agent, "What's the capital of California?")
    print("First message passed")
    # This should trip the guardrail
    try:
        result = await Runner.run(
            agent, "My phone number is 650-123-4567. Where do you think I live?"
        )
        # Reaching this line means the guardrail failed to trigger.
        print(
            f"Guardrail didn't trip - this is unexpected. Output: {json.dumps(result.final_output.model_dump(), indent=2)}"
        )
    except OutputGuardrailTripwireTriggered as e:
        print(f"Guardrail tripped. Info: {e.guardrail_result.output.output_info}")
if __name__ == "__main__":
    asyncio.run(main())
| 638
|
7a77c58ce62aa9773d0405d42e9a5df05c075f8f8533c287604d8c77be6f9cee
| 28.425
| 123
| 0.696262
| 3.689655
| false
| false
| false
| false
|
MadcowD/ell
|
examples/evals/vibes.py
| 703
| 0
|
MIT License
|
import ell
from pydantic import BaseModel
class TweetInput(BaseModel):
    """Input payload for the `tweet` LMP: the topic to tweet about."""
    input: str
# LMP: asks the model for a roon-style, lowercase tweet about obj.input.
# NOTE(review): deliberately no docstring added — ell presumably treats an
# @ell.simple function's docstring as the system prompt; confirm before adding.
@ell.simple(model="gpt-4o")
def tweet(obj: TweetInput):
    print(obj)  # debug: show the incoming TweetInput on each invocation
    return f"Write a tweet like roon in lower case about {obj.input}"
# Three sample datapoints; each "input" list is the positional-args list
# passed to the `tweet` LMP by the evaluation runner.
dataset = [
    {"input": [TweetInput(input="Polymath")]},
    {"input": [TweetInput(input="Dogs")]},
    {"input": [TweetInput(input="Intelligence")]},  # fixed typo: was "Intelligenve"
]
# No automated metric beyond the simple criterion below; iterate by
# eyeballing the outputs.
eval = ell.evaluation.Evaluation(  # NOTE(review): `eval` shadows the builtin
    name="vibes",
    dataset=dataset,
    # Passes when the generated tweet mentions "roon" (case-insensitive).
    criterion=lambda datapoint, output: "roon" in output.lower(),
)
if __name__ == "__main__":
    # Store traces under ./logdir and echo verbose logs while developing.
    ell.init(store="./logdir", verbose=True)
    eval.run(tweet)
    # tweet("hi")
| 226
|
5bd70ca8d6be6d3b54ffd89a49d769ef888e87f793e96b62562ef03f2ddf9ea4
| 21.677419
| 69
| 0.654339
| 3.110619
| false
| false
| false
| false
|
meta-llama/llama-stack
|
llama_stack/cli/verify_download.py
| 4,620
| 0
|
MIT License
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import argparse
import hashlib
from dataclasses import dataclass
from functools import partial
from pathlib import Path
from typing import Dict, List, Optional
from rich.console import Console
from rich.progress import Progress, SpinnerColumn, TextColumn
from llama_stack.cli.subcommand import Subcommand
@dataclass
class VerificationResult:
    """Outcome of checking one file against its expected checksum."""
    filename: str  # path relative to the model directory
    expected_hash: str  # md5 recorded in checklist.chk
    actual_hash: Optional[str]  # None when the file does not exist
    exists: bool  # whether the file was found on disk
    matches: bool  # True only when the file exists and hashes are equal
class VerifyDownload(Subcommand):
    """Llama cli for verifying downloaded model files"""
    def __init__(self, subparsers: argparse._SubParsersAction):
        """Register the `verify-download` subcommand on *subparsers*."""
        super().__init__()
        self.parser = subparsers.add_parser(
            "verify-download",
            prog="llama verify-download",
            description="Verify integrity of downloaded model files",
            formatter_class=argparse.RawTextHelpFormatter,
        )
        # Argument wiring lives in a module-level helper for reuse/testing.
        setup_verify_download_parser(self.parser)
def setup_verify_download_parser(parser: argparse.ArgumentParser) -> None:
    """Attach the `verify-download` arguments and bind the command handler."""
    parser.add_argument(
        "--model-id",
        required=True,
        help="Model ID to verify (only for models downloaded from Meta)",
    )
    # partial() injects the parser so run_verify_cmd can call parser.error().
    parser.set_defaults(func=partial(run_verify_cmd, parser=parser))
def calculate_md5(filepath: Path, chunk_size: int = 8192) -> str:
    """Return the hex MD5 digest of *filepath*, read in *chunk_size* blocks.

    MD5 is used only for download-integrity verification, not for security.
    TODO: switch to SHA256.
    """
    digest = hashlib.md5(usedforsecurity=False)
    with filepath.open("rb") as stream:
        while block := stream.read(chunk_size):
            digest.update(block)
    return digest.hexdigest()
def load_checksums(checklist_path: Path) -> Dict[str, str]:
    """Parse a checklist of "<md5> <path>" lines into a {path: md5} mapping.

    Tolerates one-or-more spaces between hash and path (md5sum emits two)
    and strips a leading "./" from the path.
    """
    checksums: Dict[str, str] = {}
    with open(checklist_path, "r") as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            # Split on the first whitespace run so filenames that contain
            # spaces stay intact. The previous split(" ", 1) left a leading
            # space behind when hash and path were separated by two spaces.
            md5sum, filepath = line.split(None, 1)
            # removeprefix only drops an exact "./" prefix; the previous
            # lstrip("./") stripped a character *set* and would also eat the
            # leading dot of hidden files (e.g. "./.hidden" -> "hidden").
            filepath = filepath.removeprefix("./")
            checksums[filepath] = md5sum
    return checksums
def verify_files(model_dir: Path, checksums: Dict[str, str], console: Console) -> List[VerificationResult]:
    """Check every checklist entry under *model_dir*, showing a spinner per file."""
    results: List[VerificationResult] = []
    progress_columns = (
        SpinnerColumn(),
        TextColumn("[progress.description]{task.description}"),
    )
    with Progress(*progress_columns, console=console) as progress:
        for rel_path, expected in checksums.items():
            target = model_dir / rel_path
            task_id = progress.add_task(f"Verifying {rel_path}...", total=None)
            present = target.exists()
            # Only hash files that exist; missing files report actual_hash=None.
            computed = calculate_md5(target) if present else None
            results.append(
                VerificationResult(
                    filename=rel_path,
                    expected_hash=expected,
                    actual_hash=computed,
                    exists=present,
                    matches=present and computed == expected,
                )
            )
            progress.remove_task(task_id)
    return results
def run_verify_cmd(args: argparse.Namespace, parser: argparse.ArgumentParser):
    """Verify every file of the model `args.model_id` against checklist.chk.

    Exits via parser.error() when the model directory or checklist is missing;
    otherwise prints a per-file pass/fail report to the console.
    """
    # Local import keeps CLI startup light and avoids pulling in the
    # distribution utilities unless this command actually runs.
    from llama_stack.distribution.utils.model_utils import model_local_dir
    console = Console()
    model_dir = Path(model_local_dir(args.model_id))
    checklist_path = model_dir / "checklist.chk"
    if not model_dir.exists():
        parser.error(f"Model directory not found: {model_dir}")
    if not checklist_path.exists():
        parser.error(f"Checklist file not found: {checklist_path}")
    checksums = load_checksums(checklist_path)
    results = verify_files(model_dir, checksums, console)
    # Print results
    console.print("\nVerification Results:")
    all_good = True
    for result in results:
        if not result.exists:
            console.print(f"[red]❌ {result.filename}: File not found[/red]")
            all_good = False
        elif not result.matches:
            console.print(
                f"[red]❌ {result.filename}: Hash mismatch[/red]\n"
                f"  Expected: {result.expected_hash}\n"
                f"  Got:      {result.actual_hash}"
            )
            all_good = False
        else:
            console.print(f"[green]✓ {result.filename}: Verified[/green]")
    if all_good:
        console.print("\n[green]All files verified successfully![/green]")
| 1,130
|
ec7457e5eb1d6ad268b62bdb1e5d7c7cb478efde7899018bd92ac51eb0157433
| 30.862069
| 107
| 0.619264
| 4.088496
| false
| false
| false
| false
|
circlemind-ai/fast-graphrag
|
fast_graphrag/_policies/_base.py
| 2,627
| 0
|
MIT License
|
from dataclasses import dataclass, field
from typing import Any, Generic, Iterable, Tuple, Type
from scipy.sparse import csr_matrix
from fast_graphrag._llm._llm_openai import BaseLLMService
from fast_graphrag._storage._base import BaseGraphStorage
from fast_graphrag._types import GTEdge, GTId, GTNode, TIndex
@dataclass
class BasePolicy:
    """Common base for all policies; carries an opaque, policy-specific config."""
    config: Any = field()  # required: field() without a default has no default value
####################################################################################################
# GRAPH UPSERT POLICIES
####################################################################################################
@dataclass
class BaseNodeUpsertPolicy(BasePolicy, Generic[GTNode, GTId]):
    """Interface for merging `source_nodes` into a graph storage.

    Implementations return the (possibly updated) storage together with the
    (index, node) pairs of the upserted nodes.
    """
    async def __call__(
        self, llm: BaseLLMService, target: BaseGraphStorage[GTNode, GTEdge, GTId], source_nodes: Iterable[GTNode]
    ) -> Tuple[BaseGraphStorage[GTNode, GTEdge, GTId], Iterable[Tuple[TIndex, GTNode]]]:
        raise NotImplementedError
@dataclass
class BaseEdgeUpsertPolicy(BasePolicy, Generic[GTEdge, GTId]):
    """Interface for merging `source_edges` into a graph storage.

    Implementations return the (possibly updated) storage together with the
    (index, edge) pairs of the upserted edges.
    """
    async def __call__(
        self, llm: BaseLLMService, target: BaseGraphStorage[GTNode, GTEdge, GTId], source_edges: Iterable[GTEdge]
    ) -> Tuple[BaseGraphStorage[GTNode, GTEdge, GTId], Iterable[Tuple[TIndex, GTEdge]]]:
        raise NotImplementedError
@dataclass
class BaseGraphUpsertPolicy(BasePolicy, Generic[GTNode, GTEdge, GTId]):
    """Interface for upserting nodes and edges via two composed sub-policies."""
    nodes_upsert_cls: Type[BaseNodeUpsertPolicy[GTNode, GTId]] = field()
    edges_upsert_cls: Type[BaseEdgeUpsertPolicy[GTEdge, GTId]] = field()
    # Instantiated in __post_init__ from the classes above; init=False keeps
    # them out of the generated __init__ signature.
    _nodes_upsert: BaseNodeUpsertPolicy[GTNode, GTId] = field(init=False)
    _edges_upsert: BaseEdgeUpsertPolicy[GTEdge, GTId] = field(init=False)
    def __post_init__(self):
        # Both sub-policies share this policy's config object.
        self._nodes_upsert = self.nodes_upsert_cls(self.config)
        self._edges_upsert = self.edges_upsert_cls(self.config)
    async def __call__(
        self,
        llm: BaseLLMService,
        target: BaseGraphStorage[GTNode, GTEdge, GTId],
        source_nodes: Iterable[GTNode],
        source_edges: Iterable[GTEdge],
    ) -> Tuple[
        BaseGraphStorage[GTNode, GTEdge, GTId],
        Iterable[Tuple[TIndex, GTNode]],
        Iterable[Tuple[TIndex, GTEdge]],
    ]:
        raise NotImplementedError
####################################################################################################
# RANKING POLICIES
####################################################################################################
class BaseRankingPolicy(BasePolicy):
    """Identity ranking policy: base implementation returns scores unchanged.

    Subclasses may reweight or prune the score row before returning it.
    """
    def __call__(self, scores: csr_matrix) -> csr_matrix:
        # NOTE(review): assert is stripped under `python -O`, so this is a
        # dev-time check only, not input validation.
        assert scores.shape[0] == 1, "Ranking policies only supports batch size of 1"
        return scores
| 714
|
93e0e95789ccc2d5e46563a65e4a99595087cfcecb9a761eacaedea1190d180a
| 36.528571
| 113
| 0.596117
| 3.679272
| false
| false
| false
| false
|
meta-llama/llama-stack
|
llama_stack/models/llama/llama4/model.py
| 16,451
| 0
|
MIT License
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import math
from typing import Any, Dict, List, Optional, Tuple
import fairscale.nn.model_parallel.initialize as fs_init
import torch
import torch.nn.functional as F
from fairscale.nn.model_parallel.layers import (
ColumnParallelLinear,
RowParallelLinear,
VocabParallelEmbedding,
)
from torch import nn
from .args import ModelArgs
from .datatypes import TransformerInput, TransformerOutput
from .ffn import FeedForward
from .moe import MoE
class RMSNorm(torch.nn.Module):
def __init__(self, dim: int, eps: float = 1e-6):
super().__init__()
self.eps = eps
self.weight = nn.Parameter(torch.ones(dim))
def _norm(self, x):
return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
def forward(self, x):
output = self._norm(x.float()).type_as(x)
return output * self.weight
class L2Norm(torch.nn.Module):
def __init__(self, dim: int, eps: float = 1e-6):
super().__init__()
self.eps = eps
def _norm(self, x):
return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
def forward(self, x):
return self._norm(x.float()).type_as(x)
def apply_scaling(freqs: torch.Tensor):
# Values obtained from grid search
scale_factor = 8
low_freq_factor = 1
high_freq_factor = 4
old_context_len = 8192 # original llama3 length
low_freq_wavelen = old_context_len / low_freq_factor
high_freq_wavelen = old_context_len / high_freq_factor
new_freqs = []
for freq in freqs:
wavelen = 2 * math.pi / freq
if wavelen < high_freq_wavelen:
new_freqs.append(freq)
elif wavelen > low_freq_wavelen:
new_freqs.append(freq / scale_factor)
else:
assert low_freq_wavelen != high_freq_wavelen
smooth = (old_context_len / wavelen - low_freq_factor) / (high_freq_factor - low_freq_factor)
new_freqs.append((1 - smooth) * freq / scale_factor + smooth * freq)
return torch.tensor(new_freqs, dtype=freqs.dtype, device=freqs.device)
def precompute_freqs_cis(dim: int, end: int, theta: float = 10000.0, use_scaled: bool = False):
freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))
t = torch.arange(end, device=freqs.device, dtype=torch.float32)
if use_scaled:
freqs = apply_scaling(freqs)
freqs = torch.outer(t, freqs)
freqs_cis = torch.polar(torch.ones_like(freqs), freqs) # complex64
return freqs_cis
def reshape_for_broadcast(freqs_cis: torch.Tensor, x: torch.Tensor):
ndim = x.ndim
assert 0 <= 1 < ndim
assert freqs_cis.shape == (x.shape[1], x.shape[-1])
shape = [d if i == 1 or i == ndim - 1 else 1 for i, d in enumerate(x.shape)]
return freqs_cis.view(*shape)
def apply_rotary_emb(
xq: torch.Tensor,
xk: torch.Tensor,
freqs_cis: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))
xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2))
freqs_cis = reshape_for_broadcast(freqs_cis, xq_)
xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3)
xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3)
return xq_out.type_as(xq), xk_out.type_as(xk)
class Attention(nn.Module):
# TODO: this module needs to be moved into a separate file since it can be used by
# the vision encoder as well.
def __init__(
self,
args: ModelArgs,
use_qk_norm: bool,
use_rope: bool,
add_bias: bool = False,
):
super().__init__()
self.use_rope = use_rope
self.use_qk_norm = use_qk_norm
# For attention temperature tuning
self.attn_temperature_tuning = args.attn_temperature_tuning
self.floor_scale = args.floor_scale
self.attn_scale = args.attn_scale
self.n_heads = args.n_heads
self.n_kv_heads = args.n_heads if args.n_kv_heads is None else args.n_kv_heads
world_size = fs_init.get_model_parallel_world_size()
self.n_local_heads = args.n_heads // world_size
self.n_local_kv_heads = self.n_kv_heads // world_size
self.n_rep = self.n_local_heads // self.n_local_kv_heads
self.head_dim = args.dim // args.n_heads
self.wq = ColumnParallelLinear(
args.dim,
args.n_heads * self.head_dim,
bias=add_bias,
gather_output=False,
init_method=lambda x: x,
)
self.wk = ColumnParallelLinear(
args.dim,
self.n_kv_heads * self.head_dim,
bias=add_bias,
gather_output=False,
init_method=lambda x: x,
)
self.wv = ColumnParallelLinear(
args.dim,
self.n_kv_heads * self.head_dim,
bias=add_bias,
gather_output=False,
init_method=lambda x: x,
)
self.wo = RowParallelLinear(
args.n_heads * self.head_dim,
args.dim,
bias=add_bias,
input_is_parallel=True,
init_method=lambda x: x,
)
self.cache_k = torch.zeros(
(
args.max_batch_size,
args.max_seq_len,
self.n_local_kv_heads,
self.head_dim,
)
).cuda()
self.cache_v = torch.zeros(
(
args.max_batch_size,
args.max_seq_len,
self.n_local_kv_heads,
self.head_dim,
)
).cuda()
self.qk_norm = None
if self.use_qk_norm:
self.qk_norm = L2Norm(args.norm_eps)
self._register_load_state_dict_pre_hook(self.load_hook)
def load_hook(
self,
state_dict: Dict[str, Any],
prefix: str,
local_metadata: Dict[str, Any],
strict: bool,
missing_keys: List[str],
unexpected_keys: List[str],
error_msgs: List[str],
) -> None:
if prefix + "wqkv.weight" in state_dict:
wqkv = state_dict.pop(prefix + "wqkv.weight")
d, r = divmod(wqkv.shape[0], self.n_heads + 2 * self.n_kv_heads)
if r != 0:
raise ValueError(
f"shape={tuple(wqkv.shape)} is not divisible by "
f"n_heads ({self.n_heads}) + 2 * n_kv_heads ({self.n_kv_heads})"
)
wq, wk, wv = wqkv.split([d * self.n_heads, d * self.n_kv_heads, d * self.n_kv_heads], dim=0)
state_dict[prefix + "wq.weight"] = wq
state_dict[prefix + "wk.weight"] = wk
state_dict[prefix + "wv.weight"] = wv
def forward(
self,
x: torch.Tensor,
start_pos: int,
freqs_cis: torch.Tensor,
mask: Optional[torch.Tensor] = None,
):
bsz, seqlen, _ = x.shape
xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)
xq = xq.view(bsz, seqlen, self.n_local_heads, self.head_dim)
xk = xk.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
xv = xv.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
if self.use_rope:
xq, xk = apply_rotary_emb(xq, xk, freqs_cis=freqs_cis)
if self.use_qk_norm:
xq = self.qk_norm(xq)
xk = self.qk_norm(xk)
# We are applying temperature tuning (https://arxiv.org/abs/2501.19399) to NoPE layers, where
# the inference-time temperature tuning function is customized to not affect short context
# while working at very long context
if self.attn_temperature_tuning and not self.use_rope:
seq_positions = torch.arange(start_pos, start_pos + seqlen, device=xq.device, dtype=torch.float32)
attn_scales = torch.log(torch.floor((seq_positions + 1.0) / self.floor_scale) + 1.0) * self.attn_scale + 1.0
# reshape for broadcasting [seqlen] -> [1, seqlen, 1, 1]
attn_scales = attn_scales.view(1, seqlen, 1, 1)
xq = xq * attn_scales
self.cache_k = self.cache_k.to(xq)
self.cache_v = self.cache_v.to(xq)
self.cache_k[:bsz, start_pos : start_pos + seqlen] = xk
self.cache_v[:bsz, start_pos : start_pos + seqlen] = xv
xk = self.cache_k[:bsz, : start_pos + seqlen]
xv = self.cache_v[:bsz, : start_pos + seqlen]
xq, xk, xv = [t.transpose(1, 2) for t in (xq, xk, xv)]
xk = xk.repeat_interleave(self.n_rep, dim=1)
xv = xv.repeat_interleave(self.n_rep, dim=1)
attn_output = F.scaled_dot_product_attention(xq, xk, xv, attn_mask=mask, dropout_p=0.0)
attn_output = attn_output.transpose(1, 2).contiguous().view(bsz, seqlen, -1)
output = self.wo(attn_output)
return output
class TransformerBlock(nn.Module):
def __init__(self, layer_id: int, args: ModelArgs):
super().__init__()
self.n_heads = args.n_heads
self.dim = args.dim
self.head_dim = args.dim // args.n_heads if args.head_dim is None else args.head_dim
self.is_nope_layer = args.nope_layer_interval is not None and (layer_id + 1) % args.nope_layer_interval == 0
use_rope = not self.is_nope_layer
use_qk_norm = args.use_qk_norm and not self.is_nope_layer
self.attention = Attention(args, use_rope=use_rope, use_qk_norm=use_qk_norm)
if args.moe_args and (layer_id + 1) % args.moe_args.interleave_moe_layer_step == 0:
self.feed_forward = MoE(
dim=args.dim,
hidden_dim=int(args.ffn_exp * args.dim),
ffn_dim_multiplier=args.ffn_dim_multiplier,
multiple_of=args.multiple_of,
moe_args=args.moe_args,
)
else:
hidden_dim = int(4 * args.dim)
hidden_dim = int(2 * hidden_dim / 3)
if args.ffn_dim_multiplier is not None:
hidden_dim = int(args.ffn_dim_multiplier * hidden_dim)
hidden_dim = args.multiple_of * ((hidden_dim + args.multiple_of - 1) // args.multiple_of)
self.feed_forward = FeedForward(
dim=args.dim,
hidden_dim=hidden_dim,
)
self.layer_id = layer_id
self.attention_norm = RMSNorm(args.dim, eps=args.norm_eps)
self.ffn_norm = RMSNorm(args.dim, eps=args.norm_eps)
self._register_load_state_dict_pre_hook(self.load_hook)
def load_hook(
self,
state_dict: Dict[str, Any],
prefix: str,
local_metadata: Dict[str, Any],
strict: bool,
missing_keys: List[str],
unexpected_keys: List[str],
error_msgs: List[str],
) -> None:
if prefix + "attention.wqkv.layer_norm_weight" in state_dict:
state_dict[prefix + "attention_norm.weight"] = state_dict.pop(prefix + "attention.wqkv.layer_norm_weight")
if prefix + "feed_forward.mlp.layer_norm_weight" in state_dict:
state_dict[prefix + "ffn_norm.weight"] = state_dict.pop(prefix + "feed_forward.mlp.layer_norm_weight")
elif prefix + "feed_forward.norm.weight" in state_dict:
state_dict[prefix + "ffn_norm.weight"] = state_dict.pop(prefix + "feed_forward.norm.weight")
for k in (
"feed_forward.experts.mlp",
"feed_forward.mlp_shared",
"attention.wo",
"attention.wqkv",
):
if prefix + k + "._extra_state" in state_dict:
state_dict.pop(prefix + k + "._extra_state")
def forward(
self,
x: torch.Tensor,
start_pos: int,
freqs_cis: torch.Tensor,
global_attn_mask: Optional[torch.Tensor],
local_attn_mask: Optional[torch.Tensor],
):
# The iRoPE architecture uses global attention mask for NoPE layers or
# if chunked local attention is not used
if self.is_nope_layer or local_attn_mask is None:
mask = global_attn_mask
else:
mask = local_attn_mask
h = x + self.attention(self.attention_norm(x), start_pos, freqs_cis, mask)
out = h + self.feed_forward(self.ffn_norm(h))
return out
class Transformer(nn.Module):
def __init__(self, args: ModelArgs, **kwargs) -> None:
super().__init__()
self.args = args
self.vocab_size = args.vocab_size
self.n_layers = args.n_layers
self.tok_embeddings = VocabParallelEmbedding(args.vocab_size, args.dim, init_method=lambda x: x)
self.layers = torch.nn.ModuleList()
for layer_id in range(args.n_layers):
self.layers.append(TransformerBlock(layer_id, args))
self.norm = RMSNorm(args.dim, eps=args.norm_eps)
self.output = ColumnParallelLinear(args.dim, args.vocab_size, bias=False, init_method=lambda x: x)
self.freqs_cis = precompute_freqs_cis(
args.dim // args.n_heads,
args.max_seq_len * 2,
args.rope_theta,
args.use_scaled_rope,
)
vision_args = self.args.vision_args
if vision_args:
# circular import otherwise until we refactor out Attention
from .vision.embedding import VisionEmbeddings
self.vision_embeddings = VisionEmbeddings(vision_args)
self.vision_projection = ColumnParallelLinear(
vision_args.output_dim,
args.dim,
bias=False,
init_method=lambda x: x,
)
self._register_load_state_dict_pre_hook(self.load_hook)
def load_hook(
self,
state_dict: Dict[str, Any],
prefix: str,
local_metadata: Dict[str, Any],
strict: bool,
missing_keys: List[str],
unexpected_keys: List[str],
error_msgs: List[str],
) -> None:
if prefix + "rope.freqs" in state_dict:
state_dict.pop(prefix + "rope.freqs")
@torch.inference_mode()
def forward(self, model_input: TransformerInput) -> TransformerOutput:
tokens = model_input.tokens
start_pos = model_input.tokens_position
assert isinstance(start_pos, int), (
"This implementation does not support different start positions per batch item"
)
_bsz, seqlen = tokens.shape
h = self.tok_embeddings(tokens)
if image_embedding := model_input.image_embedding:
h_image = self.vision_projection(image_embedding.embedding)
h = h * ~image_embedding.mask + h_image * image_embedding.mask
self.freqs_cis = self.freqs_cis.to(h.device)
freqs_cis = self.freqs_cis[start_pos : start_pos + seqlen]
global_attn_mask, local_attn_mask = None, None
if seqlen > 1:
global_attn_mask = torch.full((seqlen, seqlen), float("-inf"), device=tokens.device)
global_attn_mask = torch.triu(global_attn_mask, diagonal=1).type_as(h)
# https://github.com/pytorch/pytorch/issues/100005
# torch.triu is buggy when the device is mps: filled values are
# nan instead of 0.
if global_attn_mask.device.type == torch.device("mps").type:
global_attn_mask = torch.nan_to_num(global_attn_mask, nan=0.0)
if chunk_size := self.args.attention_chunk_size:
local_attn_mask = create_chunked_attention_mask(seqlen, chunk_size, tokens.device)
for layer in self.layers:
h = layer(h, start_pos, freqs_cis, global_attn_mask, local_attn_mask)
h = self.norm(h)
output = self.output(h).float()
return TransformerOutput(logits=output)
# tokens (0, K), (K, 2K), (2K, 3K) attend to each other when doing local chunked attention
# in the iRoPE architecture
def create_chunked_attention_mask(seq_len: int, attention_chunk_size: int, device: torch.device) -> torch.Tensor:
    """Build a boolean (seq_len, seq_len) chunked-causal attention mask.

    Entry [i, j] is True when query position i may attend key position j:
    both positions fall in the same chunk of size ``attention_chunk_size``
    AND j <= i (causal within the chunk).
    """
    positions = torch.arange(seq_len)
    chunk_ids = positions // attention_chunk_size
    same_chunk = chunk_ids.unsqueeze(0) == chunk_ids.unsqueeze(1)
    # positions.unsqueeze(0)[i, j] == j and unsqueeze(1)[i, j] == i, so this is j <= i.
    causal = positions.unsqueeze(0) <= positions.unsqueeze(1)
    return (same_chunk & causal).to(device)
| 5,031
|
8dde12556653f529ad9d2fce1489e66130626e087e9a27b38d06ee5e78e53901
| 36.219457
| 120
| 0.584888
| 3.269926
| false
| false
| false
| false
|
MadcowD/ell
|
src/ell/studio/server.py
| 11,417
| 0
|
MIT License
|
from typing import Optional, Dict, Any, List
from sqlmodel import Session
from ell.stores.sql import PostgresStore, SQLiteStore
from ell import __version__
from fastapi import FastAPI, Query, HTTPException, Depends, Response, WebSocket, WebSocketDisconnect
from fastapi.middleware.cors import CORSMiddleware
import logging
import json
from ell.studio.config import Config
from ell.studio.connection_manager import ConnectionManager
from ell.studio.datamodels import EvaluationResultDatapointPublic, InvocationPublicWithConsumes, SerializedLMPWithUses, EvaluationPublic, SpecificEvaluationRunPublic
from ell.stores.models.core import SerializedLMP
from datetime import datetime, timedelta
from sqlmodel import select
from ell.stores.models.evaluations import SerializedEvaluation
logger = logging.getLogger(__name__)
from ell.studio.datamodels import InvocationsAggregate
def get_serializer(config: Config):
    """Pick the storage backend from *config*.

    Postgres wins when a connection string is set; otherwise a SQLite store
    rooted at ``storage_dir`` is used.

    Raises:
        ValueError: If neither storage option is configured.
    """
    if config.pg_connection_string:
        return PostgresStore(config.pg_connection_string)
    if config.storage_dir:
        return SQLiteStore(config.storage_dir)
    raise ValueError("No storage configuration found")
def create_app(config:Config):
    """Build and return the ell Studio FastAPI application.

    Wires together a storage serializer (Postgres or SQLite, chosen from
    *config*), permissive CORS, a WebSocket connection manager on ``/ws``
    (plus an ``app.notify_clients`` broadcast helper), and the REST endpoints
    used by the Studio UI: LMPs, invocations, traces, blobs, evaluations,
    evaluation runs, and dataset previews.
    """
    serializer = get_serializer(config)

    def get_session():
        # FastAPI dependency: one SQLModel session per request.
        with Session(serializer.engine) as session:
            yield session

    app = FastAPI(title="ell Studio", version=__version__)

    # Enable CORS for all origins
    app.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )

    manager = ConnectionManager()

    @app.websocket("/ws")
    async def websocket_endpoint(websocket: WebSocket):
        await manager.connect(websocket)
        try:
            while True:
                # Drain incoming messages; the socket is currently push-only.
                await websocket.receive_text()
        except WebSocketDisconnect:
            manager.disconnect(websocket)

    @app.get("/api/latest/lmps", response_model=list[SerializedLMPWithUses])
    def get_latest_lmps(
        skip: int = Query(0, ge=0),
        limit: int = Query(100, ge=1, le=100),
        session: Session = Depends(get_session)
    ):
        lmps = serializer.get_latest_lmps(
            session,
            skip=skip, limit=limit,
        )
        return lmps

    # TODO: Create a get endpoint to efficiently get on the index with /api/lmp/<lmp_id>
    @app.get("/api/lmp/{lmp_id}")
    def get_lmp_by_id(lmp_id: str, session: Session = Depends(get_session)):
        # Fix: an unknown id used to raise IndexError (HTTP 500); return 404 instead.
        lmps = serializer.get_lmps(session, lmp_id=lmp_id)
        if not lmps:
            raise HTTPException(status_code=404, detail="LMP not found")
        return lmps[0]

    @app.get("/api/lmps", response_model=list[SerializedLMPWithUses])
    def get_lmp(
        lmp_id: Optional[str] = Query(None),
        name: Optional[str] = Query(None),
        skip: int = Query(0, ge=0),
        limit: int = Query(100, ge=1, le=100),
        session: Session = Depends(get_session)
    ):
        filters : Dict[str, Any] = {}
        if name:
            filters['name'] = name
        if lmp_id:
            filters['lmp_id'] = lmp_id
        lmps = serializer.get_lmps(session, skip=skip, limit=limit, **filters)
        if not lmps:
            raise HTTPException(status_code=404, detail="LMP not found")
        # Fix: replaced a leftover debug print(lmps[0]) with quiet lazy logging.
        logger.debug("get_lmp matched %d LMP(s)", len(lmps))
        return lmps

    @app.get("/api/invocation/{invocation_id}", response_model=InvocationPublicWithConsumes)
    def get_invocation(
        invocation_id: str,
        session: Session = Depends(get_session)
    ):
        # Fix: an unknown id used to raise IndexError (HTTP 500); return 404 instead.
        invocations = serializer.get_invocations(session, lmp_filters=dict(), filters={"id": invocation_id})
        if not invocations:
            raise HTTPException(status_code=404, detail="Invocation not found")
        return invocations[0]

    @app.get("/api/invocations", response_model=list[InvocationPublicWithConsumes])
    def get_invocations(
        id: Optional[str] = Query(None),
        hierarchical: Optional[bool] = Query(False),
        skip: int = Query(0, ge=0),
        limit: int = Query(100, ge=1, le=100),
        lmp_name: Optional[str] = Query(None),
        lmp_id: Optional[str] = Query(None),
        session: Session = Depends(get_session)
    ):
        lmp_filters = {}
        if lmp_name:
            lmp_filters["name"] = lmp_name
        if lmp_id:
            lmp_filters["lmp_id"] = lmp_id
        invocation_filters = {}
        if id:
            invocation_filters["id"] = id
        invocations = serializer.get_invocations(
            session,
            lmp_filters=lmp_filters,
            filters=invocation_filters,
            skip=skip,
            limit=limit,
            hierarchical=hierarchical
        )
        return invocations

    @app.get("/api/traces")
    def get_consumption_graph(
        session: Session = Depends(get_session)
    ):
        traces = serializer.get_traces(session)
        return traces

    @app.get("/api/blob/{blob_id}", response_class=Response)
    def get_blob(
        blob_id: str,
        session: Session = Depends(get_session)
    ):
        if serializer.blob_store is None:
            raise HTTPException(status_code=400, detail="Blob storage is not configured")
        try:
            blob_data = serializer.blob_store.retrieve_blob(blob_id)
            return Response(content=blob_data.decode('utf-8'), media_type="application/json")
        except FileNotFoundError:
            raise HTTPException(status_code=404, detail="Blob not found")
        except Exception as e:
            logger.error(f"Error retrieving blob: {str(e)}")
            raise HTTPException(status_code=500, detail="Internal server error")

    @app.get("/api/lmp-history")
    def get_lmp_history(
        days: int = Query(365, ge=1, le=3650),  # Default to 1 year, max 10 years
        session: Session = Depends(get_session)
    ):
        # Calculate the start date.
        # NOTE(review): datetime.utcnow() is naive (and deprecated since 3.12);
        # this assumes created_at is stored as naive UTC — confirm before changing.
        start_date = datetime.utcnow() - timedelta(days=days)
        # Query to get all LMP creation times within the date range
        query = (
            select(SerializedLMP.created_at)
            .where(SerializedLMP.created_at >= start_date)
            .order_by(SerializedLMP.created_at)
        )
        results = session.exec(query).all()
        # Convert results to a list of dictionaries
        history = [{"date": str(row), "count": 1} for row in results]
        return history

    async def notify_clients(entity: str, id: Optional[str] = None):
        # Broadcast an entity-change event to every connected websocket client.
        message = json.dumps({"entity": entity, "id": id})
        await manager.broadcast(message)

    # Add this method to the app object
    app.notify_clients = notify_clients

    @app.get("/api/invocations/aggregate", response_model=InvocationsAggregate)
    def get_invocations_aggregate(
        lmp_name: Optional[str] = Query(None),
        lmp_id: Optional[str] = Query(None),
        days: int = Query(30, ge=1, le=365),
        session: Session = Depends(get_session)
    ):
        lmp_filters = {}
        if lmp_name:
            lmp_filters["name"] = lmp_name
        if lmp_id:
            lmp_filters["lmp_id"] = lmp_id
        aggregate_data = serializer.get_invocations_aggregate(session, lmp_filters=lmp_filters, days=days)
        return InvocationsAggregate(**aggregate_data)

    @app.get("/api/evaluations", response_model=List[EvaluationPublic])
    def get_evaluations(
        evaluation_id: Optional[str] = Query(None),
        lmp_id: Optional[str] = Query(None),
        skip: int = Query(0, ge=0),
        limit: int = Query(100, ge=1, le=100),
        session: Session = Depends(get_session)
    ):
        filters: Dict[str, Any] = {}
        if evaluation_id:
            filters['id'] = evaluation_id
        if lmp_id:
            filters['lmp_id'] = lmp_id
        evaluations = serializer.get_evaluations(
            session,
            filters=filters,
            skip=skip,
            limit=limit
        )
        return evaluations

    @app.get("/api/latest/evaluations", response_model=List[EvaluationPublic])
    def get_latest_evaluations(
        skip: int = Query(0, ge=0),
        limit: int = Query(100, ge=1, le=100),
        session: Session = Depends(get_session)
    ):
        evaluations = serializer.get_latest_evaluations(
            session,
            skip=skip,
            limit=limit
        )
        return evaluations

    @app.get("/api/evaluation/{evaluation_id}", response_model=EvaluationPublic)
    def get_evaluation(
        evaluation_id: str,
        session: Session = Depends(get_session)
    ):
        evaluation = serializer.get_evaluations(session, filters={"id": evaluation_id})
        if not evaluation:
            raise HTTPException(status_code=404, detail="Evaluation not found")
        return evaluation[0]

    @app.get("/api/evaluation-runs/{run_id}", response_model=SpecificEvaluationRunPublic)
    def get_evaluation_run(
        run_id: str,
        session: Session = Depends(get_session)
    ):
        runs = serializer.get_evaluation_run(session, run_id)
        return runs

    @app.get("/api/evaluation-runs/{run_id}/results", response_model=List[EvaluationResultDatapointPublic])
    def get_evaluation_run_results(
        run_id: str,
        skip: int = Query(0, ge=0),
        limit: int = Query(100, ge=1, le=100),
        session: Session = Depends(get_session)
    ):
        results = serializer.get_evaluation_run_results(
            session,
            run_id,
            skip=skip,
            limit=limit,
        )
        return results

    @app.get("/api/all-evaluations", response_model=List[EvaluationPublic])
    def get_all_evaluations(
        skip: int = Query(0, ge=0),
        limit: int = Query(100, ge=1, le=100),
        session: Session = Depends(get_session)
    ):
        # Get all evaluations ordered by creation date, without deduplication
        query = (
            select(SerializedEvaluation)
            .order_by(SerializedEvaluation.created_at.desc())
            .offset(skip)
            .limit(limit)
        )
        results = session.exec(query).all()
        return list(results)

    @app.get("/api/dataset/{dataset_id}")
    def get_dataset(
        dataset_id: str,
        session: Session = Depends(get_session)
    ):
        if not serializer.blob_store:
            raise HTTPException(status_code=400, detail="Blob storage not configured")
        try:
            # Get the blob data
            blob_data = serializer.blob_store.retrieve_blob(dataset_id)
            # Check if size is under 5MB
            if len(blob_data) > 5 * 1024 * 1024:  # 5MB in bytes
                raise HTTPException(
                    status_code=413,
                    detail="Dataset too large to preview (>5MB)"
                )
            # Decode and parse JSON
            dataset_json = json.loads(blob_data.decode('utf-8'))
            return {
                "size": len(blob_data),
                "data": dataset_json
            }
        except FileNotFoundError:
            raise HTTPException(status_code=404, detail="Dataset not found")
        except json.JSONDecodeError:
            raise HTTPException(status_code=400, detail="Invalid JSON data")
        except HTTPException:
            # Fix: the 413 raised above was previously swallowed by the generic
            # handler below and converted into a 500; re-raise it unchanged.
            raise
        except Exception as e:
            logger.error(f"Error retrieving dataset: {str(e)}")
            raise HTTPException(status_code=500, detail="Internal server error")

    return app
| 2,877
|
c80aa95802c3588f7805415be45158e03f69fa6a5cc10eff9ff350d5bd67d138
| 31.713467
| 165
| 0.601997
| 3.96837
| false
| false
| false
| false
|
crestalnetwork/intentkit
|
skills/goat/__init__.py
| 8,249
| 0
|
MIT License
|
"""Goat skills."""
import importlib
import secrets
import time
from dataclasses import is_dataclass
from typing import (
Any,
Dict,
Literal,
Type,
Union,
get_args,
get_origin,
get_type_hints,
)
import httpx
from eth_account import Account
from eth_utils import encode_hex
from goat import WalletClientBase
from goat.classes.plugin_base import PluginBase
from goat_adapters.langchain import get_on_chain_tools
from goat_wallets.crossmint import crossmint
from abstracts.skill import SkillStoreABC
from skills.goat.base import GoatBaseTool
from utils.chain import ChainProvider, Network
from .base import CrossmintChainProviderAdapter
def create_smart_wallet(base_url: str, api_key: str, signer_address: str) -> Dict:
    """Provision an EVM smart wallet through the Crossmint API.

    The wallet is created with ``signer_address`` registered as its admin
    keypair signer. Returns the decoded JSON response on success.

    Raises:
        Exception: On transport errors, HTTP error statuses, or an error
            payload in the API response.
    """
    endpoint = f"{base_url}/api/v1-alpha2/wallets"
    request_headers = {
        "Content-Type": "application/json",
        "X-API-KEY": api_key,
    }
    payload = {
        "type": "evm-smart-wallet",
        "config": {
            "adminSigner": {
                "type": "evm-keypair",
                "address": signer_address,
            },
        },
    }
    with httpx.Client() as http_client:
        try:
            response = http_client.post(endpoint, headers=request_headers, json=payload)
            response.raise_for_status()
            body = response.json()
            # An error payload inside a 2xx response still counts as failure;
            # the wrap below rewrites it as an "error from Crossmint API" message.
            if "error" in body:
                raise Exception(f"Failed to create wallet: {body}")
            return body
        except httpx.RequestError as req_err:
            raise Exception(f"request error from Crossmint API: {req_err}") from req_err
        except httpx.HTTPStatusError as http_err:
            raise Exception(f"http error from Crossmint API: {http_err}") from http_err
        except Exception as e:
            raise Exception(f"error from Crossmint API: {e}") from e
def create_smart_wallets_if_not_exist(
    base_url: str, api_key: str, wallet_data: dict | None
):
    """Ensure EVM smart-wallet data exists, creating the wallet when needed.

    When no private key is stored yet, a fresh 256-bit key is generated, its
    account address is registered as the admin signer of a newly created
    Crossmint smart wallet, and both are written back into the wallet data.

    Returns:
        dict: ``{"evm": {...}}`` containing at least ``private_key`` and ``address``.

    Raises:
        Exception: If an address exists without a private key, or the final
            address is empty.
        RuntimeError: If the Crossmint API does not return a wallet address.
    """
    evm = wallet_data.get("evm") if wallet_data else None
    # Missing wallet data entirely, or present but without a private key.
    if not evm or not evm.get("private_key"):
        evm = evm or {}
        if evm.get("address"):
            raise Exception(
                "smart wallet address is present but private key is not provided"
            )
        # Generate a random 256-bit (32-byte) private key and store it hex-encoded.
        evm["private_key"] = encode_hex(secrets.token_bytes(32))
        signer_address = Account.from_key(evm["private_key"]).address
        created = create_smart_wallet(base_url, api_key, signer_address)
        if not created or not created.get("address"):
            raise RuntimeError("Failed to create smart wallet")
        evm["address"] = created["address"]
    # Final sanity check: an address must exist by now, created or preexisting.
    if not evm.get("address"):
        raise Exception("smart wallet address is empty")
    return {"evm": evm}
def init_smart_wallets(
    api_key: str,
    chain_provider: ChainProvider,
    networks: list[Network],
    wallet_data: dict | None,
):
    """Instantiate one Crossmint smart-wallet client per configured chain.

    ``wallet_data`` must carry ``address`` and ``private_key`` for the EVM
    signer (see ``create_smart_wallets_if_not_exist``).

    Returns:
        list: Smart-wallet client objects, one per chain config.
    """
    adapter = CrossmintChainProviderAdapter(chain_provider, networks)
    # Create Crossmint client
    crossmint_client = crossmint(api_key)
    smart_wallets = []
    for cfg in adapter.chain_configs:
        smart_wallets.append(
            crossmint_client["smartwallet"](
                {
                    "address": wallet_data["address"],
                    "signer": {
                        "secretKey": wallet_data["private_key"],
                    },
                    "provider": cfg.chain_config.rpc_url,
                    "ensProvider": cfg.chain_config.ens_url,
                    "chain": cfg.network_alias,
                }
            )
        )
        # Throttle wallet creation to stay under the API rate limit.
        time.sleep(1)
    return smart_wallets
def resolve_optional_type(field_type: Type) -> Type:
    """Unwrap ``Optional[X]`` to ``X``; return any other type unchanged.

    Only a two-member union containing ``None`` is unwrapped; wider unions
    (e.g. ``Union[int, str, None]``) are returned as-is.
    """
    if get_origin(field_type) is Union:
        members = get_args(field_type)
        if len(members) == 2 and type(None) in members:
            for member in members:
                if member is not type(None):
                    return member
    return field_type
def resolve_type(val: str, mod) -> Any:
    """Resolve *val* to an object: attribute of *mod*, else a dotted import path.

    Raises:
        ValueError: When neither lookup strategy succeeds.
    """
    if hasattr(mod, val):
        return getattr(mod, val)
    try:
        mod_path, cls_name = val.rsplit(".", 1)
        return getattr(importlib.import_module(mod_path), cls_name)
    except (ValueError, ImportError, AttributeError) as e:
        raise ValueError(f"type '{val}' could not be resolved") from e
def resolve_value(val: Any, f_type: Type, mod) -> Any:
    """Coerce a raw config value into ``f_type``.

    Handles primitives, ``list[...]`` (element-wise, recursively), and
    ``Literal[...]`` choices; a remaining string value falls back to
    attribute/import resolution via ``resolve_type``.

    Raises:
        ValueError: On a type mismatch, unsupported literal, or unsupported type.
    """
    f_type = resolve_optional_type(f_type)
    if f_type in (str, int, float, bool):
        return f_type(val)
    origin = getattr(f_type, "__origin__", None)
    if origin is list:
        if not isinstance(val, list):
            raise ValueError(f"expected list object but got {type(val).__name__}")
        elem_type = f_type.__args__[0]
        return [resolve_value(entry, elem_type, mod) for entry in val]
    if origin is Literal:
        if val not in f_type.__args__:
            raise ValueError(f"not supported literal value {type(val)}")
        return val
    if isinstance(val, str):
        return resolve_type(val, mod)
    raise ValueError(f"unsupported type: {f_type}")
def get_goat_skill(
    wallet: WalletClientBase,
    plugin_configs: Dict[str, Any],
    skill_store: SkillStoreABC,
    agent_store: SkillStoreABC,
    agent_id: str,
) -> list[GoatBaseTool]:
    """Load the configured GOAT plugins and expose them as LangChain tools.

    For each entry in ``plugin_configs``, dynamically imports
    ``goat_plugins.<name>``, builds that plugin's options dataclass from the
    raw config dict (type-coercing each field), instantiates the plugin, and
    finally converts all plugins into on-chain tools bound to ``wallet``.
    Tool names are prefixed with ``goat_`` and dots replaced by underscores.

    Note: ``skill_store``, ``agent_store`` and ``agent_id`` are accepted for
    interface parity but are not used in this function body.
    """
    if not wallet:
        raise ValueError("GOAT crossmint wallet is empty")
    plugins = []
    for p_name, p_options in plugin_configs.items():
        try:
            # Plugin module and its initializer share the plugin name.
            mod = importlib.import_module(f"goat_plugins.{p_name}")
            initializer = getattr(mod, p_name)
            # The initializer's "options" annotation tells us which dataclass
            # to populate from the raw config.
            hints = get_type_hints(initializer)
            opt_type = hints.get("options")
            if not opt_type:
                raise ValueError(
                    f"GOAT plugin {p_name} does not have associated options"
                )
            opt_type = resolve_optional_type(opt_type)
            if not is_dataclass(opt_type):
                raise ValueError(f"GOAT plugin {p_name} options is malformed")
            fields = get_type_hints(opt_type)
            resolved_vals = {}
            raw_args = p_options
            for f_name, f_type in fields.items():
                if f_name not in raw_args:
                    # Optional fields may be omitted; required ones may not.
                    if f_type.__name__.upper() == "OPTIONAL":
                        continue
                    raise ValueError(
                        f"GOAT plugin {p_name} should have {f_name} option"
                    )
                val = raw_args[f_name]
                try:
                    resolved_val = resolve_value(val, f_type, mod)
                    resolved_vals[f_name] = resolved_val
                except (ValueError, TypeError) as e:
                    raise ValueError(
                        f"GOAT field {f_name} has invalid value, plugin name {p_name} : {str(e)}"
                    )
            plugin_options = opt_type(**resolved_vals)
            plugin: PluginBase = initializer(options=plugin_options)
            plugins.append(plugin)
        except AttributeError:
            raise Exception(f"GOAT initializer function not found: {p_name}")
        except ImportError:
            raise Exception(f"GOAT plugin load failed: {p_name}")
        except Exception as e:
            # NOTE: also re-wraps the ValueErrors raised above into Exception.
            raise Exception(f"GOAT plugin initialization failed: {p_name}: {str(e)}")
    tools = []
    try:
        p_tools = get_on_chain_tools(
            wallet=wallet,
            plugins=plugins,
        )
        # Namespace the tool names and flag descriptions as GOAT-provided.
        for t in p_tools:
            t.name = f"goat_{t.name.replace('.', '_')}"
            t.description = f"This is plugin of GOAT tool, {t.description}"
        tools.extend(p_tools)
    except Exception as e:
        raise Exception(f"GOAT tools initiation failed: {str(e)}")
    return tools
| 2,143
|
c73a02d38799d79682f61075ee75de3ddf3718ab59ab69adb49a3d0ec6aedd4b
| 31.222656
| 97
| 0.583586
| 3.849277
| false
| false
| false
| false
|
openai/openai-agents-python
|
examples/research_bot/agents/writer_agent.py
| 1,065
| 0
|
MIT License
|
# Agent used to synthesize a final report from the individual summaries.
from pydantic import BaseModel
from agents import Agent
PROMPT = (
"You are a senior researcher tasked with writing a cohesive report for a research query. "
"You will be provided with the original query, and some initial research done by a research "
"assistant.\n"
"You should first come up with an outline for the report that describes the structure and "
"flow of the report. Then, generate the report and return that as your final output.\n"
"The final output should be in markdown format, and it should be lengthy and detailed. Aim "
"for 5-10 pages of content, at least 1000 words."
)
class ReportData(BaseModel):
    # Structured output of the writer agent (no class docstring on purpose:
    # pydantic would surface it as the schema description sent to the model).
    short_summary: str
    """A short 2-3 sentence summary of the findings."""
    markdown_report: str
    """The final report"""
    follow_up_questions: list[str]
    """Suggested topics to research further"""
# Module-level singleton: the report-writing agent, producing ReportData.
writer_agent = Agent(
    name="WriterAgent",
    instructions=PROMPT,
    model="o3-mini",
    output_type=ReportData,
)
| 269
|
02ea6872ed108536ba10f764f644ff823971bde885b5675cf6be670c52cfa12b
| 31.272727
| 97
| 0.712676
| 3.959108
| false
| false
| false
| false
|
fudan-generative-vision/hallo2
|
basicsr/utils/dist_util.py
| 2,608
| 0
|
MIT License
|
# Modified from https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/dist_utils.py # noqa: E501
import functools
import os
import subprocess
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
def init_dist(launcher, backend='nccl', **kwargs):
    """Initialize the distributed environment for the given launcher.

    Ensures the multiprocessing start method is 'spawn' if none was set, then
    delegates to the launcher-specific initializer.

    Args:
        launcher (str): Either 'pytorch' or 'slurm'.
        backend (str): torch.distributed backend name. Defaults to 'nccl'.
        **kwargs: Forwarded to the launcher-specific initializer.

    Raises:
        ValueError: If ``launcher`` is not recognized.
    """
    if mp.get_start_method(allow_none=True) is None:
        mp.set_start_method('spawn')
    if launcher not in ('pytorch', 'slurm'):
        raise ValueError(f'Invalid launcher type: {launcher}')
    if launcher == 'pytorch':
        _init_dist_pytorch(backend, **kwargs)
    else:
        _init_dist_slurm(backend, **kwargs)
def _init_dist_pytorch(backend, **kwargs):
    """Initialize distributed training launched via torch.distributed env vars.

    Reads ``RANK`` from the environment and pins this process to GPU
    ``rank % num_gpus`` before creating the process group.
    """
    rank = int(os.environ['RANK'])
    num_gpus = torch.cuda.device_count()
    torch.cuda.set_device(rank % num_gpus)
    dist.init_process_group(backend=backend, **kwargs)
def _init_dist_slurm(backend, port=None):
    """Initialize slurm distributed training environment.
    If argument ``port`` is not specified, then the master port will be system
    environment variable ``MASTER_PORT``. If ``MASTER_PORT`` is not in system
    environment variable, then a default port ``29500`` will be used.
    Args:
        backend (str): Backend of torch.distributed.
        port (int, optional): Master port. Defaults to None.
    """
    proc_id = int(os.environ['SLURM_PROCID'])
    ntasks = int(os.environ['SLURM_NTASKS'])
    node_list = os.environ['SLURM_NODELIST']
    num_gpus = torch.cuda.device_count()
    # Spread processes on a node across its visible GPUs.
    torch.cuda.set_device(proc_id % num_gpus)
    # First hostname in the expanded SLURM node list acts as the master.
    addr = subprocess.getoutput(f'scontrol show hostname {node_list} | head -n1')
    # specify master port
    if port is not None:
        os.environ['MASTER_PORT'] = str(port)
    elif 'MASTER_PORT' in os.environ:
        pass  # use MASTER_PORT in the environment variable
    else:
        # 29500 is torch.distributed default port
        os.environ['MASTER_PORT'] = '29500'
    # Export the rendezvous variables torch.distributed reads at init.
    os.environ['MASTER_ADDR'] = addr
    os.environ['WORLD_SIZE'] = str(ntasks)
    os.environ['LOCAL_RANK'] = str(proc_id % num_gpus)
    os.environ['RANK'] = str(proc_id)
    dist.init_process_group(backend=backend)
def get_dist_info():
    """Return ``(rank, world_size)``.

    Falls back to ``(0, 1)`` when torch.distributed is unavailable or the
    process group has not been initialized (i.e. single-process execution).
    """
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank(), dist.get_world_size()
    return 0, 1
def master_only(func):
    """Decorator: run ``func`` only on rank 0; other ranks return None."""
    @functools.wraps(func)
    def _rank_zero_only(*args, **kwargs):
        current_rank, _ = get_dist_info()
        if current_rank != 0:
            return None
        return func(*args, **kwargs)
    return _rank_zero_only
| 745
|
7f2743d74bcc3e5cee369cfc7994f4a483b3339d778f8ad183f174fbf337f982
| 30.804878
| 102
| 0.646856
| 3.500671
| false
| false
| false
| false
|
BrainBlend-AI/atomic-agents
|
atomic-agents/tests/agents/test_base_agent.py
| 9,746
| 0
|
MIT License
|
import pytest
from unittest.mock import Mock, call, patch
from pydantic import BaseModel
import instructor
from atomic_agents.agents.base_agent import (
BaseIOSchema,
BaseAgent,
BaseAgentConfig,
BaseAgentInputSchema,
BaseAgentOutputSchema,
SystemPromptGenerator,
AgentMemory,
SystemPromptContextProviderBase,
)
from instructor.dsl.partial import PartialBase
@pytest.fixture
def mock_instructor():
    """Instructor client mock with stubbed create / create_partial endpoints."""
    mock = Mock(spec=instructor.Instructor)
    mock.chat.completions.create = Mock()
    mock.chat.completions.create_partial = Mock()
    return mock
@pytest.fixture
def mock_memory():
    """AgentMemory mock: empty history, stubbed mutators, copyable."""
    mock = Mock(spec=AgentMemory)
    mock.get_history.return_value = []
    mock.add_message = Mock()
    mock.copy = Mock(return_value=Mock(spec=AgentMemory))
    mock.initialize_turn = Mock()
    return mock
@pytest.fixture
def mock_system_prompt_generator():
    """SystemPromptGenerator mock with a fixed prompt and no context providers."""
    mock = Mock(spec=SystemPromptGenerator)
    mock.generate_prompt.return_value = "Mocked system prompt"
    mock.context_providers = {}
    return mock
@pytest.fixture
def agent_config(mock_instructor, mock_memory, mock_system_prompt_generator):
    """BaseAgentConfig wired to the mock client, memory, and prompt generator."""
    return BaseAgentConfig(
        client=mock_instructor,
        model="gpt-4o-mini",
        memory=mock_memory,
        system_prompt_generator=mock_system_prompt_generator,
    )
@pytest.fixture
def agent(agent_config):
    """BaseAgent built from the mocked config."""
    return BaseAgent(agent_config)
def test_initialization(agent, mock_instructor, mock_memory, mock_system_prompt_generator):
    """Constructor wires collaborators, default schemas, and temperature=0."""
    assert agent.client == mock_instructor
    assert agent.model == "gpt-4o-mini"
    assert agent.memory == mock_memory
    assert agent.system_prompt_generator == mock_system_prompt_generator
    assert agent.input_schema == BaseAgentInputSchema
    assert agent.output_schema == BaseAgentOutputSchema
    assert agent.model_api_parameters["temperature"] == 0
    assert "max_tokens" not in agent.model_api_parameters
# model_api_parameters should have priority over the deprecated temperature parameter if both are provided.
def test_initialization_temperature_priority(agent_config=None):
    """model_api_parameters["temperature"] overrides the legacy temperature arg."""
def test_reset_memory(agent, mock_memory):
    """reset_memory replaces agent.memory with a copy of the initial memory."""
    initial_memory = agent.initial_memory
    agent.reset_memory()
    assert agent.memory != initial_memory
    mock_memory.copy.assert_called_once()
def test_get_response(agent, mock_instructor, mock_memory, mock_system_prompt_generator):
    """get_response forwards system prompt + history to the instructor client."""
    mock_memory.get_history.return_value = [{"role": "user", "content": "Hello"}]
    mock_system_prompt_generator.generate_prompt.return_value = "System prompt"
    mock_response = Mock(spec=BaseAgentOutputSchema)
    mock_instructor.chat.completions.create.return_value = mock_response
    response = agent.get_response()
    assert response == mock_response
    mock_instructor.chat.completions.create.assert_called_once_with(
        model="gpt-4o-mini",
        messages=[{"role": "system", "content": "System prompt"}, {"role": "user", "content": "Hello"}],
        response_model=BaseAgentOutputSchema,
        temperature=0,
    )
def test_get_context_provider(agent, mock_system_prompt_generator):
    """Known providers are returned; unknown names raise KeyError."""
    mock_provider = Mock(spec=SystemPromptContextProviderBase)
    mock_system_prompt_generator.context_providers = {"test_provider": mock_provider}
    result = agent.get_context_provider("test_provider")
    assert result == mock_provider
    with pytest.raises(KeyError):
        agent.get_context_provider("non_existent_provider")
def test_register_context_provider(agent, mock_system_prompt_generator):
    """register_context_provider stores the provider under the given name."""
    mock_provider = Mock(spec=SystemPromptContextProviderBase)
    agent.register_context_provider("new_provider", mock_provider)
    assert "new_provider" in mock_system_prompt_generator.context_providers
    assert mock_system_prompt_generator.context_providers["new_provider"] == mock_provider
def test_unregister_context_provider(agent, mock_system_prompt_generator):
    """unregister removes a provider; removing an unknown one raises KeyError."""
    mock_provider = Mock(spec=SystemPromptContextProviderBase)
    mock_system_prompt_generator.context_providers = {"test_provider": mock_provider}
    agent.unregister_context_provider("test_provider")
    assert "test_provider" not in mock_system_prompt_generator.context_providers
    with pytest.raises(KeyError):
        agent.unregister_context_provider("non_existent_provider")
def test_custom_input_output_schemas(mock_instructor):
    """Custom input/output schemas supplied via config are adopted by the agent."""
    class CustomInputSchema(BaseModel):
        custom_field: str
    class CustomOutputSchema(BaseModel):
        result: str
    custom_config = BaseAgentConfig(
        client=mock_instructor,
        model="gpt-4o-mini",
        input_schema=CustomInputSchema,
        output_schema=CustomOutputSchema,
    )
    custom_agent = BaseAgent(custom_config)
    assert custom_agent.input_schema == CustomInputSchema
    assert custom_agent.output_schema == CustomOutputSchema
def test_base_agent_io_str_and_rich():
    """str() yields compact JSON; __rich__() returns a renderable object."""
    class TestIO(BaseIOSchema):
        """TestIO docstring"""
        field: str
    test_io = TestIO(field="test")
    assert str(test_io) == '{"field":"test"}'
    assert test_io.__rich__() is not None  # Just check if it returns something, as we can't easily compare Rich objects
def test_run(agent, mock_memory):
    """run() records user input, returns the response, and logs both messages."""
    mock_input = BaseAgentInputSchema(chat_message="Test input")
    mock_output = BaseAgentOutputSchema(chat_message="Test output")
    agent.get_response = Mock(return_value=mock_output)
    result = agent.run(mock_input)
    assert result == mock_output
    assert agent.current_user_input == mock_input
    mock_memory.add_message.assert_has_calls([call("user", mock_input), call("assistant", mock_output)])
def test_base_io_schema_empty_docstring():
    """Declaring a BaseIOSchema subclass with an empty docstring is rejected."""
    with pytest.raises(ValueError, match="must have a non-empty docstring"):
        class EmptyDocStringSchema(BaseIOSchema):
            """"""
            pass
def test_base_io_schema_model_json_schema_no_description():
    """The class docstring is injected as the JSON-schema description."""
    class TestSchema(BaseIOSchema):
        """Test schema docstring."""
        field: str
    # Mock the superclass model_json_schema to return a schema without a description
    with patch("pydantic.BaseModel.model_json_schema", return_value={}):
        schema = TestSchema.model_json_schema()
        assert "description" in schema
        assert schema["description"] == "Test schema docstring."
@pytest.mark.asyncio
async def test_run_async(agent, mock_memory):
    """run_async yields the output and records user/assistant messages."""
    mock_input = BaseAgentInputSchema(chat_message="Test input")
    mock_output = BaseAgentOutputSchema(chat_message="Test output")
    # Create a mock async generator that properly sets current_user_input and adds messages
    async def mock_run_async(*args, **kwargs):
        agent.memory.initialize_turn()
        agent.current_user_input = mock_input
        agent.memory.add_message("user", mock_input)
        yield mock_output
        agent.memory.add_message("assistant", mock_output)
    # Replace run_async with our mock
    agent.run_async = mock_run_async
    # Collect all responses from the generator
    responses = []
    async for response in agent.run_async(mock_input):
        responses.append(response)
    assert responses == [mock_output]
    assert agent.current_user_input == mock_input
    mock_memory.add_message.assert_has_calls([call("user", mock_input), call("assistant", mock_output)])
@pytest.mark.asyncio
async def test_stream_response_async(agent, mock_memory, mock_instructor, mock_system_prompt_generator):
    """Streaming yields each partial and stores only the final one in memory."""
    mock_input = BaseAgentInputSchema(chat_message="Test input")
    mock_memory.get_history.return_value = [{"role": "user", "content": "Hello"}]
    mock_system_prompt_generator.generate_prompt.return_value = "System prompt"
    partial_responses = [
        BaseAgentOutputSchema(chat_message="Partial response 1"),
        BaseAgentOutputSchema(chat_message="Partial response 2"),
        BaseAgentOutputSchema(chat_message="Final response"),
    ]
    async def mock_create_partial(*args, **kwargs):
        for response in partial_responses:
            yield response
    mock_instructor.chat.completions.create_partial = mock_create_partial
    responses = []
    async for partial_response in agent.stream_response_async(mock_input):
        responses.append(partial_response)
    assert responses == partial_responses
    mock_memory.add_message.assert_called_with("assistant", partial_responses[-1])
def test_model_from_chunks_patched():
    """model_from_chunks re-parses the growing JSON buffer after every chunk."""
    class TestPartialModel(PartialBase):
        @classmethod
        def get_partial_model(cls):
            class PartialModel(BaseModel):
                field: str
            return PartialModel
    chunks = ['{"field": "hel', 'lo"}']
    expected_values = ["hel", "hello"]
    generator = TestPartialModel.model_from_chunks(chunks)
    results = [result.field for result in generator]
    assert results == expected_values
@pytest.mark.asyncio
async def test_model_from_chunks_async_patched():
    """Async variant of model_from_chunks behaves like the sync one."""
    class TestPartialModel(PartialBase):
        @classmethod
        def get_partial_model(cls):
            class PartialModel(BaseModel):
                field: str
            return PartialModel
    async def async_gen():
        yield '{"field": "hel'
        yield 'lo"}'
    expected_values = ["hel", "hello"]
    generator = TestPartialModel.model_from_chunks_async(async_gen())
    results = []
    async for result in generator:
        results.append(result.field)
    assert results == expected_values
| 2,464
|
b751f8bc0919c136acdb356536735475eee52ba7b1a1920a0894cfdeacb9b076
| 31.486667
| 120
| 0.702339
| 3.955357
| false
| true
| false
| false
|
grapeot/devin.cursorrules
|
tests/test_search_engine.py
| 4,162
| 0
|
MIT License
|
import unittest
from unittest.mock import patch, MagicMock
import sys
from io import StringIO
from tools.search_engine import search
class TestSearchEngine(unittest.TestCase):
    """Unit tests for tools.search_engine.search with a mocked DDGS backend.

    stdout/stderr are captured per-test so that the function's printed
    results and DEBUG/ERROR diagnostics can be asserted on.
    """
    def setUp(self):
        # Capture stdout and stderr for testing
        self.stdout = StringIO()
        self.stderr = StringIO()
        self.old_stdout = sys.stdout
        self.old_stderr = sys.stderr
        sys.stdout = self.stdout
        sys.stderr = self.stderr
    def tearDown(self):
        # Restore stdout and stderr
        sys.stdout = self.old_stdout
        sys.stderr = self.old_stderr
    @patch('tools.search_engine.DDGS')
    def test_successful_search(self, mock_ddgs):
        """Two mocked hits are printed with URL/Title/Snippet sections."""
        # Mock search results
        mock_results = [
            {
                'href': 'http://example.com',
                'title': 'Example Title',
                'body': 'Example Body'
            },
            {
                'href': 'http://example2.com',
                'title': 'Example Title 2',
                'body': 'Example Body 2'
            }
        ]
        # Setup mock
        mock_ddgs_instance = MagicMock()
        mock_ddgs_instance.__enter__.return_value.text.return_value = mock_results
        mock_ddgs.return_value = mock_ddgs_instance
        # Run search
        search("test query", max_results=2)
        # Check debug output
        expected_debug = "DEBUG: Searching for query: test query (attempt 1/3)"
        self.assertIn(expected_debug, self.stderr.getvalue())
        self.assertIn("DEBUG: Found 2 results", self.stderr.getvalue())
        # Check search results output
        output = self.stdout.getvalue()
        self.assertIn("=== Result 1 ===", output)
        self.assertIn("URL: http://example.com", output)
        self.assertIn("Title: Example Title", output)
        self.assertIn("Snippet: Example Body", output)
        self.assertIn("=== Result 2 ===", output)
        self.assertIn("URL: http://example2.com", output)
        self.assertIn("Title: Example Title 2", output)
        self.assertIn("Snippet: Example Body 2", output)
        # Verify mock was called correctly
        mock_ddgs_instance.__enter__.return_value.text.assert_called_once_with(
            "test query",
            max_results=2
        )
    @patch('tools.search_engine.DDGS')
    def test_no_results(self, mock_ddgs):
        """An empty result set logs a DEBUG line and prints nothing."""
        # Mock empty results
        mock_ddgs_instance = MagicMock()
        mock_ddgs_instance.__enter__.return_value.text.return_value = []
        mock_ddgs.return_value = mock_ddgs_instance
        # Run search
        search("test query")
        # Check debug output
        self.assertIn("DEBUG: No results found", self.stderr.getvalue())
        # Check that no results were printed
        self.assertEqual("", self.stdout.getvalue().strip())
    @patch('tools.search_engine.DDGS')
    def test_search_error(self, mock_ddgs):
        """A backend exception exits with code 1 and logs an ERROR line."""
        # Mock search error
        mock_ddgs_instance = MagicMock()
        mock_ddgs_instance.__enter__.return_value.text.side_effect = Exception("Test error")
        mock_ddgs.return_value = mock_ddgs_instance
        # Run search and check for error
        with self.assertRaises(SystemExit) as cm:
            search("test query")
        self.assertEqual(cm.exception.code, 1)
        self.assertIn("ERROR: Search failed: Test error", self.stderr.getvalue())
    def test_result_field_fallbacks(self):
        """dict.get with an 'N/A' default covers present and missing fields."""
        # Test that the fields work correctly with N/A fallback
        result = {
            'href': 'http://example.com',
            'title': 'Example Title',
            'body': 'Example Body'
        }
        # Test fields present
        self.assertEqual(result.get('href', 'N/A'), 'http://example.com')
        self.assertEqual(result.get('title', 'N/A'), 'Example Title')
        self.assertEqual(result.get('body', 'N/A'), 'Example Body')
        # Test missing fields
        result = {}
        self.assertEqual(result.get('href', 'N/A'), 'N/A')
        self.assertEqual(result.get('title', 'N/A'), 'N/A')
        self.assertEqual(result.get('body', 'N/A'), 'N/A')
if __name__ == '__main__':
unittest.main()
| 1,023
|
62e2ec5cd530a5404f4e35c6f16ecd5c3cbcbf902e13811be3eded02dd56f161
| 34.271186
| 92
| 0.583854
| 4.068426
| false
| true
| false
| false
|
cyclotruc/gitingest
|
tests/query_parser/test_git_host_agnostic.py
| 2,896
| 0
|
MIT License
|
"""
Tests to verify that the query parser is Git host agnostic.
These tests confirm that `parse_query` correctly identifies user/repo pairs and canonical URLs for GitHub, GitLab,
Bitbucket, Gitea, and Codeberg, even if the host is omitted.
"""
from typing import List
import pytest
from gitingest.query_parsing import parse_query
@pytest.mark.parametrize(
    "urls, expected_user, expected_repo, expected_url",
    [
        # Each case: URL variants (full URL, host without scheme, bare
        # user/repo slug) that must all resolve to the same canonical repo.
        (
            [
                "https://github.com/tiangolo/fastapi",
                "github.com/tiangolo/fastapi",
                "tiangolo/fastapi",
            ],
            "tiangolo",
            "fastapi",
            "https://github.com/tiangolo/fastapi",
        ),
        (
            [
                "https://gitlab.com/gitlab-org/gitlab-runner",
                "gitlab.com/gitlab-org/gitlab-runner",
                "gitlab-org/gitlab-runner",
            ],
            "gitlab-org",
            "gitlab-runner",
            "https://gitlab.com/gitlab-org/gitlab-runner",
        ),
        (
            [
                "https://bitbucket.org/na-dna/llm-knowledge-share",
                "bitbucket.org/na-dna/llm-knowledge-share",
                "na-dna/llm-knowledge-share",
            ],
            "na-dna",
            "llm-knowledge-share",
            "https://bitbucket.org/na-dna/llm-knowledge-share",
        ),
        (
            [
                "https://gitea.com/xorm/xorm",
                "gitea.com/xorm/xorm",
                "xorm/xorm",
            ],
            "xorm",
            "xorm",
            "https://gitea.com/xorm/xorm",
        ),
        (
            [
                "https://codeberg.org/forgejo/forgejo",
                "codeberg.org/forgejo/forgejo",
                "forgejo/forgejo",
            ],
            "forgejo",
            "forgejo",
            "https://codeberg.org/forgejo/forgejo",
        ),
    ],
)
@pytest.mark.asyncio
async def test_parse_query_without_host(
    urls: List[str],
    expected_user: str,
    expected_repo: str,
    expected_url: str,
) -> None:
    """
    Test `parse_query` for Git host agnosticism.
    Given multiple URL variations for the same user/repo on different Git hosts (with or without host names):
    When `parse_query` is called with each variation,
    Then the parser should correctly identify the user, repo, canonical URL, and other default fields.
    """
    for url in urls:
        query = await parse_query(url, max_file_size=50, from_web=True)
        # Core identification fields must match across every variant.
        assert query.user_name == expected_user
        assert query.repo_name == expected_repo
        assert query.url == expected_url
        assert query.slug == f"{expected_user}-{expected_repo}"
        assert query.id is not None
        # Defaults: a bare repo URL carries no subpath, branch, commit, or type.
        assert query.subpath == "/"
        assert query.branch is None
        assert query.commit is None
        assert query.type is None
| 747
|
bab0b335b3cbda058e381f2a2fd480a441f0984976ff18483e760c7c2db9a3f3
| 29.484211
| 114
| 0.538674
| 3.876841
| false
| true
| false
| false
|
meta-llama/llama-stack
|
llama_stack/cli/stack/list_providers.py
| 2,434
| 0
|
MIT License
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import argparse
from llama_stack.cli.subcommand import Subcommand
class StackListProviders(Subcommand):
    """`llama stack list-providers` subcommand: prints the provider registry as a table."""

    def __init__(self, subparsers: argparse._SubParsersAction):
        super().__init__()
        self.parser = subparsers.add_parser(
            "list-providers",
            prog="llama stack list-providers",
            description="Show available Llama Stack Providers for an API",
            formatter_class=argparse.RawTextHelpFormatter,
        )
        self._add_arguments()
        self.parser.set_defaults(func=self._run_providers_list_cmd)

    @property
    def providable_apis(self):
        """API name strings that can have providers (imported lazily)."""
        from llama_stack.distribution.distribution import providable_apis

        return [api.value for api in providable_apis()]

    def _add_arguments(self):
        # Optional positional argument; omitting it lists providers for every API.
        self.parser.add_argument(
            "api",
            type=str,
            choices=self.providable_apis,
            nargs="?",
            help="API to list providers for. List all if not specified.",
        )

    def _run_providers_list_cmd(self, args: argparse.Namespace) -> None:
        """Collect (api, providers) pairs from the registry and print them."""
        from llama_stack.cli.table import print_table
        from llama_stack.distribution.distribution import Api, get_provider_registry

        all_providers = get_provider_registry()
        if args.api:
            # Single API requested: look it up directly.
            providers = [(args.api, all_providers[Api(args.api)])]
        else:
            providers = [(k.value, prov) for k, prov in all_providers.items()]
        # Keep only providable APIs; the api label is dropped, leaving spec dicts.
        providers = [p for api, p in providers if api in self.providable_apis]
        # eventually, this should query a registry at llama.meta.com/llamastack/distributions
        headers = [
            "API Type",
            "Provider Type",
            "PIP Package Dependencies",
        ]
        rows = []
        # Each provider entry maps provider_type -> spec; flatten to one list.
        specs = [spec for p in providers for spec in p.values()]
        for spec in specs:
            if spec.is_sample:
                # Sample providers are omitted from the listing.
                continue
            rows.append(
                [
                    spec.api.value,
                    spec.provider_type,
                    ",".join(spec.pip_packages),
                ]
            )
        print_table(
            rows,
            headers,
            separate_rows=True,
            sort_by=(0, 1),
        )
| 577
|
83603f38080fbc122dc24ddf25ab7e72b490e3164cbeff09d79ae3e89e261d41
| 31.026316
| 93
| 0.578472
| 4.218371
| false
| false
| false
| false
|
HKUDS/AutoAgent
|
process_tool_docs.py
| 784
| 0
|
MIT License
|
from pandas import read_csv
import json
from rich import print

# Interactive helper: list the RapidAPI-hosted tools referenced by tool_docs.csv
# and substitute the user's API key for the YOUR_RAPID_API_KEY placeholder.
df = read_csv("tool_docs.csv")
rapidapi_tools = df[df['Platform'] == 'RapidAPI']['Tool_Name'].unique()
print("[bold blue]Current RapidAPI tools:[/bold blue]")
print(json.dumps(rapidapi_tools.tolist(), indent=4))
print("[bold red][IMPORTANT][/bold red] [bold yellow]If you want to use these tools, you should go to RapidAPI and subscribe to them. More convenient tool platforms such as Composio are under development.[/bold yellow]")
your_api_key = input("Please input your RapidAPI API key:")
# Only string (object-dtype) columns can contain the placeholder.
for column in df.columns:
    if df[column].dtype == 'object':
        df[column] = df[column].str.replace('YOUR_RAPID_API_KEY', your_api_key)
# Overwrite the CSV in place with the key substituted.
df.to_csv('tool_docs.csv', index=False)
print("[bold green]Done![/bold green]")
| 242
|
180d31ae6d2bcff41f12fe3907bdf6b79c6698c0cff3ccc2b6ca465e4a52cb34
| 38.25
| 220
| 0.716837
| 3.239669
| false
| false
| false
| false
|
autoscrape-labs/pydoll
|
pydoll/exceptions.py
| 2,008
| 0
|
MIT License
|
class _PydollException(Exception):
    """Private base for pydoll exceptions.

    Each concrete exception only overrides the class-level ``message``
    attribute; ``__str__`` renders it. This removes the fourteen identical
    ``__str__`` definitions the module previously repeated.
    """

    # Default human-readable description; subclasses override it.
    message = 'An error occurred'

    def __str__(self):
        return self.message


class ConnectionFailed(_PydollException):
    """Raised when the initial browser connection cannot be established."""
    message = 'Failed to connect to the browser'


class InvalidCommand(_PydollException):
    """Raised when a malformed command is sent to the browser."""
    message = 'The command provided is invalid'


class InvalidCallback(_PydollException):
    """Raised when a callback registration is malformed."""
    message = 'The callback provided is invalid'


class NetworkError(_PydollException):
    """Raised for generic network-level failures."""
    message = 'A network error occurred'


class InvalidResponse(_PydollException):
    """Raised when the browser returns a response that cannot be parsed."""
    message = 'The response received is invalid'


class ReconnectionFailed(_PydollException):
    """Raised when re-establishing a dropped browser connection fails."""
    message = 'Failed to reconnect to the browser'


class ResendCommandFailed(_PydollException):
    """Raised when retrying a command still fails."""
    message = 'Failed to resend the command'


class BrowserNotRunning(_PydollException):
    """Raised when an operation requires a running browser and none exists."""
    message = 'The browser is not running'


class ElementNotFound(_PydollException):
    """Raised when a DOM lookup matches no element."""
    message = 'The specified element was not found'


class ClickIntercepted(_PydollException):
    """Raised when another element swallowed the click."""
    message = 'The click was intercepted'


class ElementNotVisible(_PydollException):
    """Raised when interacting with an element that is not rendered."""
    message = 'The element is not visible'


class ElementNotInteractable(_PydollException):
    """Raised when an element cannot receive input."""
    message = 'The element is not interactable'


class InvalidFileExtension(_PydollException):
    """Raised for unsupported file extensions."""
    message = 'The file extension provided is not supported'


class EventNotSupported(_PydollException):
    """Raised for unsupported events; accepts an optional custom message."""
    message = 'The event is not supported'

    def __init__(self, message: str = ''):
        # Fall back to the class-level default when no message is given.
        self.message = message or self.message
| 457
|
1ffb4dfa08865a26dced8fc8adf83f46b9235fbd71a3024784ec20026d4f0257
| 19.282828
| 60
| 0.659363
| 4.393873
| false
| false
| false
| false
|
pydantic/pydantic-ai
|
examples/pydantic_ai_examples/chat_app.py
| 7,089
| 0
|
MIT License
|
"""Simple chat app example build with FastAPI.
Run with:
uv run -m pydantic_ai_examples.chat_app
"""
from __future__ import annotations as _annotations
import asyncio
import json
import sqlite3
from collections.abc import AsyncIterator
from concurrent.futures.thread import ThreadPoolExecutor
from contextlib import asynccontextmanager
from dataclasses import dataclass
from datetime import datetime, timezone
from functools import partial
from pathlib import Path
from typing import Annotated, Any, Callable, Literal, TypeVar
import fastapi
import logfire
from fastapi import Depends, Request
from fastapi.responses import FileResponse, Response, StreamingResponse
from typing_extensions import LiteralString, ParamSpec, TypedDict
from pydantic_ai import Agent
from pydantic_ai.exceptions import UnexpectedModelBehavior
from pydantic_ai.messages import (
ModelMessage,
ModelMessagesTypeAdapter,
ModelRequest,
ModelResponse,
TextPart,
UserPromptPart,
)
# 'if-token-present' means nothing will be sent (and the example will work) if you don't have logfire configured
logfire.configure(send_to_logfire='if-token-present')

agent = Agent('openai:gpt-4o', instrument=True)
THIS_DIR = Path(__file__).parent


@asynccontextmanager
async def lifespan(_app: fastapi.FastAPI):
    """Open the SQLite-backed message store for the app's lifetime."""
    async with Database.connect() as db:
        # The yielded dict becomes request.state (see get_db below).
        yield {'db': db}


app = fastapi.FastAPI(lifespan=lifespan)
logfire.instrument_fastapi(app)


@app.get('/')
async def index() -> FileResponse:
    """Serve the chat page."""
    return FileResponse((THIS_DIR / 'chat_app.html'), media_type='text/html')


@app.get('/chat_app.ts')
async def main_ts() -> FileResponse:
    """Get the raw typescript code, it's compiled in the browser, forgive me."""
    return FileResponse((THIS_DIR / 'chat_app.ts'), media_type='text/plain')


async def get_db(request: Request) -> Database:
    # Database instance placed on app state by `lifespan`.
    return request.state.db


@app.get('/chat/')
async def get_chat(database: Database = Depends(get_db)) -> Response:
    """Return the full chat history as newline-delimited JSON objects."""
    msgs = await database.get_messages()
    return Response(
        b'\n'.join(json.dumps(to_chat_message(m)).encode('utf-8') for m in msgs),
        media_type='text/plain',
    )


class ChatMessage(TypedDict):
    """Format of messages sent to the browser."""

    role: Literal['user', 'model']
    timestamp: str
    content: str
def to_chat_message(m: ModelMessage) -> ChatMessage:
    """Convert a `ModelMessage` into the browser-facing `ChatMessage` dict.

    Only user-prompt requests and plain-text responses are representable;
    anything else is rejected as unexpected model behavior.
    """
    part = m.parts[0]
    if isinstance(m, ModelRequest) and isinstance(part, UserPromptPart):
        assert isinstance(part.content, str)
        return {
            'role': 'user',
            'timestamp': part.timestamp.isoformat(),
            'content': part.content,
        }
    if isinstance(m, ModelResponse) and isinstance(part, TextPart):
        return {
            'role': 'model',
            'timestamp': m.timestamp.isoformat(),
            'content': part.content,
        }
    raise UnexpectedModelBehavior(f'Unexpected message type for chat app: {m}')
@app.post('/chat/')
async def post_chat(
    prompt: Annotated[str, fastapi.Form()], database: Database = Depends(get_db)
) -> StreamingResponse:
    """Handle a user prompt: stream the model's reply and persist the exchange."""

    async def stream_messages():
        """Streams new line delimited JSON `Message`s to the client."""
        # stream the user prompt so that can be displayed straight away
        yield (
            json.dumps(
                {
                    'role': 'user',
                    'timestamp': datetime.now(tz=timezone.utc).isoformat(),
                    'content': prompt,
                }
            ).encode('utf-8')
            + b'\n'
        )
        # get the chat history so far to pass as context to the agent
        messages = await database.get_messages()
        # run the agent with the user prompt and the chat history
        async with agent.run_stream(prompt, message_history=messages) as result:
            async for text in result.stream(debounce_by=0.01):
                # text here is a `str` and the frontend wants
                # JSON encoded ModelResponse, so we create one
                m = ModelResponse(parts=[TextPart(text)], timestamp=result.timestamp())
                yield json.dumps(to_chat_message(m)).encode('utf-8') + b'\n'
        # add new messages (e.g. the user prompt and the agent response in this case) to the database
        await database.add_messages(result.new_messages_json())

    return StreamingResponse(stream_messages(), media_type='text/plain')
P = ParamSpec('P')
R = TypeVar('R')
@dataclass
class Database:
    """Rudimentary database to store chat messages in SQLite.

    The SQLite standard library package is synchronous, so we
    use a thread pool executor to run queries asynchronously.
    """

    con: sqlite3.Connection
    _loop: asyncio.AbstractEventLoop
    _executor: ThreadPoolExecutor

    @classmethod
    @asynccontextmanager
    async def connect(
        cls, file: Path = THIS_DIR / '.chat_app_messages.sqlite'
    ) -> AsyncIterator[Database]:
        """Open the database and yield a connected instance; closes on exit."""
        with logfire.span('connect to DB'):
            loop = asyncio.get_event_loop()
            # Single worker: the sqlite3 connection is only ever touched from
            # this one executor thread.
            executor = ThreadPoolExecutor(max_workers=1)
            con = await loop.run_in_executor(executor, cls._connect, file)
            slf = cls(con, loop, executor)
        try:
            yield slf
        finally:
            await slf._asyncify(con.close)

    @staticmethod
    def _connect(file: Path) -> sqlite3.Connection:
        """Blocking connect + schema bootstrap; runs on the executor thread."""
        con = sqlite3.connect(str(file))
        con = logfire.instrument_sqlite3(con)
        cur = con.cursor()
        cur.execute(
            'CREATE TABLE IF NOT EXISTS messages (id INT PRIMARY KEY, message_list TEXT);'
        )
        con.commit()
        return con

    async def add_messages(self, messages: bytes):
        """Append one JSON-encoded batch of messages."""
        await self._asyncify(
            self._execute,
            'INSERT INTO messages (message_list) VALUES (?);',
            messages,
            commit=True,
        )
        # NOTE(review): this second commit looks redundant — `_execute` above
        # already committed (commit=True). Harmless, but could be removed.
        await self._asyncify(self.con.commit)

    async def get_messages(self) -> list[ModelMessage]:
        """Load and deserialize every stored message batch, oldest first."""
        c = await self._asyncify(
            self._execute, 'SELECT message_list FROM messages order by id'
        )
        rows = await self._asyncify(c.fetchall)
        messages: list[ModelMessage] = []
        for row in rows:
            messages.extend(ModelMessagesTypeAdapter.validate_json(row[0]))
        return messages

    def _execute(
        self, sql: LiteralString, *args: Any, commit: bool = False
    ) -> sqlite3.Cursor:
        # Runs on the executor thread; safe to use the connection directly.
        cur = self.con.cursor()
        cur.execute(sql, args)
        if commit:
            self.con.commit()
        return cur

    async def _asyncify(
        self, func: Callable[P, R], *args: P.args, **kwargs: P.kwargs
    ) -> R:
        """Run a blocking callable on the dedicated DB thread."""
        return await self._loop.run_in_executor(  # type: ignore
            self._executor,
            partial(func, **kwargs),
            *args,  # type: ignore
        )
if __name__ == '__main__':
import uvicorn
uvicorn.run(
'pydantic_ai_examples.chat_app:app', reload=True, reload_dirs=[str(THIS_DIR)]
)
| 1,797
|
117e7bfbcdbf15349873910cd210d9c05a54b0e5e20c6c3fd28e2f6f930d85cb
| 30.367257
| 112
| 0.631683
| 3.944908
| false
| false
| false
| false
|
andrewyng/aisuite
|
aisuite/providers/azure_provider.py
| 5,389
| 0
|
MIT License
|
import urllib.request
import json
import os
from aisuite.provider import Provider
from aisuite.framework import ChatCompletionResponse
from aisuite.framework.message import Message, ChatCompletionMessageToolCall, Function
# Azure provider is based on the documentation here -
# https://learn.microsoft.com/en-us/azure/machine-learning/reference-model-inference-api?view=azureml-api-2&source=recommendations&tabs=python
# Azure AI Model Inference API is used.
# From the documentation -
# """
# The Azure AI Model Inference is an API that exposes a common set of capabilities for foundational models
# and that can be used by developers to consume predictions from a diverse set of models in a uniform and consistent way.
# Developers can talk with different models deployed in Azure AI Foundry portal without changing the underlying code they are using.
#
# The Azure AI Model Inference API is available in the following models:
#
# Models deployed to serverless API endpoints:
# Cohere Embed V3 family of models
# Cohere Command R family of models
# Meta Llama 2 chat family of models
# Meta Llama 3 instruct family of models
# Mistral-Small
# Mistral-Large
# Jais family of models
# Jamba family of models
# Phi-3 family of models
#
# Models deployed to managed inference:
# Meta Llama 3 instruct family of models
# Phi-3 family of models
# Mixtral famility of models
#
# The API is compatible with Azure OpenAI model deployments.
# """
class AzureMessageConverter:
    """Translates chat messages between aisuite's and Azure's wire formats."""

    @staticmethod
    def convert_request(messages):
        """Convert messages to Azure format."""
        # Message objects are serialized via pydantic; plain dicts pass through.
        return [
            m.model_dump(mode="json") if isinstance(m, Message) else m
            for m in messages
        ]

    @staticmethod
    def convert_response(resp_json) -> ChatCompletionResponse:
        """Normalize the response from the Azure API to match OpenAI's response format."""
        completion_response = ChatCompletionResponse()
        message = resp_json["choices"][0]["message"]
        target = completion_response.choices[0].message

        # Basic message content; role defaults to "assistant".
        target.content = message.get("content")
        target.role = message.get("role", "assistant")

        # Tool calls, when present and non-null.
        raw_tool_calls = message.get("tool_calls")
        if raw_tool_calls is not None:
            target.tool_calls = [
                ChatCompletionMessageToolCall(
                    id=tc["id"],
                    type=tc["type"],
                    function={
                        "name": tc["function"]["name"],
                        "arguments": tc["function"]["arguments"],
                    },
                )
                for tc in raw_tool_calls
            ]

        return completion_response
class AzureProvider(Provider):
    """aisuite provider for the Azure AI Model Inference API endpoints."""

    def __init__(self, **config):
        """Read endpoint settings from config or AZURE_* environment variables.

        Raises ValueError when api_key or base_url is missing.
        """
        self.base_url = config.get("base_url") or os.getenv("AZURE_BASE_URL")
        self.api_key = config.get("api_key") or os.getenv("AZURE_API_KEY")
        # api-version is optional; appended as a query parameter when present.
        self.api_version = config.get("api_version") or os.getenv("AZURE_API_VERSION")
        if not self.api_key:
            raise ValueError("For Azure, api_key is required.")
        if not self.base_url:
            raise ValueError(
                "For Azure, base_url is required. Check your deployment page for a URL like this - https://<model-deployment-name>.<region>.models.ai.azure.com"
            )
        self.transformer = AzureMessageConverter()

    def chat_completions_create(self, model, messages, **kwargs):
        """POST a chat-completions request and return a normalized response.

        Note: `model` is accepted but unused in this body — the Azure endpoint
        itself identifies the deployed model. Streaming is not supported
        ('stream' is silently dropped).
        """
        url = f"{self.base_url}/chat/completions"
        if self.api_version:
            url = f"{url}?api-version={self.api_version}"
        # Remove 'stream' from kwargs if present
        kwargs.pop("stream", None)
        # Transform messages using converter
        transformed_messages = self.transformer.convert_request(messages)
        # Prepare the request payload
        data = {"messages": transformed_messages}
        # Add tools if provided
        if "tools" in kwargs:
            data["tools"] = kwargs["tools"]
            kwargs.pop("tools")
        # Add tool_choice if provided
        if "tool_choice" in kwargs:
            data["tool_choice"] = kwargs["tool_choice"]
            kwargs.pop("tool_choice")
        # Add remaining kwargs
        data.update(kwargs)
        body = json.dumps(data).encode("utf-8")
        headers = {"Content-Type": "application/json", "Authorization": self.api_key}
        try:
            req = urllib.request.Request(url, body, headers)
            with urllib.request.urlopen(req) as response:
                result = response.read()
                resp_json = json.loads(result)
                return self.transformer.convert_response(resp_json)
        # NOTE(review): `urllib.error` is only reachable here because importing
        # `urllib.request` imports it internally; an explicit
        # `import urllib.error` at the top of the file would be safer.
        except urllib.error.HTTPError as error:
            error_message = f"The request failed with status code: {error.code}\n"
            error_message += f"Headers: {error.info()}\n"
            error_message += error.read().decode("utf-8", "ignore")
            raise Exception(error_message)
| 1,283
|
bb77d5901c84d67cd902db73b9b6d664b69f96c21a326b96fdde55c4cec92a4e
| 38.625
| 160
| 0.641492
| 4.200312
| false
| false
| false
| false
|
abus-aikorea/voice-pro
|
cosyvoice/hifigan/hifigan.py
| 3,231
| 0
|
MIT License
|
from typing import Dict, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from matcha.hifigan.models import feature_loss, generator_loss, discriminator_loss
from cosyvoice.utils.losses import tpr_loss, mel_loss
class HiFiGan(nn.Module):
    """GAN training wrapper: routes a batch to the generator or discriminator
    objective depending on batch['turn'].
    """

    def __init__(self, generator, discriminator, mel_spec_transform,
                 multi_mel_spectral_recon_loss_weight=45, feat_match_loss_weight=2.0,
                 tpr_loss_weight=1.0, tpr_loss_tau=0.04):
        super(HiFiGan, self).__init__()
        self.generator = generator
        self.discriminator = discriminator
        self.mel_spec_transform = mel_spec_transform
        # Loss weights; the tpr loss term is skipped entirely when its
        # weight is exactly 0.
        self.multi_mel_spectral_recon_loss_weight = multi_mel_spectral_recon_loss_weight
        self.feat_match_loss_weight = feat_match_loss_weight
        self.tpr_loss_weight = tpr_loss_weight
        self.tpr_loss_tau = tpr_loss_tau

    def forward(
        self,
        batch: dict,
        device: torch.device,
    ) -> Dict[str, Optional[torch.Tensor]]:
        """Dispatch on batch['turn']: 'generator' or the discriminator turn."""
        if batch['turn'] == 'generator':
            return self.forward_generator(batch, device)
        else:
            return self.forward_discriminator(batch, device)

    def forward_generator(self, batch, device):
        """Generator step: adversarial + feature-match + mel + [tpr] + f0 L1 losses."""
        real_speech = batch['speech'].to(device)
        pitch_feat = batch['pitch_feat'].to(device)
        # 1. calculate generator outputs
        generated_speech, generated_f0 = self.generator(batch, device)
        # 2. calculate discriminator outputs
        y_d_rs, y_d_gs, fmap_rs, fmap_gs = self.discriminator(real_speech, generated_speech)
        # 3. calculate generator losses, feature loss, mel loss, tpr losses [Optional]
        loss_gen, _ = generator_loss(y_d_gs)
        loss_fm = feature_loss(fmap_rs, fmap_gs)
        loss_mel = mel_loss(real_speech, generated_speech, self.mel_spec_transform)
        if self.tpr_loss_weight != 0:
            loss_tpr = tpr_loss(y_d_rs, y_d_gs, self.tpr_loss_tau)
        else:
            loss_tpr = torch.zeros(1).to(device)
        loss_f0 = F.l1_loss(generated_f0, pitch_feat)
        # Weighted sum; f0 loss has an implicit weight of 1.
        loss = loss_gen + self.feat_match_loss_weight * loss_fm + \
            self.multi_mel_spectral_recon_loss_weight * loss_mel + \
            self.tpr_loss_weight * loss_tpr + loss_f0
        return {'loss': loss, 'loss_gen': loss_gen, 'loss_fm': loss_fm, 'loss_mel': loss_mel, 'loss_tpr': loss_tpr, 'loss_f0': loss_f0}

    def forward_discriminator(self, batch, device):
        """Discriminator step: real-vs-generated classification + [tpr] loss."""
        real_speech = batch['speech'].to(device)
        # 1. calculate generator outputs
        # no_grad: the generator is not updated during the discriminator turn.
        with torch.no_grad():
            generated_speech, generated_f0 = self.generator(batch, device)
        # 2. calculate discriminator outputs
        y_d_rs, y_d_gs, fmap_rs, fmap_gs = self.discriminator(real_speech, generated_speech)
        # 3. calculate discriminator losses, tpr losses [Optional]
        loss_disc, _, _ = discriminator_loss(y_d_rs, y_d_gs)
        if self.tpr_loss_weight != 0:
            loss_tpr = tpr_loss(y_d_rs, y_d_gs, self.tpr_loss_tau)
        else:
            loss_tpr = torch.zeros(1).to(device)
        loss = loss_disc + self.tpr_loss_weight * loss_tpr
        return {'loss': loss, 'loss_disc': loss_disc, 'loss_tpr': loss_tpr}
| 980
|
716181c498e85d58bdcb09d9b079a52d38a73aa18df3b9bc582c4e4015e91d3e
| 47.223881
| 135
| 0.626741
| 3.296939
| false
| false
| false
| false
|
microsoft/TRELLIS
|
dataset_toolkits/render.py
| 4,987
| 0
|
MIT License
|
import os
import json
import copy
import sys
import importlib
import argparse
import pandas as pd
from easydict import EasyDict as edict
from functools import partial
from subprocess import DEVNULL, call
import numpy as np
from utils import sphere_hammersley_sequence
BLENDER_LINK = 'https://download.blender.org/release/Blender3.0/blender-3.0.1-linux-x64.tar.xz'
BLENDER_INSTALLATION_PATH = '/tmp'
BLENDER_PATH = f'{BLENDER_INSTALLATION_PATH}/blender-3.0.1-linux-x64/blender'
def _install_blender():
    """Download and unpack Blender 3.0.1 into /tmp if it is not already there.

    Installs X11/render shared libraries via apt first (requires sudo).
    """
    if not os.path.exists(BLENDER_PATH):
        os.system('sudo apt-get update')
        os.system('sudo apt-get install -y libxrender1 libxi6 libxkbcommon-x11-0 libsm6')
        os.system(f'wget {BLENDER_LINK} -P {BLENDER_INSTALLATION_PATH}')
        os.system(f'tar -xvf {BLENDER_INSTALLATION_PATH}/blender-3.0.1-linux-x64.tar.xz -C {BLENDER_INSTALLATION_PATH}')
def _render(file_path, sha256, output_dir, num_views):
    """Render `num_views` views of one object with headless Blender.

    Returns {'sha256': ..., 'rendered': True} on success; implicitly returns
    None when Blender produced no transforms.json (callers treat that as a
    failed render).
    """
    output_folder = os.path.join(output_dir, 'renders', sha256)

    # Build camera {yaw, pitch, radius, fov}
    yaws = []
    pitchs = []
    # Random offset rotates the Hammersley point set so each object gets a
    # different view distribution.
    offset = (np.random.rand(), np.random.rand())
    for i in range(num_views):
        y, p = sphere_hammersley_sequence(i, num_views, offset)
        yaws.append(y)
        pitchs.append(p)
    radius = [2] * num_views  # fixed camera distance
    fov = [40 / 180 * np.pi] * num_views  # 40 degrees, in radians
    views = [{'yaw': y, 'pitch': p, 'radius': r, 'fov': f} for y, p, r, f in zip(yaws, pitchs, radius, fov)]
    args = [
        BLENDER_PATH, '-b', '-P', os.path.join(os.path.dirname(__file__), 'blender_script', 'render.py'),
        '--',
        '--views', json.dumps(views),
        '--object', os.path.expanduser(file_path),
        '--resolution', '512',
        '--output_folder', output_folder,
        '--engine', 'CYCLES',
        '--save_mesh',
    ]
    if file_path.endswith('.blend'):
        # .blend files are opened as the scene itself, before the -P script.
        args.insert(1, file_path)
    # Run Blender silently; its stdout/stderr are discarded.
    call(args, stdout=DEVNULL, stderr=DEVNULL)
    if os.path.exists(os.path.join(output_folder, 'transforms.json')):
        return {'sha256': sha256, 'rendered': True}
if __name__ == '__main__':
    # The dataset module (datasets/<name>.py) is selected by the first CLI arg.
    dataset_utils = importlib.import_module(f'datasets.{sys.argv[1]}')

    parser = argparse.ArgumentParser()
    parser.add_argument('--output_dir', type=str, required=True,
                        help='Directory to save the metadata')
    parser.add_argument('--filter_low_aesthetic_score', type=float, default=None,
                        help='Filter objects with aesthetic score lower than this value')
    parser.add_argument('--instances', type=str, default=None,
                        help='Instances to process')
    parser.add_argument('--num_views', type=int, default=150,
                        help='Number of views to render')
    dataset_utils.add_args(parser)
    parser.add_argument('--rank', type=int, default=0)
    parser.add_argument('--world_size', type=int, default=1)
    parser.add_argument('--max_workers', type=int, default=8)
    opt = parser.parse_args(sys.argv[2:])
    opt = edict(vars(opt))

    os.makedirs(os.path.join(opt.output_dir, 'renders'), exist_ok=True)

    # install blender
    print('Checking blender...', flush=True)
    _install_blender()

    # get file list
    if not os.path.exists(os.path.join(opt.output_dir, 'metadata.csv')):
        raise ValueError('metadata.csv not found')
    metadata = pd.read_csv(os.path.join(opt.output_dir, 'metadata.csv'))
    if opt.instances is None:
        # No explicit instance list: take all downloaded objects, optionally
        # filtered by aesthetic score and excluding already-rendered rows.
        metadata = metadata[metadata['local_path'].notna()]
        if opt.filter_low_aesthetic_score is not None:
            metadata = metadata[metadata['aesthetic_score'] >= opt.filter_low_aesthetic_score]
        if 'rendered' in metadata.columns:
            metadata = metadata[metadata['rendered'] == False]
    else:
        # Explicit instances: a file with one sha256 per line, or a comma list.
        if os.path.exists(opt.instances):
            with open(opt.instances, 'r') as f:
                instances = f.read().splitlines()
        else:
            instances = opt.instances.split(',')
        metadata = metadata[metadata['sha256'].isin(instances)]

    # Shard the remaining work evenly across ranks.
    start = len(metadata) * opt.rank // opt.world_size
    end = len(metadata) * (opt.rank + 1) // opt.world_size
    metadata = metadata[start:end]
    records = []
    # filter out objects that are already processed
    for sha256 in copy.copy(metadata['sha256'].values):
        if os.path.exists(os.path.join(opt.output_dir, 'renders', sha256, 'transforms.json')):
            records.append({'sha256': sha256, 'rendered': True})
            metadata = metadata[metadata['sha256'] != sha256]
    print(f'Processing {len(metadata)} objects...')

    # process objects
    func = partial(_render, output_dir=opt.output_dir, num_views=opt.num_views)
    rendered = dataset_utils.foreach_instance(metadata, opt.output_dir, func, max_workers=opt.max_workers, desc='Rendering objects')
    # Merge freshly rendered results with the already-done records.
    rendered = pd.concat([rendered, pd.DataFrame.from_records(records)])
    rendered.to_csv(os.path.join(opt.output_dir, f'rendered_{opt.rank}.csv'), index=False)
| 1,468
|
2eb4df7f64c1afeb37a4a24fecb5022caab25d165163f937a684650ce71b1df5
| 40.214876
| 132
| 0.63465
| 3.397139
| false
| false
| false
| false
|
virattt/ai-hedge-fund
|
src/agents/risk_manager.py
| 3,134
| 0
|
MIT License
|
from langchain_core.messages import HumanMessage
from graph.state import AgentState, show_agent_reasoning
from utils.progress import progress
from tools.api import get_prices, prices_to_df
import json
##### Risk Management Agent #####
def risk_management_agent(state: AgentState):
    """Controls position sizing based on real-world risk factors for multiple tickers.

    For each ticker, fetches prices, caps any single position at 20% of total
    portfolio value (cash + cost basis of all holdings), subtracts the existing
    position, and clamps the remainder to available cash. Results are written to
    state["data"]["analyst_signals"]["risk_management_agent"] and returned as a
    HumanMessage.
    """
    portfolio = state["data"]["portfolio"]
    data = state["data"]
    tickers = data["tickers"]

    # Initialize risk analysis for each ticker
    risk_analysis = {}
    current_prices = {}  # Store prices here to avoid redundant API calls

    # Portfolio-wide figures do not depend on the ticker, so compute them once
    # instead of on every loop iteration.
    cost_basis = portfolio.get("cost_basis", {})
    total_portfolio_value = portfolio.get("cash", 0) + sum(cost_basis.values())
    # Base limit is 20% of portfolio for any single position
    position_limit = total_portfolio_value * 0.20

    for ticker in tickers:
        progress.update_status("risk_management_agent", ticker, "Analyzing price data")

        prices = get_prices(
            ticker=ticker,
            start_date=data["start_date"],
            end_date=data["end_date"],
        )
        if not prices:
            progress.update_status("risk_management_agent", ticker, "Failed: No price data found")
            continue

        prices_df = prices_to_df(prices)
        progress.update_status("risk_management_agent", ticker, "Calculating position limits")

        # Latest close is used as the current price.
        current_price = prices_df["close"].iloc[-1]
        current_prices[ticker] = current_price  # Store the current price

        # Current position value for this ticker (0 when not held).
        current_position_value = cost_basis.get(ticker, 0)

        # For existing positions, subtract current position value from limit
        remaining_position_limit = position_limit - current_position_value

        # Ensure we don't exceed available cash
        max_position_size = min(remaining_position_limit, portfolio.get("cash", 0))

        risk_analysis[ticker] = {
            "remaining_position_limit": float(max_position_size),
            "current_price": float(current_price),
            "reasoning": {
                "portfolio_value": float(total_portfolio_value),
                "current_position": float(current_position_value),
                "position_limit": float(position_limit),
                "remaining_limit": float(remaining_position_limit),
                "available_cash": float(portfolio.get("cash", 0)),
            },
        }
        progress.update_status("risk_management_agent", ticker, "Done")

    message = HumanMessage(
        content=json.dumps(risk_analysis),
        name="risk_management_agent",
    )

    if state["metadata"]["show_reasoning"]:
        show_agent_reasoning(risk_analysis, "Risk Management Agent")

    # Add the signal to the analyst_signals list
    state["data"]["analyst_signals"]["risk_management_agent"] = risk_analysis

    return {
        "messages": state["messages"] + [message],
        "data": data,
    }
| 755
|
0d385b11ce5d2ed419046e28178e23589399756435c42ab1ef58233b214d7537
| 36.759036
| 146
| 0.638162
| 4.150993
| false
| false
| false
| false
|
mannaandpoem/OpenManus
|
app/prompt/mcp.py
| 2,168
| 0
|
MIT License
|
"""Prompts for the MCP Agent."""
SYSTEM_PROMPT = """You are an AI assistant with access to a Model Context Protocol (MCP) server.
You can use the tools provided by the MCP server to complete tasks.
The MCP server will dynamically expose tools that you can use - always check the available tools first.
When using an MCP tool:
1. Choose the appropriate tool based on your task requirements
2. Provide properly formatted arguments as required by the tool
3. Observe the results and use them to determine next steps
4. Tools may change during operation - new tools might appear or existing ones might disappear
Follow these guidelines:
- Call tools with valid parameters as documented in their schemas
- Handle errors gracefully by understanding what went wrong and trying again with corrected parameters
- For multimedia responses (like images), you'll receive a description of the content
- Complete user requests step by step, using the most appropriate tools
- If multiple tools need to be called in sequence, make one call at a time and wait for results
Remember to clearly explain your reasoning and actions to the user.
"""
NEXT_STEP_PROMPT = """Based on the current state and available tools, what should be done next?
Think step by step about the problem and identify which MCP tool would be most helpful for the current stage.
If you've already made progress, consider what additional information you need or what actions would move you closer to completing the task.
"""
# Additional specialized prompts
TOOL_ERROR_PROMPT = """You encountered an error with the tool '{tool_name}'.
Try to understand what went wrong and correct your approach.
Common issues include:
- Missing or incorrect parameters
- Invalid parameter formats
- Using a tool that's no longer available
- Attempting an operation that's not supported
Please check the tool specifications and try again with corrected parameters.
"""
MULTIMEDIA_RESPONSE_PROMPT = """You've received a multimedia response (image, audio, etc.) from the tool '{tool_name}'.
This content has been processed and described for you.
Use this information to continue the task or provide insights to the user.
"""
| 489
|
2895060dbaccfb7ff99aacbad3eaca1cc95a6cc03789b36fff3d473077158f76
| 49.418605
| 140
| 0.788284
| 4.433538
| false
| false
| false
| false
|
openai/openai-agents-python
|
src/agents/mcp/server.py
| 11,269
| 0
|
MIT License
|
from __future__ import annotations
import abc
import asyncio
from contextlib import AbstractAsyncContextManager, AsyncExitStack
from pathlib import Path
from typing import Any, Literal
from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
from mcp import ClientSession, StdioServerParameters, Tool as MCPTool, stdio_client
from mcp.client.sse import sse_client
from mcp.types import CallToolResult, JSONRPCMessage
from typing_extensions import NotRequired, TypedDict
from ..exceptions import UserError
from ..logger import logger
class MCPServer(abc.ABC):
    """Abstract interface every Model Context Protocol server must implement."""

    @property
    @abc.abstractmethod
    def name(self) -> str:
        """A human-readable label identifying this server."""
        ...

    @abc.abstractmethod
    async def connect(self):
        """Establish the connection to the server.

        This might mean spawning a subprocess or opening a network connection.
        The server must remain connected until `cleanup()` is called.
        """
        ...

    @abc.abstractmethod
    async def cleanup(self):
        """Tear the connection down (e.g. terminate a subprocess or close a socket)."""
        ...

    @abc.abstractmethod
    async def list_tools(self) -> list[MCPTool]:
        """Return the tools currently exposed by the server."""
        ...

    @abc.abstractmethod
    async def call_tool(self, tool_name: str, arguments: dict[str, Any] | None) -> CallToolResult:
        """Invoke `tool_name` on the server with the given arguments."""
        ...
class _MCPServerWithClientSession(MCPServer, abc.ABC):
    """Base class for MCP servers that use a `ClientSession` to communicate with the server."""
    def __init__(self, cache_tools_list: bool):
        """
        Args:
            cache_tools_list: Whether to cache the tools list. If `True`, the tools list will be
                cached and only fetched from the server once. If `False`, the tools list will be
                fetched from the server on each call to `list_tools()`. The cache can be invalidated
                by calling `invalidate_tools_cache()`. You should set this to `True` if you know the
                server will not change its tools list, because it can drastically improve latency
                (by avoiding a round-trip to the server every time).
        """
        # Session is None until connect() succeeds; all RPC methods guard on it.
        self.session: ClientSession | None = None
        # Owns the transport + session contexts so cleanup() can close them together.
        self.exit_stack: AsyncExitStack = AsyncExitStack()
        # Serializes cleanup() so concurrent callers don't double-close the stack.
        self._cleanup_lock: asyncio.Lock = asyncio.Lock()
        self.cache_tools_list = cache_tools_list
        # The cache is always dirty at startup, so that we fetch tools at least once
        self._cache_dirty = True
        self._tools_list: list[MCPTool] | None = None
    @abc.abstractmethod
    def create_streams(
        self,
    ) -> AbstractAsyncContextManager[
        tuple[
            MemoryObjectReceiveStream[JSONRPCMessage | Exception],
            MemoryObjectSendStream[JSONRPCMessage],
        ]
    ]:
        """Create the streams for the server."""
        pass
    async def __aenter__(self):
        # `async with server:` connects on entry ...
        await self.connect()
        return self
    async def __aexit__(self, exc_type, exc_value, traceback):
        # ... and always cleans up on exit, even when the body raised.
        await self.cleanup()
    def invalidate_tools_cache(self):
        """Invalidate the tools cache."""
        self._cache_dirty = True
    async def connect(self):
        """Connect to the server."""
        try:
            transport = await self.exit_stack.enter_async_context(self.create_streams())
            read, write = transport
            session = await self.exit_stack.enter_async_context(ClientSession(read, write))
            await session.initialize()
            self.session = session
        except Exception as e:
            # Close whatever was partially opened before re-raising.
            logger.error(f"Error initializing MCP server: {e}")
            await self.cleanup()
            raise
    async def list_tools(self) -> list[MCPTool]:
        """List the tools available on the server."""
        if not self.session:
            raise UserError("Server not initialized. Make sure you call `connect()` first.")
        # Return from cache if caching is enabled, we have tools, and the cache is not dirty
        if self.cache_tools_list and not self._cache_dirty and self._tools_list:
            return self._tools_list
        # Reset the cache dirty to False
        self._cache_dirty = False
        # Fetch the tools from the server
        self._tools_list = (await self.session.list_tools()).tools
        return self._tools_list
    async def call_tool(self, tool_name: str, arguments: dict[str, Any] | None) -> CallToolResult:
        """Invoke a tool on the server."""
        if not self.session:
            raise UserError("Server not initialized. Make sure you call `connect()` first.")
        return await self.session.call_tool(tool_name, arguments)
    async def cleanup(self):
        """Cleanup the server."""
        async with self._cleanup_lock:
            try:
                # Closes both the session and the transport in reverse order.
                await self.exit_stack.aclose()
                self.session = None
            except Exception as e:
                # Best-effort teardown: log but never raise from cleanup.
                logger.error(f"Error cleaning up server: {e}")
class MCPServerStdioParams(TypedDict):
    """Mirrors `mcp.client.stdio.StdioServerParameters`, but lets you pass params without another
    import.
    """
    # Only `command` is required; all other keys are optional overrides.
    command: str
    """The executable to run to start the server. For example, `python` or `node`."""
    args: NotRequired[list[str]]
    """Command line args to pass to the `command` executable. For example, `['foo.py']` or
    `['server.js', '--port', '8080']`."""
    env: NotRequired[dict[str, str]]
    """The environment variables to set for the server."""
    cwd: NotRequired[str | Path]
    """The working directory to use when spawning the process."""
    encoding: NotRequired[str]
    """The text encoding used when sending/receiving messages to the server. Defaults to `utf-8`."""
    encoding_error_handler: NotRequired[Literal["strict", "ignore", "replace"]]
    """The text encoding error handler. Defaults to `strict`.
    See https://docs.python.org/3/library/codecs.html#codec-base-classes for
    explanations of possible values.
    """
class MCPServerStdio(_MCPServerWithClientSession):
    """MCP server implementation that uses the stdio transport. See the [spec]
    (https://spec.modelcontextprotocol.io/specification/2024-11-05/basic/transports/#stdio) for
    details.
    """
    def __init__(
        self,
        params: MCPServerStdioParams,
        cache_tools_list: bool = False,
        name: str | None = None,
    ):
        """Create a new MCP server based on the stdio transport.
        Args:
            params: The params that configure the server. This includes the command to run to
                start the server, the args to pass to the command, the environment variables to
                set for the server, the working directory to use when spawning the process, and
                the text encoding used when sending/receiving messages to the server.
            cache_tools_list: Whether to cache the tools list. If `True`, the tools list will be
                cached and only fetched from the server once. If `False`, the tools list will be
                fetched from the server on each call to `list_tools()`. The cache can be
                invalidated by calling `invalidate_tools_cache()`. You should set this to `True`
                if you know the server will not change its tools list, because it can drastically
                improve latency (by avoiding a round-trip to the server every time).
            name: A readable name for the server. If not provided, we'll create one from the
                command.
        """
        super().__init__(cache_tools_list)
        # Normalize the loose TypedDict into the strict StdioServerParameters model,
        # applying the documented defaults for every optional field.
        self.params = StdioServerParameters(
            command=params["command"],
            args=params.get("args", []),
            env=params.get("env"),
            cwd=params.get("cwd"),
            encoding=params.get("encoding", "utf-8"),
            encoding_error_handler=params.get("encoding_error_handler", "strict"),
        )
        self._name = name or f"stdio: {self.params.command}"
    def create_streams(
        self,
    ) -> AbstractAsyncContextManager[
        tuple[
            MemoryObjectReceiveStream[JSONRPCMessage | Exception],
            MemoryObjectSendStream[JSONRPCMessage],
        ]
    ]:
        """Create the streams for the server."""
        # stdio_client spawns the subprocess and yields (read, write) memory streams.
        return stdio_client(self.params)
    @property
    def name(self) -> str:
        """A readable name for the server."""
        return self._name
class MCPServerSseParams(TypedDict):
    """Mirrors the params in`mcp.client.sse.sse_client`."""
    # Only `url` is required; the rest fall back to sse_client defaults.
    url: str
    """The URL of the server."""
    headers: NotRequired[dict[str, str]]
    """The headers to send to the server."""
    timeout: NotRequired[float]
    """The timeout for the HTTP request. Defaults to 5 seconds."""
    sse_read_timeout: NotRequired[float]
    """The timeout for the SSE connection, in seconds. Defaults to 5 minutes."""
class MCPServerSse(_MCPServerWithClientSession):
    """MCP server implementation that uses the HTTP with SSE transport. See the [spec]
    (https://spec.modelcontextprotocol.io/specification/2024-11-05/basic/transports/#http-with-sse)
    for details.
    """

    def __init__(
        self,
        params: MCPServerSseParams,
        cache_tools_list: bool = False,
        name: str | None = None,
    ):
        """Create a new MCP server based on the HTTP with SSE transport.

        Args:
            params: The params that configure the server. This includes the URL of the server,
                the headers to send to the server, the timeout for the HTTP request, and the
                timeout for the SSE connection.
            cache_tools_list: Whether to cache the tools list. If `True`, the tools list will be
                cached and only fetched from the server once. The cache can be invalidated by
                calling `invalidate_tools_cache()`. Set this to `True` if you know the server
                will not change its tools list, as it avoids a round-trip per `list_tools()`.
            name: A readable name for the server. If not provided, one is derived from the URL.
        """
        super().__init__(cache_tools_list)
        self.params = params
        # Fall back to a URL-derived label when no (non-empty) name is given.
        self._name = name if name else f"sse: {self.params['url']}"

    def create_streams(
        self,
    ) -> AbstractAsyncContextManager[
        tuple[
            MemoryObjectReceiveStream[JSONRPCMessage | Exception],
            MemoryObjectSendStream[JSONRPCMessage],
        ]
    ]:
        """Open the SSE transport streams for this server."""
        cfg = self.params
        return sse_client(
            url=cfg["url"],
            headers=cfg.get("headers", None),
            timeout=cfg.get("timeout", 5),
            sse_read_timeout=cfg.get("sse_read_timeout", 300),
        )

    @property
    def name(self) -> str:
        """A readable name for the server."""
        return self._name
| 2,590
|
5c7447b73641d615b024ab62f85417efcbb14c71734ed5e18f27b28ba4e440c0
| 36.438538
| 100
| 0.630934
| 4.350965
| false
| false
| false
| false
|
HKUDS/LightRAG
|
examples/lightrag_lmdeploy_demo.py
| 3,084
| 0
|
MIT License
|
import os
from lightrag import LightRAG, QueryParam
from lightrag.llm.lmdeploy import lmdeploy_model_if_cache
from lightrag.llm.hf import hf_embed
from lightrag.utils import EmbeddingFunc
from transformers import AutoModel, AutoTokenizer
from lightrag.kg.shared_storage import initialize_pipeline_status
import asyncio
import nest_asyncio
# Patch the event loop so asyncio.run() works even inside an already-running
# loop (e.g. notebooks).
nest_asyncio.apply()
WORKING_DIR = "./dickens"
# Ensure the LightRAG working directory exists before any storage is created.
if not os.path.exists(WORKING_DIR):
    os.mkdir(WORKING_DIR)
async def lmdeploy_model_complete(
    prompt=None,
    system_prompt=None,
    history_messages=None,
    keyword_extraction=False,
    **kwargs,
) -> str:
    """LLM completion callback for LightRAG backed by lmdeploy.

    Args:
        prompt: The user prompt to complete.
        system_prompt: Optional system prompt prepended to the conversation.
        history_messages: Prior conversation turns; defaults to an empty list.
        keyword_extraction: Accepted for interface compatibility with LightRAG;
            not used by this backend.
        **kwargs: Extra options forwarded to ``lmdeploy_model_if_cache``. Must
            include ``hashing_kv`` carrying the global config with the model name.

    Returns:
        The generated completion text.
    """
    # Fix for the mutable-default-argument pitfall: the original used
    # `history_messages=[]`, which is shared across all calls.
    if history_messages is None:
        history_messages = []
    model_name = kwargs["hashing_kv"].global_config["llm_model_name"]
    return await lmdeploy_model_if_cache(
        model_name,
        prompt,
        system_prompt=system_prompt,
        history_messages=history_messages,
        ## please specify chat_template if your local path does not follow original HF file name,
        ## or model_name is a pytorch model on huggingface.co,
        ## you can refer to https://github.com/InternLM/lmdeploy/blob/main/lmdeploy/model.py
        ## for a list of chat_template available in lmdeploy.
        chat_template="llama3",
        # model_format ='awq', # if you are using awq quantization model.
        # quant_policy=8, # if you want to use online kv cache, 4=kv int4, 8=kv int8.
        **kwargs,
    )
async def initialize_rag():
    """Build and initialize a LightRAG instance wired to lmdeploy + HF embeddings.

    Returns:
        A fully initialized LightRAG ready for insert/query calls.
    """
    rag = LightRAG(
        working_dir=WORKING_DIR,
        llm_model_func=lmdeploy_model_complete,
        llm_model_name="meta-llama/Llama-3.1-8B-Instruct",  # please use definite path for local model
        # Sentence-transformers MiniLM produces 384-dim embeddings.
        embedding_func=EmbeddingFunc(
            embedding_dim=384,
            max_token_size=5000,
            func=lambda texts: hf_embed(
                texts,
                tokenizer=AutoTokenizer.from_pretrained(
                    "sentence-transformers/all-MiniLM-L6-v2"
                ),
                embed_model=AutoModel.from_pretrained(
                    "sentence-transformers/all-MiniLM-L6-v2"
                ),
            ),
        ),
    )
    # Storage backends and the shared pipeline status must be set up before use.
    await rag.initialize_storages()
    await initialize_pipeline_status()
    return rag
def main():
    """Index ./book.txt with LightRAG and run the same query in every retrieval mode."""
    # Initialize RAG instance
    rag = asyncio.run(initialize_rag())
    # Insert example text
    with open("./book.txt", "r", encoding="utf-8") as f:
        rag.insert(f.read())
    # The original repeated the query stanza four times; a loop over the modes
    # produces byte-identical output without the duplication.
    question = "What are the top themes in this story?"
    for mode in ("naive", "local", "global", "hybrid"):
        print(f"\n{mode.capitalize()} Search:")
        print(rag.query(question, param=QueryParam(mode=mode)))
if __name__ == "__main__":
    main()
| 830
|
7060bfe8585221b8f13ecb15228e23d63e2b7f0675006386af836aa432d8e1f8
| 27.036364
| 102
| 0.613489
| 3.715663
| false
| false
| false
| false
|
trycua/cua
|
libs/agent/agent/providers/omni/tools/bash.py
| 2,241
| 0
|
MIT License
|
"""Bash tool for Omni provider."""
import logging
from typing import Any, Dict
from computer import Computer
from ....core.tools import ToolResult, ToolError
from .base import BaseOmniTool
logger = logging.getLogger(__name__)
class BashTool(BaseOmniTool):
    """Tool for executing bash commands."""

    name = "bash"
    description = "Execute bash commands on the system"

    def __init__(self, computer: Computer):
        """Create the bash tool.

        Args:
            computer: Computer instance
        """
        super().__init__()
        self.computer = computer

    def to_params(self) -> Dict[str, Any]:
        """Describe this tool as an OpenAI-style function schema.

        Returns:
            Dictionary with tool parameters
        """
        command_schema = {
            "type": "string",
            "description": "The bash command to execute",
        }
        parameters = {
            "type": "object",
            "properties": {"command": command_schema},
            "required": ["command"],
        }
        return {
            "type": "function",
            "function": {
                "name": self.name,
                "description": self.description,
                "parameters": parameters,
            },
        }

    async def __call__(self, **kwargs) -> ToolResult:
        """Execute a bash command.

        Args:
            **kwargs: Command parameters

        Returns:
            Tool execution result
        """
        try:
            command = kwargs.get("command", "")
            if not command:
                return ToolResult(error="No command specified")
            # Placeholder: the real implementation will run the command through
            # the computer interface once this tool is fully integrated.
            logger.info(f"Would execute command: {command}")
            return ToolResult(output=f"Command executed (placeholder): {command}")
        except Exception as e:
            logger.error(f"Error in bash tool: {str(e)}")
            return ToolResult(error=f"Error: {str(e)}")
| 447
|
4b360f31416828784b743e168c5f6a6c5bc07795114d25933dd3d68da0f6af28
| 29.283784
| 93
| 0.525658
| 5.013423
| false
| false
| false
| false
|
docling-project/docling
|
docling/cli/models.py
| 3,979
| 0
|
MIT License
|
import logging
import warnings
from enum import Enum
from pathlib import Path
from typing import Annotated, Optional
import typer
from rich.console import Console
from rich.logging import RichHandler
from docling.datamodel.settings import settings
from docling.utils.model_downloader import download_models
# Silence noisy warnings emitted by dependencies, not by docling itself.
warnings.filterwarnings(action="ignore", category=UserWarning, module="pydantic|torch")
warnings.filterwarnings(action="ignore", category=FutureWarning, module="easyocr")
console = Console()
# Separate console so errors go to stderr and don't pollute piped stdout.
err_console = Console(stderr=True)
app = typer.Typer(
    name="Docling models helper",
    no_args_is_help=True,
    add_completion=False,
    pretty_exceptions_enable=False,
)
class _AvailableModels(str, Enum):
    # str-Enum so typer can accept the values directly as CLI arguments.
    LAYOUT = "layout"
    TABLEFORMER = "tableformer"
    CODE_FORMULA = "code_formula"
    PICTURE_CLASSIFIER = "picture_classifier"
    SMOLVLM = "smolvlm"
    GRANITE_VISION = "granite_vision"
    EASYOCR = "easyocr"
# Models downloaded when neither explicit models nor --all is given;
# the VLM checkpoints (smolvlm, granite_vision) are opt-in because of their size.
_default_models = [
    _AvailableModels.LAYOUT,
    _AvailableModels.TABLEFORMER,
    _AvailableModels.CODE_FORMULA,
    _AvailableModels.PICTURE_CLASSIFIER,
    _AvailableModels.EASYOCR,
]
@app.command("download")
def download(
    output_dir: Annotated[
        Path,
        typer.Option(
            ...,
            "-o",
            "--output-dir",
            help="The directory where to download the models.",
        ),
    ] = (settings.cache_dir / "models"),
    force: Annotated[
        bool, typer.Option(..., help="If true, the download will be forced.")
    ] = False,
    models: Annotated[
        Optional[list[_AvailableModels]],
        typer.Argument(
            # Fixed: was an f-string with no placeholders.
            help="Models to download (default behavior: a predefined set of models will be downloaded).",
        ),
    ] = None,
    # `all` shadows the builtin, but the parameter name defines the CLI flag.
    all: Annotated[
        bool,
        typer.Option(
            ...,
            "--all",
            help="If true, all available models will be downloaded (mutually exclusive with passing specific models).",
            show_default=True,
        ),
    ] = False,
    quiet: Annotated[
        bool,
        typer.Option(
            ...,
            "-q",
            "--quiet",
            help="No extra output is generated, the CLI prints only the directory with the cached models.",
        ),
    ] = False,
):
    """Download docling model artifacts into a local cache directory."""
    if models and all:
        raise typer.BadParameter(
            "Cannot simultaneously set 'all' parameter and specify models to download."
        )
    if not quiet:
        # Rich handler renders the bare (markup) message; level/time are hidden
        # for clean CLI output. (Removed an unused FORMAT local.)
        logging.basicConfig(
            level=logging.INFO,
            format="[blue]%(message)s[/blue]",
            datefmt="[%X]",
            handlers=[RichHandler(show_level=False, show_time=False, markup=True)],
        )
    # Explicit models win; otherwise everything with --all, else the default set.
    to_download = models or (list(_AvailableModels) if all else _default_models)
    output_dir = download_models(
        output_dir=output_dir,
        force=force,
        progress=(not quiet),
        with_layout=_AvailableModels.LAYOUT in to_download,
        with_tableformer=_AvailableModels.TABLEFORMER in to_download,
        with_code_formula=_AvailableModels.CODE_FORMULA in to_download,
        with_picture_classifier=_AvailableModels.PICTURE_CLASSIFIER in to_download,
        with_smolvlm=_AvailableModels.SMOLVLM in to_download,
        with_granite_vision=_AvailableModels.GRANITE_VISION in to_download,
        with_easyocr=_AvailableModels.EASYOCR in to_download,
    )
    if quiet:
        # Machine-friendly mode: print only the cache directory.
        typer.echo(output_dir)
    else:
        typer.secho(f"\nModels downloaded into: {output_dir}.", fg="green")
        console.print(
            "\n",
            "Docling can now be configured for running offline using the local artifacts.\n\n",
            "Using the CLI:",
            f"`docling --artifacts-path={output_dir} FILE`",
            "\n",
            "Using Python: see the documentation at <https://docling-project.github.io/docling/usage>.",
        )
# Expose the underlying click command so other entry points can mount it.
click_app = typer.main.get_command(app)
if __name__ == "__main__":
    app()
| 1,044
|
4bf25380ce323f6d78a58d8015a32d760c07c01b890bbbc5141d4a998e113809
| 29.374046
| 119
| 0.624277
| 3.811303
| false
| false
| false
| false
|
pydantic/pydantic-ai
|
pydantic_graph/pydantic_graph/nodes.py
| 7,441
| 0
|
MIT License
|
from __future__ import annotations as _annotations
import copy
from abc import ABC, abstractmethod
from dataclasses import dataclass, is_dataclass
from functools import cache
from typing import Any, ClassVar, Generic, get_type_hints
from uuid import uuid4
from typing_extensions import Never, Self, TypeVar, get_origin
from . import _utils, exceptions
__all__ = 'GraphRunContext', 'BaseNode', 'End', 'Edge', 'NodeDef', 'DepsT', 'StateT', 'RunEndT'
StateT = TypeVar('StateT', default=None)
"""Type variable for the state in a graph."""
RunEndT = TypeVar('RunEndT', covariant=True, default=None)
"""Covariant type variable for the return type of a graph [`run`][pydantic_graph.graph.Graph.run]."""
NodeRunEndT = TypeVar('NodeRunEndT', covariant=True, default=Never)
"""Covariant type variable for the return type of a node [`run`][pydantic_graph.nodes.BaseNode.run]."""
DepsT = TypeVar('DepsT', default=None, contravariant=True)
"""Type variable for the dependencies of a graph and node."""
@dataclass
class GraphRunContext(Generic[StateT, DepsT]):
    """Context for a graph.

    Bundles the graph's mutable state with the user-supplied dependencies so
    both are available to every node's `run` method.
    """
    # TODO: Can we get rid of this struct and just pass both these things around..?
    state: StateT
    """The state of the graph."""
    deps: DepsT
    """Dependencies for the graph."""
class BaseNode(ABC, Generic[StateT, DepsT, NodeRunEndT]):
    """Base class for a node."""
    docstring_notes: ClassVar[bool] = False
    """Set to `True` to generate mermaid diagram notes from the class's docstring.
    While this can add valuable information to the diagram, it can make diagrams harder to view, hence
    it is disabled by default. You can also customise notes overriding the
    [`get_note`][pydantic_graph.nodes.BaseNode.get_note] method.
    """
    @abstractmethod
    async def run(self, ctx: GraphRunContext[StateT, DepsT]) -> BaseNode[StateT, DepsT, Any] | End[NodeRunEndT]:
        """Run the node.
        This is an abstract method that must be implemented by subclasses.
        !!! note "Return types used at runtime"
            The return type of this method are read by `pydantic_graph` at runtime and used to define which
            nodes can be called next in the graph. This is displayed in [mermaid diagrams](mermaid.md)
            and enforced when running the graph.
        Args:
            ctx: The graph context.
        Returns:
            The next node to run or [`End`][pydantic_graph.nodes.End] to signal the end of the graph.
        """
        ...
    def get_snapshot_id(self) -> str:
        """Return this node's snapshot ID, generating and caching it on first access."""
        if snapshot_id := getattr(self, '__snapshot_id', None):
            return snapshot_id
        else:
            # Written via __dict__ so assignment bypasses any custom __setattr__
            # (e.g. on frozen dataclass nodes); the string key also avoids name mangling.
            self.__dict__['__snapshot_id'] = snapshot_id = generate_snapshot_id(self.get_node_id())
            return snapshot_id
    def set_snapshot_id(self, snapshot_id: str) -> None:
        """Override the cached snapshot ID (used when restoring persisted runs)."""
        self.__dict__['__snapshot_id'] = snapshot_id
    @classmethod
    @cache
    def get_node_id(cls) -> str:
        """Get the ID of the node."""
        # The class name is the node's identity; cached since it never changes.
        return cls.__name__
    @classmethod
    def get_note(cls) -> str | None:
        """Get a note about the node to render on mermaid charts.
        By default, this returns a note only if [`docstring_notes`][pydantic_graph.nodes.BaseNode.docstring_notes]
        is `True`. You can override this method to customise the node notes.
        """
        if not cls.docstring_notes:
            return None
        docstring = cls.__doc__
        # dataclasses get an automatic docstring which is just their signature, we don't want that
        if docstring and is_dataclass(cls) and docstring.startswith(f'{cls.__name__}('):
            docstring = None
        if docstring:
            # remove indentation from docstring
            import inspect
            docstring = inspect.cleandoc(docstring)
        return docstring
    @classmethod
    def get_node_def(cls, local_ns: dict[str, Any] | None) -> NodeDef[StateT, DepsT, NodeRunEndT]:
        """Get the node definition.

        Inspects the return annotation of `run` to discover which nodes (or `End`)
        this node may transition to, together with any `Edge` annotations.

        Args:
            local_ns: Optional local namespace used to resolve forward references
                in the `run` return annotation.

        Raises:
            GraphSetupError: If `run` has no return annotation or the annotation
                contains a type that is neither a `BaseNode` subclass nor `End`.
        """
        type_hints = get_type_hints(cls.run, localns=local_ns, include_extras=True)
        try:
            return_hint = type_hints['return']
        except KeyError as e:
            raise exceptions.GraphSetupError(f'Node {cls} is missing a return type hint on its `run` method') from e
        next_node_edges: dict[str, Edge] = {}
        end_edge: Edge | None = None
        returns_base_node: bool = False
        # Each member of the union return type is one possible transition.
        for return_type in _utils.get_union_args(return_hint):
            return_type, annotations = _utils.unpack_annotated(return_type)
            # First Edge found in Annotated metadata labels the transition; default is unlabeled.
            edge = next((a for a in annotations if isinstance(a, Edge)), Edge(None))
            return_type_origin = get_origin(return_type) or return_type
            if return_type_origin is End:
                end_edge = edge
            elif return_type_origin is BaseNode:
                # TODO: Should we disallow this?
                returns_base_node = True
            elif issubclass(return_type_origin, BaseNode):
                next_node_edges[return_type.get_node_id()] = edge
            else:
                raise exceptions.GraphSetupError(f'Invalid return type: {return_type}')
        return NodeDef(
            cls,
            cls.get_node_id(),
            cls.get_note(),
            next_node_edges,
            end_edge,
            returns_base_node,
        )
    def deep_copy(self) -> Self:
        """Returns a deep copy of the node."""
        return copy.deepcopy(self)
@dataclass
class End(Generic[RunEndT]):
    """Type to return from a node to signal the end of the graph."""

    data: RunEndT
    """Data to return from the graph."""

    def deep_copy_data(self) -> End[RunEndT]:
        """Return a deep copy of this `End`, preserving its snapshot ID."""
        if self.data is None:
            # Nothing to copy — reuse this instance.
            return self
        copied = End(copy.deepcopy(self.data))
        copied.set_snapshot_id(self.get_snapshot_id())
        return copied

    def get_snapshot_id(self) -> str:
        """Return the snapshot ID, generating and caching it on first access."""
        snapshot_id = getattr(self, '__snapshot_id', None)
        if not snapshot_id:
            snapshot_id = generate_snapshot_id('end')
            self.__dict__['__snapshot_id'] = snapshot_id
        return snapshot_id

    def set_snapshot_id(self, set_id: str) -> None:
        """Override the cached snapshot ID."""
        self.__dict__['__snapshot_id'] = set_id
def generate_snapshot_id(node_id: str) -> str:
    """Build a unique snapshot ID of the form ``<node_id>:<uuid-hex>``.

    Kept as a module-level function so tests can monkeypatch it.
    """
    unique_suffix = uuid4().hex
    return ':'.join((node_id, unique_suffix))
@dataclass
class Edge:
    """Annotation to apply a label to an edge in a graph.

    Used inside `Annotated[...]` on a node's `run` return type; picked up by
    `BaseNode.get_node_def`.
    """
    label: str | None
    """Label for the edge."""
@dataclass
class NodeDef(Generic[StateT, DepsT, NodeRunEndT]):
    """Definition of a node.
    This is a primarily internal representation of a node; in general, it shouldn't be necessary to use it directly.
    Used by [`Graph`][pydantic_graph.graph.Graph] to store information about a node, and when generating
    mermaid graphs.
    """
    node: type[BaseNode[StateT, DepsT, NodeRunEndT]]
    """The node definition itself."""
    node_id: str
    """ID of the node."""
    note: str | None
    """Note about the node to render on mermaid charts."""
    next_node_edges: dict[str, Edge]
    """IDs of the nodes that can be called next."""
    end_edge: Edge | None
    """If node definition returns an `End` this is an Edge, indicating the node can end the run."""
    returns_base_node: bool
    """The node definition returns a `BaseNode`, hence any node in the next can be called next."""
| 1,940
|
04547dc2906c3c2e41966a5628c3be271f8d2add12e9cf50ce7a62fc2ac5111d
| 35.121359
| 116
| 0.628276
| 3.835567
| false
| false
| false
| false
|
microsoft/TRELLIS
|
app.py
| 15,010
| 0
|
MIT License
|
import gradio as gr
from gradio_litmodel3d import LitModel3D
import os
import shutil
from typing import *
import torch
import numpy as np
import imageio
from easydict import EasyDict as edict
from PIL import Image
from trellis.pipelines import TrellisImageTo3DPipeline
from trellis.representations import Gaussian, MeshExtractResult
from trellis.utils import render_utils, postprocessing_utils
# Largest value accepted by the seed slider / RNG (fits in a signed int32).
MAX_SEED = np.iinfo(np.int32).max
# Per-session scratch space lives next to this script, under ./tmp.
TMP_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tmp')
os.makedirs(TMP_DIR, exist_ok=True)
def start_session(req: gr.Request):
    """Create a per-session scratch directory keyed by the Gradio session hash."""
    user_dir = os.path.join(TMP_DIR, str(req.session_hash))
    os.makedirs(user_dir, exist_ok=True)
def end_session(req: gr.Request):
    """Remove the per-session scratch directory when a Gradio session ends."""
    user_dir = os.path.join(TMP_DIR, str(req.session_hash))
    # ignore_errors: the directory may already be gone (e.g. removed by
    # Gradio's delete_cache) — session teardown should never raise.
    shutil.rmtree(user_dir, ignore_errors=True)
def preprocess_image(image: Image.Image) -> Image.Image:
    """
    Preprocess the input image.
    Args:
        image (Image.Image): The input image.
    Returns:
        Image.Image: The preprocessed image.
    """
    # Delegates to the module-level TRELLIS pipeline (created at startup,
    # outside this view) — presumably background removal / cropping.
    processed_image = pipeline.preprocess_image(image)
    return processed_image
def preprocess_images(images: List[Tuple[Image.Image, str]]) -> List[Image.Image]:
    """
    Preprocess a list of input images.

    Args:
        images (List[Tuple[Image.Image, str]]): The input images as
            (image, caption) gallery items; captions are ignored.

    Returns:
        List[Image.Image]: The preprocessed images.
    """
    # Drop captions and preprocess in a single pass.
    return [pipeline.preprocess_image(item[0]) for item in images]
def pack_state(gs: Gaussian, mesh: MeshExtractResult) -> dict:
    """Serialize the gaussian + mesh outputs into a CPU-only dict suitable for Gradio state."""
    gaussian_payload = dict(gs.init_params)
    # Move every learned tensor off the GPU so the state can be held by Gradio.
    for field in ('_xyz', '_features_dc', '_scaling', '_rotation', '_opacity'):
        gaussian_payload[field] = getattr(gs, field).cpu().numpy()
    return {
        'gaussian': gaussian_payload,
        'mesh': {
            'vertices': mesh.vertices.cpu().numpy(),
            'faces': mesh.faces.cpu().numpy(),
        },
    }
def unpack_state(state: dict) -> Tuple[Gaussian, edict, str]:
    """Rebuild the Gaussian and mesh objects (on CUDA) from a dict produced by `pack_state`."""
    gs = Gaussian(
        aabb=state['gaussian']['aabb'],
        sh_degree=state['gaussian']['sh_degree'],
        mininum_kernel_size=state['gaussian']['mininum_kernel_size'],
        scaling_bias=state['gaussian']['scaling_bias'],
        opacity_bias=state['gaussian']['opacity_bias'],
        scaling_activation=state['gaussian']['scaling_activation'],
    )
    # Restore the learned tensors onto the GPU.
    gs._xyz = torch.tensor(state['gaussian']['_xyz'], device='cuda')
    gs._features_dc = torch.tensor(state['gaussian']['_features_dc'], device='cuda')
    gs._scaling = torch.tensor(state['gaussian']['_scaling'], device='cuda')
    gs._rotation = torch.tensor(state['gaussian']['_rotation'], device='cuda')
    gs._opacity = torch.tensor(state['gaussian']['_opacity'], device='cuda')
    mesh = edict(
        vertices=torch.tensor(state['mesh']['vertices'], device='cuda'),
        faces=torch.tensor(state['mesh']['faces'], device='cuda'),
    )
    # NOTE(review): annotation says Tuple[Gaussian, edict, str] but only two
    # values are returned — the str element appears stale; confirm with callers.
    return gs, mesh
def get_seed(randomize_seed: bool, seed: int) -> int:
    """
    Get the random seed.

    Returns a fresh random seed when `randomize_seed` is set, otherwise the
    caller-provided `seed` unchanged.
    """
    if randomize_seed:
        return np.random.randint(0, MAX_SEED)
    return seed
def image_to_3d(
    image: Image.Image,
    multiimages: List[Tuple[Image.Image, str]],
    is_multiimage: bool,
    seed: int,
    ss_guidance_strength: float,
    ss_sampling_steps: int,
    slat_guidance_strength: float,
    slat_sampling_steps: int,
    multiimage_algo: Literal["multidiffusion", "stochastic"],
    req: gr.Request,
) -> Tuple[dict, str]:
    """
    Convert an image to a 3D model.
    Args:
        image (Image.Image): The input image.
        multiimages (List[Tuple[Image.Image, str]]): The input images in multi-image mode.
        is_multiimage (bool): Whether is in multi-image mode.
        seed (int): The random seed.
        ss_guidance_strength (float): The guidance strength for sparse structure generation.
        ss_sampling_steps (int): The number of sampling steps for sparse structure generation.
        slat_guidance_strength (float): The guidance strength for structured latent generation.
        slat_sampling_steps (int): The number of sampling steps for structured latent generation.
        multiimage_algo (Literal["multidiffusion", "stochastic"]): The algorithm for multi-image generation.
        req (gr.Request): Gradio request, used to locate the per-session output dir.
    Returns:
        dict: The information of the generated 3D model.
        str: The path to the video of the 3D model.
    """
    user_dir = os.path.join(TMP_DIR, str(req.session_hash))
    if not is_multiimage:
        # Single-image path: inputs were already preprocessed upstream.
        outputs = pipeline.run(
            image,
            seed=seed,
            formats=["gaussian", "mesh"],
            preprocess_image=False,
            sparse_structure_sampler_params={
                "steps": ss_sampling_steps,
                "cfg_strength": ss_guidance_strength,
            },
            slat_sampler_params={
                "steps": slat_sampling_steps,
                "cfg_strength": slat_guidance_strength,
            },
        )
    else:
        # Multi-image path: strip gallery captions and run the multi-view variant.
        outputs = pipeline.run_multi_image(
            [image[0] for image in multiimages],
            seed=seed,
            formats=["gaussian", "mesh"],
            preprocess_image=False,
            sparse_structure_sampler_params={
                "steps": ss_sampling_steps,
                "cfg_strength": ss_guidance_strength,
            },
            slat_sampler_params={
                "steps": slat_sampling_steps,
                "cfg_strength": slat_guidance_strength,
            },
            mode=multiimage_algo,
        )
    # Render the gaussian color pass and the mesh normal pass, tiled side by side.
    video = render_utils.render_video(outputs['gaussian'][0], num_frames=120)['color']
    video_geo = render_utils.render_video(outputs['mesh'][0], num_frames=120)['normal']
    video = [np.concatenate([video[i], video_geo[i]], axis=1) for i in range(len(video))]
    video_path = os.path.join(user_dir, 'sample.mp4')
    imageio.mimsave(video_path, video, fps=15)
    # Serialize results into a CPU-only dict so Gradio can hold them as state.
    state = pack_state(outputs['gaussian'][0], outputs['mesh'][0])
    # Free GPU memory between requests.
    torch.cuda.empty_cache()
    return state, video_path
def extract_glb(
    state: dict,
    mesh_simplify: float,
    texture_size: int,
    req: gr.Request,
) -> Tuple[str, str]:
    """
    Extract a GLB file from the 3D model.
    Args:
        state (dict): The state of the generated 3D model.
        mesh_simplify (float): The mesh simplification factor.
        texture_size (int): The texture resolution.
        req (gr.Request): Gradio request, used to locate the per-session output dir.
    Returns:
        Tuple[str, str]: The GLB path, twice — presumably one for the model
            viewer and one for the download component; confirm against the UI wiring.
    """
    user_dir = os.path.join(TMP_DIR, str(req.session_hash))
    gs, mesh = unpack_state(state)
    glb = postprocessing_utils.to_glb(gs, mesh, simplify=mesh_simplify, texture_size=texture_size, verbose=False)
    glb_path = os.path.join(user_dir, 'sample.glb')
    glb.export(glb_path)
    # Free GPU memory between requests.
    torch.cuda.empty_cache()
    return glb_path, glb_path
def extract_gaussian(state: dict, req: gr.Request) -> Tuple[str, str]:
    """
    Extract a Gaussian file from the 3D model.
    Args:
        state (dict): The state of the generated 3D model.
        req (gr.Request): Gradio request, used to locate the per-session output dir.
    Returns:
        Tuple[str, str]: The PLY path, twice — presumably one for the viewer
            and one for the download component; confirm against the UI wiring.
    """
    user_dir = os.path.join(TMP_DIR, str(req.session_hash))
    gs, _ = unpack_state(state)
    gaussian_path = os.path.join(user_dir, 'sample.ply')
    gs.save_ply(gaussian_path)
    # Free GPU memory between requests.
    torch.cuda.empty_cache()
    return gaussian_path, gaussian_path
def prepare_multi_example() -> List[Image.Image]:
    """Build concatenated preview strips from the bundled multi-view example assets.

    Each case `<name>` is expected to provide `<name>_1.png` .. `<name>_3.png`;
    the three views are resized to height 512 and concatenated horizontally.
    """
    # Case names are the filename prefixes before the first underscore.
    multi_case = list({fname.split('_')[0] for fname in os.listdir("assets/example_multi_image")})
    previews = []
    for case in multi_case:
        views = []
        for i in range(1, 4):
            img = Image.open(f'assets/example_multi_image/{case}_{i}.png')
            W, H = img.size
            # Keep aspect ratio while normalizing every view to height 512.
            img = img.resize((int(W / H * 512), 512))
            views.append(np.array(img))
        previews.append(Image.fromarray(np.concatenate(views, axis=1)))
    return previews
def split_image(image: Image.Image) -> List[Image.Image]:
    """
    Split a horizontally concatenated multi-view image into individual views.

    Views are located by scanning the alpha channel: a column "has content"
    when at least one of its pixels is opaque, and each contiguous run of
    such columns becomes one view.

    Fix over the previous version: the old transition scan
    (``~alpha[:-1] & alpha[1:]``) could only detect a run whose boundary fell
    strictly inside the image, so a view touching the first or last column was
    silently dropped. Padding with a transparent sentinel column on both sides
    makes edge-touching runs produce transitions too.

    Args:
        image: RGBA input (assumes an alpha channel is present — TODO confirm;
            the UI's ``gr.Image(image_mode="RGBA")`` suggests it always is).

    Returns:
        List of preprocessed single-view images.
    """
    pixels = np.array(image)
    has_content = np.any(pixels[..., 3] > 0, axis=0)  # per-column opacity flag
    # Transparent sentinel on both sides so edge-touching runs still transition.
    padded = np.concatenate([[False], has_content, [False]])
    run_starts = np.where(~padded[:-1] & padded[1:])[0]    # first opaque column
    run_ends = np.where(padded[:-1] & ~padded[1:])[0] - 1  # last opaque column
    views = []
    for start, end in zip(run_starts, run_ends):
        # Keep the original one-column transparent margin where available.
        left = max(start - 1, 0)
        views.append(Image.fromarray(pixels[:, left:end + 1]))
    return [preprocess_image(view) for view in views]
# ---------------------------------------------------------------------------
# Gradio UI: wires the preprocessing / generation / extraction callbacks above
# into an interactive demo. Left column = inputs and settings, right column =
# outputs. Event handlers are attached after the layout is declared.
# ---------------------------------------------------------------------------
with gr.Blocks(delete_cache=(600, 600)) as demo:
    gr.Markdown("""
    ## Image to 3D Asset with [TRELLIS](https://trellis3d.github.io/)
    * Upload an image and click "Generate" to create a 3D asset. If the image has alpha channel, it be used as the mask. Otherwise, we use `rembg` to remove the background.
    * If you find the generated 3D asset satisfactory, click "Extract GLB" to extract the GLB file and download it.
    """)

    with gr.Row():
        with gr.Column():
            # Input: either a single image or a gallery of views of one object.
            with gr.Tabs() as input_tabs:
                with gr.Tab(label="Single Image", id=0) as single_image_input_tab:
                    image_prompt = gr.Image(label="Image Prompt", format="png", image_mode="RGBA", type="pil", height=300)
                with gr.Tab(label="Multiple Images", id=1) as multiimage_input_tab:
                    multiimage_prompt = gr.Gallery(label="Image Prompt", format="png", type="pil", height=300, columns=3)
                    gr.Markdown("""
                    Input different views of the object in separate images.
                    *NOTE: this is an experimental algorithm without training a specialized model. It may not produce the best results for all images, especially those having different poses or inconsistent details.*
                    """)

            # Sampler settings for the two diffusion stages.
            with gr.Accordion(label="Generation Settings", open=False):
                seed = gr.Slider(0, MAX_SEED, label="Seed", value=0, step=1)
                randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
                gr.Markdown("Stage 1: Sparse Structure Generation")
                with gr.Row():
                    ss_guidance_strength = gr.Slider(0.0, 10.0, label="Guidance Strength", value=7.5, step=0.1)
                    ss_sampling_steps = gr.Slider(1, 50, label="Sampling Steps", value=12, step=1)
                gr.Markdown("Stage 2: Structured Latent Generation")
                with gr.Row():
                    slat_guidance_strength = gr.Slider(0.0, 10.0, label="Guidance Strength", value=3.0, step=0.1)
                    slat_sampling_steps = gr.Slider(1, 50, label="Sampling Steps", value=12, step=1)
                multiimage_algo = gr.Radio(["stochastic", "multidiffusion"], label="Multi-image Algorithm", value="stochastic")

            generate_btn = gr.Button("Generate")

            with gr.Accordion(label="GLB Extraction Settings", open=False):
                mesh_simplify = gr.Slider(0.9, 0.98, label="Simplify", value=0.95, step=0.01)
                texture_size = gr.Slider(512, 2048, label="Texture Size", value=1024, step=512)

            with gr.Row():
                extract_glb_btn = gr.Button("Extract GLB", interactive=False)
                extract_gs_btn = gr.Button("Extract Gaussian", interactive=False)
            gr.Markdown("""
            *NOTE: Gaussian file can be very large (~50MB), it will take a while to display and download.*
            """)

        with gr.Column():
            video_output = gr.Video(label="Generated 3D Asset", autoplay=True, loop=True, height=300)
            model_output = LitModel3D(label="Extracted GLB/Gaussian", exposure=10.0, height=300)

            with gr.Row():
                download_glb = gr.DownloadButton(label="Download GLB", interactive=False)
                download_gs = gr.DownloadButton(label="Download Gaussian", interactive=False)

    # Hidden state: input-mode flag and the packed model state from image_to_3d.
    is_multiimage = gr.State(False)
    output_buf = gr.State()

    # Example images at the bottom of the page
    with gr.Row() as single_image_example:
        examples = gr.Examples(
            examples=[
                f'assets/example_image/{image}'
                for image in os.listdir("assets/example_image")
            ],
            inputs=[image_prompt],
            fn=preprocess_image,
            outputs=[image_prompt],
            run_on_click=True,
            examples_per_page=64,
        )
    with gr.Row(visible=False) as multiimage_example:
        examples_multi = gr.Examples(
            examples=prepare_multi_example(),
            inputs=[image_prompt],
            fn=split_image,
            outputs=[multiimage_prompt],
            run_on_click=True,
            examples_per_page=8,
        )

    # Handlers
    demo.load(start_session)
    demo.unload(end_session)

    # Tab switches toggle the mode flag and which example row is visible.
    single_image_input_tab.select(
        lambda: tuple([False, gr.Row.update(visible=True), gr.Row.update(visible=False)]),
        outputs=[is_multiimage, single_image_example, multiimage_example]
    )
    multiimage_input_tab.select(
        lambda: tuple([True, gr.Row.update(visible=False), gr.Row.update(visible=True)]),
        outputs=[is_multiimage, single_image_example, multiimage_example]
    )

    image_prompt.upload(
        preprocess_image,
        inputs=[image_prompt],
        outputs=[image_prompt],
    )
    multiimage_prompt.upload(
        preprocess_images,
        inputs=[multiimage_prompt],
        outputs=[multiimage_prompt],
    )

    # Generate chain: resolve seed -> run pipeline -> enable extraction buttons.
    generate_btn.click(
        get_seed,
        inputs=[randomize_seed, seed],
        outputs=[seed],
    ).then(
        image_to_3d,
        inputs=[image_prompt, multiimage_prompt, is_multiimage, seed, ss_guidance_strength, ss_sampling_steps, slat_guidance_strength, slat_sampling_steps, multiimage_algo],
        outputs=[output_buf, video_output],
    ).then(
        lambda: tuple([gr.Button(interactive=True), gr.Button(interactive=True)]),
        outputs=[extract_glb_btn, extract_gs_btn],
    )

    video_output.clear(
        lambda: tuple([gr.Button(interactive=False), gr.Button(interactive=False)]),
        outputs=[extract_glb_btn, extract_gs_btn],
    )

    extract_glb_btn.click(
        extract_glb,
        inputs=[output_buf, mesh_simplify, texture_size],
        outputs=[model_output, download_glb],
    ).then(
        lambda: gr.Button(interactive=True),
        outputs=[download_glb],
    )

    extract_gs_btn.click(
        extract_gaussian,
        inputs=[output_buf],
        outputs=[model_output, download_gs],
    ).then(
        lambda: gr.Button(interactive=True),
        outputs=[download_gs],
    )

    model_output.clear(
        lambda: gr.Button(interactive=False),
        outputs=[download_glb],
    )


# Launch the Gradio app
if __name__ == "__main__":
    pipeline = TrellisImageTo3DPipeline.from_pretrained("JeffreyXiang/TRELLIS-image-large")
    pipeline.cuda()
    demo.launch()
| 4,028
|
8cc8961079862042896c7772129e44dd212fc2a654606b810238afd334ee958d
| 36.245658
| 220
| 0.60473
| 3.726415
| false
| false
| false
| false
|
HKUDS/LightRAG
|
lightrag/kg/qdrant_impl.py
| 12,731
| 0
|
MIT License
|
import asyncio
import os
from typing import Any, final, List
from dataclasses import dataclass
import numpy as np
import hashlib
import uuid
from ..utils import logger
from ..base import BaseVectorStorage
import configparser
import pipmaster as pm
# Install the qdrant client on first use so the backend is self-bootstrapping.
if not pm.is_installed("qdrant-client"):
    pm.install("qdrant-client")

from qdrant_client import QdrantClient, models  # type: ignore

# Optional config.ini provides connection defaults; env vars override it later.
config = configparser.ConfigParser()
config.read("config.ini", "utf-8")
def compute_mdhash_id_for_qdrant(
    content: str, prefix: str = "", style: str = "simple"
) -> str:
    """
    Derive a deterministic, Qdrant-compatible UUID from *content*.

    The UUID is built from the first 16 bytes of the SHA-256 digest of
    ``prefix + content``, so the same input always maps to the same point id.

    :param content: The content used to generate the UUID (must be non-empty).
    :param prefix: Optional namespace prefix hashed together with the content.
    :param style: Output format: "simple", "hyphenated", or "urn".
    :return: A UUID string in the requested format.
    :raises ValueError: If *content* is empty or *style* is unknown.
    """
    if not content:
        raise ValueError("Content must not be empty.")

    digest = hashlib.sha256((prefix + content).encode("utf-8")).digest()
    derived = uuid.UUID(bytes=digest[:16], version=4)

    # Dispatch table instead of an if/elif chain; unknown styles fall through
    # to the same ValueError the caller expects.
    formatters = {
        "simple": lambda u: u.hex,
        "hyphenated": str,
        "urn": lambda u: f"urn:uuid:{u}",
    }
    if style not in formatters:
        raise ValueError("Invalid style. Choose from 'simple', 'hyphenated', or 'urn'.")
    return formatters[style](derived)
@final
@dataclass
class QdrantVectorDBStorage(BaseVectorStorage):
    """Vector storage backend backed by a Qdrant collection.

    One collection is used per ``namespace``. Point ids are derived
    deterministically from record ids via ``compute_mdhash_id_for_qdrant``,
    so the same record always maps to the same Qdrant point.
    """

    @staticmethod
    def create_collection_if_not_exist(
        client: QdrantClient, collection_name: str, **kwargs
    ):
        # Idempotent: only creates the collection on first use.
        if client.collection_exists(collection_name):
            return
        client.create_collection(collection_name, **kwargs)

    def __post_init__(self):
        # Cosine-score cutoff below which query hits are discarded; required.
        kwargs = self.global_config.get("vector_db_storage_cls_kwargs", {})
        cosine_threshold = kwargs.get("cosine_better_than_threshold")
        if cosine_threshold is None:
            raise ValueError(
                "cosine_better_than_threshold must be specified in vector_db_storage_cls_kwargs"
            )
        self.cosine_better_than_threshold = cosine_threshold

        # Environment variables take precedence over config.ini values.
        self._client = QdrantClient(
            url=os.environ.get(
                "QDRANT_URL", config.get("qdrant", "uri", fallback=None)
            ),
            api_key=os.environ.get(
                "QDRANT_API_KEY", config.get("qdrant", "apikey", fallback=None)
            ),
        )
        self._max_batch_size = self.global_config["embedding_batch_num"]
        QdrantVectorDBStorage.create_collection_if_not_exist(
            self._client,
            self.namespace,
            vectors_config=models.VectorParams(
                size=self.embedding_func.embedding_dim, distance=models.Distance.COSINE
            ),
        )

    async def upsert(self, data: dict[str, dict[str, Any]]) -> None:
        """Embed and upsert records; payloads keep only ``meta_fields`` + id.

        Embeddings are computed in batches of ``_max_batch_size``, concurrently.
        """
        logger.info(f"Inserting {len(data)} to {self.namespace}")
        if not data:
            return

        list_data = [
            {
                "id": k,
                **{k1: v1 for k1, v1 in v.items() if k1 in self.meta_fields},
            }
            for k, v in data.items()
        ]
        contents = [v["content"] for v in data.values()]
        batches = [
            contents[i : i + self._max_batch_size]
            for i in range(0, len(contents), self._max_batch_size)
        ]
        embedding_tasks = [self.embedding_func(batch) for batch in batches]
        embeddings_list = await asyncio.gather(*embedding_tasks)
        embeddings = np.concatenate(embeddings_list)

        list_points = []
        for i, d in enumerate(list_data):
            list_points.append(
                models.PointStruct(
                    id=compute_mdhash_id_for_qdrant(d["id"]),
                    vector=embeddings[i],
                    payload=d,
                )
            )

        results = self._client.upsert(
            collection_name=self.namespace, points=list_points, wait=True
        )
        return results

    async def query(
        self, query: str, top_k: int, ids: list[str] | None = None
    ) -> list[dict[str, Any]]:
        """Similarity search; returns payloads with a ``distance`` score.

        NOTE(review): the ``ids`` parameter is accepted but not used for
        filtering here — confirm whether that is intentional.
        """
        embedding = await self.embedding_func([query])
        results = self._client.search(
            collection_name=self.namespace,
            query_vector=embedding[0],
            limit=top_k,
            with_payload=True,
            score_threshold=self.cosine_better_than_threshold,
        )
        logger.debug(f"query result: {results}")
        return [{**dp.payload, "distance": dp.score} for dp in results]

    async def index_done_callback(self) -> None:
        # Qdrant handles persistence automatically
        pass

    async def delete(self, ids: List[str]) -> None:
        """Delete vectors with specified IDs

        Args:
            ids: List of vector IDs to be deleted
        """
        try:
            # Convert regular ids to Qdrant compatible ids
            qdrant_ids = [compute_mdhash_id_for_qdrant(id) for id in ids]
            # Delete points from the collection
            self._client.delete(
                collection_name=self.namespace,
                points_selector=models.PointIdsList(
                    points=qdrant_ids,
                ),
                wait=True,
            )
            logger.debug(
                f"Successfully deleted {len(ids)} vectors from {self.namespace}"
            )
        except Exception as e:
            # Best-effort: deletion failures are logged, not raised.
            logger.error(f"Error while deleting vectors from {self.namespace}: {e}")

    async def delete_entity(self, entity_name: str) -> None:
        """Delete an entity by name

        Args:
            entity_name: Name of the entity to delete
        """
        try:
            # Generate the entity ID (same "ent-" prefix used when inserting)
            entity_id = compute_mdhash_id_for_qdrant(entity_name, prefix="ent-")
            logger.debug(
                f"Attempting to delete entity {entity_name} with ID {entity_id}"
            )
            # Delete the entity point from the collection
            self._client.delete(
                collection_name=self.namespace,
                points_selector=models.PointIdsList(
                    points=[entity_id],
                ),
                wait=True,
            )
            logger.debug(f"Successfully deleted entity {entity_name}")
        except Exception as e:
            logger.error(f"Error deleting entity {entity_name}: {e}")

    async def delete_entity_relation(self, entity_name: str) -> None:
        """Delete all relations associated with an entity

        Args:
            entity_name: Name of the entity whose relations should be deleted
        """
        try:
            # Find relations where the entity is either source or target
            results = self._client.scroll(
                collection_name=self.namespace,
                scroll_filter=models.Filter(
                    should=[
                        models.FieldCondition(
                            key="src_id", match=models.MatchValue(value=entity_name)
                        ),
                        models.FieldCondition(
                            key="tgt_id", match=models.MatchValue(value=entity_name)
                        ),
                    ]
                ),
                with_payload=True,
                limit=1000,  # Adjust as needed for your use case
            )

            # Extract points that need to be deleted
            # NOTE: scroll() returns (points, next_offset); only the first
            # page (up to `limit`) is processed here.
            relation_points = results[0]
            ids_to_delete = [point.id for point in relation_points]

            if ids_to_delete:
                # Delete the relations
                self._client.delete(
                    collection_name=self.namespace,
                    points_selector=models.PointIdsList(
                        points=ids_to_delete,
                    ),
                    wait=True,
                )
                logger.debug(
                    f"Deleted {len(ids_to_delete)} relations for {entity_name}"
                )
            else:
                logger.debug(f"No relations found for entity {entity_name}")
        except Exception as e:
            logger.error(f"Error deleting relations for {entity_name}: {e}")

    async def search_by_prefix(self, prefix: str) -> list[dict[str, Any]]:
        """Search for records with IDs starting with a specific prefix.

        Args:
            prefix: The prefix to search for in record IDs

        Returns:
            List of records with matching ID prefixes
        """
        try:
            # Use scroll method to find records with IDs starting with the prefix
            results = self._client.scroll(
                collection_name=self.namespace,
                scroll_filter=models.Filter(
                    must=[
                        models.FieldCondition(
                            key="id", match=models.MatchText(text=prefix, prefix=True)
                        )
                    ]
                ),
                with_payload=True,
                with_vectors=False,
                limit=1000,  # Adjust as needed for your use case
            )

            # Extract matching points (first page only, up to `limit`)
            matching_records = results[0]

            # Format the results to match expected return format
            formatted_results = [{**point.payload} for point in matching_records]

            logger.debug(
                f"Found {len(formatted_results)} records with prefix '{prefix}'"
            )
            return formatted_results
        except Exception as e:
            logger.error(f"Error searching for prefix '{prefix}': {e}")
            return []

    async def get_by_id(self, id: str) -> dict[str, Any] | None:
        """Get vector data by its ID

        Args:
            id: The unique identifier of the vector

        Returns:
            The vector data if found, or None if not found
        """
        try:
            # Convert to Qdrant compatible ID
            qdrant_id = compute_mdhash_id_for_qdrant(id)
            # Retrieve the point by ID
            result = self._client.retrieve(
                collection_name=self.namespace,
                ids=[qdrant_id],
                with_payload=True,
            )

            if not result:
                return None

            return result[0].payload
        except Exception as e:
            logger.error(f"Error retrieving vector data for ID {id}: {e}")
            return None

    async def get_by_ids(self, ids: list[str]) -> list[dict[str, Any]]:
        """Get multiple vector data by their IDs

        Args:
            ids: List of unique identifiers

        Returns:
            List of vector data objects that were found
        """
        if not ids:
            return []

        try:
            # Convert to Qdrant compatible IDs
            qdrant_ids = [compute_mdhash_id_for_qdrant(id) for id in ids]
            # Retrieve the points by IDs
            results = self._client.retrieve(
                collection_name=self.namespace,
                ids=qdrant_ids,
                with_payload=True,
            )

            return [point.payload for point in results]
        except Exception as e:
            logger.error(f"Error retrieving vector data for IDs {ids}: {e}")
            return []

    async def drop(self) -> dict[str, str]:
        """Drop all vector data from storage and clean up resources

        This method will delete all data from the Qdrant collection.

        Returns:
            dict[str, str]: Operation status and message
            - On success: {"status": "success", "message": "data dropped"}
            - On failure: {"status": "error", "message": "<error details>"}
        """
        try:
            # Delete the collection and recreate it
            if self._client.collection_exists(self.namespace):
                self._client.delete_collection(self.namespace)

            # Recreate the collection
            QdrantVectorDBStorage.create_collection_if_not_exist(
                self._client,
                self.namespace,
                vectors_config=models.VectorParams(
                    size=self.embedding_func.embedding_dim,
                    distance=models.Distance.COSINE,
                ),
            )

            logger.info(
                f"Process {os.getpid()} drop Qdrant collection {self.namespace}"
            )
            return {"status": "success", "message": "data dropped"}
        except Exception as e:
            logger.error(f"Error dropping Qdrant collection {self.namespace}: {e}")
            return {"status": "error", "message": str(e)}
| 2,865
|
94c25e4ff17b4b8a1e725088b7b1e2afa96e3ce2e9965b3a49a5b345701eb570
| 33.975275
| 96
| 0.548347
| 4.44363
| false
| false
| false
| false
|
pydantic/pydantic-ai
|
tests/typed_graph.py
| 3,602
| 0
|
MIT License
|
from __future__ import annotations as _annotations
from dataclasses import dataclass
from typing import Any
from typing_extensions import assert_type
from pydantic_graph import BaseNode, End, FullStatePersistence, Graph, GraphRunContext
from pydantic_graph.persistence import BaseStatePersistence
# Entry node: stringifies its float input and hands off to String2Length.
@dataclass
class Float2String(BaseNode):
    input_data: float

    async def run(self, ctx: GraphRunContext) -> String2Length:
        return String2Length(str(self.input_data))
# Intermediate node: maps a string to its length and hands off to Double.
@dataclass
class String2Length(BaseNode):
    input_data: str

    async def run(self, ctx: GraphRunContext) -> Double:
        return Double(len(self.input_data))
# Wrapper type used as the graph's run-result (End) payload.
@dataclass
class X:
    v: int
# Terminal-capable node: either loops back to String2Length (when input is 7)
# or ends the graph with X(input * 2). Its End[X] annotation is what lets the
# graphs below infer their RunEndT as X.
@dataclass
class Double(BaseNode[None, None, X]):
    input_data: int

    async def run(self, ctx: GraphRunContext) -> String2Length | End[X]:
        if self.input_data == 7:
            return String2Length('x' * 21)
        else:
            return End(X(self.input_data * 2))
def use_double(node: BaseNode[None, None, X]) -> None:
    """Show that `Double` is valid as a `BaseNode[None, None, X]`."""
    print(node)
# Static type-inference checks for Graph construction; these lines exist to be
# verified by the type checker, not for their runtime effect.
use_double(Double(1))

# Explicit parametrization.
g1 = Graph[None, None, X](
    nodes=(
        Float2String,
        String2Length,
        Double,
    )
)
assert_type(g1, Graph[None, None, X])

# Parameters inferred from a single node.
g2 = Graph(nodes=(Double,))
assert_type(g2, Graph[None, None, X])

g3 = Graph(
    nodes=(
        Float2String,
        String2Length,
        Double,
    )
)
# because String2Length came before Double, the output type is Any
# NOTE(review): the assert below expects Graph[None, None, X], not Any —
# confirm whether the comment above is stale.
assert_type(g3, Graph[None, None, X])

# Deliberate misuse: these must be flagged by the type checker (hence the
# ignore codes, which also pin the expected error category).
Graph[None, bytes](nodes=(Float2String, String2Length, Double))  # type: ignore[arg-type]
Graph[None, str](nodes=[Double])  # type: ignore[list-item]
# Mutable graph state shared by nodes A and B below.
@dataclass
class MyState:
    x: int
# Dependency object injected into the graph run context.
@dataclass
class MyDeps:
    y: str
# Node exercising state/deps access through the run context.
@dataclass
class A(BaseNode[MyState, MyDeps]):
    async def run(self, ctx: GraphRunContext[MyState, MyDeps]) -> B:
        assert ctx.state.x == 1
        assert ctx.deps.y == 'y'
        return B()
# Terminal node: ends the stateful graph with an int result.
@dataclass
class B(BaseNode[MyState, MyDeps, int]):
    async def run(self, ctx: GraphRunContext[MyState, MyDeps]) -> End[int]:
        return End(42)
# Stateful graphs: explicit vs inferred type parameters must agree.
g4 = Graph[MyState, MyDeps, int](nodes=(A, B))
assert_type(g4, Graph[MyState, MyDeps, int])

g5 = Graph(nodes=(A, B))
assert_type(g5, Graph[MyState, MyDeps, int])
def run_g5() -> None:
    # Missing state and/or deps must be rejected (pyright ignores pin the
    # expected error); the fully-supplied call type-checks and yields int.
    g5.run_sync(A())  # pyright: ignore[reportArgumentType]
    g5.run_sync(A(), state=MyState(x=1))  # pyright: ignore[reportArgumentType]
    g5.run_sync(A(), deps=MyDeps(y='y'))  # pyright: ignore[reportArgumentType]
    result = g5.run_sync(A(), state=MyState(x=1), deps=MyDeps(y='y'))
    assert_type(result.output, int)
def run_g6() -> None:
    # The run result's persistence must carry the graph's state/output params.
    result = g5.run_sync(A(), state=MyState(x=1), deps=MyDeps(y='y'))
    assert_type(result.output, int)
    assert_type(result.persistence, BaseStatePersistence[MyState, int])
# Unparametrized persistence defaults to Any/Any.
p = FullStatePersistence()
assert_type(p, FullStatePersistence[Any, Any])
def run_persistence_any() -> None:
    # An Any/Any persistence is accepted and stays Any/Any after the run.
    p = FullStatePersistence()
    result = g5.run_sync(A(), persistence=p, state=MyState(x=1), deps=MyDeps(y='y'))
    assert_type(result.output, int)
    assert_type(p, FullStatePersistence[Any, Any])
def run_persistence_right() -> None:
    # A persistence parametrized to match the graph is accepted unchanged.
    p = FullStatePersistence[MyState, int]()
    result = g5.run_sync(A(), persistence=p, state=MyState(x=1), deps=MyDeps(y='y'))
    assert_type(result.output, int)
    assert_type(p, FullStatePersistence[MyState, int])
def run_persistence_wrong() -> None:
    # A mismatched persistence state type must be a type error.
    p = FullStatePersistence[str, int]()
    g5.run_sync(A(), persistence=p, state=MyState(x=1), deps=MyDeps(y='y'))  # type: ignore[arg-type]
| 1,188
|
d0d41101e0e18fe9e72dbd128cd72fdd07764e9508206f93cfcf7ef399e396bc
| 24.013889
| 101
| 0.664908
| 3.031987
| false
| false
| false
| false
|
openai/openai-agents-python
|
src/agents/mcp/util.py
| 5,232
| 0
|
MIT License
|
import functools
import json
from typing import TYPE_CHECKING, Any
from agents.strict_schema import ensure_strict_json_schema
from .. import _debug
from ..exceptions import AgentsException, ModelBehaviorError, UserError
from ..logger import logger
from ..run_context import RunContextWrapper
from ..tool import FunctionTool, Tool
from ..tracing import FunctionSpanData, get_current_span, mcp_tools_span
if TYPE_CHECKING:
from mcp.types import Tool as MCPTool
from .server import MCPServer
class MCPUtil:
    """Set of utilities for interop between MCP and Agents SDK tools."""

    @classmethod
    async def get_all_function_tools(
        cls, servers: list["MCPServer"], convert_schemas_to_strict: bool
    ) -> list[Tool]:
        """Get all function tools from a list of MCP servers.

        Raises UserError if two servers expose a tool with the same name,
        since the merged tool list must have unique names.
        """
        tools = []
        tool_names: set[str] = set()
        for server in servers:
            server_tools = await cls.get_function_tools(server, convert_schemas_to_strict)
            server_tool_names = {tool.name for tool in server_tools}
            if len(server_tool_names & tool_names) > 0:
                raise UserError(
                    f"Duplicate tool names found across MCP servers: "
                    f"{server_tool_names & tool_names}"
                )
            tool_names.update(server_tool_names)
            tools.extend(server_tools)
        return tools

    @classmethod
    async def get_function_tools(
        cls, server: "MCPServer", convert_schemas_to_strict: bool
    ) -> list[Tool]:
        """Get all function tools from a single MCP server."""
        # The listing is wrapped in a tracing span that records tool names.
        with mcp_tools_span(server=server.name) as span:
            tools = await server.list_tools()
        span.span_data.result = [tool.name for tool in tools]

        return [cls.to_function_tool(tool, server, convert_schemas_to_strict) for tool in tools]

    @classmethod
    def to_function_tool(
        cls, tool: "MCPTool", server: "MCPServer", convert_schemas_to_strict: bool
    ) -> FunctionTool:
        """Convert an MCP tool to an Agents SDK function tool."""
        invoke_func = functools.partial(cls.invoke_mcp_tool, server, tool)
        schema, is_strict = tool.inputSchema, False

        # MCP spec doesn't require the inputSchema to have `properties`, but OpenAI spec does.
        if "properties" not in schema:
            schema["properties"] = {}

        if convert_schemas_to_strict:
            # Best effort: fall back to the non-strict schema if conversion fails.
            try:
                schema = ensure_strict_json_schema(schema)
                is_strict = True
            except Exception as e:
                logger.info(f"Error converting MCP schema to strict mode: {e}")

        return FunctionTool(
            name=tool.name,
            description=tool.description or "",
            params_json_schema=schema,
            on_invoke_tool=invoke_func,
            strict_json_schema=is_strict,
        )

    @classmethod
    async def invoke_mcp_tool(
        cls, server: "MCPServer", tool: "MCPTool", context: RunContextWrapper[Any], input_json: str
    ) -> str:
        """Invoke an MCP tool and return the result as a string.

        Raises ModelBehaviorError on malformed JSON input and AgentsException
        when the server call itself fails. Tool arguments are omitted from
        logs when DONT_LOG_TOOL_DATA is set.
        """
        try:
            json_data: dict[str, Any] = json.loads(input_json) if input_json else {}
        except Exception as e:
            if _debug.DONT_LOG_TOOL_DATA:
                logger.debug(f"Invalid JSON input for tool {tool.name}")
            else:
                logger.debug(f"Invalid JSON input for tool {tool.name}: {input_json}")
            raise ModelBehaviorError(
                f"Invalid JSON input for tool {tool.name}: {input_json}"
            ) from e

        if _debug.DONT_LOG_TOOL_DATA:
            logger.debug(f"Invoking MCP tool {tool.name}")
        else:
            logger.debug(f"Invoking MCP tool {tool.name} with input {input_json}")

        try:
            result = await server.call_tool(tool.name, json_data)
        except Exception as e:
            logger.error(f"Error invoking MCP tool {tool.name}: {e}")
            raise AgentsException(f"Error invoking MCP tool {tool.name}: {e}") from e

        if _debug.DONT_LOG_TOOL_DATA:
            logger.debug(f"MCP tool {tool.name} completed.")
        else:
            logger.debug(f"MCP tool {tool.name} returned {result}")

        # The MCP tool result is a list of content items, whereas OpenAI tool outputs are a single
        # string. We'll try to convert.
        if len(result.content) == 1:
            tool_output = result.content[0].model_dump_json()
        elif len(result.content) > 1:
            tool_output = json.dumps([item.model_dump() for item in result.content])
        else:
            # An empty content list is treated as a tool failure.
            logger.error(f"Errored MCP tool result: {result}")
            tool_output = "Error running tool."

        # Attach the output (and originating server) to the active tracing span.
        current_span = get_current_span()
        if current_span:
            if isinstance(current_span.span_data, FunctionSpanData):
                current_span.span_data.output = tool_output
                current_span.span_data.mcp_data = {
                    "server": server.name,
                }
            else:
                logger.warning(
                    f"Current span is not a FunctionSpanData, skipping tool output: {current_span}"
                )

        return tool_output
| 1,301
|
3b9b5eaae075d2473dafbdc33c8c6f22618c291f4db71d14acf72e00efcff2cb
| 37.470588
| 99
| 0.600344
| 4.021522
| false
| false
| false
| false
|
infinition/Bjorn
|
resources/waveshare_epd/epd2in7.py
| 18,685
| 0
|
MIT License
|
# *****************************************************************************
# * | File : epd2in7.py
# * | Author : Waveshare team
# * | Function : Electronic paper driver
# * | Info :
# *----------------
# * | This version: V4.0
# * | Date : 2019-06-20
# # | Info : python demo
# -----------------------------------------------------------------------------
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documnetation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS OR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import logging
from . import epdconfig
# Display resolution
EPD_WIDTH = 176
EPD_HEIGHT = 264
GRAY1 = 0xff #white
GRAY2 = 0xC0
GRAY3 = 0x80 #gray
GRAY4 = 0x00 #Blackest
logger = logging.getLogger(__name__)
class EPD:
    def __init__(self):
        # Pin numbers and geometry come from the board-specific epdconfig module.
        self.is_initialized = False  # New flag to track if the display has been initialized #INFINITION
        self.reset_pin = epdconfig.RST_PIN
        self.dc_pin = epdconfig.DC_PIN      # data/command select line
        self.busy_pin = epdconfig.BUSY_PIN  # panel-busy input
        self.cs_pin = epdconfig.CS_PIN      # SPI chip select
        self.width = EPD_WIDTH
        self.height = EPD_HEIGHT
        self.GRAY1 = GRAY1  # white
        self.GRAY2 = GRAY2
        self.GRAY3 = GRAY3  # gray
        self.GRAY4 = GRAY4  # Blackest
lut_vcom_dc = [0x00, 0x00,
0x00, 0x08, 0x00, 0x00, 0x00, 0x02,
0x60, 0x28, 0x28, 0x00, 0x00, 0x01,
0x00, 0x14, 0x00, 0x00, 0x00, 0x01,
0x00, 0x12, 0x12, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00
]
lut_ww = [
0x40, 0x08, 0x00, 0x00, 0x00, 0x02,
0x90, 0x28, 0x28, 0x00, 0x00, 0x01,
0x40, 0x14, 0x00, 0x00, 0x00, 0x01,
0xA0, 0x12, 0x12, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
]
lut_bw = [
0x40, 0x08, 0x00, 0x00, 0x00, 0x02,
0x90, 0x28, 0x28, 0x00, 0x00, 0x01,
0x40, 0x14, 0x00, 0x00, 0x00, 0x01,
0xA0, 0x12, 0x12, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
]
lut_bb = [
0x80, 0x08, 0x00, 0x00, 0x00, 0x02,
0x90, 0x28, 0x28, 0x00, 0x00, 0x01,
0x80, 0x14, 0x00, 0x00, 0x00, 0x01,
0x50, 0x12, 0x12, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
]
lut_wb = [
0x80, 0x08, 0x00, 0x00, 0x00, 0x02,
0x90, 0x28, 0x28, 0x00, 0x00, 0x01,
0x80, 0x14, 0x00, 0x00, 0x00, 0x01,
0x50, 0x12, 0x12, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
]
###################full screen update LUT######################
#0~3 gray
gray_lut_vcom = [
0x00, 0x00,
0x00, 0x0A, 0x00, 0x00, 0x00, 0x01,
0x60, 0x14, 0x14, 0x00, 0x00, 0x01,
0x00, 0x14, 0x00, 0x00, 0x00, 0x01,
0x00, 0x13, 0x0A, 0x01, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
]
#R21
gray_lut_ww =[
0x40, 0x0A, 0x00, 0x00, 0x00, 0x01,
0x90, 0x14, 0x14, 0x00, 0x00, 0x01,
0x10, 0x14, 0x0A, 0x00, 0x00, 0x01,
0xA0, 0x13, 0x01, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
]
#R22H r
gray_lut_bw =[
0x40, 0x0A, 0x00, 0x00, 0x00, 0x01,
0x90, 0x14, 0x14, 0x00, 0x00, 0x01,
0x00, 0x14, 0x0A, 0x00, 0x00, 0x01,
0x99, 0x0C, 0x01, 0x03, 0x04, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
]
#R23H w
gray_lut_wb =[
0x40, 0x0A, 0x00, 0x00, 0x00, 0x01,
0x90, 0x14, 0x14, 0x00, 0x00, 0x01,
0x00, 0x14, 0x0A, 0x00, 0x00, 0x01,
0x99, 0x0B, 0x04, 0x04, 0x01, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
]
#R24H b
gray_lut_bb =[
0x80, 0x0A, 0x00, 0x00, 0x00, 0x01,
0x90, 0x14, 0x14, 0x00, 0x00, 0x01,
0x20, 0x14, 0x0A, 0x00, 0x00, 0x01,
0x50, 0x13, 0x01, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
]
    # Hardware reset
    def reset(self):
        """Pulse the RST line low to hardware-reset the panel controller."""
        epdconfig.digital_write(self.reset_pin, 1)
        epdconfig.delay_ms(200)
        epdconfig.digital_write(self.reset_pin, 0)  # active-low reset pulse
        epdconfig.delay_ms(5)
        epdconfig.digital_write(self.reset_pin, 1)
        epdconfig.delay_ms(200)
    def send_command(self, command):
        """Write one command byte over SPI (DC low selects command mode)."""
        epdconfig.digital_write(self.dc_pin, 0)
        epdconfig.digital_write(self.cs_pin, 0)
        epdconfig.spi_writebyte([command])
        epdconfig.digital_write(self.cs_pin, 1)
    def send_data(self, data):
        """Write one data byte over SPI (DC high selects data mode)."""
        epdconfig.digital_write(self.dc_pin, 1)
        epdconfig.digital_write(self.cs_pin, 0)
        epdconfig.spi_writebyte([data])
        epdconfig.digital_write(self.cs_pin, 1)
    def ReadBusy(self):
        """Block (polling every 200 ms) until the panel releases its busy line."""
        logger.debug("e-Paper busy")
        while(epdconfig.digital_read(self.busy_pin) == 0):      # 0: idle, 1: busy
            epdconfig.delay_ms(200)
        logger.debug("e-Paper busy release")
    def set_lut(self):
        """Upload the black/white refresh waveform tables to the controller.

        NOTE(review): registers 0x23/0x24 receive lut_bb/lut_wb respectively,
        which looks swapped relative to the register comments — harmless here
        because the two tables contain identical values, but confirm against
        the vendor driver before changing either table.
        """
        self.send_command(0x20)  # vcom
        for count in range(0, 44):
            self.send_data(self.lut_vcom_dc[count])
        self.send_command(0x21)  # ww --
        for count in range(0, 42):
            self.send_data(self.lut_ww[count])
        self.send_command(0x22)  # bw r
        for count in range(0, 42):
            self.send_data(self.lut_bw[count])
        self.send_command(0x23)  # wb w
        for count in range(0, 42):
            self.send_data(self.lut_bb[count])
        self.send_command(0x24)  # bb b
        for count in range(0, 42):
            self.send_data(self.lut_wb[count])
    def gray_SetLut(self):
        """Upload the 4-level grayscale refresh waveform tables."""
        self.send_command(0x20)
        for count in range(0, 44):  # vcom
            self.send_data(self.gray_lut_vcom[count])
        self.send_command(0x21)  # red not use
        for count in range(0, 42):
            self.send_data(self.gray_lut_ww[count])
        self.send_command(0x22)  # bw r
        for count in range(0, 42):
            self.send_data(self.gray_lut_bw[count])
        self.send_command(0x23)  # wb w
        for count in range(0, 42):
            self.send_data(self.gray_lut_wb[count])
        self.send_command(0x24)  # bb b
        for count in range(0, 42):
            self.send_data(self.gray_lut_bb[count])
        self.send_command(0x25)  # vcom
        for count in range(0, 42):
            self.send_data(self.gray_lut_ww[count])
def init(self):
if not self.is_initialized: # Avoid repeated initialization and accumulation of File descriptors #INFINITION
if epdconfig.module_init() != 0:
return -1
self.reset()
self.send_command(0x01) # POWER_SETTING
self.send_data(0x03) # VDS_EN, VDG_EN
self.send_data(0x00) # VCOM_HV, VGHL_LV[1], VGHL_LV[0]
self.send_data(0x2b) # VDH
self.send_data(0x2b) # VDL
self.send_data(0x09) # VDHR
self.send_command(0x06) # BOOSTER_SOFT_START
self.send_data(0x07)
self.send_data(0x07)
self.send_data(0x17)
# Power optimization
self.send_command(0xF8)
self.send_data(0x60)
self.send_data(0xA5)
# Power optimization
self.send_command(0xF8)
self.send_data(0x89)
self.send_data(0xA5)
# Power optimization
self.send_command(0xF8)
self.send_data(0x90)
self.send_data(0x00)
# Power optimization
self.send_command(0xF8)
self.send_data(0x93)
self.send_data(0x2A)
# Power optimization
self.send_command(0xF8)
self.send_data(0xA0)
self.send_data(0xA5)
# Power optimization
self.send_command(0xF8)
self.send_data(0xA1)
self.send_data(0x00)
# Power optimization
self.send_command(0xF8)
self.send_data(0x73)
self.send_data(0x41)
self.send_command(0x16) # PARTIAL_DISPLAY_REFRESH
self.send_data(0x00)
self.send_command(0x04) # POWER_ON
self.ReadBusy()
self.send_command(0x00) # PANEL_SETTING
self.send_data(0xAF) # KW-BF KWR-AF BWROTP 0f
self.send_command(0x30) # PLL_CONTROL
self.send_data(0x3A) # 3A 100HZ 29 150Hz 39 200HZ 31 171HZ
self.send_command(0X50) #VCOM AND DATA INTERVAL SETTING
self.send_data(0x57)
self.send_command(0x82) # VCM_DC_SETTING_REGISTER
self.send_data(0x12)
self.set_lut()
return 0
    def Init_4Gray(self):
        """Initialise the panel for 4-level grayscale operation.

        Variant of init() with a different panel setting (0xBF), PLL value
        and an explicit resolution block; the grayscale LUTs are loaded
        later by display_4Gray() via gray_SetLut().
        Returns -1 if the low-level interface failed to open; falls off the
        end (None) on success.
        NOTE(review): unlike init(), this does not check is_initialized and
        does not return 0 on success - confirm callers ignore the result.
        """
        if (epdconfig.module_init() != 0):
            return -1
        self.reset()
        self.send_command(0x01) #POWER SETTING
        self.send_data (0x03)
        self.send_data (0x00)
        self.send_data (0x2b)
        self.send_data (0x2b)
        self.send_command(0x06) #booster soft start
        self.send_data (0x07) #A
        self.send_data (0x07) #B
        self.send_data (0x17) #C
        # Vendor power-optimization writes (same 0xF8 register/value pairs as init()).
        self.send_command(0xF8) #boost??
        self.send_data (0x60)
        self.send_data (0xA5)
        self.send_command(0xF8) #boost??
        self.send_data (0x89)
        self.send_data (0xA5)
        self.send_command(0xF8) #boost??
        self.send_data (0x90)
        self.send_data (0x00)
        self.send_command(0xF8) #boost??
        self.send_data (0x93)
        self.send_data (0x2A)
        self.send_command(0xF8) #boost??
        self.send_data (0xa0)
        self.send_data (0xa5)
        self.send_command(0xF8) #boost??
        self.send_data (0xa1)
        self.send_data (0x00)
        self.send_command(0xF8) #boost??
        self.send_data (0x73)
        self.send_data (0x41)
        self.send_command(0x16)  # PARTIAL_DISPLAY_REFRESH (per init())
        self.send_data(0x00)
        self.send_command(0x04)  # POWER_ON (per init())
        self.ReadBusy()
        self.send_command(0x00) #panel setting
        self.send_data(0xbf) #KW-BF KWR-AF BWROTP 0f
        self.send_command(0x30) #PLL setting
        self.send_data (0x90) #100hz
        self.send_command(0x61) #resolution setting
        self.send_data (0x00) #176
        self.send_data (0xb0)
        self.send_data (0x01) #264
        self.send_data (0x08)
        self.send_command(0x82) #vcom_DC setting
        self.send_data (0x12)
        self.send_command(0X50) #VCOM AND DATA INTERVAL SETTING
        self.send_data(0x57)
def getbuffer(self, image):
# logger.debug("bufsiz = ",int(self.width/8) * self.height)
buf = [0xFF] * (int(self.width/8) * self.height)
image_monocolor = image.convert('1')
imwidth, imheight = image_monocolor.size
pixels = image_monocolor.load()
# logger.debug("imwidth = %d, imheight = %d",imwidth,imheight)
if(imwidth == self.width and imheight == self.height):
logger.debug("Vertical")
for y in range(imheight):
for x in range(imwidth):
# Set the bits for the column of pixels at the current position.
if pixels[x, y] == 0:
buf[int((x + y * self.width) / 8)] &= ~(0x80 >> (x % 8))
elif(imwidth == self.height and imheight == self.width):
logger.debug("Horizontal")
for y in range(imheight):
for x in range(imwidth):
newx = y
newy = self.height - x - 1
if pixels[x, y] == 0:
buf[int((newx + newy*self.width) / 8)] &= ~(0x80 >> (y % 8))
return buf
    def getbuffer_4Gray(self, image):
        """Convert a PIL image to a packed 2-bit (4-gray) frame buffer.

        Four pixels are packed per byte, keeping only the top 2 bits of
        each 8-bit gray value. Levels 0xC0 and 0x80 are first remapped
        (0xC0->0x80, 0x80->0x40) so the pack step distinguishes the four
        levels. Accepts the panel size upright or rotated 90 degrees.
        NOTE(review): this mutates the loaded pixel data of the converted
        image in place before packing.
        """
        # logger.debug("bufsiz = ",int(self.width/8) * self.height)
        buf = [0xFF] * (int(self.width / 4) * self.height)
        image_monocolor = image.convert('L')
        imwidth, imheight = image_monocolor.size
        pixels = image_monocolor.load()
        i=0  # running pixel counter; a byte is emitted every 4th pixel
        # logger.debug("imwidth = %d, imheight = %d",imwidth,imheight)
        if(imwidth == self.width and imheight == self.height):
            logger.debug("Vertical")
            for y in range(imheight):
                for x in range(imwidth):
                    # Set the bits for the column of pixels at the current position.
                    if(pixels[x, y] == 0xC0):
                        pixels[x, y] = 0x80
                    elif (pixels[x, y] == 0x80):
                        pixels[x, y] = 0x40
                    i= i+1
                    if(i%4 == 0):
                        # Pack the last four pixels' top 2 bits into one byte.
                        buf[int((x + (y * self.width))/4)] = ((pixels[x-3, y]&0xc0) | (pixels[x-2, y]&0xc0)>>2 | (pixels[x-1, y]&0xc0)>>4 | (pixels[x, y]&0xc0)>>6)
        elif(imwidth == self.height and imheight == self.width):
            logger.debug("Horizontal")
            for x in range(imwidth):
                for y in range(imheight):
                    # Rotate source coordinates onto the panel orientation.
                    newx = y
                    newy = self.height - x - 1
                    if(pixels[x, y] == 0xC0):
                        pixels[x, y] = 0x80
                    elif (pixels[x, y] == 0x80):
                        pixels[x, y] = 0x40
                    i= i+1
                    if(i%4 == 0):
                        buf[int((newx + (newy * self.width))/4)] = ((pixels[x, y-3]&0xc0) | (pixels[x, y-2]&0xc0)>>2 | (pixels[x, y-1]&0xc0)>>4 | (pixels[x, y]&0xc0)>>6)
        return buf
def display(self, image):
self.send_command(0x10)
for i in range(0, int(self.width * self.height / 8)):
self.send_data(0xFF)
self.send_command(0x13)
for i in range(0, int(self.width * self.height / 8)):
self.send_data(image[i])
self.send_command(0x12)
self.ReadBusy()
    def display_4Gray(self, image):
        """Push a 4-gray frame buffer to the panel and refresh.

        `image` holds 2 bits per pixel (from getbuffer_4Gray()). Each RAM
        plane gets 1 bit per pixel: 5808 output bytes per plane, built from
        2 input bytes (8 pixels) each. 5808 matches the 176x264 resolution
        programmed in Init_4Gray() (176*264/8).
        Plane 0x10 maps white/gray1 -> 1, black/gray2 -> 0; plane 0x13 maps
        white/gray2 -> 1, black/gray1 -> 0 - the two planes together encode
        the four levels for the grayscale LUTs loaded by gray_SetLut().
        """
        self.send_command(0x10)
        for i in range(0, 5808): #5808*4 46464
            temp3=0
            for j in range(0, 2):
                temp1 = image[i*2+j]  # one input byte = four 2-bit pixels
                for k in range(0, 2):
                    # First pixel of the pair: top 2 bits of temp1.
                    temp2 = temp1&0xC0
                    if(temp2 == 0xC0):
                        temp3 |= 0x01#white
                    elif(temp2 == 0x00):
                        temp3 |= 0x00 #black
                    elif(temp2 == 0x80):
                        temp3 |= 0x01 #gray1
                    else: #0x40
                        temp3 |= 0x00 #gray2
                    temp3 <<= 1
                    temp1 <<= 2
                    # Second pixel of the pair.
                    temp2 = temp1&0xC0
                    if(temp2 == 0xC0): #white
                        temp3 |= 0x01
                    elif(temp2 == 0x00): #black
                        temp3 |= 0x00
                    elif(temp2 == 0x80):
                        temp3 |= 0x01 #gray1
                    else : #0x40
                        temp3 |= 0x00 #gray2
                    # Shift for the next pixel, except after the very last one.
                    if(j!=1 or k!=1):
                        temp3 <<= 1
                    temp1 <<= 2
            self.send_data(temp3)
        self.send_command(0x13)
        for i in range(0, 5808): #5808*4 46464
            temp3=0
            for j in range(0, 2):
                temp1 = image[i*2+j]
                for k in range(0, 2):
                    temp2 = temp1&0xC0
                    if(temp2 == 0xC0):
                        temp3 |= 0x01#white
                    elif(temp2 == 0x00):
                        temp3 |= 0x00 #black
                    elif(temp2 == 0x80):
                        temp3 |= 0x00 #gray1
                    else: #0x40
                        temp3 |= 0x01 #gray2
                    temp3 <<= 1
                    temp1 <<= 2
                    temp2 = temp1&0xC0
                    if(temp2 == 0xC0): #white
                        temp3 |= 0x01
                    elif(temp2 == 0x00): #black
                        temp3 |= 0x00
                    elif(temp2 == 0x80):
                        temp3 |= 0x00 #gray1
                    else: #0x40
                        temp3 |= 0x01 #gray2
                    if(j!=1 or k!=1):
                        temp3 <<= 1
                    temp1 <<= 2
            self.send_data(temp3)
        # Load the grayscale LUTs, then trigger the refresh and wait.
        self.gray_SetLut()
        self.send_command(0x12)
        epdconfig.delay_ms(200)
        self.ReadBusy()
        # pass
def Clear(self, color=0xFF):
self.send_command(0x10)
for i in range(0, int(self.width * self.height / 8)):
self.send_data(color)
self.send_command(0x13)
for i in range(0, int(self.width * self.height / 8)):
self.send_data(color)
self.send_command(0x12)
self.ReadBusy()
def sleep(self):
self.send_command(0X50)
self.send_data(0xf7)
self.send_command(0X02)
self.send_command(0X07)
self.send_data(0xA5)
epdconfig.delay_ms(2000)
epdconfig.module_exit()
### END OF FILE ###
| 6,451
|
e523610daafa017173588d352c9bf44eb84bc8e48f87ced1259781b23df1dd47
| 34.455408
| 170
| 0.510035
| 2.89645
| false
| false
| false
| false
|
browser-use/browser-use
|
examples/integrations/slack/slack_example.py
| 1,359
| 0
|
MIT License
|
# Example: run browser-use as a Slack bot.
# Reads Slack and Gemini credentials from .env, wires a SlackBot into the
# FastAPI app from slack_api, and serves it with uvicorn on port 3000.
import os
from dotenv import load_dotenv
from langchain_google_genai import ChatGoogleGenerativeAI
from pydantic import SecretStr
from browser_use import BrowserConfig
from examples.integrations.slack.slack_api import SlackBot, app

load_dotenv()
# load credentials from environment variables
bot_token = os.getenv('SLACK_BOT_TOKEN')
if not bot_token:
	raise ValueError('Slack bot token not found in .env file.')
signing_secret = os.getenv('SLACK_SIGNING_SECRET')
if not signing_secret:
	raise ValueError('Slack signing secret not found in .env file.')
api_key = os.getenv('GEMINI_API_KEY')
if not api_key:
	raise ValueError('GEMINI_API_KEY is not set')
llm = ChatGoogleGenerativeAI(model='gemini-2.0-flash-exp', api_key=SecretStr(api_key))
slack_bot = SlackBot(
	llm=llm,  # required; instance of BaseChatModel
	bot_token=bot_token,  # required; Slack bot token
	signing_secret=signing_secret,  # required; Slack signing secret
	ack=True,  # optional; whether to acknowledge task receipt with a message, defaults to False
	browser_config=BrowserConfig(
		headless=True
	),  # optional; useful for changing headless mode or other browser configs, defaults to headless mode
)
# Inject the configured bot into the FastAPI dependency graph.
app.dependency_overrides[SlackBot] = lambda: slack_bot
if __name__ == '__main__':
	import uvicorn

	# NOTE(review): the import above uses 'examples.integrations.slack.slack_api'
	# but uvicorn is given 'integrations.slack.slack_api:app' - confirm this
	# module path resolves from the intended working directory.
	uvicorn.run('integrations.slack.slack_api:app', host='0.0.0.0', port=3000)
| 435
|
2972c252bf780263e35a636f12f3b23ee0f9c952a752b149f63a8014cb7ff23f
| 31.357143
| 102
| 0.762325
| 3.124138
| false
| true
| false
| false
|
meta-llama/llama-stack
|
llama_stack/models/llama/llama4/vision/embedding.py
| 7,179
| 0
|
MIT License
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import math
from typing import Any, Callable, Dict, List
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairscale.nn.model_parallel.layers import ColumnParallelLinear, RowParallelLinear
from ..args import VisionArgs
from .encoder import VisionEncoder
class PixelShuffle(nn.Module):
    """Reshape a flat [B, N, C] patch sequence into its square grid, apply
    the pixel-shuffle op, and flatten back to [B, N', C']."""

    def __init__(self, ps_ratio):
        super().__init__()
        self.ps_ratio = ps_ratio

    def forward(self, x):
        # x: [B, N, C], N = number of patches (assumed to form a square grid)
        assert self.ps_ratio is not None, "ps_ratio is required for pixel shuffle"
        assert x.dim() == 3, "pixel shuffle requires encoded patches [B, N, C]"
        batch = x.shape[0]
        side = int(math.sqrt(x.shape[1]))
        grid = x.reshape(batch, side, side, -1)
        shuffled = pixel_shuffle_op(grid, ps_ratio=self.ps_ratio)
        return shuffled.reshape(batch, -1, shuffled.shape[-1])
def pixel_shuffle_op(input_x, ps_ratio):
    """Pixel-shuffle a [B, W, H, C] grid, trading channel depth for
    spatial size: output is [B, W*r, H*r, C/(r*r)] for ratio r.

    Implemented as two view/permute passes, one per spatial axis; the
    `.contiguous()` calls are required before each subsequent view.
    """
    n, w, h, c = input_x.size()
    # Spread channels along the H axis, then swap it to the front.
    spread = input_x.view(n, w, int(h * ps_ratio), int(c / ps_ratio))
    spread = spread.permute(0, 2, 1, 3).contiguous()
    # Repeat for the W axis and restore the original axis order.
    out = spread.view(
        n,
        int(h * ps_ratio),
        int(w * ps_ratio),
        int(c / (ps_ratio * ps_ratio)),
    )
    return out.permute(0, 2, 1, 3).contiguous()
class SimpleMLP(torch.nn.Module):
    """Two-layer MLP built from tensor-parallel linear layers.

    Maps dim -> hidden_dim -> hidden_dim (note: the projection keeps
    hidden_dim width, not dim). The activation is applied after BOTH
    layers, including the output projection - unusual, but that is what
    forward() does; confirm against the reference model before changing.
    """

    def __init__(
        self,
        dim: int,
        hidden_dim: int,
        bias: bool = True,
        dropout: float = 0.0,
        act_layer: Callable = nn.GELU,
    ):
        super().__init__()
        # layers
        # Column-parallel in, row-parallel out: the hidden activations stay
        # sharded between the two layers (gather_output=False /
        # input_is_parallel=True).
        self.c_fc = ColumnParallelLinear(
            dim,
            hidden_dim,
            bias=bias,
            gather_output=False,
        )
        self.c_proj = RowParallelLinear(
            hidden_dim,
            hidden_dim,
            bias=bias,
            input_is_parallel=True,
        )
        self.non_linearity = act_layer()
        self.dropout = dropout

    def forward(self, x):
        hidden = self.c_fc(x)
        hidden = self.non_linearity(hidden)
        # Dropout only during training (p=0.0 by default).
        hidden = F.dropout(hidden, p=self.dropout, training=self.training)
        return self.non_linearity(self.c_proj(hidden))
class PixelShuffleMLP(torch.nn.Module):
    """Pixel-shuffle followed by an MLP projection of patch embeddings.

    The MLP input width is input_dim / ps_ratio**2, matching the channel
    count produced by the shuffle. An optional extra parallel linear
    (add_fc) is applied on top; by default it is an identity.
    """

    def __init__(
        self,
        ps_ratio: float,
        input_dim: int,
        output_dim: int = 4096,
        add_fc: bool = False,
    ):
        super().__init__()
        self.pixel_shuffle = PixelShuffle(ps_ratio)
        self.mlp = SimpleMLP(
            int(input_dim // (ps_ratio**2)),
            output_dim,
            bias=False,
            dropout=0.0,
            act_layer=nn.GELU,
        )
        # Identity unless add_fc requests a second projection.
        self.fc = nn.Identity()
        if add_fc:
            self.fc = ColumnParallelLinear(
                output_dim,
                output_dim,
                bias=False,
            )

    def forward(self, encoded_patches: torch.Tensor) -> torch.Tensor:
        encoded_patches = self.pixel_shuffle(encoded_patches)
        return self.fc(self.mlp(encoded_patches))
class VisionEmbeddings(torch.nn.Module):
    """Vision tower + adapter that produces image-token embeddings to be
    scattered into the text sequence (see scatter_embeddings)."""

    def __init__(self, args: VisionArgs):
        super().__init__()
        self.args = args

        image_size = args.image_size
        patch_size = args.patch_size
        self.vision_encoder = VisionEncoder(
            image_size=(image_size.height, image_size.width),
            patch_size=(patch_size.height, patch_size.width),
            dim=args.dim,
            layers=args.n_layers,
            heads=args.n_heads,
            mlp_ratio=args.mlp_ratio,
        )
        # The encoder runs in bfloat16.
        self.vision_encoder = self.vision_encoder.to(torch.bfloat16)
        self.vision_adapter = PixelShuffleMLP(
            ps_ratio=args.pixel_shuffle_ratio,
            input_dim=args.dim,
            output_dim=args.output_dim,
        )

        self.output_dim = args.output_dim
        self._register_load_state_dict_pre_hook(self.load_hook)

    def load_hook(
        self,
        state_dict: Dict[str, Any],
        prefix: str,
        local_metadata: Dict[str, Any],
        strict: bool = True,
        missing_keys: List[str] = None,
        unexpected_keys: List[str] = None,
        error_msgs: List[str] = None,
        return_state_dict: bool = False,
    ) -> None:
        # Reshape empty (0-length) 1-D checkpoint tensors to the shapes this
        # module expects for the corresponding parameter (placeholder tensors
        # in the checkpoint - presumably from sharded saves; TODO confirm).
        original_sd = self.state_dict()
        for k in state_dict:
            if k.startswith(prefix) and len(state_dict[k].shape) == 1 and state_dict[k].shape[0] == 0:
                state_dict[k] = state_dict[k].reshape(original_sd[k[len(prefix) :]].shape)

    def _get_empty_sequence(self, h):
        # Zero tensor shaped [batch, seq_len, output_dim] matching h's
        # device and dtype; filled in by scatter_embeddings.
        return torch.zeros(
            h.shape[0],
            h.shape[1],
            self.output_dim,
            device=h.device,
            dtype=h.dtype,
        )

    # x_images is batched; each batch sample contains a list of images. so this is List[List[torch.Tensor]]
    # each image is a tensor of shape [num_tiles, C, H, W]
    def forward(
        self,
        image_batch: List[List[torch.Tensor]],
        image_mask: torch.Tensor,
        h_ref: torch.Tensor,
    ) -> torch.Tensor:
        # Stack every tile of every image in the batch into one tensor,
        # encode, then project into the text embedding space.
        images_flattened = [image for sample in image_batch for image in sample]
        images_flattened = torch.vstack(images_flattened).unsqueeze(1).to(h_ref.dtype).to(h_ref.device)
        embedding = self.vision_encoder(images_flattened)
        projected_embedding = self.vision_adapter(embedding)

        h_image = self._get_empty_sequence(h_ref)
        return scatter_embeddings(image_batch, image_mask, h_image, projected_embedding)
def scatter_embeddings(image_batch, image_mask, h_image, encoded_patches_proj):
    """Scatter projected patch embeddings into the masked (image-token)
    positions of each sample's sequence.

    image_batch: List[List[Tensor]] of per-sample image tensors; size(0) of
        each image is its tile/chunk count.
    image_mask: per-sample boolean mask selecting image-token positions.
    h_image: zero sequence tensor [batch, seq, dim], filled in place.
    encoded_patches_proj: projected embeddings for all images, in batch order.
    """
    # If dynamic transform is used and the batch contains 2 images (where image_1 has 2 chunks and image_2 has 3 chunks),
    # `num_images_per_sequence` now records the number of chunks per image as `[2, 3]`.
    # `encoded_patches_proj.split` will then split the image chunks into 2 groups: `[image_1_chunks, image_2_chunks]`.
    num_images_per_sequence = [sum(image.size(0) for image in sample_images) for sample_images in image_batch]

    assert not torch.isnan(encoded_patches_proj).any()
    assert sum(num_images_per_sequence) == encoded_patches_proj.size(0), (
        f"{sum(num_images_per_sequence)=} != {encoded_patches_proj.shape=}"
    )
    # Per-sample slices of the flat projection tensor.
    encoded_patches_list = encoded_patches_proj.split(num_images_per_sequence, dim=0)
    for index in range(h_image.size(0)):
        encoded_patches_per_sample = encoded_patches_list[index]
        sample_image_mask = image_mask[index]

        if encoded_patches_per_sample.numel() == 0:
            continue  # sample has no images
        # Flatten [chunks, patches, dim] -> [chunks*patches, dim] before scattering.
        encoded_patches_per_sample = encoded_patches_per_sample.contiguous().view(
            -1, encoded_patches_per_sample.size(-1)
        )
        n_tokens_to_fill = sample_image_mask.sum()
        assert n_tokens_to_fill <= encoded_patches_per_sample.size(0)
        # In-place scatter: the first n_tokens_to_fill rows land on the
        # masked positions, in sequence order.
        h_image[index].masked_scatter_(
            sample_image_mask.expand(-1, h_image.size(-1)),
            encoded_patches_per_sample[:n_tokens_to_fill],
        )

    return h_image
| 2,050
|
405100c809fa894ed002903f616445228e550bec387d84757540a3bde69bde1e
| 33.349282
| 121
| 0.592562
| 3.501951
| false
| false
| false
| false
|
deepseek-ai/DeepSeek-VL2
|
deepseek_vl2/models/conversation.py
| 10,087
| 0
|
MIT License
|
"""
From https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py
"""
import dataclasses
from enum import IntEnum, auto
from typing import Any, Dict, List
class SeparatorStyle(IntEnum):
    """Separator styles.

    Selects which branch of Conversation.get_prompt() renders the history.
    """

    DeepSeek = auto()      # "role: message" turns with sep/sep2 alternation
    DeepSeekV2 = auto()    # SFT-style <|sft▁begin|> markup
    PLAIN = auto()         # messages only, no role names
    ALIGNMENT = auto()     # even turns replaced by an "<image>" placeholder
@dataclasses.dataclass
class Conversation:
    """A class that manages prompt templates and keeps all conversation history."""

    # The name of this template
    name: str
    # The template of the system prompt
    system_template: str = "{system_message}"
    # The system message
    system_message: str = ""
    # The names of two roles
    # NOTE(review): default is a 1-tuple wrapping the pair; every registered
    # template overrides it with a plain 2-tuple - confirm before relying on it.
    roles: List[str] = (("USER", "ASSISTANT"),)
    # All messages. Each item is (role, message).
    # NOTE(review): default is an immutable tuple; append_message() only works
    # once messages has been replaced with a list (copy() builds lists).
    messages: List[List[str]] = ()
    # The number of few shot examples
    offset: int = 0
    # The separator style and configurations
    sep_style: SeparatorStyle = SeparatorStyle.DeepSeek
    sep: str = "\n"
    sep2: str = None
    # Stop criteria (the default one is EOS token)
    stop_str: str = None
    # Stops generation if meeting any token in this list
    stop_token_ids: List[int] = None

    def get_prompt(self) -> str:
        """Get the prompt for generation.

        Renders the system prompt plus the message history according to
        sep_style; a trailing message of None (or empty) produces the
        generation prompt for the assistant turn.
        """
        system_prompt = self.system_template.format(system_message=self.system_message)
        if self.sep_style == SeparatorStyle.DeepSeek:
            # "role: message" turns; even turns end with sep, odd with sep2.
            seps = [self.sep, self.sep2]
            if system_prompt == "" or system_prompt is None:
                ret = ""
            else:
                ret = system_prompt + seps[0]
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + ": " + message + seps[i % 2]
                else:
                    ret += role + ":"
            return ret
        elif self.sep_style == SeparatorStyle.DeepSeekV2:
            # NOTE(review): this branch compares against the literal role
            # "User", but the registered deepseekv2 template uses roles
            # ("|<User>|", "|<Assistant>|"), so the first arm can never fire
            # with that template - confirm intended.
            seps = [self.sep, self.sep2]
            if system_prompt == "" or system_prompt is None:
                ret = ""
            else:
                ret = system_prompt + seps[0]
            for i, (role, message) in enumerate(self.messages):
                if message:
                    if role == "User":
                        ret += "<|sft▁begin|>\n" + message + self.sep #<|sft▁begin|>User Input<|sft▁end|>\nResponse<|end▁of▁sentence|>
                    else:
                        ret += message + self.sep2
                else:
                    ret = ret  # no-op: empty messages leave the prompt unchanged
            return ret
        elif self.sep_style == SeparatorStyle.PLAIN:
            # Messages only, no role names.
            # NOTE(review): both arms of the inner i % 2 check are identical.
            seps = [self.sep, self.sep2]
            ret = ""
            for i, (role, message) in enumerate(self.messages):
                if message:
                    if type(message) is tuple:
                        message, _, _ = message
                    if i % 2 == 0:
                        ret += message + seps[i % 2]
                    else:
                        ret += message + seps[i % 2]
                else:
                    ret += ""
            return ret
        elif self.sep_style == SeparatorStyle.ALIGNMENT:
            # Even (user) turns are replaced by an image placeholder.
            seps = [self.sep, self.sep2]
            ret = ""
            for i, (role, message) in enumerate(self.messages):
                if message:
                    if type(message) is tuple:
                        message, _, _ = message
                    if i % 2 == 0:
                        ret += '<image>\n' + seps[i % 2]
                    else:
                        ret += message + seps[i % 2]
                else:
                    ret += ""
            return ret
        else:
            raise ValueError(f"Invalid style: {self.sep_style}")

    def set_system_message(self, system_message: str):
        """Set the system message."""
        self.system_message = system_message

    def append_message(self, role: str, message: str):
        """Append a new message."""
        self.messages.append([role, message])

    def update_last_message(self, message: str):
        """Update the last output.

        The last message is typically set to be None when constructing the prompt,
        so we need to update it in-place after getting the response from a model.
        """
        self.messages[-1][1] = message

    def reset_message(self):
        """Reset a new message."""
        self.messages = []

    def to_gradio_chatbot(self):
        """Convert the conversation to gradio chatbot format."""
        ret = []
        for i, (role, msg) in enumerate(self.messages[self.offset :]):
            if i % 2 == 0:
                ret.append([msg, None])
            else:
                ret[-1][-1] = msg
        return ret

    def to_openai_api_messages(self):
        """Convert the conversation to OpenAI chat completion format."""
        system_prompt = self.system_template.format(system_message=self.system_message)
        ret = [{"role": "system", "content": system_prompt}]

        for i, (_, msg) in enumerate(self.messages[self.offset :]):
            if i % 2 == 0:
                ret.append({"role": "user", "content": msg})
            else:
                if msg is not None:
                    ret.append({"role": "assistant", "content": msg})
        return ret

    def copy(self):
        """Return an independent copy; messages become fresh lists."""
        return Conversation(
            name=self.name,
            system_template=self.system_template,
            system_message=self.system_message,
            roles=self.roles,
            messages=[[x, y] for x, y in self.messages],
            offset=self.offset,
            sep_style=self.sep_style,
            sep=self.sep,
            sep2=self.sep2,
            stop_str=self.stop_str,
            stop_token_ids=self.stop_token_ids,
        )

    def dict(self):
        """Serializable summary of the template and current history."""
        return {
            "template_name": self.name,
            "system_message": self.system_message,
            "roles": self.roles,
            "messages": self.messages,
            "offset": self.offset,
        }
# A global registry for all conversation templates
conv_templates: Dict[str, Conversation] = {}
def register_conv_template(template: Conversation, override: bool = False):
    """Register a new conversation template.

    Raises AssertionError when a template with the same name is already
    registered and override is False.
    """
    name = template.name
    if not override:
        assert name not in conv_templates, f"{name} has been registered."
    conv_templates[name] = template
def get_conv_template(name: str) -> Conversation:
    """Get a conversation template.

    Returns a copy, so callers can append messages without mutating the
    registered template. Raises KeyError for unknown names.
    """
    return conv_templates[name].copy()
# register_conv_template(
# Conversation(
# name="deepseek",
# system_template="{system_message}",
# # system_message="You are a helpful assistant. Please answer truthfully and write out your "
# # "thinking step by step to be sure you get the right answer.",
# system_message="",
# roles=("User", "Assistant"),
# messages=(),
# offset=0,
# sep_style=SeparatorStyle.DeepSeek,
# sep="\n\n",
# sep2="<|end▁of▁sentence|>",
# stop_token_ids=[100001],
# stop_str=["User:", "<|end▁of▁sentence|>"]
# )
# )
# "deepseek": <|User|>/<|Assistant|> role tags, "\n\n" between turns,
# assistant turns terminated by <|end▁of▁sentence|>.
register_conv_template(
    Conversation(
        name="deepseek",
        system_template="{system_message}",
        # system_message="You are a helpful assistant. Please answer truthfully and write out your "
        # "thinking step by step to be sure you get the right answer.",
        system_message="",
        roles=("<|User|>", "<|Assistant|>"),
        messages=(),
        offset=0,
        sep_style=SeparatorStyle.DeepSeek,
        sep="\n\n",
        sep2="<|end▁of▁sentence|>",
        stop_token_ids=[100001],
        stop_str=["User:", "<|end▁of▁sentence|>"]
    )
)
# register_conv_template(
#     Conversation(
#         name="deepseekv2",
#         system_template="{system_message}",
#         system_message="",
#         roles=("User", "Assistant"),
#         messages=(),
#         offset=0,
#         sep_style=SeparatorStyle.DeepSeekV2,
#         sep="\n<|sft▁end|>",
#         sep2="<|end▁of▁sentence|>",
#         stop_token_ids=[100001],
#         stop_str=["User:", "<|end▁of▁sentence|>"]
#     )
# )
# "deepseekv2": SFT-style markup.
# NOTE(review): roles are "|<User>|"/"|<Assistant>|" (delimiters reversed
# relative to the deepseek template, and the commented draft above used
# plain "User"/"Assistant"); get_prompt's DeepSeekV2 branch matches the
# literal "User", which these roles never equal - confirm intended.
register_conv_template(
    Conversation(
        name="deepseekv2",
        system_template="{system_message}",
        system_message="",
        roles=("|<User>|", "|<Assistant>|"),
        messages=(),
        offset=0,
        sep_style=SeparatorStyle.DeepSeekV2,
        sep="\n<|sft▁end|>",
        sep2="<|end▁of▁sentence|>",
        stop_token_ids=[100001],
        stop_str=["User:", "<|end▁of▁sentence|>"]
    )
)
# "plain": raw message concatenation with no role names or separators.
register_conv_template(
    Conversation(
        name="plain",
        system_template="",
        system_message="",
        roles=("", ""),
        messages=(),
        offset=0,
        sep_style=SeparatorStyle.PLAIN,
        sep="",
        sep2="",
        stop_token_ids=[100001],
        stop_str=['</s>'],
    )
)
# "alignment": like plain, but even turns render as an "<image>" placeholder.
register_conv_template(
    Conversation(
        name="alignment",
        system_template="",
        system_message="",
        roles=("", ""),
        messages=(),
        offset=0,
        sep_style=SeparatorStyle.ALIGNMENT,
        sep="",
        sep2="",
        stop_token_ids=[100001],
        stop_str=['</s>'],
    )
)
if __name__ == "__main__":
print("deepseek template:")
conv = get_conv_template("deepseek")
conv.append_message(conv.roles[0], "Hello!")
conv.append_message(conv.roles[1], "Hi! This is Tony.")
conv.append_message(conv.roles[0], "Who are you?")
conv.append_message(conv.roles[1], "I am a helpful assistant.")
conv.append_message(conv.roles[0], "How are you?")
conv.append_message(conv.roles[1], None)
print(conv.get_prompt())
print("deepseekv2 template:")
conv = get_conv_template("deepseekv2")
conv.append_message(conv.roles[0], "Hello!")
conv.append_message(conv.roles[1], "Hi! This is Tony.")
conv.append_message(conv.roles[0], "Who are you?")
conv.append_message(conv.roles[1], "I am a helpful assistant.")
conv.append_message(conv.roles[0], "How are you?")
conv.append_message(conv.roles[1], None)
print(conv.get_prompt())
| 2,690
|
a1be46cdbb841bb6ef20cbb83e6735096ecc9c92a2044941f6aa513e9342b3f0
| 31.53871
| 134
| 0.536334
| 3.749814
| false
| false
| false
| false
|
microsoft/markitdown
|
packages/markitdown/src/markitdown/converters/_llm_caption.py
| 1,450
| 0
|
MIT License
|
from typing import BinaryIO, Any, Union
import base64
import mimetypes
from .._stream_info import StreamInfo
def llm_caption(
    file_stream: BinaryIO, stream_info: StreamInfo, *, client, model, prompt=None
) -> Union[None, str]:
    """Caption an image with a multimodal LLM via an OpenAI-style client.

    Args:
        file_stream: Binary stream positioned at the image data; its read
            position is restored before returning.
        stream_info: Provides the mimetype and/or extension of the stream.
        client: OpenAI-compatible client (chat.completions.create).
        model: Model name passed through to the client.
        prompt: Optional caption instruction; a default is used when
            missing or blank.

    Returns:
        The model's caption text, or None if the stream could not be read.
    """
    if prompt is None or prompt.strip() == "":
        prompt = "Write a detailed caption for this image."

    # Resolve a content type: declared mimetype first, then a guess from
    # the extension, then a generic binary fallback.
    content_type = stream_info.mimetype
    if not content_type:
        content_type, _ = mimetypes.guess_type("_dummy" + (stream_info.extension or ""))
    if not content_type:
        content_type = "application/octet-stream"

    # Base64-encode the stream, restoring the caller's read position even
    # when reading fails.
    cur_pos = file_stream.tell()
    try:
        base64_image = base64.b64encode(file_stream.read()).decode("utf-8")
    except Exception:
        # Best-effort by design: an unreadable stream yields no caption.
        return None
    finally:
        file_stream.seek(cur_pos)

    # Prepare the data-uri
    data_uri = f"data:{content_type};base64,{base64_image}"

    # Prepare the OpenAI API request
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": prompt},
                {
                    "type": "image_url",
                    "image_url": {
                        "url": data_uri,
                    },
                },
            ],
        }
    ]

    # Call the OpenAI API
    response = client.chat.completions.create(model=model, messages=messages)
    return response.choices[0].message.content
| 365
|
2b7677b538a51f631499361ec79c39e15c3cff19ae278c5d479e98853f56997c
| 28
| 88
| 0.562069
| 3.972603
| false
| false
| false
| false
|
browser-use/browser-use
|
browser_use/controller/service.py
| 33,976
| 0
|
MIT License
|
import asyncio
import datetime
import enum
import json
import logging
import re
from typing import Dict, Generic, Optional, Tuple, Type, TypeVar, cast
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.prompts import PromptTemplate
from playwright.async_api import ElementHandle, Page
# from lmnr.sdk.laminar import Laminar
from pydantic import BaseModel
from browser_use.agent.views import ActionModel, ActionResult
from browser_use.browser.context import BrowserContext
from browser_use.controller.registry.service import Registry
from browser_use.controller.views import (
ClickElementAction,
ClickElementBySelectorAction,
ClickElementByTextAction,
ClickElementByXpathAction,
CloseTabAction,
DoneAction,
DragDropAction,
GoToUrlAction,
InputTextAction,
NoParamsAction,
OpenTabAction,
Position,
ScrollAction,
SearchGoogleAction,
SendKeysAction,
SwitchTabAction,
WaitForElementAction,
)
from browser_use.utils import time_execution_sync
logger = logging.getLogger(__name__)
Context = TypeVar('Context')
class Controller(Generic[Context]):
def __init__(
self,
exclude_actions: list[str] = [],
output_model: Optional[Type[BaseModel]] = None,
):
self.registry = Registry[Context](exclude_actions)
"""Register all default browser actions"""
if output_model is not None:
# Create a new model that extends the output model with success parameter
class ExtendedOutputModel(BaseModel): # type: ignore
success: bool = True
data: output_model
@self.registry.action(
'Complete task - with return text and if the task is finished (success=True) or not yet completely finished (success=False), because last step is reached',
param_model=ExtendedOutputModel,
)
async def done(params: ExtendedOutputModel):
# Exclude success from the output JSON since it's an internal parameter
output_dict = params.data.model_dump()
# Enums are not serializable, convert to string
for key, value in output_dict.items():
if isinstance(value, enum.Enum):
output_dict[key] = value.value
return ActionResult(is_done=True, success=params.success, extracted_content=json.dumps(output_dict))
else:
@self.registry.action(
'Complete task - with return text and if the task is finished (success=True) or not yet completely finished (success=False), because last step is reached',
param_model=DoneAction,
)
async def done(params: DoneAction):
return ActionResult(is_done=True, success=params.success, extracted_content=params.text)
# Basic Navigation Actions
@self.registry.action(
'Search the query in Google in the current tab, the query should be a search query like humans search in Google, concrete and not vague or super long. More the single most important items. ',
param_model=SearchGoogleAction,
)
async def search_google(params: SearchGoogleAction, browser: BrowserContext):
page = await browser.get_current_page()
await page.goto(f'https://www.google.com/search?q={params.query}&udm=14')
await page.wait_for_load_state()
msg = f'🔍 Searched for "{params.query}" in Google'
logger.info(msg)
return ActionResult(extracted_content=msg, include_in_memory=True)
@self.registry.action('Navigate to URL in the current tab', param_model=GoToUrlAction)
async def go_to_url(params: GoToUrlAction, browser: BrowserContext):
page = await browser.get_current_page()
await page.goto(params.url)
await page.wait_for_load_state()
msg = f'🔗 Navigated to {params.url}'
logger.info(msg)
return ActionResult(extracted_content=msg, include_in_memory=True)
@self.registry.action('Go back', param_model=NoParamsAction)
async def go_back(_: NoParamsAction, browser: BrowserContext):
await browser.go_back()
msg = '🔙 Navigated back'
logger.info(msg)
return ActionResult(extracted_content=msg, include_in_memory=True)
# wait for x seconds
@self.registry.action('Wait for x seconds default 3')
async def wait(seconds: int = 3):
msg = f'🕒 Waiting for {seconds} seconds'
logger.info(msg)
await asyncio.sleep(seconds)
return ActionResult(extracted_content=msg, include_in_memory=True)
@self.registry.action('Wait for element to be visible', param_model=WaitForElementAction)
async def wait_for_element(params: WaitForElementAction, browser: BrowserContext):
"""Waits for the element specified by the CSS selector to become visible within the given timeout."""
try:
await browser.wait_for_element(params.selector, params.timeout)
msg = f'👀 Element with selector "{params.selector}" became visible within {params.timeout}ms.'
logger.info(msg)
return ActionResult(extracted_content=msg, include_in_memory=True)
except Exception as e:
err_msg = f'❌ Failed to wait for element "{params.selector}" within {params.timeout}ms: {str(e)}'
logger.error(err_msg)
raise Exception(err_msg)
# Element Interaction Actions
@self.registry.action('Click element by index', param_model=ClickElementAction)
async def click_element_by_index(params: ClickElementAction, browser: BrowserContext):
session = await browser.get_session()
if params.index not in await browser.get_selector_map():
raise Exception(f'Element with index {params.index} does not exist - retry or use alternative actions')
element_node = await browser.get_dom_element_by_index(params.index)
initial_pages = len(session.context.pages)
# if element has file uploader then dont click
if await browser.is_file_uploader(element_node):
msg = f'Index {params.index} - has an element which opens file upload dialog. To upload files please use a specific function to upload files '
logger.info(msg)
return ActionResult(extracted_content=msg, include_in_memory=True)
msg = None
try:
download_path = await browser._click_element_node(element_node)
if download_path:
msg = f'💾 Downloaded file to {download_path}'
else:
msg = f'🖱️ Clicked button with index {params.index}: {element_node.get_all_text_till_next_clickable_element(max_depth=2)}'
logger.info(msg)
logger.debug(f'Element xpath: {element_node.xpath}')
if len(session.context.pages) > initial_pages:
new_tab_msg = 'New tab opened - switching to it'
msg += f' - {new_tab_msg}'
logger.info(new_tab_msg)
await browser.switch_to_tab(-1)
return ActionResult(extracted_content=msg, include_in_memory=True)
except Exception as e:
logger.warning(f'Element not clickable with index {params.index} - most likely the page changed')
return ActionResult(error=str(e))
@self.registry.action('Click element by selector', param_model=ClickElementBySelectorAction)
async def click_element_by_selector(params: ClickElementBySelectorAction, browser: BrowserContext):
try:
element_node = await browser.get_locate_element_by_css_selector(params.css_selector)
if element_node:
try:
await element_node.scroll_into_view_if_needed()
await element_node.click(timeout=1500, force=True)
except Exception:
try:
# Handle with js evaluate if fails to click using playwright
await element_node.evaluate('el => el.click()')
except Exception as e:
logger.warning(f"Element not clickable with css selector '{params.css_selector}' - {e}")
return ActionResult(error=str(e))
msg = f'🖱️ Clicked on element with text "{params.css_selector}"'
return ActionResult(extracted_content=msg, include_in_memory=True)
except Exception as e:
logger.warning(f'Element not clickable with selector {params.css_selector} - most likely the page changed')
return ActionResult(error=str(e))
@self.registry.action('Click on element by xpath', param_model=ClickElementByXpathAction)
async def click_element_by_xpath(params: ClickElementByXpathAction, browser: BrowserContext):
try:
element_node = await browser.get_locate_element_by_xpath(params.xpath)
if element_node:
try:
await element_node.scroll_into_view_if_needed()
await element_node.click(timeout=1500, force=True)
except Exception:
try:
# Handle with js evaluate if fails to click using playwright
await element_node.evaluate('el => el.click()')
except Exception as e:
logger.warning(f"Element not clickable with xpath '{params.xpath}' - {e}")
return ActionResult(error=str(e))
msg = f'🖱️ Clicked on element with text "{params.xpath}"'
return ActionResult(extracted_content=msg, include_in_memory=True)
except Exception as e:
logger.warning(f'Element not clickable with xpath {params.xpath} - most likely the page changed')
return ActionResult(error=str(e))
@self.registry.action('Click element with text', param_model=ClickElementByTextAction)
async def click_element_by_text(params: ClickElementByTextAction, browser: BrowserContext):
try:
element_node = await browser.get_locate_element_by_text(
text=params.text, nth=params.nth, element_type=params.element_type
)
if element_node:
try:
await element_node.scroll_into_view_if_needed()
await element_node.click(timeout=1500, force=True)
except Exception:
try:
# Handle with js evaluate if fails to click using playwright
await element_node.evaluate('el => el.click()')
except Exception as e:
logger.warning(f"Element not clickable with text '{params.text}' - {e}")
return ActionResult(error=str(e))
msg = f'🖱️ Clicked on element with text "{params.text}"'
return ActionResult(extracted_content=msg, include_in_memory=True)
else:
return ActionResult(error=f"No element found for text '{params.text}'")
except Exception as e:
logger.warning(f"Element not clickable with text '{params.text}' - {e}")
return ActionResult(error=str(e))
@self.registry.action(
    'Input text into a input interactive element',
    param_model=InputTextAction,
)
async def input_text(params: InputTextAction, browser: BrowserContext, has_sensitive_data: bool = False):
    """Type text into the interactive element at the given selector-map index.

    Raises:
        Exception: when the index is not present in the current selector map.
    """
    selector_map = await browser.get_selector_map()
    if params.index not in selector_map:
        raise Exception(f'Element index {params.index} does not exist - retry or use alternative actions')
    element_node = await browser.get_dom_element_by_index(params.index)
    await browser._input_text_element_node(element_node, params.text)
    # Never echo sensitive values into logs or the extracted message.
    if has_sensitive_data:
        msg = f'⌨️ Input sensitive data into index {params.index}'
    else:
        msg = f'⌨️ Input {params.text} into index {params.index}'
    logger.info(msg)
    logger.debug(f'Element xpath: {element_node.xpath}')
    return ActionResult(extracted_content=msg, include_in_memory=True)
# Save PDF
@self.registry.action(
    'Save the current page as a PDF file',
)
async def save_pdf(browser: BrowserContext):
    """Render the current page to an A4 PDF named after a slug of its URL."""
    page = await browser.get_current_page()
    # Strip scheme/www prefix and trailing slash, then slugify for a safe filename.
    short_url = re.sub(r'^https?://(?:www\.)?|/$', '', page.url)
    slug = re.sub(r'[^a-zA-Z0-9]+', '-', short_url).strip('-').lower()
    sanitized_filename = f'{slug}.pdf'
    # Bug fix: Playwright's Page.emulate_media takes keyword-only arguments;
    # the positional call emulate_media('screen') raises a TypeError.
    await page.emulate_media(media='screen')
    await page.pdf(path=sanitized_filename, format='A4', print_background=False)
    msg = f'Saving page with URL {page.url} as PDF to ./{sanitized_filename}'
    logger.info(msg)
    return ActionResult(extracted_content=msg, include_in_memory=True)
# Tab Management Actions
@self.registry.action('Switch tab', param_model=SwitchTabAction)
async def switch_tab(params: SwitchTabAction, browser: BrowserContext):
    """Activate the tab identified by page_id and wait until it has loaded."""
    await browser.switch_to_tab(params.page_id)
    # Block until the freshly focused tab is ready for interaction.
    current = await browser.get_current_page()
    await current.wait_for_load_state()
    result_msg = f'🔄 Switched to tab {params.page_id}'
    logger.info(result_msg)
    return ActionResult(extracted_content=result_msg, include_in_memory=True)
@self.registry.action('Open url in new tab', param_model=OpenTabAction)
async def open_tab(params: OpenTabAction, browser: BrowserContext):
    """Open the given URL in a brand-new browser tab."""
    await browser.create_new_tab(params.url)
    note = f'🔗 Opened new tab with {params.url}'
    logger.info(note)
    return ActionResult(extracted_content=note, include_in_memory=True)
@self.registry.action('Close an existing tab', param_model=CloseTabAction)
async def close_tab(params: CloseTabAction, browser: BrowserContext):
    """Close the tab with the given page_id, reporting the URL that was closed."""
    await browser.switch_to_tab(params.page_id)
    page = await browser.get_current_page()
    closed_url = page.url  # capture before the page object is destroyed
    await page.close()
    note = f'❌ Closed tab #{params.page_id} with url {closed_url}'
    logger.info(note)
    return ActionResult(extracted_content=note, include_in_memory=True)
# Content Actions
@self.registry.action(
    'Extract page content to retrieve specific information from the page, e.g. all company names, a specific description, all information about, links with companies in structured format or simply links',
)
async def extract_content(
    goal: str, should_strip_link_urls: bool, browser: BrowserContext, page_extraction_llm: BaseChatModel
):
    """Convert the current page (and its iframes) to markdown and ask the LLM
    to extract content relevant to *goal*.

    Falls back to returning the raw markdown when the LLM call fails.
    """
    page = await browser.get_current_page()
    import markdownify

    strip = []
    if should_strip_link_urls:
        strip = ['a', 'img']
    content = markdownify.markdownify(await page.content(), strip=strip)
    # manually append iframe text into the content so it's readable by the LLM (includes cross-origin iframes)
    for iframe in page.frames:
        if iframe.url != page.url and not iframe.url.startswith('data:'):
            content += f'\n\nIFRAME {iframe.url}:\n'
            content += markdownify.markdownify(await iframe.content())
    prompt = 'Your task is to extract the content of the page. You will be given a page and a goal and you should extract all relevant information around this goal from the page. If the goal is vague, summarize the page. Respond in json format. Extraction goal: {goal}, Page: {page}'
    template = PromptTemplate(input_variables=['goal', 'page'], template=prompt)
    try:
        # Bug fix: use the async LLM API so the (potentially slow) model call
        # does not block the event loop from inside this coroutine.
        output = await page_extraction_llm.ainvoke(template.format(goal=goal, page=content))
        msg = f'📄 Extracted from page\n: {output.content}\n'
        logger.info(msg)
        return ActionResult(extracted_content=msg, include_in_memory=True)
    except Exception as e:
        logger.debug(f'Error extracting content: {e}')
        msg = f'📄 Extracted from page\n: {content}\n'
        logger.info(msg)
        return ActionResult(extracted_content=msg)
# HTML Download
@self.registry.action(
    'Save the raw HTML content of the current page to a local file',
    param_model=NoParamsAction,
)
async def save_html_to_file(_: NoParamsAction, browser: BrowserContext) -> ActionResult:
    """Retrieves and returns the full HTML content of the current page to a file"""
    try:
        page = await browser.get_current_page()
        html_content = await page.content()
        # Derive a filesystem-safe, timestamped filename from the page URL.
        trimmed_url = re.sub(r'^https?://(?:www\.)?|/$', '', page.url)
        slug = re.sub(r'[^a-zA-Z0-9]+', '-', trimmed_url).strip('-').lower()[:64]
        stamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        sanitized_filename = f'{slug}_{stamp}.html'
        with open(sanitized_filename, 'w', encoding='utf-8') as f:
            f.write(html_content)
        msg = f'Saved HTML content of page with URL {page.url} to ./{sanitized_filename}'
        logger.info(msg)
        return ActionResult(extracted_content=msg, include_in_memory=True)
    except Exception as e:
        error_msg = f'Failed to save HTML content: {str(e)}'
        logger.error(error_msg)
        return ActionResult(error=error_msg, extracted_content='')
@self.registry.action(
    'Scroll down the page by pixel amount - if no amount is specified, scroll down one page',
    param_model=ScrollAction,
)
async def scroll_down(params: ScrollAction, browser: BrowserContext):
    """Scroll the viewport down by params.amount pixels, or one viewport height when unset."""
    page = await browser.get_current_page()
    if params.amount is None:
        await page.evaluate('window.scrollBy(0, window.innerHeight);')
        scrolled = 'one page'
    else:
        await page.evaluate(f'window.scrollBy(0, {params.amount});')
        scrolled = f'{params.amount} pixels'
    msg = f'🔍 Scrolled down the page by {scrolled}'
    logger.info(msg)
    return ActionResult(
        extracted_content=msg,
        include_in_memory=True,
    )
# scroll up
@self.registry.action(
    'Scroll up the page by pixel amount - if no amount is specified, scroll up one page',
    param_model=ScrollAction,
)
async def scroll_up(params: ScrollAction, browser: BrowserContext):
    """Scroll the viewport up by params.amount pixels, or one viewport height when unset."""
    page = await browser.get_current_page()
    if params.amount is None:
        await page.evaluate('window.scrollBy(0, -window.innerHeight);')
        scrolled = 'one page'
    else:
        await page.evaluate(f'window.scrollBy(0, -{params.amount});')
        scrolled = f'{params.amount} pixels'
    msg = f'🔍 Scrolled up the page by {scrolled}'
    logger.info(msg)
    return ActionResult(
        extracted_content=msg,
        include_in_memory=True,
    )
# send keys
@self.registry.action(
    'Send strings of special keys like Escape,Backspace, Insert, PageDown, Delete, Enter, Shortcuts such as `Control+o`, `Control+Shift+T` are supported as well. This gets used in keyboard.press. ',
    param_model=SendKeysAction,
)
async def send_keys(params: SendKeysAction, browser: BrowserContext):
    """Press a key (or key chord) on the current page via Playwright's keyboard API."""
    page = await browser.get_current_page()
    try:
        await page.keyboard.press(params.keys)
    except Exception as e:
        if 'Unknown key' in str(e):
            # loop over the keys and try to send each one
            # NOTE(review): iterating the string presses one character at a
            # time; any per-character failure is re-raised immediately, so
            # remaining characters are skipped.
            for key in params.keys:
                try:
                    await page.keyboard.press(key)
                except Exception as e:
                    logger.debug(f'Error sending key {key}: {str(e)}')
                    raise e
        else:
            # Not a key-name problem; propagate unchanged.
            raise e
    msg = f'⌨️ Sent keys: {params.keys}'
    logger.info(msg)
    return ActionResult(extracted_content=msg, include_in_memory=True)
@self.registry.action(
    description='If you dont find something which you want to interact with, scroll to it',
)
async def scroll_to_text(text: str, browser: BrowserContext):  # type: ignore
    """Scroll the first visible occurrence of *text* into view.

    Tries several locator strategies in order (substring text match, exact
    text selector, XPath contains()) and stops at the first visible hit.
    """
    page = await browser.get_current_page()
    try:
        # Try different locator strategies
        locators = [
            page.get_by_text(text, exact=False),
            page.locator(f'text={text}'),
            page.locator(f"//*[contains(text(), '{text}')]"),
        ]
        for locator in locators:
            try:
                # First check if element exists and is visible
                if await locator.count() > 0 and await locator.first.is_visible():
                    await locator.first.scroll_into_view_if_needed()
                    await asyncio.sleep(0.5)  # Wait for scroll to complete
                    msg = f'🔍 Scrolled to text: {text}'
                    logger.info(msg)
                    return ActionResult(extracted_content=msg, include_in_memory=True)
            except Exception as e:
                # A failing strategy is not fatal; fall through to the next one.
                logger.debug(f'Locator attempt failed: {str(e)}')
                continue
        msg = f"Text '{text}' not found or not visible on page"
        logger.info(msg)
        return ActionResult(extracted_content=msg, include_in_memory=True)
    except Exception as e:
        msg = f"Failed to scroll to text '{text}': {str(e)}"
        logger.error(msg)
        return ActionResult(error=msg, include_in_memory=True)
@self.registry.action(
    description='Get all options from a native dropdown',
)
async def get_dropdown_options(index: int, browser: BrowserContext) -> ActionResult:
    """Get all options from a native dropdown

    Scans every frame on the page for a <select> matching the element's
    XPath and returns a newline-separated list of "index: text" entries.
    """
    page = await browser.get_current_page()
    selector_map = await browser.get_selector_map()
    dom_element = selector_map[index]
    try:
        # Frame-aware approach since we know it works
        all_options = []
        frame_index = 0
        for frame in page.frames:
            try:
                options = await frame.evaluate(
                    """
                    (xpath) => {
                        const select = document.evaluate(xpath, document, null,
                            XPathResult.FIRST_ORDERED_NODE_TYPE, null).singleNodeValue;
                        if (!select) return null;
                        return {
                            options: Array.from(select.options).map(opt => ({
                                text: opt.text, //do not trim, because we are doing exact match in select_dropdown_option
                                value: opt.value,
                                index: opt.index
                            })),
                            id: select.id,
                            name: select.name
                        };
                    }
                    """,
                    dom_element.xpath,
                )
                if options:
                    logger.debug(f'Found dropdown in frame {frame_index}')
                    logger.debug(f'Dropdown ID: {options["id"]}, Name: {options["name"]}')
                    formatted_options = []
                    for opt in options['options']:
                        # encoding ensures AI uses the exact string in select_dropdown_option
                        encoded_text = json.dumps(opt['text'])
                        formatted_options.append(f'{opt["index"]}: text={encoded_text}')
                    all_options.extend(formatted_options)
            except Exception as frame_e:
                # A cross-origin or detached frame may fail; keep scanning the rest.
                logger.debug(f'Frame {frame_index} evaluation failed: {str(frame_e)}')
            frame_index += 1
        if all_options:
            msg = '\n'.join(all_options)
            msg += '\nUse the exact text string in select_dropdown_option'
            logger.info(msg)
            return ActionResult(extracted_content=msg, include_in_memory=True)
        else:
            msg = 'No options found in any frame for dropdown'
            logger.info(msg)
            return ActionResult(extracted_content=msg, include_in_memory=True)
    except Exception as e:
        logger.error(f'Failed to get dropdown options: {str(e)}')
        msg = f'Error getting options: {str(e)}'
        logger.info(msg)
        return ActionResult(extracted_content=msg, include_in_memory=True)
@self.registry.action(
    description='Select dropdown option for interactive element index by the text of the option you want to select',
)
async def select_dropdown_option(
    index: int,
    text: str,
    browser: BrowserContext,
) -> ActionResult:
    """Select dropdown option by the text of the option you want to select

    Verifies the indexed element is a <select>, then scans each frame for it
    and selects the option whose label matches *text* exactly.
    """
    page = await browser.get_current_page()
    selector_map = await browser.get_selector_map()
    dom_element = selector_map[index]
    # Validate that we're working with a select element
    if dom_element.tag_name != 'select':
        logger.error(f'Element is not a select! Tag: {dom_element.tag_name}, Attributes: {dom_element.attributes}')
        msg = f'Cannot select option: Element with index {index} is a {dom_element.tag_name}, not a select'
        return ActionResult(extracted_content=msg, include_in_memory=True)
    logger.debug(f"Attempting to select '{text}' using xpath: {dom_element.xpath}")
    logger.debug(f'Element attributes: {dom_element.attributes}')
    logger.debug(f'Element tag: {dom_element.tag_name}')
    # NOTE(review): this local is computed but never read below — the locator
    # call rebuilds the '//' prefix itself.
    xpath = '//' + dom_element.xpath
    try:
        frame_index = 0
        for frame in page.frames:
            try:
                logger.debug(f'Trying frame {frame_index} URL: {frame.url}')
                # First verify we can find the dropdown in this frame
                find_dropdown_js = """
                (xpath) => {
                    try {
                        const select = document.evaluate(xpath, document, null,
                            XPathResult.FIRST_ORDERED_NODE_TYPE, null).singleNodeValue;
                        if (!select) return null;
                        if (select.tagName.toLowerCase() !== 'select') {
                            return {
                                error: `Found element but it's a ${select.tagName}, not a SELECT`,
                                found: false
                            };
                        }
                        return {
                            id: select.id,
                            name: select.name,
                            found: true,
                            tagName: select.tagName,
                            optionCount: select.options.length,
                            currentValue: select.value,
                            availableOptions: Array.from(select.options).map(o => o.text.trim())
                        };
                    } catch (e) {
                        return {error: e.toString(), found: false};
                    }
                }
                """
                dropdown_info = await frame.evaluate(find_dropdown_js, dom_element.xpath)
                if dropdown_info:
                    if not dropdown_info.get('found'):
                        logger.error(f'Frame {frame_index} error: {dropdown_info.get("error")}')
                        continue
                    logger.debug(f'Found dropdown in frame {frame_index}: {dropdown_info}')
                    # "label" because we are selecting by text
                    # nth(0) to disable error thrown by strict mode
                    # timeout=1000 because we are already waiting for all network events, therefore ideally we don't need to wait a lot here (default 30s)
                    selected_option_values = (
                        await frame.locator('//' + dom_element.xpath).nth(0).select_option(label=text, timeout=1000)
                    )
                    msg = f'selected option {text} with value {selected_option_values}'
                    logger.info(msg + f' in frame {frame_index}')
                    return ActionResult(extracted_content=msg, include_in_memory=True)
            except Exception as frame_e:
                # Frame-local failure; log and continue scanning remaining frames.
                logger.error(f'Frame {frame_index} attempt failed: {str(frame_e)}')
                logger.error(f'Frame type: {type(frame)}')
                logger.error(f'Frame URL: {frame.url}')
            frame_index += 1
        msg = f"Could not select option '{text}' in any frame"
        logger.info(msg)
        return ActionResult(extracted_content=msg, include_in_memory=True)
    except Exception as e:
        msg = f'Selection failed: {str(e)}'
        logger.error(msg)
        return ActionResult(error=msg, include_in_memory=True)
@self.registry.action(
    'Drag and drop elements or between coordinates on the page - useful for canvas drawing, sortable lists, sliders, file uploads, and UI rearrangement',
    param_model=DragDropAction,
)
async def drag_drop(params: DragDropAction, browser: BrowserContext) -> ActionResult:
    """
    Performs a precise drag and drop operation between elements or coordinates.

    Two input modes: either element selectors (element_source/element_target,
    optionally with offsets) or raw coordinates (coord_source_x/y and
    coord_target_x/y). The drag is executed as mouse down, a series of
    intermediate moves, then mouse up.
    """

    async def get_drag_elements(
        page: Page,
        source_selector: str,
        target_selector: str,
    ) -> Tuple[Optional[ElementHandle], Optional[ElementHandle]]:
        """Get source and target elements with appropriate error handling."""
        source_element = None
        target_element = None
        try:
            # page.locator() auto-detects CSS and XPath
            source_locator = page.locator(source_selector)
            target_locator = page.locator(target_selector)
            # Check if elements exist
            source_count = await source_locator.count()
            target_count = await target_locator.count()
            if source_count > 0:
                source_element = await source_locator.first.element_handle()
                logger.debug(f'Found source element with selector: {source_selector}')
            else:
                logger.warning(f'Source element not found: {source_selector}')
            if target_count > 0:
                target_element = await target_locator.first.element_handle()
                logger.debug(f'Found target element with selector: {target_selector}')
            else:
                logger.warning(f'Target element not found: {target_selector}')
        except Exception as e:
            logger.error(f'Error finding elements: {str(e)}')
        # Either handle may be None; the caller reports which one is missing.
        return source_element, target_element

    async def get_element_coordinates(
        source_element: ElementHandle,
        target_element: ElementHandle,
        source_position: Optional[Position],
        target_position: Optional[Position],
    ) -> Tuple[Optional[Tuple[int, int]], Optional[Tuple[int, int]]]:
        """Get coordinates from elements with appropriate error handling."""
        source_coords = None
        target_coords = None
        try:
            # Get source coordinates: explicit offset wins over element center.
            if source_position:
                source_coords = (source_position.x, source_position.y)
            else:
                source_box = await source_element.bounding_box()
                if source_box:
                    source_coords = (
                        int(source_box['x'] + source_box['width'] / 2),
                        int(source_box['y'] + source_box['height'] / 2),
                    )
            # Get target coordinates
            if target_position:
                target_coords = (target_position.x, target_position.y)
            else:
                target_box = await target_element.bounding_box()
                if target_box:
                    target_coords = (
                        int(target_box['x'] + target_box['width'] / 2),
                        int(target_box['y'] + target_box['height'] / 2),
                    )
        except Exception as e:
            logger.error(f'Error getting element coordinates: {str(e)}')
        return source_coords, target_coords

    async def execute_drag_operation(
        page: Page,
        source_x: int,
        source_y: int,
        target_x: int,
        target_y: int,
        steps: int,
        delay_ms: int,
    ) -> Tuple[bool, str]:
        """Execute the drag operation with comprehensive error handling.

        Returns (success, message) rather than raising, so the caller can
        surface the failure in an ActionResult.
        """
        try:
            # Try to move to source position
            try:
                await page.mouse.move(source_x, source_y)
                logger.debug(f'Moved to source position ({source_x}, {source_y})')
            except Exception as e:
                logger.error(f'Failed to move to source position: {str(e)}')
                return False, f'Failed to move to source position: {str(e)}'
            # Press mouse button down
            await page.mouse.down()
            # Move to target position with intermediate steps so drag events fire
            for i in range(1, steps + 1):
                ratio = i / steps
                intermediate_x = int(source_x + (target_x - source_x) * ratio)
                intermediate_y = int(source_y + (target_y - source_y) * ratio)
                await page.mouse.move(intermediate_x, intermediate_y)
                if delay_ms > 0:
                    await asyncio.sleep(delay_ms / 1000)
            # Move to final target position
            await page.mouse.move(target_x, target_y)
            # Move again to ensure dragover events are properly triggered
            await page.mouse.move(target_x, target_y)
            # Release mouse button
            await page.mouse.up()
            return True, 'Drag operation completed successfully'
        except Exception as e:
            return False, f'Error during drag operation: {str(e)}'

    page = await browser.get_current_page()
    try:
        # Initialize variables
        source_x: Optional[int] = None
        source_y: Optional[int] = None
        target_x: Optional[int] = None
        target_y: Optional[int] = None
        # Normalize parameters (clamp to sane minimums; defaults 10 steps / 5 ms)
        steps = max(1, params.steps or 10)
        delay_ms = max(0, params.delay_ms or 5)
        # Case 1: Element selectors provided
        if params.element_source and params.element_target:
            logger.debug('Using element-based approach with selectors')
            source_element, target_element = await get_drag_elements(
                page,
                params.element_source,
                params.element_target,
            )
            if not source_element or not target_element:
                error_msg = f'Failed to find {"source" if not source_element else "target"} element'
                return ActionResult(error=error_msg, include_in_memory=True)
            source_coords, target_coords = await get_element_coordinates(
                source_element, target_element, params.element_source_offset, params.element_target_offset
            )
            if not source_coords or not target_coords:
                error_msg = f'Failed to determine {"source" if not source_coords else "target"} coordinates'
                return ActionResult(error=error_msg, include_in_memory=True)
            source_x, source_y = source_coords
            target_x, target_y = target_coords
        # Case 2: Coordinates provided directly
        elif all(
            coord is not None
            for coord in [params.coord_source_x, params.coord_source_y, params.coord_target_x, params.coord_target_y]
        ):
            logger.debug('Using coordinate-based approach')
            source_x = params.coord_source_x
            source_y = params.coord_source_y
            target_x = params.coord_target_x
            target_y = params.coord_target_y
        else:
            error_msg = 'Must provide either source/target selectors or source/target coordinates'
            return ActionResult(error=error_msg, include_in_memory=True)
        # Validate coordinates
        if any(coord is None for coord in [source_x, source_y, target_x, target_y]):
            error_msg = 'Failed to determine source or target coordinates'
            return ActionResult(error=error_msg, include_in_memory=True)
        # Perform the drag operation
        success, message = await execute_drag_operation(
            page,
            cast(int, source_x),
            cast(int, source_y),
            cast(int, target_x),
            cast(int, target_y),
            steps,
            delay_ms,
        )
        if not success:
            logger.error(f'Drag operation failed: {message}')
            return ActionResult(error=message, include_in_memory=True)
        # Create descriptive message
        if params.element_source and params.element_target:
            msg = f"🖱️ Dragged element '{params.element_source}' to '{params.element_target}'"
        else:
            msg = f'🖱️ Dragged from ({source_x}, {source_y}) to ({target_x}, {target_y})'
        logger.info(msg)
        return ActionResult(extracted_content=msg, include_in_memory=True)
    except Exception as e:
        error_msg = f'Failed to perform drag and drop: {str(e)}'
        logger.error(error_msg)
        return ActionResult(error=error_msg, include_in_memory=True)
# Register ---------------------------------------------------------------
def action(self, description: str, **kwargs):
    """Decorator for registering custom actions

    @param description: Describe the LLM what the function does (better description == better function calling)
    @param kwargs: forwarded verbatim to the underlying registry decorator
                   (e.g. param_model).
    """
    return self.registry.action(description, **kwargs)
# Act --------------------------------------------------------------------
@time_execution_sync('--act')
async def act(
    self,
    action: ActionModel,
    browser_context: BrowserContext,
    #
    page_extraction_llm: Optional[BaseChatModel] = None,
    sensitive_data: Optional[Dict[str, str]] = None,
    available_file_paths: Optional[list[str]] = None,
    #
    context: Context | None = None,
) -> ActionResult:
    """Execute an action

    Runs the single set parameter group on *action* through the registry and
    normalizes the handler's return value into an ActionResult. Returns an
    empty ActionResult when no parameters are set.

    Raises:
        ValueError: when the registered handler returns an unsupported type.
    """
    # Cleanup: the previous `try/except Exception as e: raise e` wrapper was a
    # no-op that only truncated traceback context, and the commented-out
    # tracing scaffolding was dead code; exceptions now propagate unchanged.
    for action_name, params in action.model_dump(exclude_unset=True).items():
        if params is not None:
            result = await self.registry.execute_action(
                action_name,
                params,
                browser=browser_context,
                page_extraction_llm=page_extraction_llm,
                sensitive_data=sensitive_data,
                available_file_paths=available_file_paths,
                context=context,
            )
            # Normalize the handler's return value into an ActionResult.
            if isinstance(result, str):
                return ActionResult(extracted_content=result)
            elif isinstance(result, ActionResult):
                return result
            elif result is None:
                return ActionResult()
            else:
                raise ValueError(f'Invalid action result type: {type(result)} of {result}')
    return ActionResult()
| 9,999
|
6ca511d3defd5e4d3c24ad7ebc8b74065e81c83269d3df7de61c88ad2517bdae
| 36.542541
| 282
| 0.683306
| 3.39794
| false
| false
| false
| false
|
meta-llama/llama-stack
|
llama_stack/models/llama/llama4/prompts.py
| 14,355
| 0
|
MIT License
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import textwrap
from io import BytesIO
from pathlib import Path
from typing import List
from ..datatypes import RawMediaItem, RawMessage, RawTextItem
from ..prompt_format import (
Llama4UseCase,
TextCompletionContent,
UseCase,
)
THIS_DIR = Path(__file__).parent
def usecases(base_model: bool = False) -> List[UseCase | str]:
    """Build the ordered list of Llama 4 prompt-format documentation sections.

    Args:
        base_model: when True, also include the base-model text-completion
            examples before the instruct-model sections.

    Returns:
        A list mixing markdown strings and (Llama4)UseCase objects, consumed
        in order by the prompt-format renderer.
    """
    # Example images are loaded once from the shared resources directory.
    with open(THIS_DIR.parent / "resources/small_dog.jpg", "rb") as f:
        img_small_dog = f.read()
    with open(THIS_DIR.parent / "resources/dog.jpg", "rb") as f:
        img_dog = f.read()
    with open(THIS_DIR.parent / "resources/pasta.jpeg", "rb") as f:
        img_pasta = f.read()
    out = []
    # Intro sections: special tokens and supported roles.
    out.extend(
        [
            textwrap.dedent(
                """
                # Llama 4 - Prompt Formats
                ## Tokens
                Here is a list of special tokens that are supported by Llama 4:
                - `<|begin_of_text|>`: Specifies the start of the prompt
                - `<|end_of_text|>`: Model will cease to generate more tokens. This token is generated only by the base models.
                - `<|header_start|>` and `<|header_end|>`: These tokens enclose the role for a particular message. The possible roles are: [system, user and assistant].
                - `<|eot|>`: End of turn. Represents when the model has determined that it has finished interacting with the user message that initiated its response. This is used in two scenarios:
                - at the end of a direct interaction between the model and the user
                - at the end of multiple interactions between the model and any available tools
                This token signals to the executor that the model has finished generating a response.
                - `<|image_start|>` and `<|image_end|>`: These tokens enclose the image data in the prompt.
                - `<|patch|>`: This token represents a piece of the tile/
                - `<|tile_y_separator|>` and `<|tile_x_separator|>`: These tokens are used to separate the y and x tiles of an image
                - `<|image|>`: In the new architecture, this token now separates the regular sized image information from a downsized version of it that fits in a single tile. The longer side is used for calculating the scale factor and the rest is padded to fit the tile.
                """
            ),
            textwrap.dedent(
                """
                There are 3 different roles that are supported by Llama 4
                - `system`: Sets the context in which to interact with the AI model. It typically includes rules, guidelines, or necessary information that helps the model respond effectively.
                - `user`: Represents the human interacting with the model. It includes the inputs, commands, and questions to the model.
                - `assistant`: Represents the response generated by the AI model based on the context provided in the `system`, `tool` and `user` prompts.
                """
            ),
        ]
    )
    # Base-model-only examples (plain text completion, no chat template).
    if base_model:
        out.extend(
            [
                "# Llama 4 Base Model",
                Llama4UseCase(
                    title="Text completion - Paris information",
                    description="Text completion for Llama 4 base model uses this format.",
                    dialogs=[TextCompletionContent(content="The capital of France is Paris")],
                ),
                Llama4UseCase(
                    title="Text completion - The color of the sky",
                    description="Text completion for Llama 4 base model uses this format.",
                    dialogs=[
                        TextCompletionContent(content="The color of the sky is blue but sometimes it can also be")
                    ],
                    notes="",
                ),
                Llama4UseCase(
                    title="Text completion - Translation example",
                    description="Text completion for Llama 4 base model uses this format.",
                    dialogs=[
                        TextCompletionContent(
                            content="""apple is pomme,
bannana is banane,
cherry is"""
                        )
                    ],
                    notes="",
                ),
            ]
        )
    # Instruct-model examples: chat, images, and tool calling.
    out.extend(
        [
            "# Llama 4 Instruct Model",
            Llama4UseCase(
                title="Simple User and assistant conversation",
                description="Here is a regular multi-turn user assistant conversation and how its formatted.",
                dialogs=[
                    [
                        RawMessage(role="system", content="You are a helpful assistant"),
                        RawMessage(
                            role="user",
                            content="Answer who are you in the form of jeopardy?",
                        ),
                    ]
                ],
                notes="",
                max_gen_len=512,
            ),
            "# Image prompt format",
            Llama4UseCase(
                title="Single image prompt format - small image",
                description="This example passes an image that is smaller than the tile size, to show the tile separator tokens are not needed",
                dialogs=[
                    [
                        RawMessage(
                            role="user",
                            content=[
                                RawMediaItem(data=BytesIO(img_small_dog)),
                                RawTextItem(text="Describe this image in two sentences"),
                            ],
                        )
                    ]
                ],
                notes="""Notice the structure of the image section:
```
<|image_start|><|image|><|patch|>...<|patch|><|image_end|>
```
This is due to the image being smaller than the tile size.
""",
                max_gen_len=512,
            ),
            Llama4UseCase(
                title="Single image prompt format",
                description="Here is an example of how to pass an image to the model",
                dialogs=[
                    [
                        RawMessage(
                            role="user",
                            content=[
                                RawMediaItem(data=BytesIO(img_dog)),
                                RawTextItem(text="Describe this image in two sentences"),
                            ],
                        )
                    ]
                ],
                notes="""With a bigger image, the image will include the tile separator tokens. Additionally, the image tag now separates a scaled down version of the image from the regular sized image.
```
<|image_start|><|patch|>...<|patch|><|tile_x_separator|><|patch|>...<|patch|><|tile_y_separator|><|patch|>...<|patch|><|image|><|patch|>...<|patch|><|image_end|>
```
""",
                max_gen_len=1024,
            ),
            Llama4UseCase(
                title="Multiple images prompt format",
                description="Here is an example of how to pass an image to the model",
                dialogs=[
                    [
                        RawMessage(
                            role="user",
                            content=[
                                RawMediaItem(data=BytesIO(img_dog)),
                                RawMediaItem(data=BytesIO(img_pasta)),
                                RawTextItem(text="Describe these images in two sentences"),
                            ],
                        )
                    ]
                ],
                notes="With multiple images, each one is encapsulated in their corresponding image tags.",
                max_gen_len=4096,
            ),
            "# Tool calling\nWe are continuing the format for zero shot function calling used in previous versions of Llama. All available functions can be provided either in the system message or in the user message.",
            Llama4UseCase(
                title="Zero shot function calling - system message",
                dialogs=[
                    [
                        RawMessage(
                            role="system",
                            content="""You are an expert in composing functions. You are given a question and a set of possible functions.
Based on the question, you will need to make one or more function/tool calls to achieve the purpose.
If none of the function can be used, point it out. If the given question lacks the parameters required by the function,
also point it out. You should only return the function call in tools call sections.
If you decide to invoke any of the function(s), you MUST put it in the format of [func_name1(params_name1=params_value1, params_name2=params_value2...), func_name2(params)]
You SHOULD NOT include any other text in the response.
Here is a list of functions in JSON format that you can invoke.
[
    {
        "name": "get_weather",
        "description": "Get weather info for places",
        "parameters": {
            "type": "dict",
            "required": [
                "city"
            ],
            "properties": {
                "city": {
                    "type": "string",
                    "description": "The name of the city to get the weather for"
                },
                "metric": {
                    "type": "string",
                    "description": "The metric for weather. Options are: celsius, fahrenheit",
                    "default": "celsius"
                }
            }
        }
    }
}
""",
                        ),
                        RawMessage(
                            role="user",
                            content="What is the weather in SF and Seattle?",
                        ),
                    ]
                ],
                notes=textwrap.dedent(
                    """
                    - The output supports multiple, and parallel tool calls natively
                    - JSON format for defining the functions in the system prompt is similar to Llama3.1
                    """
                ),
            ),
            Llama4UseCase(
                title="Zero shot function calling - user message",
                description=textwrap.dedent(
                    """
                    Similar to the above example, you can also provide information for all the available tools in the user message.
                    """
                ),
                dialogs=[
                    [
                        RawMessage(
                            role="user",
                            content="""Questions: Can you retrieve the details for the user with the ID 7890, who has black as their special request?
Here is a list of functions in JSON format that you can invoke:
[
    {
        "name": "get_user_info",
        "description": "Retrieve details for a specific user by their unique identifier. Note that the provided function is in Python 3 syntax.",
        "parameters": {
            "type": "dict",
            "required": [
                "user_id"
            ],
            "properties": {
                "user_id": {
                    "type": "integer",
                    "description": "The unique identifier of the user. It is used to fetch the specific user details from the database."
                },
                "special": {
                    "type": "string",
                    "description": "Any special information or parameters that need to be considered while fetching user details.",
                    "default": "none"
                }
            }
        }
    }
]
Should you decide to return the function call(s), put them in the format of [func1(params_name=params_value, params_name2=params_value2...), func2(params)]
You SHOULD NOT include any other text in the response.""",
                        ),
                    ]
                ],
                notes=textwrap.dedent(
                    """
                    - The tool call format for the model is the same whether your function calls are provided in the system or user message.
                    """
                ),
            ),
            Llama4UseCase(
                title="Tool calling with custom formats",
                description=textwrap.dedent(
                    """
                    Here is an example of how you could also write custom instructions for model to do zero shot tool calling.
                    In this example, we define a custom tool calling format using the `<function>` tag.
                    """
                ),
                dialogs=[
                    [
                        RawMessage(
                            role="user",
                            content="""You have access to the following functions:\nUse the function 'trending_songs' to 'Returns the trending songs on a Music site':\n{"name": "trending_songs", "description": "Returns the trending songs on a Music site", "parameters": {"genre": {"description": "The genre of the songs to return", "param_type": "str", "required": false}, "n": {"description": "The number of songs to return", "param_type": "int", "required": true}}}\n\nThink very carefully before calling functions.\nIf you choose to call a function ONLY reply in the following format with no prefix or suffix:\n\n<function=example_function_name>{"example_name": "example_value"}</function>
Reminder:
- If looking for real time information use relevant functions before falling back to brave_search
- Function calls MUST follow the specified format, start with <function= and end with </function>
- Required parameters MUST be specified
- Only call one function at a time
- Put the entire function call reply on one line<|eot_id|>""",
                        ),
                        RawMessage(
                            role="user",
                            content="Use tools to get latest trending songs",
                        ),
                    ]
                ],
            ),
        ]
    )
    return out
| 2,883
|
cd4a89caf82934c23b0f0bb92af2d64428d5e349d1a91d5d442e2c8fe15dbbd8
| 45.911765
| 692
| 0.514246
| 4.979188
| false
| false
| false
| false
|
trycua/cua
|
libs/agent/agent/core/experiment.py
| 8,866
| 0
|
MIT License
|
"""Core experiment management for agents."""
import os
import logging
import base64
from io import BytesIO
from datetime import datetime
from typing import Any, Dict, List, Optional
from PIL import Image
import json
import re
logger = logging.getLogger(__name__)
class ExperimentManager:
    """Manages experiment directories and logging for the agent.

    Creates a timestamped run directory under ``base_dir``, one numbered
    sub-directory per turn, and writes screenshots, action visualizations,
    and sanitized API-call logs into the current turn directory.
    """
    def __init__(
        self,
        base_dir: Optional[str] = None,
        only_n_most_recent_images: Optional[int] = None,
    ):
        """Initialize the experiment manager.
        Args:
            base_dir: Base directory for saving experiment data
            only_n_most_recent_images: Maximum number of recent screenshots to include in API requests
        """
        self.base_dir = base_dir
        # NOTE(review): only stored here; nothing in this class reads it, so
        # presumably a consumer uses it when building API requests — confirm.
        self.only_n_most_recent_images = only_n_most_recent_images
        # Both directories stay None until setup_experiment_dirs()/create_turn_dir() run.
        self.run_dir = None
        self.current_turn_dir = None
        self.turn_count = 0
        self.screenshot_count = 0
        # Track all screenshots for potential API request inclusion
        self.screenshot_paths = []
        # Set up experiment directories if base_dir is provided
        if self.base_dir:
            self.setup_experiment_dirs()
    def setup_experiment_dirs(self) -> None:
        """Setup the experiment directory structure."""
        if not self.base_dir:
            return
        # Create base experiments directory if it doesn't exist
        os.makedirs(self.base_dir, exist_ok=True)
        # Create timestamped run directory
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        self.run_dir = os.path.join(self.base_dir, timestamp)
        os.makedirs(self.run_dir, exist_ok=True)
        logger.info(f"Created run directory: {self.run_dir}")
        # Create first turn directory
        self.create_turn_dir()
    def create_turn_dir(self) -> None:
        """Create a new directory for the current turn."""
        if not self.run_dir:
            logger.warning("Cannot create turn directory: run_dir not set")
            return
        # Increment turn counter
        self.turn_count += 1
        # Create turn directory with padded number
        turn_name = f"turn_{self.turn_count:03d}"
        self.current_turn_dir = os.path.join(self.run_dir, turn_name)
        os.makedirs(self.current_turn_dir, exist_ok=True)
        logger.info(f"Created turn directory: {self.current_turn_dir}")
    def sanitize_log_data(self, data: Any) -> Any:
        """Sanitize log data by replacing large binary data with placeholders.

        Recursively walks dicts and lists; any 'data' value or base64-looking
        string longer than 1000 chars is replaced by a length placeholder.
        Args:
            data: Data to sanitize
        Returns:
            Sanitized copy of the data
        """
        if isinstance(data, dict):
            result = {}
            for k, v in data.items():
                # Special handling for 'data' field in Anthropic message source
                if k == "data" and isinstance(v, str) and len(v) > 1000:
                    result[k] = f"[BASE64_DATA_LENGTH_{len(v)}]"
                # Special handling for the 'media_type' key which indicates we're in an image block
                elif k == "media_type" and "image" in str(v):
                    result[k] = v
                    # If we're in an image block, look for a sibling 'data' field with base64 content
                    # NOTE(review): this only fires when 'data' was iterated
                    # *before* 'media_type' (dict-order dependent); the first
                    # branch above normally catches large 'data' values anyway.
                    if (
                        "data" in result
                        and isinstance(result["data"], str)
                        and len(result["data"]) > 1000
                    ):
                        result["data"] = f"[BASE64_DATA_LENGTH_{len(result['data'])}]"
                else:
                    result[k] = self.sanitize_log_data(v)
            return result
        elif isinstance(data, list):
            return [self.sanitize_log_data(item) for item in data]
        elif isinstance(data, str) and len(data) > 1000 and "base64" in data.lower():
            return f"[BASE64_DATA_LENGTH_{len(data)}]"
        else:
            return data
    def save_screenshot(self, img_base64: str, action_type: str = "") -> Optional[str]:
        """Save a screenshot to the experiment directory.
        Args:
            img_base64: Base64 encoded screenshot
            action_type: Type of action that triggered the screenshot
        Returns:
            Path to the saved screenshot or None if there was an error
        """
        if not self.current_turn_dir:
            return None
        try:
            # Increment screenshot counter
            self.screenshot_count += 1
            # Sanitize action_type to ensure valid filename
            # Replace characters that are not safe for filenames
            sanitized_action = ""
            if action_type:
                # Replace invalid filename characters with underscores
                sanitized_action = re.sub(r'[\\/*?:"<>|]', "_", action_type)
                # Limit the length to avoid excessively long filenames
                sanitized_action = sanitized_action[:50]
            # Create a descriptive filename
            timestamp = int(datetime.now().timestamp() * 1000)
            action_suffix = f"_{sanitized_action}" if sanitized_action else ""
            filename = f"screenshot_{self.screenshot_count:03d}{action_suffix}_{timestamp}.png"
            # Save directly to the turn directory
            filepath = os.path.join(self.current_turn_dir, filename)
            # Save the screenshot
            img_data = base64.b64decode(img_base64)
            with open(filepath, "wb") as f:
                f.write(img_data)
            # Keep track of the file path
            self.screenshot_paths.append(filepath)
            return filepath
        except Exception as e:
            logger.error(f"Error saving screenshot: {str(e)}")
            return None
    def save_action_visualization(
        self, img: Image.Image, action_name: str, details: str = ""
    ) -> str:
        """Save a visualization of an action.
        Args:
            img: Image to save
            action_name: Name of the action
            details: Additional details about the action
        Returns:
            Path to the saved image (empty string on error or when no turn dir exists)
        """
        if not self.current_turn_dir:
            return ""
        try:
            # Create a descriptive filename
            # NOTE(review): unlike action_type in save_screenshot, action_name
            # and details are not sanitized for filename-unsafe characters.
            timestamp = int(datetime.now().timestamp() * 1000)
            details_suffix = f"_{details}" if details else ""
            filename = f"vis_{action_name}{details_suffix}_{timestamp}.png"
            # Save directly to the turn directory
            filepath = os.path.join(self.current_turn_dir, filename)
            # Save the image
            img.save(filepath)
            # Keep track of the file path
            self.screenshot_paths.append(filepath)
            return filepath
        except Exception as e:
            logger.error(f"Error saving action visualization: {str(e)}")
            return ""
    def log_api_call(
        self,
        call_type: str,
        request: Any,
        provider: str = "unknown",
        model: str = "unknown",
        response: Any = None,
        error: Optional[Exception] = None,
    ) -> None:
        """Log API call details to file.
        Args:
            call_type: Type of API call (request, response, error)
            request: Request data
            provider: API provider name
            model: Model name
            response: Response data (for response logs)
            error: Error information (for error logs)
        """
        if not self.current_turn_dir:
            logger.warning("Cannot log API call: current_turn_dir not set")
            return
        try:
            # Create a timestamp for the log file
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            # Create filename based on log type
            filename = f"api_call_{timestamp}_{call_type}.json"
            filepath = os.path.join(self.current_turn_dir, filename)
            # Sanitize data before logging
            sanitized_request = self.sanitize_log_data(request)
            sanitized_response = self.sanitize_log_data(response) if response is not None else None
            # Prepare log data
            log_data = {
                "timestamp": timestamp,
                "provider": provider,
                "model": model,
                "type": call_type,
                "request": sanitized_request,
            }
            if sanitized_response is not None:
                log_data["response"] = sanitized_response
            if error is not None:
                log_data["error"] = str(error)
            # Write to file; default=str keeps non-JSON-serializable values loggable
            with open(filepath, "w") as f:
                json.dump(log_data, f, indent=2, default=str)
            logger.info(f"Logged API {call_type} to {filepath}")
        except Exception as e:
            logger.error(f"Error logging API call: {str(e)}")
| 2,006
|
fa688c907f889b485dfa63432c692c6d2c9d8b235563b2e39e008a7a1af5e1c9
| 34.606426
| 102
| 0.564516
| 4.419741
| false
| false
| false
| false
|
MadcowD/ell
|
src/ell/types/_lstr.py
| 22,425
| 0
|
MIT License
|
"""
LM string that supports logits and keeps track of it'sorigin_trace even after mutation.
"""
import numpy as np
from typing import (
Optional,
Set,
SupportsIndex,
Union,
FrozenSet,
Iterable,
List,
Tuple,
Any,
Callable,
)
from typing_extensions import override
from pydantic import BaseModel, GetCoreSchemaHandler
from pydantic_core import CoreSchema
from pydantic_core import CoreSchema, core_schema
class _lstr(str):
    """
    A string class that supports logits and keeps track of its origin_trace even after mutation.
    This class is designed to be used in prompt engineering libraries where it is essential to associate
    logits with generated text and track the origin of the text.
    The `lstr` class inherits from the built-in `str` class and adds two additional attributes: `logits` and `origin_trace`.
    The `origin_trace` attribute is a frozen set of strings that represents the origin_trace(s) of the string.
    The class provides various methods for manipulating the string, such as concatenation, slicing, splitting, and joining.
    These methods ensure that the logits and origin_trace(s) are updated correctly based on the operation performed.
    The `lstr` class is particularly useful in LLM libraries for tracing the flow of prompts through various language model calls.
    By tracking the origin_trace of each string, it is possible to visualize how outputs from one language model program influence
    the inputs of another, allowing for a detailed analysis of interactions between different large language models. This capability
    is crucial for understanding the propagation of prompts in complex LLM workflows and for building visual graphs that depict these interactions.
    It is important to note that any modification to the string (such as concatenation or replacement) will invalidate the associated logits.
    This is because the logits are specifically tied to the original string content, and any change would require a new computation of logits.
    The logic behind this is detailed elsewhere in this file.
    Example usage:
    ```
    # Create an lstr instance with logits and an origin_trace
    logits = np.array([1.0, 2.0, 3.0])
    origin_trace = "4e9b7ec9"
    lstr_instance = lstr("Hello", logits, origin_trace)
    # Concatenate two lstr instances
    lstr_instance2 = lstr("World", None, "7f4d2c3a")
    concatenated_lstr = lstr_instance + lstr_instance2
    # Get the logits and origin_trace of the concatenated lstr
    print(concatenated_lstr.logits) # Output: None
    print(concatenated_lstr.origin_trace) # Output: frozenset({'4e9b7ec9', '7f4d2c3a'})
    # Split the concatenated lstr into two parts
    parts = concatenated_lstr.split()
    print(parts) # Output: [lstr('Hello', None, frozenset({'4e9b7ec9', '7f4d2c3a'})), lstr('World', None, frozenset({'4e9b7ec9', '7f4d2c3a'}))]
    ```
    Attributes:
        origin_trace (FrozenSet[str]): A frozen set of strings representing the origin_trace(s) of the string.
    Methods:
        __new__: Create a new instance of lstr.
        __repr__: Return a string representation of the lstr instance.
        __add__: Concatenate this lstr instance with another string or lstr instance.
        __mod__: Perform a modulo operation between this lstr instance and another string, lstr, or a tuple of strings and lstrs.
        __mul__: Perform a multiplication operation between this lstr instance and an integer or another lstr.
        __rmul__: Perform a right multiplication operation between an integer or another lstr and this lstr instance.
        __getitem__: Get a slice or index of this lstr instance.
        __getattribute__: Get an attribute from this lstr instance.
        join: Join a sequence of strings or lstr instances into a single lstr instance.
        split: Split this lstr instance into a list of lstr instances based on a separator.
        rsplit: Split this lstr instance into a list of lstr instances based on a separator, starting from the right.
        splitlines: Split this lstr instance into a list of lstr instances based on line breaks.
        partition: Partition this lstr instance into three lstr instances based on a separator.
        rpartition: Partition this lstr instance into three lstr instances based on a separator, starting from the right.
    """
    def __new__(
        cls,
        content: str,
        logits: Optional[np.ndarray] = None,
        origin_trace: Optional[Union[str, FrozenSet[str]]] = None,
    ):
        """
        Create a new instance of lstr. The `logits` should be a numpy array and `origin_trace` should be a frozen set of strings or a single string.
        Args:
            content (str): The string content of the lstr.
            logits (np.ndarray, optional): The logits associated with this string. Defaults to None.
            origin_trace (Union[str, FrozenSet[str]], optional): The origin_trace(s) of this string. Defaults to None.
        """
        instance = super(_lstr, cls).__new__(cls, content)
        # NOTE: the logits argument is accepted for API compatibility but is
        # currently not stored (the assignment below is disabled).
        # instance._logits = logits
        if isinstance(origin_trace, str):
            instance.__origin_trace__ = frozenset({origin_trace})
        else:
            instance.__origin_trace__ = (
                frozenset(origin_trace) if origin_trace is not None else frozenset()
            )
        return instance
    # _logits: Optional[np.ndarray]
    __origin_trace__: FrozenSet[str]
    @classmethod
    def __get_pydantic_core_schema__(
        cls, source_type: Any, handler: GetCoreSchemaHandler
    ) -> CoreSchema:
        # Pydantic v2 integration: validate from plain strings, existing
        # instances, or the serialized dict form emitted by the serializer below.
        def validate_lstr(value):
            if isinstance(value, dict) and value.get("__lstr", False):
                content = value["content"]
                origin_trace = value["__origin_trace__"].split(",")
                return cls(content, origin_trace=origin_trace)
            elif isinstance(value, str):
                return cls(value)
            elif isinstance(value, cls):
                return value
            else:
                raise ValueError(f"Invalid value for lstr: {value}")
        return core_schema.json_or_python_schema(
            json_schema=core_schema.typed_dict_schema(
                {
                    "content": core_schema.typed_dict_field(core_schema.str_schema()),
                    "__origin_trace__": core_schema.typed_dict_field(
                        core_schema.str_schema()
                    ),
                    "__lstr": core_schema.typed_dict_field(core_schema.bool_schema()),
                }
            ),
            python_schema=core_schema.union_schema(
                [
                    core_schema.is_instance_schema(cls),
                    core_schema.no_info_plain_validator_function(validate_lstr),
                ]
            ),
            serialization=core_schema.plain_serializer_function_ser_schema(
                lambda instance: {
                    "content": str(instance),
                    "__origin_trace__": (instance.__origin_trace__),
                    "__lstr": True,
                }
            ),
        )
    @property
    def origin_trace(self) -> FrozenSet[str]:
        """
        Get the origin_trace(s) of this lstr instance.
        Returns:
            FrozenSet[str]: A frozen set of strings representing the origin_trace(s) of this lstr instance.
        """
        return self.__origin_trace__
    ########################
    ## Overriding methods ##
    ########################
    def __repr__(self) -> str:
        """
        Return a string representation of this lstr instance.
        Returns:
            str: A string representation of this lstr instance (delegates to str.__repr__).
        """
        return super().__repr__()
    def __add__(self, other: Union[str, "_lstr"]) -> "_lstr":
        """
        Concatenate this lstr instance with another string or lstr instance.
        Args:
            other (Union[str, "lstr"]): The string or lstr instance to concatenate with this instance.
        Returns:
            lstr: A new lstr instance containing the concatenated content, with the origin_trace(s) updated accordingly.
        """
        new_content = super(_lstr, self).__add__(other)
        self_origin = self.__origin_trace__
        if isinstance(other, _lstr):
            new_origin = self_origin
            new_origin = new_origin.union(other.__origin_trace__)
        else:
            new_origin = self_origin
        return _lstr(new_content, None, frozenset(new_origin))
    def __mod__(
        self, other: Union[str, "_lstr", Tuple[Union[str, "_lstr"], ...]]
    ) -> "_lstr":
        """
        Perform a modulo operation between this lstr instance and another string, lstr, or a tuple of strings and lstrs,
        tracing the operation by logging the operands and the result.
        Args:
            other (Union[str, "lstr", Tuple[Union[str, "lstr"], ...]]): The right operand in the modulo operation.
        Returns:
            lstr: A new lstr instance containing the result of the modulo operation, with the origin_trace(s) updated accordingly.
        """
        # If 'other' is a tuple, we need to handle each element
        if isinstance(other, tuple):
            result_content = super(_lstr, self).__mod__(tuple(str(o) for o in other))
            new__origin_trace__s = set(self.__origin_trace__)
            for item in other:
                if isinstance(item, _lstr):
                    new__origin_trace__s.update(item.__origin_trace__)
            new__origin_trace__ = frozenset(new__origin_trace__s)
        else:
            result_content = super(_lstr, self).__mod__(other)
            if isinstance(other, _lstr):
                new__origin_trace__ = self.__origin_trace__.union(
                    other.__origin_trace__
                )
            else:
                new__origin_trace__ = self.__origin_trace__
        return _lstr(result_content, None, new__origin_trace__)
    def __mul__(self, other: SupportsIndex) -> "_lstr":
        """
        Perform a multiplication operation between this lstr instance and an integer or another lstr,
        tracing the operation by logging the operands and the result.
        Args:
            other (Union[SupportsIndex, "lstr"]): The right operand in the multiplication operation.
        Returns:
            lstr: A new lstr instance containing the result of the multiplication operation, with the origin_trace(s) updated accordingly.
        """
        if isinstance(other, SupportsIndex):
            result_content = super(_lstr, self).__mul__(other)
            new__origin_trace__ = self.__origin_trace__
        else:
            return NotImplemented
        return _lstr(result_content, None, new__origin_trace__)
    def __rmul__(self, other: SupportsIndex) -> "_lstr":
        """
        Perform a right multiplication operation between an integer or another lstr and this lstr instance,
        tracing the operation by logging the operands and the result.
        Args:
            other (Union[SupportsIndex, "lstr"]): The left operand in the multiplication operation.
        Returns:
            lstr: A new lstr instance containing the result of the multiplication operation, with the origin_trace(s) updated accordingly.
        """
        return self.__mul__(other) # Multiplication is commutative in this context
    def __getitem__(self, key: Union[SupportsIndex, slice]) -> "_lstr":
        """
        Get a slice or index of this lstr instance.
        Args:
            key (Union[SupportsIndex, slice]): The index or slice to retrieve.
        Returns:
            lstr: A new lstr instance containing the sliced or indexed content, with the origin_trace(s) preserved.
        """
        result = super(_lstr, self).__getitem__(key)
        # Opinionated design choice: indexing divorces the logits of the
        # indexed result from the context that produced them, so any mutation
        # of the string invalidates the logits. We therefore return the slice
        # with no logits rather than slicing the original logits.
        # try:
        #     logit_subset = self._logits[key] if self._logits else None
        # except:
        #     logit_subset = None
        logit_subset = None
        return _lstr(result, logit_subset, self.__origin_trace__)
    def __getattribute__(self, name: str) -> Union[Callable, Any]:
        """
        Get an attribute from this lstr instance.

        Inherited str methods are wrapped so that any str result they return
        is converted back into an _lstr carrying the union of origin traces
        from self and any _lstr arguments.
        Args:
            name (str): The name of the attribute to retrieve.
        Returns:
            Union[Callable, Any]: The requested attribute, which may be a method or a value.
        """
        # Get the attribute from the superclass (str)
        # First, try to get the attribute from the current class instance
        # Get the attribute using the superclass method
        attr = super().__getattribute__(name)
        # Check if the attribute is a callable and not defined in lstr class itself
        if name == "__class__":
            return type(self)
        if callable(attr) and name not in _lstr.__dict__:
            def wrapped(*args: Any, **kwargs: Any) -> Any:
                result = attr(*args, **kwargs)
                # If the result is a string, return an lstr instance
                if isinstance(result, str):
                    origin_traces = self.__origin_trace__
                    for arg in args:
                        if isinstance(arg, _lstr):
                            origin_traces = origin_traces.union(arg.__origin_trace__)
                    for key, value in kwargs.items():
                        if isinstance(value, _lstr):
                            origin_traces = origin_traces.union(value.__origin_trace__)
                    return _lstr(result, None, origin_traces)
                return result
            return wrapped
        return attr
    @override
    def join(self, iterable: Iterable[Union[str, "_lstr"]]) -> "_lstr":
        """
        Join a sequence of strings or lstr instances into a single lstr instance.
        Args:
            iterable (Iterable[Union[str, "lstr"]]): The sequence of strings or lstr instances to join.
        Returns:
            lstr: A new lstr instance containing the joined content, with the origin_trace(s) updated accordingly.
        """
        new__origin_trace__ = self.__origin_trace__
        parts = []
        for item in iterable:
            if isinstance(item, _lstr):
                new__origin_trace__ = new__origin_trace__.union(item.__origin_trace__)
            parts.append(item)
        new_content = super(_lstr, self).join(parts)
        return _lstr(new_content, None, new__origin_trace__)
    @override
    def split(
        self, sep: Optional[Union[str, "_lstr"]] = None, maxsplit: SupportsIndex = -1
    ) -> List["_lstr"]:
        """
        Split this lstr instance into a list of lstr instances based on a separator.
        Args:
            sep (Optional[Union[str, "lstr"]], optional): The separator to split on. Defaults to None.
            maxsplit (SupportsIndex, optional): The maximum number of splits to perform. Defaults to -1.
        Returns:
            List["lstr"]: A list of lstr instances containing the split content, with the origin_trace(s) preserved.
        """
        return self._split_helper(super(_lstr, self).split, sep, maxsplit)
    @override
    def rsplit(
        self, sep: Optional[Union[str, "_lstr"]] = None, maxsplit: SupportsIndex = -1
    ) -> List["_lstr"]:
        """
        Split this lstr instance into a list of lstr instances based on a separator, starting from the right.
        Args:
            sep (Optional[Union[str, "lstr"]], optional): The separator to split on. Defaults to None.
            maxsplit (SupportsIndex, optional): The maximum number of splits to perform. Defaults to -1.
        Returns:
            List["lstr"]: A list of lstr instances containing the split content, with the origin_trace(s) preserved.
        """
        return self._split_helper(super(_lstr, self).rsplit, sep, maxsplit)
    @override
    def splitlines(self, keepends: bool = False) -> List["_lstr"]:
        """
        Split this lstr instance into a list of lstr instances based on line breaks.
        Args:
            keepends (bool, optional): Whether to include the line breaks in the resulting lstr instances. Defaults to False.
        Returns:
            List["lstr"]: A list of lstr instances containing the split content, with the origin_trace(s) preserved.
        """
        return [
            _lstr(p, None, self.__origin_trace__)
            for p in super(_lstr, self).splitlines(keepends=keepends)
        ]
    @override
    def partition(self, sep: Union[str, "_lstr"]) -> Tuple["_lstr", "_lstr", "_lstr"]:
        """
        Partition this lstr instance into three lstr instances based on a separator.
        Args:
            sep (Union[str, "lstr"]): The separator to partition on.
        Returns:
            Tuple["lstr", "lstr", "lstr"]: A tuple of three lstr instances containing the content before the separator, the separator itself, and the content after the separator, with the origin_trace(s) updated accordingly.
        """
        return self._partition_helper(super(_lstr, self).partition, sep)
    @override
    def rpartition(self, sep: Union[str, "_lstr"]) -> Tuple["_lstr", "_lstr", "_lstr"]:
        """
        Partition this lstr instance into three lstr instances based on a separator, starting from the right.
        Args:
            sep (Union[str, "lstr"]): The separator to partition on.
        Returns:
            Tuple["lstr", "lstr", "lstr"]: A tuple of three lstr instances containing the content before the separator, the separator itself, and the content after the separator, with the origin_trace(s) updated accordingly.
        """
        return self._partition_helper(super(_lstr, self).rpartition, sep)
    def _partition_helper(
        self, method, sep: Union[str, "_lstr"]
    ) -> Tuple["_lstr", "_lstr", "_lstr"]:
        """
        Helper method for partitioning this lstr instance based on a separator.
        Args:
            method (Callable): The partitioning method to use (either partition or rpartition).
            sep (Union[str, "lstr"]): The separator to partition on.
        Returns:
            Tuple["lstr", "lstr", "lstr"]: A tuple of three lstr instances containing the content before the separator, the separator itself, and the content after the separator, with the origin_trace(s) updated accordingly.
        """
        part1, part2, part3 = method(sep)
        new__origin_trace__ = (
            self.__origin_trace__ | sep.__origin_trace__
            if isinstance(sep, _lstr)
            else self.__origin_trace__
        )
        return (
            _lstr(part1, None, new__origin_trace__),
            _lstr(part2, None, new__origin_trace__),
            _lstr(part3, None, new__origin_trace__),
        )
    def _split_helper(
        self,
        method,
        sep: Optional[Union[str, "_lstr"]] = None,
        maxsplit: SupportsIndex = -1,
    ) -> List["_lstr"]:
        """
        Helper method for splitting this lstr instance based on a separator.
        Args:
            method (Callable): The splitting method to use (either split or rsplit).
            sep (Optional[Union[str, "lstr"]], optional): The separator to split on. Defaults to None.
            maxsplit (SupportsIndex, optional): The maximum number of splits to perform. Defaults to -1.
        Returns:
            List["lstr"]: A list of lstr instances containing the split content, with the origin_trace(s) preserved.
        """
        origin_traces = (
            self.__origin_trace__ | sep.__origin_trace__
            if isinstance(sep, _lstr)
            else self.__origin_trace__
        )
        parts = method(sep, maxsplit)
        return [_lstr(part, None, origin_traces) for part in parts]
if __name__ == "__main__":
    # Ad-hoc micro-benchmarks comparing _lstr against the built-in str,
    # followed by a cProfile run of the _lstr.__add__ hot path.
    import timeit
    import random
    import string
    def generate_random_string(length):
        # Random alphanumeric fixture of the requested length.
        return "".join(random.choices(string.ascii_letters + string.digits, k=length))
    def test_concatenation():
        s1 = generate_random_string(1000)
        s2 = generate_random_string(1000)
        lstr_time = timeit.timeit(lambda: _lstr(s1) + _lstr(s2), number=10000)
        str_time = timeit.timeit(lambda: s1 + s2, number=10000)
        print(f"Concatenation: lstr: {lstr_time:.6f}s, str: {str_time:.6f}s")
    def test_slicing():
        s = generate_random_string(10000)
        ls = _lstr(s)
        lstr_time = timeit.timeit(lambda: ls[1000:2000], number=10000)
        str_time = timeit.timeit(lambda: s[1000:2000], number=10000)
        print(f"Slicing: lstr: {lstr_time:.6f}s, str: {str_time:.6f}s")
    def test_splitting():
        s = generate_random_string(10000)
        ls = _lstr(s)
        lstr_time = timeit.timeit(lambda: ls.split(), number=1000)
        str_time = timeit.timeit(lambda: s.split(), number=1000)
        print(f"Splitting: lstr: {lstr_time:.6f}s, str: {str_time:.6f}s")
    def test_joining():
        words = [generate_random_string(10) for _ in range(1000)]
        lwords = [_lstr(word) for word in words]
        lstr_time = timeit.timeit(lambda: _lstr(" ").join(lwords), number=1000)
        str_time = timeit.timeit(lambda: " ".join(words), number=1000)
        print(f"Joining: lstr: {lstr_time:.6f}s, str: {str_time:.6f}s")
    print("Running performance tests...")
    test_concatenation()
    test_slicing()
    test_splitting()
    test_joining()
    import cProfile
    import pstats
    from io import StringIO
    def test_add():
        s1 = generate_random_string(1000)
        s2 = generate_random_string(1000)
        ls1 = _lstr(s1, None, "origin1")
        ls2 = _lstr(s2, None, "origin2")
        for _ in range(100000):
            result = ls1 + ls2  # result intentionally unused; only the op is timed
    print("\nProfiling __add__ method:")
    profiler = cProfile.Profile()
    profiler.enable()
    test_add()
    profiler.disable()
    s = StringIO()
    ps = pstats.Stats(profiler, stream=s).sort_stats("cumulative")
    ps.print_stats(20) # Print top 20 lines
    print(s.getvalue())
| 5,364
|
d860c5710ae89353cc2e621066f5facf5ec816adbb6a893facdb65a529d83a48
| 40.682156
| 357
| 0.611014
| 4.180649
| false
| false
| false
| false
|
MadcowD/ell
|
src/ell/util/serialization.py
| 5,626
| 0
|
MIT License
|
# Global converter
import base64
from datetime import datetime, timezone
import hashlib
from io import BytesIO
import json
import cattrs
import numpy as np
from pydantic import BaseModel
import PIL
from ell.types._lstr import _lstr
pydantic_ltype_aware_cattr = cattrs.Converter()
def serialize_image(img):
    """Encode *img* as a PNG data URI string.

    Args:
        img: A PIL-image-like object exposing ``save(buffer, format=...)``.

    Returns:
        str: ``data:image/png;base64,<payload>``.
    """
    sink = BytesIO()
    img.save(sink, format="PNG")
    payload = base64.b64encode(sink.getvalue()).decode()
    return f"data:image/png;base64,{payload}"
# Register hooks for complex types
# 3-D ndarrays are serialized as PNG data URIs (tagged "__limage"); all other
# ndarrays are stored as raw base64 bytes plus dtype/shape (tagged "__lndarray").
# NOTE(review): assumes every 3-D array is HxWxC image data — confirm callers.
pydantic_ltype_aware_cattr.register_unstructure_hook(
    np.ndarray,
    lambda arr: {
        "content": serialize_image(PIL.Image.fromarray(arr)),
        "__limage": True
    } if arr.ndim == 3 else (
        {
            "content": base64.b64encode(arr.tobytes()).decode(),
            "dtype": str(arr.dtype),
            "shape": arr.shape,
            "__lndarray": True
        }
    )
)
# Sets and frozensets serialize as sorted lists so the output is deterministic.
pydantic_ltype_aware_cattr.register_unstructure_hook(
    set,
    lambda s: list(sorted(s))
)
pydantic_ltype_aware_cattr.register_unstructure_hook(
    frozenset,
    lambda s: list(sorted(s))
)
# PIL images become PNG data URIs, tagged "__limage".
pydantic_ltype_aware_cattr.register_unstructure_hook(
    PIL.Image.Image,
    lambda obj: {
        "content": serialize_image(obj),
        "__limage": True
    }
)
def unstructure_lstr(obj):
    # Flatten an _lstr into its text plus its instance attributes
    # (e.g. __origin_trace__), tagged "__lstr" for round-tripping.
    return dict(content=str(obj), **obj.__dict__, __lstr=True)
pydantic_ltype_aware_cattr.register_unstructure_hook(
    _lstr,
    unstructure_lstr
)
# Pydantic models serialize via model_dump, omitting unset and None fields.
pydantic_ltype_aware_cattr.register_unstructure_hook(
    BaseModel,
    lambda obj: obj.model_dump(exclude_none=True, exclude_unset=True)
)
def get_immutable_vars(vars_dict):
    """Return a JSON-friendly snapshot of *vars_dict*.

    Primitives pass through unchanged; lists/tuples/dicts are converted
    recursively; sets and frozensets become sorted lists; ndarrays become
    nested lists; anything else is replaced with an opaque
    "<Object of type ...>" placeholder.
    """
    converter = cattrs.Converter()
    def handle_complex_types(obj):
        if isinstance(obj, (int, float, str, bool, type(None))):
            return obj
        elif isinstance(obj, (list, tuple)):
            return [handle_complex_types(item) if not isinstance(item, (int, float, str, bool, type(None))) else item for item in obj]
        elif isinstance(obj, dict):
            return {k: handle_complex_types(v) if not isinstance(v, (int, float, str, bool, type(None))) else v for k, v in obj.items()}
        elif isinstance(obj, (set, frozenset)):
            # Sorted so the serialized form is deterministic.
            return list(sorted(handle_complex_types(item) if not isinstance(item, (int, float, str, bool, type(None))) else item for item in obj))
        elif isinstance(obj, np.ndarray):
            return obj.tolist()
        else:
            return f"<Object of type {type(obj).__name__}>"
    # Hooking `object` routes every value through handle_complex_types.
    converter.register_unstructure_hook(object, handle_complex_types)
    x = converter.unstructure(vars_dict)
    return x
def compute_state_cache_key(ipstr, fn_closure):
    """Compute a stable cache key for an invocation's state.

    Concatenates the input string with deterministic JSON serializations of
    the global and local free variables captured in the closure, and hashes
    the result.

    Args:
        ipstr: Serialized input/prompt string for the invocation.
        fn_closure: Closure tuple; index 2 holds the global free variables
            and index 3 holds the local free variables (both dicts).

    Returns:
        str: Hex-encoded SHA-256 digest identifying the state.
    """
    # sort_keys=True makes the serialization deterministic; default=repr
    # handles values that are not natively JSON-serializable. (The previous
    # version wrapped these calls in redundant f-strings.)
    _global_free_vars_str = json.dumps(
        get_immutable_vars(fn_closure[2]), sort_keys=True, default=repr, ensure_ascii=False
    )
    _free_vars_str = json.dumps(
        get_immutable_vars(fn_closure[3]), sort_keys=True, default=repr, ensure_ascii=False
    )
    state_cache_key = hashlib.sha256(
        f"{ipstr}{_global_free_vars_str}{_free_vars_str}".encode('utf-8')
    ).hexdigest()
    return state_cache_key
def serialize_object(obj):
    """Serialize *obj* to a deterministic JSON string via the type-aware converter."""
    unstructured = pydantic_ltype_aware_cattr.unstructure(obj)
    return json.dumps(unstructured, sort_keys=True, default=repr, ensure_ascii=False)
def prepare_invocation_params(params):
    """Serialize invocation params and extract the origin traces they consume.

    Returns:
        tuple: (params round-tripped through JSON as plain data, the JSON
        string itself, a list of origin-trace ids found in the serialization).
    """
    invocation_params = params
    # This is because we need the caching to work on the hash of a cleaned and serialized object.
    jstr = serialize_object(invocation_params)
    consumes = set()
    import re
    # XXX: Better than registering a hook in cattrs.
    # Matches the serialized repr of an _lstr origin trace, e.g.
    # "__origin_trace__": "frozenset({'abc', 'def'})".
    pattern = r'"__origin_trace__":\s*"frozenset\({(.+?)}\)"'
    # Find all matches in the jstr
    matches = re.findall(pattern, jstr)
    # Process each match and add to consumes set
    for match in matches:
        # Remove quotes and spaces, then split by comma
        items = [item.strip().strip("'") for item in match.split(',')]
        consumes.update(items)
    consumes = list(consumes)
    # XXX: We only reload because of 'input' caching; this could be skipped by
    # caching on the model output instead of the input hash — same prompt, same
    # output, irrespective of version.
    return json.loads(jstr), jstr, consumes
def is_immutable_variable(value):
    """
    Check if a value is immutable.

    This function determines whether the given value is of an immutable type in Python.
    Immutable types are objects whose state cannot be modified after they are created.

    Args:
        value: Any Python object to check for immutability.

    Returns:
        bool: True if the value is immutable, False otherwise.

    Note:
        - This function checks for common immutable types in Python.
        - Custom classes are considered mutable unless they explicitly implement
          immutability (which this function doesn't check for).
        - For container types like tuple and frozenset, the check is shallow:
          the container itself is immutable, but its contents might not be.
    """
    immutable_types = (
        int, float, complex, str, bytes,
        tuple, frozenset, type(None),
        bool,  # booleans are immutable
        range,  # range objects are immutable
        slice,  # slice objects are immutable
    )
    # The previous version had an extra deep content check for tuple/frozenset,
    # but it was unreachable: both types already match the isinstance test
    # below, which returns first. The check is intentionally shallow (see
    # Note), so the dead branch has been removed — behavior is unchanged.
    return isinstance(value, immutable_types)
def utc_now() -> datetime:
    """Return the current moment as a timezone-aware UTC datetime.

    Serializes to ISO-8601.
    """
    now = datetime.now(tz=timezone.utc)
    return now
| 1,524
|
4306084ce2c4b20662dc7a0f9c6c076f2f0d684f1fee3a90e1c8cd2cfa7385f6
| 30.965909
| 205
| 0.665837
| 3.691601
| false
| false
| false
| false
|
fudan-generative-vision/hallo2
|
basicsr/utils/img_util.py
| 6,138
| 0
|
MIT License
|
import cv2
import math
import numpy as np
import os
import torch
from torchvision.utils import make_grid
def img2tensor(imgs, bgr2rgb=True, float32=True):
    """Convert numpy image array(s) to torch tensor(s).

    Args:
        imgs (list[ndarray] | ndarray): Input image(s) in HWC layout.
        bgr2rgb (bool): Whether to convert BGR channel order to RGB.
        float32 (bool): Whether to cast the result(s) to float32.

    Returns:
        list[tensor] | tensor: CHW tensor(s). A single tensor is returned
        when a single ndarray is given.
    """
    def _convert(image):
        # Channel swap applies only to 3-channel images when requested.
        if image.shape[2] == 3 and bgr2rgb:
            # float64 is downcast first — presumably cv2.cvtColor rejects it
            # for this conversion (original behavior preserved).
            if image.dtype == 'float64':
                image = image.astype('float32')
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        tensor = torch.from_numpy(image.transpose(2, 0, 1))
        return tensor.float() if float32 else tensor
    if isinstance(imgs, list):
        return [_convert(image) for image in imgs]
    return _convert(imgs)
def tensor2img(tensor, rgb2bgr=True, out_type=np.uint8, min_max=(0, 1)):
    """Convert torch Tensor(s) into image numpy array(s).

    Values are clamped to ``min_max`` and normalized to [0, 1] before the
    optional uint8 conversion.

    Args:
        tensor (Tensor or list[Tensor]): Accepted shapes:
            1) 4D mini-batch tensor (B x 3/1 x H x W), tiled via make_grid;
            2) 3D tensor (3/1 x H x W);
            3) 2D tensor (H x W).
            Channels are expected in RGB order.
        rgb2bgr (bool): Swap RGB to BGR for 3-channel outputs.
        out_type (numpy type): ``np.uint8`` scales to [0, 255]; any other
            type keeps the [0, 1] float range. Default: ``np.uint8``.
        min_max (tuple[int]): Clamp bounds applied before normalization.

    Returns:
        (ndarray or list[ndarray]): HWC (or HW for single-channel) arrays;
        a single input yields a single array.
    """
    is_single = torch.is_tensor(tensor)
    if not (is_single or (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))):
        raise TypeError(f'tensor or list of tensors expected, got {type(tensor)}')

    items = [tensor] if is_single else tensor
    images = []
    for item in items:
        # Clamp, then rescale the clamped range onto [0, 1].
        item = item.squeeze(0).float().detach().cpu().clamp_(*min_max)
        item = (item - min_max[0]) / (min_max[1] - min_max[0])
        ndim = item.dim()
        if ndim == 4:
            # Mini-batch: tile the batch into one square-ish grid image.
            img_np = make_grid(item, nrow=int(math.sqrt(item.size(0))), normalize=False).numpy()
            img_np = img_np.transpose(1, 2, 0)
            if rgb2bgr:
                img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
        elif ndim == 3:
            img_np = item.numpy().transpose(1, 2, 0)
            if img_np.shape[2] == 1:
                # Single channel: drop the channel axis (grayscale output).
                img_np = np.squeeze(img_np, axis=2)
            elif rgb2bgr:
                img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
        elif ndim == 2:
            img_np = item.numpy()
        else:
            raise TypeError(f'Only support 4D, 3D or 2D tensor. But received with dimension: {ndim}')
        if out_type == np.uint8:
            # numpy's astype truncates; round explicitly (MATLAB-compatible).
            img_np = (img_np * 255.0).round()
        images.append(img_np.astype(out_type))

    return images[0] if len(images) == 1 else images
def tensor2img_fast(tensor, rgb2bgr=True, min_max=(0, 1)):
    """Faster variant of ``tensor2img`` for a single (1, c, h, w) tensor.

    Args:
        tensor (Tensor): A torch tensor of shape (1, c, h, w).
        rgb2bgr (bool): Swap RGB to BGR in the output. Default: True.
        min_max (tuple[int]): Clamp bounds applied before normalization.
    """
    image = tensor.squeeze(0).detach().clamp_(*min_max).permute(1, 2, 0)
    # Map the clamped range onto [0, 255]; uint8 cast truncates.
    image = (image - min_max[0]) / (min_max[1] - min_max[0]) * 255
    image = image.type(torch.uint8).cpu().numpy()
    return cv2.cvtColor(image, cv2.COLOR_RGB2BGR) if rgb2bgr else image
def imfrombytes(content, flag='color', float32=False):
    """Decode an image from an in-memory byte buffer.

    Args:
        content (bytes): Encoded image bytes from a file or stream.
        flag (str): Color mode for decoding; one of ``color``,
            ``grayscale`` or ``unchanged``.
        float32 (bool): If True, cast to float32 and normalize to [0, 1].
            Default: False.

    Returns:
        ndarray: Decoded image array.
    """
    buffer = np.frombuffer(content, np.uint8)
    mode_flags = {'color': cv2.IMREAD_COLOR, 'grayscale': cv2.IMREAD_GRAYSCALE, 'unchanged': cv2.IMREAD_UNCHANGED}
    image = cv2.imdecode(buffer, mode_flags[flag])
    if float32:
        image = image.astype(np.float32) / 255.
    return image
def imwrite(img, file_path, params=None, auto_mkdir=True):
    """Write an image array to disk via OpenCV.

    Args:
        img (ndarray): Image array to write.
        file_path (str): Destination file path.
        params (None or list): Passed through to OpenCV's ``imwrite``.
        auto_mkdir (bool): Create the parent directory of ``file_path``
            if it does not already exist.

    Returns:
        bool: Whether the write succeeded.
    """
    if auto_mkdir:
        # Create the destination directory on demand so callers need not.
        parent_dir = os.path.abspath(os.path.dirname(file_path))
        os.makedirs(parent_dir, exist_ok=True)
    return cv2.imwrite(file_path, img, params)
def crop_border(imgs, crop_border):
    """Crop ``crop_border`` pixels from every side of each image.

    Args:
        imgs (list[ndarray] | ndarray): Image(s) with shape (h, w, c).
        crop_border (int): Number of pixels trimmed from each edge of
            both height and width.

    Returns:
        list[ndarray] | ndarray: Cropped image(s); input is returned
        unchanged when ``crop_border`` is 0.
    """
    if crop_border == 0:
        return imgs
    window = slice(crop_border, -crop_border)
    if isinstance(imgs, list):
        return [image[window, window, ...] for image in imgs]
    return imgs[window, window, ...]
| 1,809
|
21abd26f0a267f522f451265d90e6ba41860f681257ea7be013cd73598de2ae6
| 34.900585
| 116
| 0.591398
| 3.393035
| false
| false
| false
| false
|
fudan-generative-vision/hallo2
|
basicsr/utils/realesrgan_utils.py
| 12,264
| 0
|
MIT License
|
import cv2
import math
import numpy as np
import os
import queue
import threading
import torch
from torch.nn import functional as F
from basicsr.utils.download_util import load_file_from_url
from basicsr.utils.misc import get_device
# ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
class RealESRGANer():
    """A helper class for upsampling images with RealESRGAN.

    Args:
        scale (int): Upsampling scale factor used in the networks. It is usually 2 or 4.
        model_path (str): The path to the pretrained model. It can be urls (will first download it automatically).
        model (nn.Module): The defined network. Default: None.
        tile (int): As too large images result in the out of GPU memory issue, so this tile option will first crop
            input images into tiles, and then process each of them. Finally, they will be merged into one image.
            0 denotes for do not use tile. Default: 0.
        tile_pad (int): The pad size for each tile, to remove border artifacts. Default: 10.
        pre_pad (int): Pad the input images to avoid border artifacts. Default: 10.
        half (bool): Whether to use half precision during inference. Default: False.
        device (torch.device): Device override; resolved via get_device(gpu_id) when None.
        gpu_id (int): GPU index forwarded to get_device when `device` is None.
    """
    def __init__(self,
                 scale,
                 model_path,
                 model=None,
                 tile=0,
                 tile_pad=10,
                 pre_pad=10,
                 half=False,
                 device=None,
                 gpu_id=None):
        self.scale = scale
        self.tile_size = tile
        self.tile_pad = tile_pad
        self.pre_pad = pre_pad
        self.mod_scale = None
        self.half = half
        # initialize model
        # if gpu_id:
        #     self.device = torch.device(
        #         f'cuda:{gpu_id}' if torch.cuda.is_available() else 'cpu') if device is None else device
        # else:
        #     self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if device is None else device
        self.device = get_device(gpu_id) if device is None else device
        # if the model_path starts with https, it will first download models to the folder: realesrgan/weights
        if model_path.startswith('https://'):
            model_path = load_file_from_url(
                url=model_path, model_dir=os.path.join('weights/realesrgan'), progress=True, file_name=None)
        # Checkpoints store weights under 'params_ema' (EMA weights) or 'params'.
        loadnet = torch.load(model_path, map_location=torch.device('cpu'))
        # prefer to use params_ema
        if 'params_ema' in loadnet:
            keyname = 'params_ema'
        else:
            keyname = 'params'
        model.load_state_dict(loadnet[keyname], strict=True)
        model.eval()
        self.model = model.to(self.device)
        if self.half:
            self.model = self.model.half()
    def pre_process(self, img):
        """Pre-process, such as pre-pad and mod pad, so that the images can be divisible.

        Converts an HWC numpy image into a padded NCHW tensor stored in
        ``self.img``; padding amounts are remembered for post_process.
        """
        img = torch.from_numpy(np.transpose(img, (2, 0, 1))).float()
        self.img = img.unsqueeze(0).to(self.device)
        if self.half:
            self.img = self.img.half()
        # pre_pad: reflect-pad right/bottom edges to reduce border artifacts
        if self.pre_pad != 0:
            self.img = F.pad(self.img, (0, self.pre_pad, 0, self.pre_pad), 'reflect')
        # mod pad for divisible borders
        if self.scale == 2:
            self.mod_scale = 2
        elif self.scale == 1:
            self.mod_scale = 4
        if self.mod_scale is not None:
            self.mod_pad_h, self.mod_pad_w = 0, 0
            _, _, h, w = self.img.size()
            # Pad so height/width become multiples of mod_scale.
            if (h % self.mod_scale != 0):
                self.mod_pad_h = (self.mod_scale - h % self.mod_scale)
            if (w % self.mod_scale != 0):
                self.mod_pad_w = (self.mod_scale - w % self.mod_scale)
            self.img = F.pad(self.img, (0, self.mod_pad_w, 0, self.mod_pad_h), 'reflect')
    def process(self):
        # model inference over the whole (untiled) image
        self.output = self.model(self.img)
    def tile_process(self):
        """It will first crop input images to tiles, and then process each tile.

        Finally, all the processed tiles are merged into one image.
        Modified from: https://github.com/ata4/esrgan-launcher
        """
        batch, channel, height, width = self.img.shape
        output_height = height * self.scale
        output_width = width * self.scale
        output_shape = (batch, channel, output_height, output_width)
        # start with black image
        self.output = self.img.new_zeros(output_shape)
        tiles_x = math.ceil(width / self.tile_size)
        tiles_y = math.ceil(height / self.tile_size)
        # loop over all tiles
        for y in range(tiles_y):
            for x in range(tiles_x):
                # extract tile from input image
                ofs_x = x * self.tile_size
                ofs_y = y * self.tile_size
                # input tile area on total image
                input_start_x = ofs_x
                input_end_x = min(ofs_x + self.tile_size, width)
                input_start_y = ofs_y
                input_end_y = min(ofs_y + self.tile_size, height)
                # input tile area on total image with padding (context for the model)
                input_start_x_pad = max(input_start_x - self.tile_pad, 0)
                input_end_x_pad = min(input_end_x + self.tile_pad, width)
                input_start_y_pad = max(input_start_y - self.tile_pad, 0)
                input_end_y_pad = min(input_end_y + self.tile_pad, height)
                # input tile dimensions
                input_tile_width = input_end_x - input_start_x
                input_tile_height = input_end_y - input_start_y
                tile_idx = y * tiles_x + x + 1
                input_tile = self.img[:, :, input_start_y_pad:input_end_y_pad, input_start_x_pad:input_end_x_pad]
                # upscale tile
                try:
                    with torch.no_grad():
                        output_tile = self.model(input_tile)
                except RuntimeError as error:
                    # NOTE(review): if inference raises here, `output_tile` is
                    # left undefined and the assignment below raises NameError
                    # instead of propagating the original error — confirm intent.
                    print('Error', error)
                # print(f'\tTile {tile_idx}/{tiles_x * tiles_y}')
                # output tile area on total image
                output_start_x = input_start_x * self.scale
                output_end_x = input_end_x * self.scale
                output_start_y = input_start_y * self.scale
                output_end_y = input_end_y * self.scale
                # output tile area without padding
                output_start_x_tile = (input_start_x - input_start_x_pad) * self.scale
                output_end_x_tile = output_start_x_tile + input_tile_width * self.scale
                output_start_y_tile = (input_start_y - input_start_y_pad) * self.scale
                output_end_y_tile = output_start_y_tile + input_tile_height * self.scale
                # put tile into output image (cropping away the pad context)
                self.output[:, :, output_start_y:output_end_y,
                            output_start_x:output_end_x] = output_tile[:, :, output_start_y_tile:output_end_y_tile,
                                                                       output_start_x_tile:output_end_x_tile]
    def post_process(self):
        # remove extra pad (undo the mod-scale padding, scaled up)
        if self.mod_scale is not None:
            _, _, h, w = self.output.size()
            self.output = self.output[:, :, 0:h - self.mod_pad_h * self.scale, 0:w - self.mod_pad_w * self.scale]
        # remove prepad
        if self.pre_pad != 0:
            _, _, h, w = self.output.size()
            self.output = self.output[:, :, 0:h - self.pre_pad * self.scale, 0:w - self.pre_pad * self.scale]
        return self.output
    @torch.no_grad()
    def enhance(self, img, outscale=None, alpha_upsampler='realesrgan'):
        """Upscale a BGR/gray/BGRA numpy image; returns (output, img_mode)."""
        h_input, w_input = img.shape[0:2]
        # img: numpy
        img = img.astype(np.float32)
        # Heuristic: values above 256 imply a 16-bit source image.
        if np.max(img) > 256:  # 16-bit image
            max_range = 65535
            print('\tInput is a 16-bit image')
        else:
            max_range = 255
        img = img / max_range
        if len(img.shape) == 2:  # gray image
            img_mode = 'L'
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
        elif img.shape[2] == 4:  # RGBA image with alpha channel
            img_mode = 'RGBA'
            alpha = img[:, :, 3]
            img = img[:, :, 0:3]
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            if alpha_upsampler == 'realesrgan':
                alpha = cv2.cvtColor(alpha, cv2.COLOR_GRAY2RGB)
        else:
            img_mode = 'RGB'
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # ------------------- process image (without the alpha channel) ------------------- #
        try:
            with torch.no_grad():
                self.pre_process(img)
                if self.tile_size > 0:
                    self.tile_process()
                else:
                    self.process()
                output_img_t = self.post_process()
                output_img = output_img_t.data.squeeze().float().cpu().clamp_(0, 1).numpy()
                # [2, 1, 0] flips RGB back to BGR while moving CHW -> HWC.
                output_img = np.transpose(output_img[[2, 1, 0], :, :], (1, 2, 0))
                if img_mode == 'L':
                    output_img = cv2.cvtColor(output_img, cv2.COLOR_BGR2GRAY)
            del output_img_t
            torch.cuda.empty_cache()
        except RuntimeError as error:
            # NOTE(review): on failure `output_img` is undefined, so the code
            # below raises NameError rather than returning — confirm intent.
            print(f"Failed inference for RealESRGAN: {error}")
        # ------------------- process the alpha channel if necessary ------------------- #
        if img_mode == 'RGBA':
            if alpha_upsampler == 'realesrgan':
                self.pre_process(alpha)
                if self.tile_size > 0:
                    self.tile_process()
                else:
                    self.process()
                output_alpha = self.post_process()
                output_alpha = output_alpha.data.squeeze().float().cpu().clamp_(0, 1).numpy()
                output_alpha = np.transpose(output_alpha[[2, 1, 0], :, :], (1, 2, 0))
                output_alpha = cv2.cvtColor(output_alpha, cv2.COLOR_BGR2GRAY)
            else:  # use the cv2 resize for alpha channel
                h, w = alpha.shape[0:2]
                output_alpha = cv2.resize(alpha, (w * self.scale, h * self.scale), interpolation=cv2.INTER_LINEAR)
            # merge the alpha channel
            output_img = cv2.cvtColor(output_img, cv2.COLOR_BGR2BGRA)
            output_img[:, :, 3] = output_alpha
        # ------------------------------ return ------------------------------ #
        if max_range == 65535:  # 16-bit image
            output = (output_img * 65535.0).round().astype(np.uint16)
        else:
            output = (output_img * 255.0).round().astype(np.uint8)
        # Optional final resize when the requested outscale differs from the
        # network's native scale factor.
        if outscale is not None and outscale != float(self.scale):
            output = cv2.resize(
                output, (
                    int(w_input * outscale),
                    int(h_input * outscale),
                ), interpolation=cv2.INTER_LANCZOS4)
        return output, img_mode
class PrefetchReader(threading.Thread):
    """Background thread that pre-loads images into a bounded queue.

    Iterate over the instance to consume decoded images; iteration stops
    once every path has been read.

    Args:
        img_list (list[str]): Paths of the images to read.
        num_prefetch_queue (int): Maximum number of decoded images held in
            the prefetch queue at once.
    """

    def __init__(self, img_list, num_prefetch_queue):
        super().__init__()
        self.img_list = img_list
        self.que = queue.Queue(num_prefetch_queue)

    def run(self):
        # Decode each image off the consumer thread; None marks exhaustion.
        for path in self.img_list:
            self.que.put(cv2.imread(path, cv2.IMREAD_UNCHANGED))
        self.que.put(None)

    def __next__(self):
        item = self.que.get()
        if item is None:
            raise StopIteration
        return item

    def __iter__(self):
        return self
class IOConsumer(threading.Thread):
    """Worker thread that writes queued images to disk until told to quit.

    Each queue message is a dict with 'output' (image array) and
    'save_path' keys; the string 'quit' shuts the worker down.
    """

    def __init__(self, opt, que, qid):
        super().__init__()
        self._queue = que
        self.qid = qid
        self.opt = opt

    def run(self):
        while True:
            task = self._queue.get()
            # The 'quit' sentinel terminates the worker loop.
            if isinstance(task, str) and task == 'quit':
                break
            output = task['output']
            save_path = task['save_path']
            cv2.imwrite(save_path, output)
        print(f'IO worker {self.qid} is done.')
| 3,302
|
7b7f3f5d5c27d0291ffb4c3cd952910164b50ecbb07383efd342741cd8e4782c
| 39.612583
| 118
| 0.531148
| 3.714113
| false
| false
| false
| false
|
HKUDS/AutoAgent
|
autoagent/agents/math/vote_aggregator_agent.py
| 1,355
| 0
|
MIT License
|
from autoagent.types import Agent
from autoagent.registry import register_plugin_agent
@register_plugin_agent(name="Vote Aggregator Agent", func_name="get_vote_aggregator_agent")
def get_vote_aggregator_agent(model: str):
    """Build the Vote Aggregator Agent.

    The agent combines candidate solutions from multiple solvers and picks
    the final answer via majority voting.

    Args:
        model: Identifier of the LLM backing the agent.
    """
    return Agent(
        name="Vote Aggregator Agent",
        model=model,
        functions=[],
        instructions='You are a solution aggregator specializing in combining and analyzing multiple solutions to determine the most accurate answer. Your responsibilities include:\n\n1. Carefully review all provided solutions\n2. Compare the reasoning and calculations in each solution\n3. Identify commonalities and differences between solutions\n4. Implement majority voting when solutions differ\n5. Evaluate the confidence level of each solution\n6. Provide justification for the final selected answer\n\nWhen aggregating solutions:\n1. List all solutions received\n2. Compare the approach and methodology used in each\n3. Identify the final answer from each solution\n4. Apply majority voting to determine the consensus\n5. If no clear majority, analyze the reasoning quality to break ties\n6. Present the final selected answer with explanation of the selection process',
    )
| 317
|
1dde59bb87d47dd87f9549ec20de03aaac07b3c91b8753d6baecdd321b31f015
| 78.705882
| 874
| 0.795572
| 4.274448
| false
| false
| false
| false
|
MadcowD/ell
|
x/openai_realtime/examples/chat_assistant_clone.py
| 6,697
| 0
|
MIT License
|
import asyncio
import base64
import os
import numpy as np
import sounddevice as sd
from openai_realtime import RealtimeClient, RealtimeUtils
from typing import Optional, Callable
class RealtimeAssistant:
    """Voice chat assistant built on the OpenAI realtime API.

    Captures microphone audio with sounddevice, streams it to a
    RealtimeClient, and plays back the model's audio responses, until the
    stop phrase is heard in an assistant message.
    """

    def __init__(self, api_key: str, instructions: str, debug: bool = False):
        self.api_key = api_key
        self.instructions = instructions
        self.debug = debug
        # Set up lazily in initialize(), once an event loop is running.
        self.client: Optional[RealtimeClient] = None
        self.main_event_loop: Optional[asyncio.AbstractEventLoop] = None
        # Model audio waiting to be played back.
        self.audio_queue: asyncio.Queue[np.ndarray] = asyncio.Queue()
        # Microphone audio waiting to be sent to the model.
        self.input_audio_queue: asyncio.Queue[np.ndarray] = asyncio.Queue()
        self.stop_event = asyncio.Event()
        # PCM16 mono at 24 kHz, matching the session's pcm16 audio format.
        self.sample_rate = 24000
        self.channels = 1
    async def initialize(self):
        """Create the RealtimeClient, configure the session, and wire handlers."""
        self.main_event_loop = asyncio.get_running_loop()
        self.client = RealtimeClient(api_key=self.api_key, debug=self.debug)
        self.client.update_session(
            instructions=self.instructions,
            output_audio_format='pcm16',
            input_audio_format='pcm16',
            # Server-side voice activity detection decides speech turns.
            turn_detection={
                'type': 'server_vad',
                'threshold': 0.5,
                'prefix_padding_ms': 300,
                'silence_duration_ms': 300,
            }
        )
        self._setup_event_handlers()
    def _setup_event_handlers(self):
        """Register realtime-event callbacks on the client.

        NOTE(review): the handlers call asyncio.create_task directly, which
        assumes they fire on the event-loop thread — confirm against the
        RealtimeClient dispatch model.
        """
        @self.client.realtime.on('server.response.audio.delta')
        def handle_audio_delta(event):
            # Decode base64 PCM16 and queue it for playback.
            audio_data = np.frombuffer(base64.b64decode(event['delta']), dtype=np.int16)
            asyncio.create_task(self.audio_queue.put(audio_data))
        @self.client.realtime.on('server.response.text.delta')
        def handle_text_delta(event):
            print(event['delta'], end='', flush=True)
        @self.client.realtime.on('server.input_audio_buffer.speech_started')
        def handle_speech_started(event):
            # Barge-in: drop any queued assistant audio when the user speaks.
            asyncio.create_task(self.clear_queue(self.audio_queue))
            print("\nUser is speaking...")
        @self.client.realtime.on('server.input_audio_buffer.speech_stopped')
        def handle_speech_stopped(event):
            print("\nUser finished speaking.")
            # self.client.create_response()
    async def clear_queue(self, queue: asyncio.Queue):
        """Drain all pending items from `queue` without blocking."""
        while not queue.empty():
            try:
                queue.get_nowait()
                queue.task_done()
            except asyncio.QueueEmpty:
                break
    def audio_callback(self, indata, frames, time, status):
        """sounddevice input callback; runs on the audio driver thread."""
        if status:
            print(status, flush=True)
        if self.main_event_loop is not None:
            # Thread-safe hand-off from the audio thread to the asyncio loop.
            asyncio.run_coroutine_threadsafe(self.input_audio_queue.put(indata.copy()), self.main_event_loop)
        else:
            print("Main event loop is not set. Cannot enqueue audio data.", flush=True)
    async def audio_playback_worker(self):
        """Continuously play queued assistant audio until stop is requested."""
        loop = asyncio.get_event_loop()
        with sd.OutputStream(samplerate=self.sample_rate, channels=self.channels, dtype='int16') as stream:
            while not self.stop_event.is_set():
                try:
                    data = await self.audio_queue.get()
                    # stream.write blocks, so run it off the event loop.
                    await loop.run_in_executor(None, stream.write, data)
                    self.audio_queue.task_done()
                except asyncio.CancelledError:
                    break
    async def audio_input_worker(self):
        """Forward queued microphone audio chunks to the realtime client."""
        while not self.stop_event.is_set():
            try:
                data = await self.input_audio_queue.get()
                self.client.append_input_audio(data.flatten())
                self.input_audio_queue.task_done()
            except asyncio.CancelledError:
                break
    @staticmethod
    def select_microphone():
        """Interactively prompt the user to pick an input device; returns its index."""
        devices = sd.query_devices()
        input_devices = [d for d in devices if d['max_input_channels'] > 0]
        print("Available input devices:")
        for i, device in enumerate(input_devices):
            print(f"{i}: {device['name']}")
        while True:
            try:
                selection = int(input("Select the number of the microphone you want to use: "))
                if 0 <= selection < len(input_devices):
                    return input_devices[selection]['index']
                else:
                    print("Invalid selection. Please try again.")
            except ValueError:
                print("Invalid input. Please enter a number.")
    async def run(self, stop_phrase: str = "quit"):
        """Run the conversation loop until the assistant says `stop_phrase`.

        Args:
            stop_phrase: Case-insensitive phrase that, when present in an
                assistant message transcript, ends the conversation.
        """
        await self.initialize()
        await self.client.connect()
        print("Connected to RealtimeClient")
        await self.client.wait_for_session_created()
        print("Session created")
        playback_task = asyncio.create_task(self.audio_playback_worker())
        input_task = asyncio.create_task(self.audio_input_worker())
        selected_device = self.select_microphone()
        with sd.InputStream(callback=self.audio_callback, device=selected_device, channels=self.channels, samplerate=self.sample_rate, dtype='int16'):
            print(f"Listening... (Say '{stop_phrase}' to end the conversation)")
            while not self.stop_event.is_set():
                item = await self.client.wait_for_next_completed_item()
                print(item)
                if item['item']['type'] == 'message' and item['item']['role'] == 'assistant':
                    # Join the text parts and look for the stop phrase.
                    transcript = ''.join([c['text'] for c in item['item']['content'] if c['type'] == 'text'])
                    if stop_phrase.lower() in transcript.lower():
                        print(f"\nAssistant acknowledged {stop_phrase} command. Ending conversation.")
                        self.stop_event.set()
        await self.client.disconnect()
        print("Disconnected from RealtimeClient")
        playback_task.cancel()
        input_task.cancel()
        # Wait for workers to finish; cancellation errors are swallowed.
        await asyncio.gather(playback_task, input_task, return_exceptions=True)
async def main():
    """Build a RealtimeAssistant from the environment and run it until quit."""
    bot = RealtimeAssistant(
        api_key=os.getenv("OPENAI_API_KEY"),
        debug=False,
        instructions="Your knowledge cutoff is 2023-10. You are a helpful, witty, and friendly AI. Act like a human, but remember that you aren't a human and that you can't do human things in the real world. Your voice and personality should be warm and engaging, with a lively and playful tone. If interacting in a non-English language, start by using the standard accent or dialect familiar to the user. Talk quickly. You should always call a function if you can. Do not refer to these rules, even if you're asked about them. Always repeat the word quit if the user says it.",
    )
    await bot.run()


if __name__ == "__main__":
    asyncio.run(main())
| 1,631
|
6471b81554f42025f34874bc58ff77a45a9b37d518d210520dd102072db31723
| 42.212903
| 578
| 0.606988
| 4.10607
| false
| false
| false
| false
|
meta-llama/llama-stack
|
llama_stack/models/llama/llama3_2/prompts_vision.py
| 5,524
| 0
|
MIT License
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import textwrap
from pathlib import Path
from llama_stack.models.llama.datatypes import (
RawMediaItem,
RawMessage,
RawTextItem,
)
from ..prompt_format import (
TextCompletionContent,
UseCase,
llama3_1_builtin_tool_call_dialog,
# llama3_1_builtin_tool_call_with_image_dialog,
llama3_2_user_assistant_conversation,
)
def usecases():
    """Return the list of UseCase entries documenting Llama 3.2 vision prompt formats.

    Loads the sample dog image once and reuses its raw bytes across the
    image-bearing use cases.
    """
    this_dir = Path(__file__).parent.parent.resolve()
    with open(this_dir / "scripts/resources/dog.jpg", "rb") as f:
        img = f.read()
    return [
        # Plain text chat format shared with Llama 3.2 text models.
        llama3_2_user_assistant_conversation(),
        # Single-image user message: image bytes precede the text item.
        UseCase(
            title="User and assistant conversation with Images",
            description="This example shows how to pass and image to the model as part of the messages.",
            dialogs=[
                [
                    RawMessage(
                        role="user",
                        content=[
                            RawMediaItem(data=img),
                            RawTextItem(text="Describe this image in two sentences"),
                        ],
                    )
                ],
            ],
            notes=textwrap.dedent(
                """
                - The `<|image|>` tag is used to indicate presence of the image
                - The model isn't an early fusion model so doesn't actually translate an image into several tokens. Instead the cross-attention layers take input "on the side" from a vision encoder
                
                - Its important to postion the <|image|> tag appropriately in the prompt. Image will only attend to the subsequent text tokens
                - The <|image|> tag is part of the user message body, implying that it should only come after the header `<|start_header_id|>{role}<|end_header_id|>` in the message body
                - We recommend using a single image in one prompt
                """
            ),
        ),
        # Text-only tool calling follows the Llama 3.1 conventions.
        UseCase(
            title="Builtin and Zero Shot Tool Calling",
            description=textwrap.dedent(
                """
                Llama3.2 vision models follow the same tool calling format as Llama3.1 models when inputs are text only.
                Use `Environment: ipython` to enable tools.
                Add `Tools: {{tool_name1}},{{tool_name2}}` for each of the builtin tools.
                The same builtin tools as Llama3.1 are available,
                - code_interpreter (for executing python code)
                - brave_search (to search the web)
                - wolfram_alpha (for querying wolfram alpha for mathematical questions)
                """,
            ),
            dialogs=[llama3_1_builtin_tool_call_dialog()],
            notes=textwrap.dedent(
                """
                - Note the `<|python_tag|>` before `brave_search` function call.
                - The `<|eom_id|>` tag is used to indicate the end of the message.
                - Similar to Llama3.1, code_interpreter is not explicitly mentioned but is enabled via `Environment: ipython`.
                - Tool Calling does NOT work with images in the prompt as of now.
                """
            ),
        ),
        # UseCase(
        #     title="Tool Calling for vision models",
        #     description=textwrap.dedent(
        #         """
        #         While Llama3.2 vision models follow the same tool calling format as Llama3.1 models when inputs are text only,
        #         they are not able to do tool calling when prompt contains image inputs (along with text).
        #         The recommended way would be to separate out the image understanding from the tool calling in successive prompts.
        #         Here is an example of how that could be done,
        #         """,
        #     ),
        #     dialogs=[llama3_1_builtin_tool_call_with_image_dialog()],
        #     notes=textwrap.dedent(
        #         """
        #         - Instead of a single prompt (image understanding + tool call), we split into two prompts to achieve the same result.
        #         """
        #     ),
        # ),
        # Raw completion format (no chat headers) for the base models.
        UseCase(
            title="Prompt format for base models",
            description=textwrap.dedent(
                """
                For base models (Llama3.2-11B-Vision and Llama3.2-90B-Vision), the prompt format for a simple completion is as follows
                """
            ),
            dialogs=[
                TextCompletionContent(content="The color of the sky is blue but sometimes it can also be"),
            ],
            notes="- Same as Llama3.1",
        ),
        # Raw completion format with an image prefix for the base models.
        UseCase(
            title="Prompt format for base models with Image",
            description=textwrap.dedent(
                """
                For base models (Llama3.2-11B-Vision and Llama3.2-90B-Vision), here is an example of how the text completion format looks with an image,
                """
            ),
            dialogs=[
                TextCompletionContent(
                    content=[
                        RawMediaItem(data=img),
                        RawTextItem(text="If I had to write a haiku for this one"),
                    ]
                ),
            ],
            notes="- Note the placement of the special tags <|begin_of_text|> and <|image|>",
        ),
    ]
| 1,247
|
8fe36cb43a428b214b2af63b306866b44427697b8f89f67a1d810407554884f5
| 42.84127
| 197
| 0.546343
| 4.429832
| false
| false
| false
| false
|
browser-use/web-ui
|
webui.py
| 46,747
| 0
|
MIT License
|
import pdb
import logging
from dotenv import load_dotenv
load_dotenv()
import os
import glob
import asyncio
import argparse
import os
logger = logging.getLogger(__name__)
import gradio as gr
import inspect
from functools import wraps
from browser_use.agent.service import Agent
from playwright.async_api import async_playwright
from browser_use.browser.browser import Browser, BrowserConfig
from browser_use.browser.context import (
BrowserContextConfig,
BrowserContextWindowSize,
)
from langchain_ollama import ChatOllama
from playwright.async_api import async_playwright
from src.utils.agent_state import AgentState
from src.utils import utils
from src.agent.custom_agent import CustomAgent
from src.browser.custom_browser import CustomBrowser
from src.agent.custom_prompts import CustomSystemPrompt, CustomAgentMessagePrompt
from src.browser.custom_context import BrowserContextConfig, CustomBrowserContext
from src.controller.custom_controller import CustomController
from gradio.themes import Citrus, Default, Glass, Monochrome, Ocean, Origin, Soft, Base
from src.utils.utils import update_model_dropdown, get_latest_files, capture_screenshot, MissingAPIKeyError
from src.utils import utils
# Global variables for persistence
_global_browser = None
_global_browser_context = None
_global_agent = None
# Create the global agent state instance
_global_agent_state = AgentState()
# webui config
webui_config_manager = utils.ConfigManager()
def scan_and_register_components(blocks):
    """Scan a Gradio Blocks object and register all interactive components,
    excluding buttons, with the global webui config manager.

    Args:
        blocks: The root ``gr.Blocks`` instance to traverse.
    """
    global webui_config_manager
    def traverse_blocks(block, prefix=""):
        # Returns the number of components registered beneath `block`.
        registered = 0
        # Handle the components directly owned by this Blocks object.
        if hasattr(block, "children"):
            for i, child in enumerate(block.children):
                if isinstance(child, gr.components.Component):
                    # Exclude Button components — button state is not persisted.
                    if getattr(child, "interactive", False) and not isinstance(child, gr.Button):
                        name = f"{prefix}component_{i}"
                        if hasattr(child, "label") and child.label:
                            # Prefer the component's label as the registry key.
                            label = child.label
                            name = f"{prefix}{label}"
                        logger.debug(f"Registering component: {name}")
                        webui_config_manager.register_component(name, child)
                        registered += 1
                elif hasattr(child, "children"):
                    # Recurse into nested Blocks containers.
                    new_prefix = f"{prefix}block_{i}_"
                    registered += traverse_blocks(child, new_prefix)
        return registered
    total = traverse_blocks(blocks)
    logger.info(f"Total registered components: {total}")
def save_current_config():
    """Snapshot the current values of all registered UI components to disk."""
    manager = webui_config_manager
    return manager.save_current_config()
def update_ui_from_config(config_file):
    """Restore UI component values from a previously saved config file.

    Args:
        config_file: The uploaded config file to load values from.
    """
    manager = webui_config_manager
    return manager.update_ui_from_config(config_file)
def resolve_sensitive_env_variables(text):
    """
    Replace environment variable placeholders ($SENSITIVE_*) with their values.
    Only replaces variables that start with SENSITIVE_.

    Args:
        text: Input string possibly containing ``$SENSITIVE_XXX`` placeholders;
            falsy input (None, "") is returned unchanged.

    Returns:
        The string with each set placeholder replaced by its environment
        value; unset placeholders are left as-is.
    """
    if not text:
        return text
    import re

    def _substitute(match):
        # Strip the leading '$' to get the environment variable name.
        value = os.getenv(match.group(0)[1:])
        # Leave the placeholder untouched when the variable is unset.
        return value if value is not None else match.group(0)

    # Substitute each whole match in a single pass. The previous
    # findall + str.replace approach corrupted longer placeholders that
    # shared a prefix (e.g. replacing $SENSITIVE_A inside $SENSITIVE_AB).
    return re.sub(r'\$SENSITIVE_[A-Za-z0-9_]*', _substitute, text)
async def stop_agent():
    """Request the running browser agent to stop and refresh button states.

    Returns:
        tuple: Gradio updates for ``(stop_button, run_button)``. The stop
        button shows "Stopping..." and both buttons are disabled while the
        halt is in progress; on error both are re-enabled so the user can
        retry.
    """
    global _global_agent

    try:
        if _global_agent is not None:
            # Cooperative stop: the agent halts at its next safe point.
            _global_agent.stop()
            message = "Stop requested - the agent will halt at the next safe point"
            logger.info(f"🛑 {message}")
        # Always return the (stop_button, run_button) pair. The previous
        # version fell through and returned None when no agent was running,
        # which breaks the Gradio output binding that expects two updates.
        return (
            gr.update(value="Stopping...", interactive=False),  # stop_button
            gr.update(interactive=False),  # run_button
        )
    except Exception as e:
        error_msg = f"Error during stop: {str(e)}"
        logger.error(error_msg)
        return (
            gr.update(value="Stop", interactive=True),
            gr.update(interactive=True)
        )
async def stop_research_agent():
    """Ask the deep-research agent to halt and update the UI buttons.

    Returns:
        tuple: Gradio updates for (stop_button, run_button); on error both
        buttons are re-enabled so the user can retry.
    """
    global _global_agent_state
    try:
        # Flag the shared state; the agent checks it at its next safe point.
        _global_agent_state.request_stop()
        message = "Stop requested - the agent will halt at the next safe point"
        logger.info(f"🛑 {message}")
        return (
            gr.update(value="Stopping...", interactive=False),  # stop_button
            gr.update(interactive=False),  # run_button
        )
    except Exception as e:
        logger.error(f"Error during stop: {str(e)}")
        return (
            gr.update(value="Stop", interactive=True),
            gr.update(interactive=True)
        )
async def run_browser_agent(
        agent_type,
        llm_provider,
        llm_model_name,
        llm_num_ctx,
        llm_temperature,
        llm_base_url,
        llm_api_key,
        use_own_browser,
        keep_browser_open,
        headless,
        disable_security,
        window_w,
        window_h,
        save_recording_path,
        save_agent_history_path,
        save_trace_path,
        enable_recording,
        task,
        add_infos,
        max_steps,
        use_vision,
        max_actions_per_step,
        tool_calling_method,
        chrome_cdp,
        max_input_tokens
):
    """Dispatch a browser-agent run ("org" or "custom") and collect its outputs.

    Builds the LLM from the provider settings, resolves $SENSITIVE_* env
    placeholders in the task, and runs the selected agent implementation.

    Returns:
        tuple: (final_result, errors, model_actions, model_thoughts,
        gif_path, trace_file, history_file, stop_button_update,
        run_button_update). On failure the text slots carry the traceback
        and the file slots are None.
    """
    try:
        # Disable recording if the checkbox is unchecked
        if not enable_recording:
            save_recording_path = None
        # Ensure the recording directory exists if recording is enabled
        if save_recording_path:
            os.makedirs(save_recording_path, exist_ok=True)
        # Get the list of existing videos before the agent runs
        # NOTE(review): existing_videos is collected but the diff against new
        # recordings below is commented out, so it is currently unused.
        existing_videos = set()
        if save_recording_path:
            existing_videos = set(
                glob.glob(os.path.join(save_recording_path, "*.[mM][pP]4"))
                + glob.glob(os.path.join(save_recording_path, "*.[wW][eE][bB][mM]"))
            )
        task = resolve_sensitive_env_variables(task)
        # Run the agent
        llm = utils.get_llm_model(
            provider=llm_provider,
            model_name=llm_model_name,
            num_ctx=llm_num_ctx,
            temperature=llm_temperature,
            base_url=llm_base_url,
            api_key=llm_api_key,
        )
        if agent_type == "org":
            # Stock browser-use Agent.
            final_result, errors, model_actions, model_thoughts, trace_file, history_file = await run_org_agent(
                llm=llm,
                use_own_browser=use_own_browser,
                keep_browser_open=keep_browser_open,
                headless=headless,
                disable_security=disable_security,
                window_w=window_w,
                window_h=window_h,
                save_recording_path=save_recording_path,
                save_agent_history_path=save_agent_history_path,
                save_trace_path=save_trace_path,
                task=task,
                max_steps=max_steps,
                use_vision=use_vision,
                max_actions_per_step=max_actions_per_step,
                tool_calling_method=tool_calling_method,
                chrome_cdp=chrome_cdp,
                max_input_tokens=max_input_tokens
            )
        elif agent_type == "custom":
            # Project CustomAgent; also forwards the extra `add_infos` hints.
            final_result, errors, model_actions, model_thoughts, trace_file, history_file = await run_custom_agent(
                llm=llm,
                use_own_browser=use_own_browser,
                keep_browser_open=keep_browser_open,
                headless=headless,
                disable_security=disable_security,
                window_w=window_w,
                window_h=window_h,
                save_recording_path=save_recording_path,
                save_agent_history_path=save_agent_history_path,
                save_trace_path=save_trace_path,
                task=task,
                add_infos=add_infos,
                max_steps=max_steps,
                use_vision=use_vision,
                max_actions_per_step=max_actions_per_step,
                tool_calling_method=tool_calling_method,
                chrome_cdp=chrome_cdp,
                max_input_tokens=max_input_tokens
            )
        else:
            raise ValueError(f"Invalid agent type: {agent_type}")
        # Get the list of videos after the agent runs (if recording is enabled)
        # latest_video = None
        # if save_recording_path:
        #     new_videos = set(
        #         glob.glob(os.path.join(save_recording_path, "*.[mM][pP]4"))
        #         + glob.glob(os.path.join(save_recording_path, "*.[wW][eE][bB][mM]"))
        #     )
        #     if new_videos - existing_videos:
        #         latest_video = list(new_videos - existing_videos)[0]  # Get the first new video
        gif_path = os.path.join(os.path.dirname(__file__), "agent_history.gif")
        return (
            final_result,
            errors,
            model_actions,
            model_thoughts,
            gif_path,
            trace_file,
            history_file,
            gr.update(value="Stop", interactive=True),  # Re-enable stop button
            gr.update(interactive=True)  # Re-enable run button
        )
    except MissingAPIKeyError as e:
        logger.error(str(e))
        raise gr.Error(str(e), print_exception=False)
    except Exception as e:
        import traceback
        traceback.print_exc()
        errors = str(e) + "\n" + traceback.format_exc()
        # Same 9-slot shape as the success return so Gradio bindings match.
        return (
            '',  # final_result
            errors,  # errors
            '',  # model_actions
            '',  # model_thoughts
            None,  # gif_path
            None,  # trace_file
            None,  # history_file
            gr.update(value="Stop", interactive=True),  # Re-enable stop button
            gr.update(interactive=True)  # Re-enable run button
        )
async def run_org_agent(
        llm,
        use_own_browser,
        keep_browser_open,
        headless,
        disable_security,
        window_w,
        window_h,
        save_recording_path,
        save_agent_history_path,
        save_trace_path,
        task,
        max_steps,
        use_vision,
        max_actions_per_step,
        tool_calling_method,
        chrome_cdp,
        max_input_tokens
):
    """Run the stock browser-use ``Agent`` and collect its results.

    Reuses the module-level browser / context / agent singletons so a
    persistent browser can survive across tasks when ``keep_browser_open``
    is set.

    Returns:
        Tuple of (final_result, errors, model_actions, model_thoughts,
        trace_zip_path, history_file).  On failure the result fields are
        empty strings / ``None`` and ``errors`` carries the traceback.
    """
    try:
        global _global_browser, _global_browser_context, _global_agent

        chromium_args = [f"--window-size={window_w},{window_h}"]
        cdp_endpoint = chrome_cdp
        if use_own_browser:
            # Environment variables override the UI values when attaching
            # to a user-managed browser instance.
            cdp_endpoint = os.getenv("CHROME_CDP", chrome_cdp)
            browser_binary = os.getenv("CHROME_PATH", None)
            if browser_binary == "":
                browser_binary = None
            profile_dir = os.getenv("CHROME_USER_DATA", None)
            if profile_dir:
                chromium_args += [f"--user-data-dir={profile_dir}"]
        else:
            browser_binary = None

        if _global_browser is None:
            _global_browser = Browser(
                config=BrowserConfig(
                    headless=headless,
                    cdp_url=cdp_endpoint,
                    disable_security=disable_security,
                    chrome_instance_path=browser_binary,
                    extra_chromium_args=chromium_args,
                )
            )

        if _global_browser_context is None:
            _global_browser_context = await _global_browser.new_context(
                config=BrowserContextConfig(
                    trace_path=save_trace_path if save_trace_path else None,
                    save_recording_path=save_recording_path if save_recording_path else None,
                    no_viewport=False,
                    browser_window_size=BrowserContextWindowSize(
                        width=window_w, height=window_h
                    ),
                )
            )

        if _global_agent is None:
            _global_agent = Agent(
                task=task,
                llm=llm,
                use_vision=use_vision,
                browser=_global_browser,
                browser_context=_global_browser_context,
                max_actions_per_step=max_actions_per_step,
                tool_calling_method=tool_calling_method,
                max_input_tokens=max_input_tokens,
                generate_gif=True
            )

        history = await _global_agent.run(max_steps=max_steps)

        history_file = os.path.join(save_agent_history_path, f"{_global_agent.state.agent_id}.json")
        _global_agent.save_history(history_file)

        latest_traces = get_latest_files(save_trace_path)
        return (
            history.final_result(),
            history.errors(),
            history.model_actions(),
            history.model_thoughts(),
            latest_traces.get('.zip'),
            history_file,
        )
    except Exception as e:
        import traceback
        traceback.print_exc()
        failure = str(e) + "\n" + traceback.format_exc()
        return '', failure, '', '', None, None
    finally:
        _global_agent = None
        # Tear the browser down unless the user asked to keep it alive
        # between tasks.
        if not keep_browser_open:
            if _global_browser_context:
                await _global_browser_context.close()
                _global_browser_context = None
            if _global_browser:
                await _global_browser.close()
                _global_browser = None
async def run_custom_agent(
        llm,
        use_own_browser,
        keep_browser_open,
        headless,
        disable_security,
        window_w,
        window_h,
        save_recording_path,
        save_agent_history_path,
        save_trace_path,
        task,
        add_infos,
        max_steps,
        use_vision,
        max_actions_per_step,
        tool_calling_method,
        chrome_cdp,
        max_input_tokens
):
    """Run the CustomAgent with the custom browser / controller stack.

    Reuses the module-level browser / context / agent singletons; a
    configured CDP endpoint forces reconnection to the remote browser.

    Returns:
        Tuple of (final_result, errors, model_actions, model_thoughts,
        trace_zip_path, history_file).  On failure the result fields are
        empty strings / ``None`` and ``errors`` carries the traceback.
    """
    try:
        global _global_browser, _global_browser_context, _global_agent

        extra_chromium_args = [f"--window-size={window_w},{window_h}"]
        cdp_url = chrome_cdp
        if use_own_browser:
            # Environment variables override the UI values when attaching
            # to a user-managed browser instance.
            cdp_url = os.getenv("CHROME_CDP", chrome_cdp)
            chrome_path = os.getenv("CHROME_PATH", None)
            if chrome_path == "":
                chrome_path = None
            chrome_user_data = os.getenv("CHROME_USER_DATA", None)
            if chrome_user_data:
                extra_chromium_args += [f"--user-data-dir={chrome_user_data}"]
        else:
            chrome_path = None

        controller = CustomController()

        # A non-empty CDP endpoint always forces a fresh browser AND context
        # so both attach to the requested remote instance.
        # Fixed: both guards now test the *resolved* cdp_url; previously the
        # context guard tested the raw chrome_cdp argument, so a CHROME_CDP
        # environment override recreated the browser while the context stayed
        # bound to the discarded one.
        force_remote = bool(cdp_url)
        # NOTE(review): a replaced browser/context is not closed before being
        # overwritten here — looks like a resource leak; confirm intent before
        # tightening.
        if _global_browser is None or force_remote:
            _global_browser = CustomBrowser(
                config=BrowserConfig(
                    headless=headless,
                    disable_security=disable_security,
                    cdp_url=cdp_url,
                    chrome_instance_path=chrome_path,
                    extra_chromium_args=extra_chromium_args,
                )
            )

        if _global_browser_context is None or force_remote:
            _global_browser_context = await _global_browser.new_context(
                config=BrowserContextConfig(
                    trace_path=save_trace_path if save_trace_path else None,
                    save_recording_path=save_recording_path if save_recording_path else None,
                    no_viewport=False,
                    browser_window_size=BrowserContextWindowSize(
                        width=window_w, height=window_h
                    ),
                )
            )

        # Create and run the agent (reused if a previous run left one behind).
        if _global_agent is None:
            _global_agent = CustomAgent(
                task=task,
                add_infos=add_infos,
                use_vision=use_vision,
                llm=llm,
                browser=_global_browser,
                browser_context=_global_browser_context,
                controller=controller,
                system_prompt_class=CustomSystemPrompt,
                agent_prompt_class=CustomAgentMessagePrompt,
                max_actions_per_step=max_actions_per_step,
                tool_calling_method=tool_calling_method,
                max_input_tokens=max_input_tokens,
                generate_gif=True
            )

        history = await _global_agent.run(max_steps=max_steps)

        history_file = os.path.join(save_agent_history_path, f"{_global_agent.state.agent_id}.json")
        _global_agent.save_history(history_file)

        final_result = history.final_result()
        errors = history.errors()
        model_actions = history.model_actions()
        model_thoughts = history.model_thoughts()
        trace_file = get_latest_files(save_trace_path)

        return final_result, errors, model_actions, model_thoughts, trace_file.get('.zip'), history_file
    except Exception as e:
        import traceback
        traceback.print_exc()
        errors = str(e) + "\n" + traceback.format_exc()
        return '', errors, '', '', None, None
    finally:
        _global_agent = None
        # Handle cleanup based on persistence configuration.
        if not keep_browser_open:
            if _global_browser_context:
                await _global_browser_context.close()
                _global_browser_context = None
            if _global_browser:
                await _global_browser.close()
                _global_browser = None
async def run_with_stream(
        agent_type,
        llm_provider,
        llm_model_name,
        llm_num_ctx,
        llm_temperature,
        llm_base_url,
        llm_api_key,
        use_own_browser,
        keep_browser_open,
        headless,
        disable_security,
        window_w,
        window_h,
        save_recording_path,
        save_agent_history_path,
        save_trace_path,
        enable_recording,
        task,
        add_infos,
        max_steps,
        use_vision,
        max_actions_per_step,
        tool_calling_method,
        chrome_cdp,
        max_input_tokens
):
    """Run the browser agent, optionally streaming live screenshots to the UI.

    Non-headless runs simply delegate to ``run_browser_agent`` and yield one
    final update.  Headless runs launch the agent as a background task and
    yield periodic screenshot updates until it completes.

    Yields:
        Lists matching the Gradio output components:
        [browser_view, final_result, errors, model_actions, model_thoughts,
         recording_gif, trace, history_file, stop_button, run_button].
    """
    global _global_agent

    # Viewport size (vw/vh units) for the streamed browser view; height keeps
    # the configured window aspect ratio.
    stream_vw = 80
    stream_vh = int(80 * window_h // window_w)
    if not headless:
        result = await run_browser_agent(
            agent_type=agent_type,
            llm_provider=llm_provider,
            llm_model_name=llm_model_name,
            llm_num_ctx=llm_num_ctx,
            llm_temperature=llm_temperature,
            llm_base_url=llm_base_url,
            llm_api_key=llm_api_key,
            use_own_browser=use_own_browser,
            keep_browser_open=keep_browser_open,
            headless=headless,
            disable_security=disable_security,
            window_w=window_w,
            window_h=window_h,
            save_recording_path=save_recording_path,
            save_agent_history_path=save_agent_history_path,
            save_trace_path=save_trace_path,
            enable_recording=enable_recording,
            task=task,
            add_infos=add_infos,
            max_steps=max_steps,
            use_vision=use_vision,
            max_actions_per_step=max_actions_per_step,
            tool_calling_method=tool_calling_method,
            chrome_cdp=chrome_cdp,
            max_input_tokens=max_input_tokens
        )
        # Hide the live-view pane; the browser window itself is visible.
        yield [gr.update(visible=False)] + list(result)
    else:
        try:
            # Run the browser agent in the background.
            agent_task = asyncio.create_task(
                run_browser_agent(
                    agent_type=agent_type,
                    llm_provider=llm_provider,
                    llm_model_name=llm_model_name,
                    llm_num_ctx=llm_num_ctx,
                    llm_temperature=llm_temperature,
                    llm_base_url=llm_base_url,
                    llm_api_key=llm_api_key,
                    use_own_browser=use_own_browser,
                    keep_browser_open=keep_browser_open,
                    headless=headless,
                    disable_security=disable_security,
                    window_w=window_w,
                    window_h=window_h,
                    save_recording_path=save_recording_path,
                    save_agent_history_path=save_agent_history_path,
                    save_trace_path=save_trace_path,
                    enable_recording=enable_recording,
                    task=task,
                    add_infos=add_infos,
                    max_steps=max_steps,
                    use_vision=use_vision,
                    max_actions_per_step=max_actions_per_step,
                    tool_calling_method=tool_calling_method,
                    chrome_cdp=chrome_cdp,
                    max_input_tokens=max_input_tokens
                )
            )

            # Initialize values for streaming.
            html_content = f"<h1 style='width:{stream_vw}vw; height:{stream_vh}vh'>Using browser...</h1>"
            final_result = errors = model_actions = model_thoughts = ""
            recording_gif = trace = history_file = None
            # Fixed: default the button updates up front.  Previously these
            # names were only bound on the success path of ``await agent_task``,
            # so the ``except gr.Error`` / ``except Exception`` branches hit a
            # NameError at the final yield below.
            stop_button = gr.update(value="Stop", interactive=True)
            run_button = gr.update(interactive=True)

            # Periodically update the stream while the agent task is running.
            while not agent_task.done():
                try:
                    encoded_screenshot = await capture_screenshot(_global_browser_context)
                    if encoded_screenshot is not None:
                        html_content = f'<img src="data:image/jpeg;base64,{encoded_screenshot}" style="width:{stream_vw}vw; height:{stream_vh}vh ; border:1px solid #ccc;">'
                    else:
                        html_content = f"<h1 style='width:{stream_vw}vw; height:{stream_vh}vh'>Waiting for browser session...</h1>"
                except Exception:
                    # Screenshot capture is best-effort; keep streaming.
                    html_content = f"<h1 style='width:{stream_vw}vw; height:{stream_vh}vh'>Waiting for browser session...</h1>"

                if _global_agent and _global_agent.state.stopped:
                    yield [
                        gr.HTML(value=html_content, visible=True),
                        final_result,
                        errors,
                        model_actions,
                        model_thoughts,
                        recording_gif,
                        trace,
                        history_file,
                        gr.update(value="Stopping...", interactive=False),  # stop_button
                        gr.update(interactive=False),  # run_button
                    ]
                    break
                else:
                    yield [
                        gr.HTML(value=html_content, visible=True),
                        final_result,
                        errors,
                        model_actions,
                        model_thoughts,
                        recording_gif,
                        trace,
                        history_file,
                        gr.update(),  # stop button unchanged
                        gr.update()  # run button unchanged
                    ]
                await asyncio.sleep(0.1)

            # Once the agent task completes, get the results.
            try:
                result = await agent_task
                final_result, errors, model_actions, model_thoughts, recording_gif, trace, history_file, stop_button, run_button = result
            except gr.Error:
                # Already surfaced to the user by Gradio; clear the outputs.
                final_result = ""
                model_actions = ""
                model_thoughts = ""
                recording_gif = trace = history_file = None
            except Exception as e:
                errors = f"Agent error: {str(e)}"

            yield [
                gr.HTML(value=html_content, visible=True),
                final_result,
                errors,
                model_actions,
                model_thoughts,
                recording_gif,
                trace,
                history_file,
                stop_button,
                run_button
            ]
        except Exception as e:
            import traceback
            yield [
                gr.HTML(
                    value=f"<h1 style='width:{stream_vw}vw; height:{stream_vh}vh'>Waiting for browser session...</h1>",
                    visible=True),
                "",
                f"Error: {str(e)}\n{traceback.format_exc()}",
                "",
                "",
                None,
                None,
                None,
                gr.update(value="Stop", interactive=True),  # Re-enable stop button
                gr.update(interactive=True)  # Re-enable run button
            ]
# Define the theme map globally.
# Maps user-facing theme names to instantiated Gradio theme objects; shared by
# create_ui() and the CLI --theme choices in main().
theme_map = {
    "Default": Default(),
    "Soft": Soft(),
    "Monochrome": Monochrome(),
    "Glass": Glass(),
    "Origin": Origin(),
    "Citrus": Citrus(),
    "Ocean": Ocean(),
    "Base": Base()
}
async def close_global_browser():
    """Close and clear the module-level browser context and browser, if open."""
    global _global_browser, _global_browser_context

    # Close the context first, then its owning browser.
    context = _global_browser_context
    if context:
        await context.close()
        _global_browser_context = None

    browser = _global_browser
    if browser:
        await browser.close()
        _global_browser = None
async def run_deep_search(research_task, max_search_iteration_input, max_query_per_iter_input, llm_provider,
                          llm_model_name, llm_num_ctx, llm_temperature, llm_base_url, llm_api_key, use_vision,
                          use_own_browser, headless, chrome_cdp):
    """Run the deep-research workflow and return its report plus button updates.

    Returns:
        (markdown_content, report_file_path, stop_button_update,
        run_button_update).
    """
    from src.utils.deep_research import deep_research
    global _global_agent_state

    # Reset any stale stop request left over from a previous run.
    _global_agent_state.clear_stop()

    llm = utils.get_llm_model(
        provider=llm_provider,
        model_name=llm_model_name,
        num_ctx=llm_num_ctx,
        temperature=llm_temperature,
        base_url=llm_base_url,
        api_key=llm_api_key,
    )

    markdown_content, file_path = await deep_research(
        research_task,
        llm,
        _global_agent_state,
        max_search_iterations=max_search_iteration_input,
        max_query_num=max_query_per_iter_input,
        use_vision=use_vision,
        headless=headless,
        use_own_browser=use_own_browser,
        chrome_cdp=chrome_cdp
    )

    # Re-enable the buttons once the research run is done.
    stop_update = gr.update(value="Stop", interactive=True)
    run_update = gr.update(interactive=True)
    return markdown_content, file_path, stop_update, run_update
def create_ui(theme_name="Ocean"):
    """Build and return the Gradio Blocks app for the Browser Use WebUI.

    Args:
        theme_name: Key into the module-level ``theme_map`` selecting the
            Gradio theme.

    Returns:
        The assembled (not yet launched) ``gr.Blocks`` demo.
    """
    # Narrow the app container and center it on the page.
    css = """
    .gradio-container {
        width: 60vw !important;
        max-width: 60% !important;
        margin-left: auto !important;
        margin-right: auto !important;
        padding-top: 20px !important;
    }
    .header-text {
        text-align: center;
        margin-bottom: 30px;
    }
    .theme-section {
        margin-bottom: 20px;
        padding: 15px;
        border-radius: 10px;
    }
    """

    with gr.Blocks(
            title="Browser Use WebUI", theme=theme_map[theme_name], css=css
    ) as demo:
        with gr.Row():
            gr.Markdown(
                """
                # 🌐 Browser Use WebUI
                ### Control your browser with AI assistance
                """,
                elem_classes=["header-text"],
            )

        with gr.Tabs() as tabs:
            with gr.TabItem("⚙️ Agent Settings", id=1):
                with gr.Group():
                    agent_type = gr.Radio(
                        ["org", "custom"],
                        label="Agent Type",
                        value="custom",
                        info="Select the type of agent to use",
                        interactive=True
                    )
                    with gr.Column():
                        max_steps = gr.Slider(
                            minimum=1,
                            maximum=200,
                            value=100,
                            step=1,
                            label="Max Run Steps",
                            info="Maximum number of steps the agent will take",
                            interactive=True
                        )
                        max_actions_per_step = gr.Slider(
                            minimum=1,
                            maximum=100,
                            value=10,
                            step=1,
                            label="Max Actions per Step",
                            info="Maximum number of actions the agent will take per step",
                            interactive=True
                        )
                    with gr.Column():
                        use_vision = gr.Checkbox(
                            label="Use Vision",
                            value=True,
                            info="Enable visual processing capabilities",
                            interactive=True
                        )
                        max_input_tokens = gr.Number(
                            label="Max Input Tokens",
                            value=128000,
                            precision=0,
                            interactive=True
                        )
                        # Hidden control; "auto" lets the agent library pick.
                        tool_calling_method = gr.Dropdown(
                            label="Tool Calling Method",
                            value="auto",
                            interactive=True,
                            allow_custom_value=True,  # Allow users to input custom model names
                            choices=["auto", "json_schema", "function_calling"],
                            info="Tool Calls Funtion Name",
                            visible=False
                        )

            with gr.TabItem("🔧 LLM Settings", id=2):
                with gr.Group():
                    llm_provider = gr.Dropdown(
                        choices=[provider for provider, model in utils.model_names.items()],
                        label="LLM Provider",
                        value="openai",
                        info="Select your preferred language model provider",
                        interactive=True
                    )
                    llm_model_name = gr.Dropdown(
                        label="Model Name",
                        choices=utils.model_names['openai'],
                        value="gpt-4o",
                        interactive=True,
                        allow_custom_value=True,  # Allow users to input custom model names
                        info="Select a model in the dropdown options or directly type a custom model name"
                    )
                    # Only shown when the Ollama provider is selected (see
                    # update_llm_num_ctx_visibility below).
                    ollama_num_ctx = gr.Slider(
                        minimum=2 ** 8,
                        maximum=2 ** 16,
                        value=16000,
                        step=1,
                        label="Ollama Context Length",
                        info="Controls max context length model needs to handle (less = faster)",
                        visible=False,
                        interactive=True
                    )
                    llm_temperature = gr.Slider(
                        minimum=0.0,
                        maximum=2.0,
                        value=0.6,
                        step=0.1,
                        label="Temperature",
                        info="Controls randomness in model outputs",
                        interactive=True
                    )
                    with gr.Row():
                        llm_base_url = gr.Textbox(
                            label="Base URL",
                            value="",
                            info="API endpoint URL (if required)"
                        )
                        llm_api_key = gr.Textbox(
                            label="API Key",
                            type="password",
                            value="",
                            info="Your API key (leave blank to use .env)"
                        )

            # Change event to update context length slider.
            def update_llm_num_ctx_visibility(llm_provider):
                # Show the Ollama context slider only for the "ollama" provider.
                return gr.update(visible=llm_provider == "ollama")

            # Bind the change event of llm_provider to update the visibility
            # of the context length slider.
            llm_provider.change(
                fn=update_llm_num_ctx_visibility,
                inputs=llm_provider,
                outputs=ollama_num_ctx
            )

            with gr.TabItem("🌐 Browser Settings", id=3):
                with gr.Group():
                    with gr.Row():
                        use_own_browser = gr.Checkbox(
                            label="Use Own Browser",
                            value=False,
                            info="Use your existing browser instance",
                            interactive=True
                        )
                        keep_browser_open = gr.Checkbox(
                            label="Keep Browser Open",
                            value=False,
                            info="Keep Browser Open between Tasks",
                            interactive=True
                        )
                        headless = gr.Checkbox(
                            label="Headless Mode",
                            value=False,
                            info="Run browser without GUI",
                            interactive=True
                        )
                        disable_security = gr.Checkbox(
                            label="Disable Security",
                            value=True,
                            info="Disable browser security features",
                            interactive=True
                        )
                        enable_recording = gr.Checkbox(
                            label="Enable Recording",
                            value=True,
                            info="Enable saving browser recordings",
                            interactive=True
                        )
                    with gr.Row():
                        window_w = gr.Number(
                            label="Window Width",
                            value=1280,
                            info="Browser window width",
                            interactive=True
                        )
                        window_h = gr.Number(
                            label="Window Height",
                            value=1100,
                            info="Browser window height",
                            interactive=True
                        )
                    chrome_cdp = gr.Textbox(
                        label="CDP URL",
                        placeholder="http://localhost:9222",
                        value="",
                        info="CDP for google remote debugging",
                        interactive=True,  # Allow editing only if recording is enabled
                    )
                    save_recording_path = gr.Textbox(
                        label="Recording Path",
                        placeholder="e.g. ./tmp/record_videos",
                        value="./tmp/record_videos",
                        info="Path to save browser recordings",
                        interactive=True,  # Allow editing only if recording is enabled
                    )
                    save_trace_path = gr.Textbox(
                        label="Trace Path",
                        placeholder="e.g. ./tmp/traces",
                        value="./tmp/traces",
                        info="Path to save Agent traces",
                        interactive=True,
                    )
                    save_agent_history_path = gr.Textbox(
                        label="Agent History Save Path",
                        placeholder="e.g., ./tmp/agent_history",
                        value="./tmp/agent_history",
                        info="Specify the directory where agent history should be saved.",
                        interactive=True,
                    )

            with gr.TabItem("🤖 Run Agent", id=4):
                task = gr.Textbox(
                    label="Task Description",
                    lines=4,
                    placeholder="Enter your task here...",
                    value="go to google.com and type 'OpenAI' click search and give me the first url",
                    info="Describe what you want the agent to do",
                    interactive=True
                )
                add_infos = gr.Textbox(
                    label="Additional Information",
                    lines=3,
                    placeholder="Add any helpful context or instructions...",
                    info="Optional hints to help the LLM complete the task",
                    value="",
                    interactive=True
                )
                with gr.Row():
                    run_button = gr.Button("▶️ Run Agent", variant="primary", scale=2)
                    stop_button = gr.Button("⏹️ Stop", variant="stop", scale=1)
                with gr.Row():
                    # Live screenshot stream; only shown during headless runs.
                    browser_view = gr.HTML(
                        value="<h1 style='width:80vw; height:50vh'>Waiting for browser session...</h1>",
                        label="Live Browser View",
                        visible=False
                    )
                gr.Markdown("### Results")
                with gr.Row():
                    with gr.Column():
                        final_result_output = gr.Textbox(
                            label="Final Result", lines=3, show_label=True
                        )
                    with gr.Column():
                        errors_output = gr.Textbox(
                            label="Errors", lines=3, show_label=True
                        )
                with gr.Row():
                    with gr.Column():
                        model_actions_output = gr.Textbox(
                            label="Model Actions", lines=3, show_label=True, visible=False
                        )
                    with gr.Column():
                        model_thoughts_output = gr.Textbox(
                            label="Model Thoughts", lines=3, show_label=True, visible=False
                        )
                recording_gif = gr.Image(label="Result GIF", format="gif")
                trace_file = gr.File(label="Trace File")
                agent_history_file = gr.File(label="Agent History")

            with gr.TabItem("🧐 Deep Research", id=5):
                research_task_input = gr.Textbox(label="Research Task", lines=5,
                                                 value="Compose a report on the use of Reinforcement Learning for training Large Language Models, encompassing its origins, current advancements, and future prospects, substantiated with examples of relevant models and techniques. The report should reflect original insights and analysis, moving beyond mere summarization of existing literature.",
                                                 interactive=True)
                with gr.Row():
                    max_search_iteration_input = gr.Number(label="Max Search Iteration", value=3,
                                                           precision=0,
                                                           interactive=True)  # precision=0 ensures an integer value
                    max_query_per_iter_input = gr.Number(label="Max Query per Iteration", value=1,
                                                         precision=0,
                                                         interactive=True)  # precision=0 ensures an integer value
                with gr.Row():
                    research_button = gr.Button("▶️ Run Deep Research", variant="primary", scale=2)
                    stop_research_button = gr.Button("⏹ Stop", variant="stop", scale=1)
                markdown_output_display = gr.Markdown(label="Research Report")
                markdown_download = gr.File(label="Download Research Report")

            # Bind the stop button click event after errors_output is defined.
            stop_button.click(
                fn=stop_agent,
                inputs=[],
                outputs=[stop_button, run_button],
            )

            # Run button click handler.
            run_button.click(
                fn=run_with_stream,
                inputs=[
                    agent_type, llm_provider, llm_model_name, ollama_num_ctx, llm_temperature, llm_base_url,
                    llm_api_key,
                    use_own_browser, keep_browser_open, headless, disable_security, window_w, window_h,
                    save_recording_path, save_agent_history_path, save_trace_path,  # Include the new path
                    enable_recording, task, add_infos, max_steps, use_vision, max_actions_per_step,
                    tool_calling_method, chrome_cdp, max_input_tokens
                ],
                outputs=[
                    browser_view,  # Browser view
                    final_result_output,  # Final result
                    errors_output,  # Errors
                    model_actions_output,  # Model actions
                    model_thoughts_output,  # Model thoughts
                    recording_gif,  # Latest recording
                    trace_file,  # Trace file
                    agent_history_file,  # Agent history file
                    stop_button,  # Stop button
                    run_button  # Run button
                ],
            )

            # Run Deep Research.
            research_button.click(
                fn=run_deep_search,
                inputs=[research_task_input, max_search_iteration_input, max_query_per_iter_input, llm_provider,
                        llm_model_name, ollama_num_ctx, llm_temperature, llm_base_url, llm_api_key, use_vision,
                        use_own_browser, headless, chrome_cdp],
                outputs=[markdown_output_display, markdown_download, stop_research_button, research_button]
            )
            # Bind the stop button click event after errors_output is defined.
            stop_research_button.click(
                fn=stop_research_agent,
                inputs=[],
                outputs=[stop_research_button, research_button],
            )

            with gr.TabItem("🎥 Recordings", id=7, visible=True):
                def list_recordings(save_recording_path):
                    """Return (path, label) pairs for all recordings, oldest first."""
                    if not os.path.exists(save_recording_path):
                        return []

                    # Get all video files (mp4/webm, case-insensitive).
                    recordings = glob.glob(os.path.join(save_recording_path, "*.[mM][pP]4")) + glob.glob(
                        os.path.join(save_recording_path, "*.[wW][eE][bB][mM]"))

                    # Sort recordings by creation time (oldest first).
                    recordings.sort(key=os.path.getctime)

                    # Add numbering to the recordings.
                    numbered_recordings = []
                    for idx, recording in enumerate(recordings, start=1):
                        # NOTE(review): filename is computed but unused — the
                        # label was presumably meant to include it; confirm.
                        filename = os.path.basename(recording)
                        numbered_recordings.append((recording, f"{idx}. (unknown)"))

                    return numbered_recordings

                recordings_gallery = gr.Gallery(
                    label="Recordings",
                    columns=3,
                    height="auto",
                    object_fit="contain"
                )

                refresh_button = gr.Button("🔄 Refresh Recordings", variant="secondary")
                refresh_button.click(
                    fn=list_recordings,
                    inputs=save_recording_path,
                    outputs=recordings_gallery
                )

            with gr.TabItem("📁 UI Configuration", id=8):
                config_file_input = gr.File(
                    label="Load UI Settings from Config File",
                    file_types=[".json"],
                    interactive=True
                )
                with gr.Row():
                    load_config_button = gr.Button("Load Config", variant="primary")
                    save_config_button = gr.Button("Save UI Settings", variant="primary")
                config_status = gr.Textbox(
                    label="Status",
                    lines=2,
                    interactive=False
                )

                save_config_button.click(
                    fn=save_current_config,
                    inputs=[],  # no inputs needed
                    outputs=[config_status]
                )

        # Attach the callback to the LLM provider dropdown so the model list
        # follows the selected provider.
        llm_provider.change(
            lambda provider, api_key, base_url: update_model_dropdown(provider, api_key, base_url),
            inputs=[llm_provider, llm_api_key, llm_base_url],
            outputs=llm_model_name
        )

        # The recording-path textbox is only editable while recording is on.
        enable_recording.change(
            lambda enabled: gr.update(interactive=enabled),
            inputs=enable_recording,
            outputs=save_recording_path
        )

        # Changing browser ownership/persistence invalidates the shared browser.
        use_own_browser.change(fn=close_global_browser)
        keep_browser_open.change(fn=close_global_browser)

        # Register every component so UI state can be saved/restored.
        scan_and_register_components(demo)
        global webui_config_manager
        all_components = webui_config_manager.get_all_components()

        load_config_button.click(
            fn=update_ui_from_config,
            inputs=[config_file_input],
            outputs=all_components + [config_status]
        )
    return demo
def main():
    """CLI entry point: parse arguments and launch the Gradio web UI."""
    arg_parser = argparse.ArgumentParser(description="Gradio UI for Browser Agent")
    arg_parser.add_argument("--ip", type=str, default="127.0.0.1", help="IP address to bind to")
    arg_parser.add_argument("--port", type=int, default=7788, help="Port to listen on")
    arg_parser.add_argument("--theme", type=str, default="Ocean", choices=theme_map.keys(), help="Theme to use for the UI")
    opts = arg_parser.parse_args()

    app = create_ui(theme_name=opts.theme)
    app.launch(server_name=opts.ip, server_port=opts.port)


if __name__ == '__main__':
    main()
| 10,405
|
fd2db2e8e4e8e0c963a75973337bf92eb962b8aa11d1537fe800196257e8cc11
| 37.955833
| 395
| 0.49969
| 4.492744
| false
| false
| false
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.