| sample_id (string, 21–196 chars) | text (string, 105–936k chars) | metadata (dict) | category (6 classes) |
|---|---|---|---|
apache/airflow:providers/microsoft/azure/tests/system/microsoft/azure/example_powerbi_workspace_list.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import os
from datetime import datetime
from airflow import DAG, settings
try:
from airflow.sdk import task
except ImportError:
# Airflow 2 path
from airflow.decorators import task # type: ignore[attr-defined,no-redef]
from airflow.models import Connection
from airflow.models.baseoperator import chain
from airflow.providers.microsoft.azure.operators.powerbi import PowerBIWorkspaceListOperator
# Identifiers for the example DAG and the Power BI connection it creates.
DAG_ID = "example_powerbi_workspace_list"
CONN_ID = "powerbi_default"

# Before running this system test, you should set the following environment variables:
CLIENT_ID = os.environ.get("CLIENT_ID", None)  # used as the connection login below
CLIENT_SECRET = os.environ.get("CLIENT_SECRET", None)  # used as the connection password
TENANT_ID = os.environ.get("TENANT_ID", None)  # stored in the connection extras
@task
def create_connection(conn_id_name: str):
    """Create and persist a Power BI connection built from the env-provided credentials.

    :param conn_id_name: connection id to register the new Connection under.
    :raises RuntimeError: if the ORM session factory has not been configured.
    """
    conn = Connection(
        conn_id=conn_id_name,
        conn_type="powerbi",
        login=CLIENT_ID,
        password=CLIENT_SECRET,
        extra={"tenant_id": TENANT_ID},
    )
    if settings.Session is None:
        raise RuntimeError("Session not configured. Call configure_orm() first.")
    session = settings.Session()
    try:
        session.add(conn)
        session.commit()
    finally:
        # Always release the session back to the pool, even if the commit fails.
        session.close()
with DAG(
    dag_id=DAG_ID,
    start_date=datetime(2021, 1, 1),
    schedule=None,
    tags=["example"],
) as dag:
    # TEST SETUP: register the connection the operator below will use.
    set_up_connection = create_connection(CONN_ID)

    # [START howto_operator_powerbi_workspace_list_async]
    get_powerbi_workspace_list = PowerBIWorkspaceListOperator(
        conn_id="powerbi_default",
        task_id="get_powerbi_workspace_list",
        timeout=120,
    )
    # [END howto_operator_powerbi_workspace_list_async]

    # TEST BODY: runs only after the connection exists.
    set_up_connection >> get_powerbi_workspace_list

    from tests_common.test_utils.watcher import watcher

    # This test needs watcher in order to properly mark success/failure
    # when "tearDown" task with trigger rule is part of the DAG
    list(dag.tasks) >> watcher()

from tests_common.test_utils.system_tests import get_test_run  # noqa: E402

# Needed to run the example DAG with pytest (see: tests/system/README.md#run_via_pytest)
test_run = get_test_run(dag)
| {
"repo_id": "apache/airflow",
"file_path": "providers/microsoft/azure/tests/system/microsoft/azure/example_powerbi_workspace_list.py",
"license": "Apache License 2.0",
"lines": 75,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
apache/airflow:providers/microsoft/azure/tests/unit/microsoft/azure/operators/test_powerbi_list.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from unittest import mock
from unittest.mock import MagicMock
import pytest
from airflow.providers.common.compat.sdk import AirflowException, BaseHook, TaskDeferred
from airflow.providers.microsoft.azure.operators.powerbi import (
PowerBIDatasetListOperator,
PowerBIWorkspaceListOperator,
)
from airflow.providers.microsoft.azure.triggers.powerbi import (
PowerBIDatasetListTrigger,
PowerBIWorkspaceListTrigger,
)
from unit.microsoft.azure.test_utils import get_airflow_connection
try:
from airflow.sdk import timezone
except ImportError:
from airflow.utils import timezone # type: ignore[no-redef]
# Connection / task identifiers shared by both operator test classes.
DEFAULT_CONNECTION_CLIENT_SECRET = "powerbi_conn_id"
TASK_ID = "run_powerbi_operators"
GROUP_ID = "group_id"

# Canned id lists carried by the "success" trigger events below.
DATASET_LIST_ID = ["5e2d9921-e91b-491f-b7e1-e7d8db49194c"]
WORKSPACE_LIST_ID = ["5e2d9921-e91b-491f-b7e1-e7d8db49194c"]

# Constructor kwargs for the dataset- and workspace-list operators under test.
CONFIG_DATASETS = {
    "task_id": TASK_ID,
    "conn_id": DEFAULT_CONNECTION_CLIENT_SECRET,
    "group_id": GROUP_ID,
}
CONFIG_WORKSPACES = {
    "task_id": TASK_ID,
    "conn_id": DEFAULT_CONNECTION_CLIENT_SECRET,
}

# Success payloads as delivered to execute_complete by the triggers.
SUCCESS_LIST_EVENT_DATASETS = {
    "status": "success",
    "message": "success",
    "dataset_ids": DATASET_LIST_ID,
}
SUCCESS_LIST_EVENT_WORKSPACES = {
    "status": "success",
    "message": "success",
    "workspace_ids": WORKSPACE_LIST_ID,
}

DEFAULT_DATE = timezone.datetime(2021, 1, 1)
class TestPowerBIDatasetListOperator:
    """Unit tests for PowerBIDatasetListOperator."""

    @mock.patch.object(BaseHook, "get_connection", side_effect=get_airflow_connection)
    def test_powerbi_operator_async_get_dataset_list_success(self, connection):
        """Assert that execute defers with a dataset-list trigger."""
        operator = PowerBIDatasetListOperator(**CONFIG_DATASETS)
        ti = MagicMock()
        ti.task_id = TASK_ID
        with pytest.raises(TaskDeferred) as deferral:
            operator.execute(context={"ti": ti})
        trigger = deferral.value.trigger
        assert isinstance(trigger, PowerBIDatasetListTrigger)
        assert trigger.dataset_ids is None
        assert str(trigger.group_id) == GROUP_ID

    def test_powerbi_operator_async_execute_complete_success(self):
        """Assert that execute_complete pushes the dataset ids on success."""
        operator = PowerBIDatasetListOperator(**CONFIG_DATASETS)
        ti = MagicMock()
        operator.execute_complete(context={"ti": ti}, event=SUCCESS_LIST_EVENT_DATASETS)
        assert ti.xcom_push.call_count == 1

    def test_powerbi_operator_async_execute_complete_fail(self):
        """Assert that execute_complete raises on an error event."""
        operator = PowerBIDatasetListOperator(**CONFIG_DATASETS)
        ti = MagicMock()
        error_event = {"status": "error", "message": "error", "dataset_ids": None}
        with pytest.raises(AirflowException) as error:
            operator.execute_complete(context={"ti": ti}, event=error_event)
        assert ti.xcom_push.call_count == 1
        assert str(error.value) == "error"

    def test_powerbi_operator_dataset_list_fail(self):
        """Assert that the error event's message becomes the exception text."""
        operator = PowerBIDatasetListOperator(**CONFIG_DATASETS)
        ti = MagicMock()
        error_event = {"status": "error", "message": "error message", "dataset_ids": None}
        with pytest.raises(AirflowException) as error:
            operator.execute_complete(context={"ti": ti}, event=error_event)
        assert ti.xcom_push.call_count == 1
        assert str(error.value) == "error message"

    def test_execute_complete_no_event(self):
        """Assert that execute_complete is a no-op when no event is delivered."""
        operator = PowerBIDatasetListOperator(**CONFIG_DATASETS)
        ti = MagicMock()
        operator.execute_complete(context={"ti": ti}, event=None)
        assert ti.xcom_push.call_count == 0
class TestPowerBIWorkspaceListOperator:
    """Unit tests for PowerBIWorkspaceListOperator."""

    @mock.patch.object(BaseHook, "get_connection", side_effect=get_airflow_connection)
    def test_powerbi_operator_async_get_workspace_list_success(self, connection):
        """Assert that execute defers with a workspace-list trigger."""
        operator = PowerBIWorkspaceListOperator(**CONFIG_WORKSPACES)
        ti = MagicMock()
        ti.task_id = TASK_ID
        with pytest.raises(TaskDeferred) as deferral:
            operator.execute(context={"ti": ti})
        trigger = deferral.value.trigger
        assert isinstance(trigger, PowerBIWorkspaceListTrigger)
        assert trigger.workspace_ids is None

    def test_powerbi_operator_async_execute_complete_success(self):
        """Assert that execute_complete pushes the workspace ids on success."""
        operator = PowerBIWorkspaceListOperator(**CONFIG_WORKSPACES)
        ti = MagicMock()
        operator.execute_complete(context={"ti": ti}, event=SUCCESS_LIST_EVENT_WORKSPACES)
        assert ti.xcom_push.call_count == 1

    def test_powerbi_operator_async_execute_complete_fail(self):
        """Assert that execute_complete raises on an error event."""
        operator = PowerBIWorkspaceListOperator(**CONFIG_WORKSPACES)
        ti = MagicMock()
        error_event = {"status": "error", "message": "error", "workspace_ids": None}
        with pytest.raises(AirflowException) as error:
            operator.execute_complete(context={"ti": ti}, event=error_event)
        assert ti.xcom_push.call_count == 1
        assert str(error.value) == "error"

    def test_powerbi_operator_workspace_list_fail(self):
        """Assert that the error event's message becomes the exception text."""
        operator = PowerBIWorkspaceListOperator(**CONFIG_WORKSPACES)
        ti = MagicMock()
        error_event = {"status": "error", "message": "error message", "workspace_ids": None}
        with pytest.raises(AirflowException) as error:
            operator.execute_complete(context={"ti": ti}, event=error_event)
        assert ti.xcom_push.call_count == 1
        assert str(error.value) == "error message"

    def test_execute_complete_no_event(self):
        """Assert that execute_complete is a no-op when no event is delivered."""
        operator = PowerBIWorkspaceListOperator(**CONFIG_WORKSPACES)
        ti = MagicMock()
        operator.execute_complete(context={"ti": ti}, event=None)
        assert ti.xcom_push.call_count == 0
| {
"repo_id": "apache/airflow",
"file_path": "providers/microsoft/azure/tests/unit/microsoft/azure/operators/test_powerbi_list.py",
"license": "Apache License 2.0",
"lines": 202,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
apache/airflow:providers/amazon/tests/unit/amazon/aws/queues/test_sqs.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import pytest
from airflow.providers.amazon.aws.triggers.sqs import SqsSensorTrigger
pytest.importorskip("airflow.providers.common.messaging.providers.base_provider")
def test_message_sqs_queue_create():
    """The SQS provider is a concrete BaseMessageQueueProvider."""
    from airflow.providers.amazon.aws.queues.sqs import SqsMessageQueueProvider
    from airflow.providers.common.messaging.providers.base_provider import BaseMessageQueueProvider

    assert isinstance(SqsMessageQueueProvider(), BaseMessageQueueProvider)
def test_message_sqs_queue_matches():
    """Only a full queue URL (regional host + account + queue name) matches."""
    from airflow.providers.amazon.aws.queues.sqs import SqsMessageQueueProvider

    provider = SqsMessageQueueProvider()
    assert provider.queue_matches("https://sqs.us-east-1.amazonaws.com/123456789012/my-queue")
    incomplete_urls = [
        "https://sqs.us-east-1.amazonaws.com/123456789012",
        "https://sqs.us-east-1.amazonaws.com/123456789012/",
        "https://sqs.us-east-1.amazonaws.com/",
    ]
    assert not any(provider.queue_matches(url) for url in incomplete_urls)
@pytest.mark.parametrize(
    ("scheme", "expected_result"),
    [
        pytest.param("sqs", True, id="sqs_scheme"),
        pytest.param("kafka", False, id="kafka_scheme"),
        pytest.param("redis+pubsub", False, id="redis_scheme"),
        pytest.param("unknown", False, id="unknown_scheme"),
    ],
)
def test_message_sqs_scheme_matches(scheme, expected_result):
    """scheme_matches accepts exactly the 'sqs' scheme and rejects all others."""
    from airflow.providers.amazon.aws.queues.sqs import SqsMessageQueueProvider

    result = SqsMessageQueueProvider().scheme_matches(scheme)
    assert result == expected_result
def test_message_sqs_queue_trigger_class():
    """The provider wires queue messages to SqsSensorTrigger."""
    from airflow.providers.amazon.aws.queues.sqs import SqsMessageQueueProvider

    assert SqsMessageQueueProvider().trigger_class() == SqsSensorTrigger
def test_message_sqs_queue_trigger_kwargs():
    """trigger_kwargs passes the queue URL through as the sqs_queue kwarg."""
    from airflow.providers.amazon.aws.queues.sqs import SqsMessageQueueProvider

    queue_url = "https://sqs.us-east-1.amazonaws.com/123456789012/my-queue"
    kwargs = SqsMessageQueueProvider().trigger_kwargs(queue_url)
    assert kwargs == {"sqs_queue": queue_url}
| {
"repo_id": "apache/airflow",
"file_path": "providers/amazon/tests/unit/amazon/aws/queues/test_sqs.py",
"license": "Apache License 2.0",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
apache/airflow:providers/fab/src/airflow/providers/fab/www/extensions/init_wsgi_middlewares.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING
from werkzeug.middleware.proxy_fix import ProxyFix
from airflow.configuration import conf
if TYPE_CHECKING:
from flask import Flask
def init_wsgi_middleware(flask_app: Flask) -> None:
    """Handle X-Forwarded-* headers and base_url support."""
    if not conf.getboolean("fab", "ENABLE_PROXY_FIX"):
        # Proxy-fix support is opt-in; leave the WSGI app untouched otherwise.
        return
    # Each knob defaults to trusting a single proxy hop.
    proxy_args = {
        "x_for": conf.getint("fab", "PROXY_FIX_X_FOR", fallback=1),
        "x_proto": conf.getint("fab", "PROXY_FIX_X_PROTO", fallback=1),
        "x_host": conf.getint("fab", "PROXY_FIX_X_HOST", fallback=1),
        "x_port": conf.getint("fab", "PROXY_FIX_X_PORT", fallback=1),
        "x_prefix": conf.getint("fab", "PROXY_FIX_X_PREFIX", fallback=1),
    }
    flask_app.wsgi_app = ProxyFix(flask_app.wsgi_app, **proxy_args)  # type: ignore
| {
"repo_id": "apache/airflow",
"file_path": "providers/fab/src/airflow/providers/fab/www/extensions/init_wsgi_middlewares.py",
"license": "Apache License 2.0",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
apache/airflow:task-sdk/tests/task_sdk/execution_time/test_lazy_sequence.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from unittest.mock import Mock, call
import pytest
import airflow
from airflow.sdk.bases.xcom import BaseXCom
from airflow.sdk.exceptions import ErrorType
from airflow.sdk.execution_time.comms import (
ErrorResponse,
GetXComCount,
GetXComSequenceItem,
GetXComSequenceSlice,
XComCountResponse,
XComSequenceIndexResult,
XComSequenceSliceResult,
)
from airflow.sdk.execution_time.lazy_sequence import LazyXComSequence
from airflow.sdk.execution_time.xcom import resolve_xcom_backend
from tests_common.test_utils.config import conf_vars
@pytest.fixture
def mock_operator():
    """Minimal operator stand-in exposing only dag_id/task_id."""
    return Mock(spec=["dag_id", "task_id"], dag_id="dag", task_id="task")


@pytest.fixture
def mock_xcom_arg(mock_operator):
    """XComArg stand-in bound to the mock operator and the return-value key."""
    return Mock(spec=["operator", "key"], operator=mock_operator, key=BaseXCom.XCOM_RETURN_KEY)


@pytest.fixture
def mock_ti():
    """Task-instance stand-in exposing only run_id."""
    return Mock(spec=["run_id"], run_id="run")


@pytest.fixture
def lazy_sequence(mock_xcom_arg, mock_ti):
    """LazyXComSequence under test, built from the mock XComArg and TI."""
    return LazyXComSequence(mock_xcom_arg, mock_ti)
class CustomXCom(BaseXCom):
    """XCom backend used to prove the configured deserializer is invoked."""

    @classmethod
    def deserialize_value(cls, xcom):
        # Tag the value so tests can tell this backend handled it.
        return f"Made with CustomXCom: {xcom.value}"
def test_len(mock_supervisor_comms, lazy_sequence):
    """len() issues a single GetXComCount request and returns its count."""
    mock_supervisor_comms.send.return_value = XComCountResponse(len=3)

    assert len(lazy_sequence) == 3

    expected = GetXComCount(key=BaseXCom.XCOM_RETURN_KEY, dag_id="dag", task_id="task", run_id="run")
    mock_supervisor_comms.send.assert_called_once_with(msg=expected)
def test_iter(mock_supervisor_comms, lazy_sequence):
    """Iteration fetches items by offset until XCOM_NOT_FOUND ends the sequence."""
    it = iter(lazy_sequence)
    mock_supervisor_comms.send.side_effect = [
        XComSequenceIndexResult(root="f"),
        ErrorResponse(error=ErrorType.XCOM_NOT_FOUND, detail={"oops": "sorry!"}),
    ]

    assert list(it) == ["f"]

    def item_request(offset):
        # One GetXComSequenceItem per consumed offset.
        return call(
            msg=GetXComSequenceItem(
                key=BaseXCom.XCOM_RETURN_KEY,
                dag_id="dag",
                task_id="task",
                run_id="run",
                offset=offset,
            ),
        )

    mock_supervisor_comms.send.assert_has_calls([item_request(0), item_request(1)])
def test_getitem_index(mock_supervisor_comms, lazy_sequence):
    """Integer indexing issues one GetXComSequenceItem at that offset."""
    mock_supervisor_comms.send.return_value = XComSequenceIndexResult(root="f")

    assert lazy_sequence[4] == "f"

    expected = GetXComSequenceItem(
        key=BaseXCom.XCOM_RETURN_KEY, dag_id="dag", task_id="task", run_id="run", offset=4
    )
    mock_supervisor_comms.send.assert_called_once_with(expected)
@conf_vars({("core", "xcom_backend"): "task_sdk.execution_time.test_lazy_sequence.CustomXCom"})
def test_getitem_calls_correct_deserialise(monkeypatch, mock_supervisor_comms, lazy_sequence):
    """Indexing routes the raw value through the configured XCom backend."""
    mock_supervisor_comms.send.return_value = XComSequenceIndexResult(root="some-value")

    backend = resolve_xcom_backend()
    assert backend.__name__ == "CustomXCom"
    monkeypatch.setattr(airflow.sdk.execution_time.xcom, "XCom", backend)

    assert lazy_sequence[4] == "Made with CustomXCom: some-value"

    expected = GetXComSequenceItem(
        key=BaseXCom.XCOM_RETURN_KEY, dag_id="dag", task_id="task", run_id="run", offset=4
    )
    mock_supervisor_comms.send.assert_called_once_with(expected)
def test_getitem_indexerror(mock_supervisor_comms, lazy_sequence):
    """An XCOM_NOT_FOUND response surfaces as IndexError carrying the offset."""
    mock_supervisor_comms.send.return_value = ErrorResponse(
        error=ErrorType.XCOM_NOT_FOUND, detail={"oops": "sorry!"}
    )

    with pytest.raises(IndexError) as exc_info:
        lazy_sequence[4]

    assert exc_info.value.args == (4,)
    expected = GetXComSequenceItem(
        key=BaseXCom.XCOM_RETURN_KEY, dag_id="dag", task_id="task", run_id="run", offset=4
    )
    mock_supervisor_comms.send.assert_called_once_with(expected)
def test_getitem_slice(mock_supervisor_comms, lazy_sequence):
    """Slicing issues one GetXComSequenceSlice carrying the slice bounds."""
    mock_supervisor_comms.send.return_value = XComSequenceSliceResult(root=[6, 4, 1])

    assert lazy_sequence[:5] == [6, 4, 1]

    expected = GetXComSequenceSlice(
        key=BaseXCom.XCOM_RETURN_KEY,
        dag_id="dag",
        task_id="task",
        run_id="run",
        start=None,
        stop=5,
        step=None,
    )
    mock_supervisor_comms.send.assert_called_once_with(expected)
| {
"repo_id": "apache/airflow",
"file_path": "task-sdk/tests/task_sdk/execution_time/test_lazy_sequence.py",
"license": "Apache License 2.0",
"lines": 144,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
apache/airflow:providers/exasol/tests/system/exasol/example_exasol.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This is an example DAG for the use of the SQLExecuteQueryOperator with Exasol.
"""
from __future__ import annotations
import datetime
from airflow import DAG
from airflow.providers.common.sql.operators.sql import SQLExecuteQueryOperator
from airflow.providers.exasol.hooks.exasol import exasol_fetch_all_handler
DAG_ID = "example_exasol"

with DAG(
    dag_id=DAG_ID,
    start_date=datetime.datetime(2025, 1, 1),
    default_args={"conn_id": "my_exasol_conn", "handler": exasol_fetch_all_handler},
    schedule="@once",
    catchup=False,
) as dag:
    # [START howto_operator_exasol]
    create_table_exasol = SQLExecuteQueryOperator(
        task_id="create_table_exasol",
        sql="""
        CREATE OR REPLACE TABLE exasol_example (
        a VARCHAR(100),
        b DECIMAL(18,0)
        );
        """,
    )
    alter_table_exasol = SQLExecuteQueryOperator(
        task_id="alter_table_exasol",
        sql="ALTER TABLE exasol_example ADD COLUMN c DECIMAL(18,0);",
    )
    insert_data_exasol = SQLExecuteQueryOperator(
        task_id="insert_data_exasol",
        sql="""
        INSERT INTO exasol_example (a, b, c)
        VALUES
        ('a', 1, 1),
        ('a', 2, 1),
        ('b', 3, 1);
        """,
    )
    select_data_exasol = SQLExecuteQueryOperator(
        task_id="select_data_exasol",
        sql="SELECT * FROM exasol_example;",
    )
    drop_table_exasol = SQLExecuteQueryOperator(
        task_id="drop_table_exasol",
        sql="DROP TABLE exasol_example;",
    )
    # [END howto_operator_exasol]

    # create -> alter -> insert -> select -> drop
    create_table_exasol >> alter_table_exasol >> insert_data_exasol
    insert_data_exasol >> select_data_exasol >> drop_table_exasol

    from tests_common.test_utils.watcher import watcher

    # Watcher marks success/failure when trigger-rule teardown tasks are present.
    list(dag.tasks) >> watcher()

from tests_common.test_utils.system_tests import get_test_run  # noqa: E402

# Needed to run the example DAG with pytest (see: tests/system/README.md#run_via_pytest)
test_run = get_test_run(dag)
| {
"repo_id": "apache/airflow",
"file_path": "providers/exasol/tests/system/exasol/example_exasol.py",
"license": "Apache License 2.0",
"lines": 77,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
apache/airflow:helm-tests/tests/helm_tests/apiserver/test_ingress_apiserver.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import jmespath
import pytest
from chart_utils.helm_template_generator import render_chart
class TestIngressAPIServer:
    """Tests ingress API Server."""

    def test_should_pass_validation_with_just_ingress_enabled(self):
        """Chart renders without error when only the ingress is enabled."""
        render_chart(
            values={"ingress": {"apiServer": {"enabled": True}}, "airflowVersion": "3.0.0"},
            show_only=["templates/api-server/api-server-ingress.yaml"],
        )

    def test_should_allow_more_than_one_annotation(self):
        """All configured annotations land on the rendered ingress metadata."""
        docs = render_chart(
            values={
                "airflowVersion": "3.0.0",
                "ingress": {"apiServer": {"enabled": True, "annotations": {"aa": "bb", "cc": "dd"}}},
            },
            show_only=["templates/api-server/api-server-ingress.yaml"],
        )
        assert jmespath.search("metadata.annotations", docs[0]) == {"aa": "bb", "cc": "dd"}

    def test_should_set_ingress_class_name(self):
        """The ingressClassName value is propagated to the rendered spec."""
        docs = render_chart(
            values={
                "airflowVersion": "3.0.0",
                "ingress": {"apiServer": {"enabled": True, "ingressClassName": "foo"}},
            },
            show_only=["templates/api-server/api-server-ingress.yaml"],
        )
        assert jmespath.search("spec.ingressClassName", docs[0]) == "foo"

    def test_should_ingress_hosts_objs_have_priority_over_host(self):
        """hosts entries (dict form) override the deprecated host/tls values."""
        docs = render_chart(
            values={
                "airflowVersion": "3.0.0",
                "ingress": {
                    "apiServer": {
                        "enabled": True,
                        "tls": {"enabled": True, "secretName": "oldsecret"},
                        "hosts": [
                            {"name": "*.a-host", "tls": {"enabled": True, "secretName": "newsecret1"}},
                            {"name": "b-host", "tls": {"enabled": True, "secretName": "newsecret2"}},
                            {"name": "c-host", "tls": {"enabled": True, "secretName": "newsecret1"}},
                            {"name": "d-host", "tls": {"enabled": False, "secretName": ""}},
                            {"name": "e-host"},
                        ],
                        "host": "old-host",
                    },
                },
            },
            show_only=["templates/api-server/api-server-ingress.yaml"],
        )
        assert jmespath.search("spec.rules[*].host", docs[0]) == [
            "*.a-host",
            "b-host",
            "c-host",
            "d-host",
            "e-host",
        ]
        # Only hosts whose tls is enabled get an entry in spec.tls.
        assert jmespath.search("spec.tls[*]", docs[0]) == [
            {"hosts": ["*.a-host"], "secretName": "newsecret1"},
            {"hosts": ["b-host"], "secretName": "newsecret2"},
            {"hosts": ["c-host"], "secretName": "newsecret1"},
        ]

    def test_should_ingress_hosts_strs_have_priority_over_host(self):
        """hosts entries (string form) override the deprecated host value."""
        docs = render_chart(
            values={
                "airflowVersion": "3.0.0",
                "ingress": {
                    "apiServer": {
                        "enabled": True,
                        "tls": {"enabled": True, "secretName": "secret"},
                        "hosts": ["*.a-host", "b-host", "c-host", "d-host"],
                        "host": "old-host",
                    },
                },
            },
            show_only=["templates/api-server/api-server-ingress.yaml"],
        )
        assert jmespath.search("spec.rules[*].host", docs[0]) == ["*.a-host", "b-host", "c-host", "d-host"]
        assert jmespath.search("spec.tls[*]", docs[0]) == [
            {"hosts": ["*.a-host", "b-host", "c-host", "d-host"], "secretName": "secret"}
        ]

    def test_should_ingress_deprecated_host_and_top_level_tls_still_work(self):
        """Deprecated host + top-level tls still produce both rules and tls hosts."""
        docs = render_chart(
            values={
                "airflowVersion": "3.0.0",
                "ingress": {
                    "apiServer": {
                        "enabled": True,
                        "tls": {"enabled": True, "secretName": "supersecret"},
                        "host": "old-host",
                    },
                },
            },
            show_only=["templates/api-server/api-server-ingress.yaml"],
        )
        assert (
            ["old-host"]
            == jmespath.search("spec.rules[*].host", docs[0])
            == jmespath.search("spec.tls[0].hosts", docs[0])
        )

    def test_should_ingress_host_entry_not_exist(self):
        """With no hosts configured, the rendered rules carry no host entries."""
        docs = render_chart(
            values={
                "airflowVersion": "3.0.0",
                "ingress": {
                    "apiServer": {
                        "enabled": True,
                    }
                },
            },
            show_only=["templates/api-server/api-server-ingress.yaml"],
        )
        assert not jmespath.search("spec.rules[*].host", docs[0])

    @pytest.mark.parametrize(
        ("global_value", "api_server_value", "expected"),
        [
            (None, None, False),
            (None, False, False),
            (None, True, True),
            (False, None, False),
            (True, None, True),
            (False, True, True),  # We will deploy it if _either_ are true
            (True, False, True),
        ],
    )
    def test_ingress_created(self, global_value, api_server_value, expected):
        """Ingress is rendered iff either the global or apiServer flag is true."""
        values = {"airflowVersion": "3.0.0", "ingress": {}}
        if global_value is not None:
            values["ingress"]["enabled"] = global_value
        if api_server_value is not None:
            values["ingress"]["apiServer"] = {"enabled": api_server_value}
        if values["ingress"] == {}:
            # Neither flag set: drop the key to exercise the chart defaults.
            del values["ingress"]
        docs = render_chart(values=values, show_only=["templates/api-server/api-server-ingress.yaml"])
        assert expected == (len(docs) == 1)

    def test_should_add_component_specific_labels(self):
        """apiServer labels are merged into the ingress metadata labels."""
        docs = render_chart(
            values={
                "airflowVersion": "3.0.0",
                "ingress": {"enabled": True},
                "apiServer": {
                    "labels": {"test_label": "test_label_value"},
                },
            },
            show_only=["templates/api-server/api-server-ingress.yaml"],
        )
        assert "test_label" in jmespath.search("metadata.labels", docs[0])
        assert jmespath.search("metadata.labels", docs[0])["test_label"] == "test_label_value"

    def test_can_ingress_hosts_be_templated(self):
        """Host names may contain Helm template expressions and are rendered."""
        docs = render_chart(
            values={
                "airflowVersion": "3.0.0",
                "testValues": {
                    "scalar": "aa",
                    "list": ["bb", "cc"],
                    "dict": {
                        "key": "dd",
                    },
                },
                "ingress": {
                    "apiServer": {
                        "enabled": True,
                        "hosts": [
                            {
                                "name": "*.{{ .Release.Namespace }}.example.com",
                                "tls": {"enabled": True, "secretName": "secret1"},
                            },
                            {
                                "name": "{{ .Values.testValues.scalar }}.example.com",
                                "tls": {"enabled": True, "secretName": "secret2"},
                            },
                            {"name": "{{ index .Values.testValues.list 1 }}.example.com"},
                            {"name": "{{ .Values.testValues.dict.key }}.example.com"},
                        ],
                    },
                },
            },
            show_only=["templates/api-server/api-server-ingress.yaml"],
            namespace="airflow",
        )
        assert jmespath.search("spec.rules[*].host", docs[0]) == [
            "*.airflow.example.com",
            "aa.example.com",
            "cc.example.com",
            "dd.example.com",
        ]
        assert jmespath.search("spec.tls[*]", docs[0]) == [
            {"hosts": ["*.airflow.example.com"], "secretName": "secret1"},
            {"hosts": ["aa.example.com"], "secretName": "secret2"},
        ]

    def test_backend_service_name(self):
        """Default backend service name follows the release-name convention."""
        docs = render_chart(
            values={"airflowVersion": "3.0.0", "ingress": {"apiServer": {"enabled": True}}},
            show_only=["templates/api-server/api-server-ingress.yaml"],
        )
        assert (
            jmespath.search("spec.rules[0].http.paths[0].backend.service.name", docs[0])
            == "release-name-api-server"
        )

    def test_backend_service_name_with_fullname_override(self):
        """fullnameOverride + useStandardNaming changes the backend service name."""
        docs = render_chart(
            values={
                "airflowVersion": "3.0.0",
                "fullnameOverride": "test-basic",
                "useStandardNaming": True,
                "ingress": {"apiServer": {"enabled": True}},
            },
            show_only=["templates/api-server/api-server-ingress.yaml"],
        )
        assert (
            jmespath.search("spec.rules[0].http.paths[0].backend.service.name", docs[0])
            == "test-basic-api-server"
        )
| {
"repo_id": "apache/airflow",
"file_path": "helm-tests/tests/helm_tests/apiserver/test_ingress_apiserver.py",
"license": "Apache License 2.0",
"lines": 229,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
assafelovic/gpt-researcher:backend/server/multi_agent_runner.py | import os
import sys
from typing import Any, Awaitable, Callable
# Signature of the research entry point resolved at call time.
RunResearchTask = Callable[..., Awaitable[Any]]


def _ensure_repo_root_on_path() -> None:
    """Ensure top-level repo root is importable for multi-agent modules."""
    here = os.path.dirname(__file__)
    repo_root = os.path.abspath(os.path.join(here, "..", ".."))
    if repo_root not in sys.path:
        sys.path.insert(0, repo_root)


def _resolve_run_research_task() -> RunResearchTask:
    """Locate run_research_task, preferring multi_agents over multi_agents_ag2."""
    _ensure_repo_root_on_path()
    try:
        from multi_agents.main import run_research_task
    except Exception:
        pass
    else:
        return run_research_task
    try:
        from multi_agents_ag2.main import run_research_task
    except Exception as ag2_error:
        # Neither package is importable: surface a single, clear ImportError.
        raise ImportError(
            "Could not import run_research_task from multi_agents or multi_agents_ag2"
        ) from ag2_error
    return run_research_task


async def run_multi_agent_task(*args, **kwargs) -> Any:
    """Resolve the research entry point lazily and await it with the given args."""
    runner = _resolve_run_research_task()
    return await runner(*args, **kwargs)
| {
"repo_id": "assafelovic/gpt-researcher",
"file_path": "backend/server/multi_agent_runner.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
assafelovic/gpt-researcher:multi_agents_ag2/agents/editor.py | from datetime import datetime
from typing import Dict, Optional, List
from multi_agents.agents.utils.views import print_agent_output
from multi_agents.agents.utils.llms import call_model
class EditorAgent:
    """Agent responsible for planning the research outline."""

    def __init__(self, websocket=None, stream_output=None, tone=None, headers=None):
        self.websocket = websocket
        self.stream_output = stream_output
        self.tone = tone
        self.headers = headers or {}

    # Fixed: annotations previously used the builtin `any` instead of typing.Any.
    async def plan_research(self, research_state: Dict[str, Any]) -> Dict[str, Any]:
        """Plan the research outline from the initial research summary.

        Args:
            research_state: State dict carrying 'initial_research', 'task' and
                optionally 'human_feedback'.

        Returns:
            Dict with 'title', 'date' and 'sections' extracted from the
            model's JSON plan.
        """
        initial_research = research_state.get("initial_research")
        task = research_state.get("task")
        include_human_feedback = task.get("include_human_feedback")
        human_feedback = research_state.get("human_feedback")
        max_sections = task.get("max_sections")
        prompt = self._create_planning_prompt(
            initial_research, include_human_feedback, human_feedback, max_sections
        )
        print_agent_output(
            "Planning an outline layout based on initial research...", agent="EDITOR"
        )
        # Ask the model for a JSON plan; surface only the fields we rely on.
        plan = await call_model(
            prompt=prompt,
            model=task.get("model"),
            response_format="json",
        )
        return {
            "title": plan.get("title"),
            "date": plan.get("date"),
            "sections": plan.get("sections"),
        }

    def _create_planning_prompt(
        self,
        initial_research: str,
        include_human_feedback: bool,
        human_feedback: Optional[str],
        max_sections: int,
    ) -> List[Dict[str, str]]:
        """Build the system/user message pair for outline planning."""
        return [
            {
                "role": "system",
                "content": "You are a research editor. Your goal is to oversee the research project "
                "from inception to completion. Your main task is to plan the article section "
                "layout based on an initial research summary.\n ",
            },
            {
                "role": "user",
                "content": self._format_planning_instructions(
                    initial_research, include_human_feedback, human_feedback, max_sections
                ),
            },
        ]

    def _format_planning_instructions(
        self,
        initial_research: str,
        include_human_feedback: bool,
        human_feedback: Optional[str],
        max_sections: int,
    ) -> str:
        """Render the user-message instructions, optionally embedding feedback.

        Feedback is only injected when enabled, present, and not the literal
        opt-out answer "no".
        """
        today = datetime.now().strftime("%d/%m/%Y")
        feedback_instruction = (
            f"Human feedback: {human_feedback}. You must plan the sections based on the human feedback."
            if include_human_feedback and human_feedback and human_feedback != "no"
            else ""
        )
        return f"""Today's date is {today}
Research summary report: '{initial_research}'
{feedback_instruction}
\nYour task is to generate an outline of sections headers for the research project
based on the research summary report above.
You must generate a maximum of {max_sections} section headers.
You must focus ONLY on related research topics for subheaders and do NOT include introduction, conclusion and references.
You must return nothing but a JSON with the fields 'title' (str) and
'sections' (maximum {max_sections} section headers) with the following structure:
'{{title: string research title, date: today's date,
sections: ['section header 1', 'section header 2', 'section header 3' ...]}}'."""
| {
"repo_id": "assafelovic/gpt-researcher",
"file_path": "multi_agents_ag2/agents/editor.py",
"license": "Apache License 2.0",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
assafelovic/gpt-researcher:multi_agents_ag2/agents/orchestrator.py | import asyncio
import datetime
import os
import time
from typing import Any, Dict, List, Optional
from autogen import ConversableAgent, GroupChat, GroupChatManager, UserProxyAgent
from multi_agents.agents.utils.views import print_agent_output
from multi_agents.agents.utils.utils import sanitize_filename
from .editor import EditorAgent
from multi_agents.agents.human import HumanAgent
from multi_agents.agents.publisher import PublisherAgent
from multi_agents.agents.researcher import ResearchAgent
from multi_agents.agents.reviewer import ReviewerAgent
from multi_agents.agents.reviser import ReviserAgent
from multi_agents.agents.writer import WriterAgent
class ChiefEditorAgent:
    """AG2-orchestrated agent responsible for managing and coordinating tasks."""

    def __init__(self, task: dict, websocket=None, stream_output=None, tone=None, headers=None):
        """Set up run state and build the AG2 team.

        Args:
            task: Research task definition (query, model, limits, flags).
            websocket: Optional websocket for streaming progress to a client.
            stream_output: Optional async callable used together with websocket.
            tone: Tone forwarded to the editor/research worker agents.
            headers: Optional headers forwarded to worker agents.
        """
        self.task = task
        self.websocket = websocket
        self.stream_output = stream_output
        self.headers = headers or {}
        self.tone = tone
        self.task_id = self._generate_task_id()
        self.output_dir = self._create_output_directory()
        # AG2 side: named ConversableAgents plus a GroupChatManager, used by
        # _chat/_log as a structured message channel.
        self.ag2_agents, self.manager = self._initialize_ag2_team()

    def _generate_task_id(self) -> int:
        """Return a run id derived from the current unix timestamp (seconds)."""
        return int(time.time())

    def _create_output_directory(self) -> str:
        """Create and return ./outputs/run_<id>_<query-prefix> for artifacts."""
        # NOTE(review): assumes task['query'] is a string; a missing query
        # would raise TypeError on the slice — confirm callers always set it.
        output_dir = "./outputs/" + sanitize_filename(
            f"run_{self.task_id}_{self.task.get('query')[0:40]}"
        )
        os.makedirs(output_dir, exist_ok=True)
        return output_dir

    def _llm_config(self) -> Dict[str, Any]:
        """Build the AG2 llm_config; defaults to gpt-4o at temperature 0."""
        model_name = self.task.get("model") or "gpt-4o"
        return {
            "config_list": [
                {
                    "model": model_name,
                    "api_type": "openai",
                }
            ],
            "temperature": 0,
        }

    def _initialize_ag2_team(self):
        """Create the named AG2 agents and their GroupChatManager.

        The chat uses max_round=1 with manual speaker selection, and _chat
        sends messages with request_reply=False — the team acts as a message
        log rather than an autonomous multi-round conversation.
        """
        llm_config = self._llm_config()
        agents = {
            "chief_editor": ConversableAgent(
                name="ChiefEditor",
                system_message="You coordinate the multi-agent research workflow.",
                llm_config=llm_config,
            ),
            "editor": ConversableAgent(
                name="Editor",
                system_message="You plan the research outline from initial findings.",
                llm_config=llm_config,
            ),
            "researcher": ConversableAgent(
                name="Researcher",
                system_message="You perform initial and deep research tasks.",
                llm_config=llm_config,
            ),
            "reviewer": ConversableAgent(
                name="Reviewer",
                system_message="You review drafts against the given guidelines.",
                llm_config=llm_config,
            ),
            "reviser": ConversableAgent(
                name="Reviser",
                system_message="You revise drafts based on reviewer feedback.",
                llm_config=llm_config,
            ),
            "writer": ConversableAgent(
                name="Writer",
                system_message="You compile the final report from research drafts.",
                llm_config=llm_config,
            ),
            "publisher": ConversableAgent(
                name="Publisher",
                system_message="You publish the final report in the requested formats.",
                llm_config=llm_config,
            ),
            # Human proxy only prompts for input when the task enables feedback.
            "human": UserProxyAgent(
                name="Human",
                system_message="You provide optional feedback on the research plan.",
                human_input_mode="ALWAYS"
                if self.task.get("include_human_feedback")
                else "NEVER",
                code_execution_config=False,
            ),
        }
        group_chat = GroupChat(
            agents=list(agents.values()),
            messages=[],
            max_round=1,
            speaker_selection_method="manual",
        )
        manager = GroupChatManager(groupchat=group_chat, llm_config=llm_config)
        return agents, manager

    def _chat(self, agent_key: str, message: str) -> None:
        """Send *message* from the named AG2 agent to the manager, no reply."""
        agent = self.ag2_agents.get(agent_key)
        if not agent:
            # Unknown key: silently ignore so logging never breaks the run.
            return
        agent.send(message, self.manager, request_reply=False)

    async def _log(self, agent_key: str, message: str, stream_tag: str = "logs") -> None:
        """Record *message* in the group chat, then stream it or print it."""
        self._chat(agent_key, message)
        if self.websocket and self.stream_output:
            await self.stream_output(stream_tag, agent_key, message, self.websocket)
        else:
            print_agent_output(message, agent="MASTER")

    def _initialize_agents(self) -> Dict[str, Any]:
        """Instantiate the worker agents that perform the actual pipeline steps."""
        return {
            "writer": WriterAgent(self.websocket, self.stream_output, self.headers),
            "editor": EditorAgent(self.websocket, self.stream_output, self.tone, self.headers),
            "research": ResearchAgent(self.websocket, self.stream_output, self.tone, self.headers),
            "publisher": PublisherAgent(self.output_dir, self.websocket, self.stream_output, self.headers),
            "reviewer": ReviewerAgent(self.websocket, self.stream_output, self.headers),
            "reviser": ReviserAgent(self.websocket, self.stream_output, self.headers),
            "human": HumanAgent(self.websocket, self.stream_output, self.headers),
        }

    async def _run_section(self, agents: Dict[str, Any], topic: str, title: str) -> Any:
        """Research one section topic, then loop review -> revise.

        Returns the final draft for the section, or the last revision after
        at most task['max_revisions'] (default 3) rounds.
        """
        await self._log("researcher", f"Running in depth research on topic: {topic}")
        draft_result = await agents["research"].run_depth_research(
            {"task": self.task, "topic": topic, "title": title}
        )
        draft_state: Dict[str, Any] = {
            "task": self.task,
            "draft": draft_result.get("draft"),
            "revision_notes": None,
        }
        max_revisions = int(self.task.get("max_revisions", 3))
        for _ in range(max_revisions):
            await self._log("reviewer", "Reviewing draft...")
            review_result = await agents["reviewer"].run(draft_state)
            review_notes = review_result.get("review")
            if review_notes is None:
                # No reviewer notes: the draft is accepted as-is.
                break
            await self._log("reviser", "Revising draft based on feedback...")
            revision = await agents["reviser"].run(
                {**draft_state, "review": review_notes}
            )
            draft_state.update(revision)
        return draft_state.get("draft")

    async def _run_parallel_research(
        self, agents: Dict[str, Any], sections: List[str], title: str
    ) -> List[Any]:
        """Run _run_section for every planned section concurrently."""
        tasks = [self._run_section(agents, topic, title) for topic in sections]
        return await asyncio.gather(*tasks)

    async def run_research_task(self, task_id: Optional[str] = None) -> Dict[str, Any]:
        """End-to-end pipeline: research -> plan -> feedback -> write -> publish.

        Args:
            task_id: Accepted for API compatibility; not used here.

        Returns:
            Merged state dict including the written report and publish output.
        """
        agents = self._initialize_agents()
        await self._log(
            "chief_editor",
            f"Starting the research process for query '{self.task.get('query')}'...",
        )
        initial_state = await agents["research"].run_initial_research({"task": self.task})
        await self._log("researcher", "Initial research complete.")
        plan_state = {**initial_state}
        plan = await agents["editor"].plan_research(plan_state)
        await self._log("editor", f"Planned sections: {plan.get('sections')}")
        human_feedback = await agents["human"].review_plan({**plan_state, **plan})
        if human_feedback.get("human_feedback"):
            # Feedback provided: log it and re-plan once with it included.
            await self._log("human", f"Human feedback: {human_feedback.get('human_feedback')}")
            plan_state = {**plan_state, **plan, **human_feedback}
            plan = await agents["editor"].plan_research(plan_state)
            await self._log("editor", f"Revised sections: {plan.get('sections')}")
        sections = plan.get("sections") or []
        title = plan.get("title")
        # NOTE(review): utcnow() is deprecated in Python 3.12; consider
        # datetime.datetime.now(datetime.timezone.utc) here.
        date = plan.get("date") or datetime.datetime.utcnow().strftime("%d/%m/%Y")
        research_data = await self._run_parallel_research(agents, sections, title)
        research_state = {
            "task": self.task,
            "title": title,
            "date": date,
            "sections": sections,
            "research_data": research_data,
        }
        await self._log("writer", "Writing final report...")
        writing = await agents["writer"].run(research_state)
        full_state = {**research_state, **writing}
        await self._log("publisher", "Publishing final report...")
        published = await agents["publisher"].run(full_state)
        await self._log("chief_editor", "Research task completed.")
        return {**full_state, **published}
| {
"repo_id": "assafelovic/gpt-researcher",
"file_path": "multi_agents_ag2/agents/orchestrator.py",
"license": "Apache License 2.0",
"lines": 184,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
assafelovic/gpt-researcher:multi_agents_ag2/main.py | import asyncio
from dotenv import load_dotenv
import os
import sys
import uuid
import json
from multi_agents_ag2.agents import ChiefEditorAgent
from gpt_researcher.utils.enum import Tone
load_dotenv()
def open_task() -> dict:
    """Load task.json from this directory and apply the STRATEGIC_LLM override.

    Raises:
        Exception: if task.json parses to an empty/falsy value.
    """
    task_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "task.json")
    with open(task_path, "r") as task_file:
        task = json.load(task_file)
    if not task:
        raise Exception(
            "No task found. Please ensure a valid task.json file is present in the multi_agents_ag2 directory."
        )
    strategic_llm = os.environ.get("STRATEGIC_LLM")
    if strategic_llm:
        # STRATEGIC_LLM may be "provider:model" or a bare model name.
        _, sep, model_name = strategic_llm.partition(":")
        task["model"] = model_name if sep else strategic_llm
    return task
async def run_research_task(query, websocket=None, stream_output=None, tone=Tone.Objective, headers=None):
    """Run the AG2 multi-agent research flow for *query* and return the report."""
    task = open_task()
    task["query"] = query
    editor = ChiefEditorAgent(task, websocket, stream_output, tone, headers)
    report = await editor.run_research_task()
    # Mirror the final report over the websocket when streaming is wired up.
    if websocket and stream_output:
        await stream_output("logs", "research_report", report, websocket)
    return report
async def main():
    """CLI entry: run the research task defined entirely by task.json."""
    chief = ChiefEditorAgent(open_task())
    return await chief.run_research_task(task_id=uuid.uuid4())
# Script entry point: run the default task from task.json.
if __name__ == "__main__":
    asyncio.run(main())
| {
"repo_id": "assafelovic/gpt-researcher",
"file_path": "multi_agents_ag2/main.py",
"license": "Apache License 2.0",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
assafelovic/gpt-researcher:backend/server/report_store.py | import asyncio
import json
from pathlib import Path
from typing import Any, Dict, List
class ReportStore:
def __init__(self, path: Path):
self._path = path
self._lock = asyncio.Lock()
async def _ensure_parent_dir(self) -> None:
self._path.parent.mkdir(parents=True, exist_ok=True)
async def _read_all_unlocked(self) -> Dict[str, Dict[str, Any]]:
if not self._path.exists():
return {}
try:
data = json.loads(self._path.read_text(encoding="utf-8"))
if isinstance(data, dict):
return data # type: ignore[return-value]
except Exception:
return {}
return {}
async def _write_all_unlocked(self, data: Dict[str, Dict[str, Any]]) -> None:
await self._ensure_parent_dir()
tmp_path = self._path.with_suffix(self._path.suffix + ".tmp")
tmp_path.write_text(json.dumps(data, ensure_ascii=False), encoding="utf-8")
tmp_path.replace(self._path)
async def list_reports(self, report_ids: List[str] | None = None) -> List[Dict[str, Any]]:
async with self._lock:
data = await self._read_all_unlocked()
if report_ids is None:
return list(data.values())
return [data[report_id] for report_id in report_ids if report_id in data]
async def get_report(self, report_id: str) -> Dict[str, Any] | None:
async with self._lock:
data = await self._read_all_unlocked()
return data.get(report_id)
async def upsert_report(self, report_id: str, report: Dict[str, Any]) -> None:
async with self._lock:
data = await self._read_all_unlocked()
data[report_id] = report
await self._write_all_unlocked(data)
async def delete_report(self, report_id: str) -> bool:
async with self._lock:
data = await self._read_all_unlocked()
existed = report_id in data
if existed:
del data[report_id]
await self._write_all_unlocked(data)
return existed
| {
"repo_id": "assafelovic/gpt-researcher",
"file_path": "backend/server/report_store.py",
"license": "Apache License 2.0",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
assafelovic/gpt-researcher:gpt_researcher/llm_provider/image/image_generator.py | """Image generation provider for GPT Researcher.
This module provides image generation capabilities using Google's Gemini/Imagen
models via the google.genai SDK.
Supported models:
- Gemini image models (free tier): models/gemini-2.5-flash-image
- Imagen models (requires billing): imagen-4.0-generate-001
"""
import asyncio
import base64
import hashlib
import os
import logging
from pathlib import Path
from typing import Any, Dict, List, Optional
logger = logging.getLogger(__name__)
class ImageGeneratorProvider:
    """Provider for generating images using Google's Gemini/Imagen models.

    Attributes:
        model_name: The model to use for image generation.
        api_key: Google API key for authentication.
        output_dir: Directory to save generated images.
    """

    # Gemini models use generate_content with inline_data response
    GEMINI_IMAGE_MODELS = [
        "models/gemini-2.5-flash-image",
        "gemini-2.5-flash-image",
        "gemini-2.0-flash-exp-image-generation",
        "gemini-3-pro-image-preview",
    ]
    # Imagen models use generate_images (requires billing)
    IMAGEN_MODELS = [
        "imagen-4.0-generate-001",
        "imagen-4.0-fast-generate-001",
        "imagen-4.0-ultra-generate-001",
    ]
    DEFAULT_MODEL = "models/gemini-2.5-flash-image"

    def __init__(
        self,
        model_name: Optional[str] = None,
        api_key: Optional[str] = None,
        output_dir: str = "outputs",
    ):
        """Initialize the ImageGeneratorProvider.

        Args:
            model_name: The model to use. Defaults to models/gemini-2.5-flash-image.
            api_key: Google API key. If not provided, reads from GOOGLE_API_KEY env var.
            output_dir: Base directory for outputs (images will be in output_dir/images/).
        """
        self.model_name = model_name or self.DEFAULT_MODEL
        self.api_key = api_key or os.getenv("GOOGLE_API_KEY") or os.getenv("GEMINI_API_KEY")
        self.output_dir = Path(output_dir)
        self._client = None
        # Determine model type: Imagen models route through generate_images.
        self._is_imagen = 'imagen' in self.model_name.lower()
        if not self.api_key:
            logger.warning(
                "No Google API key found. Set GOOGLE_API_KEY or GEMINI_API_KEY "
                "environment variable to enable image generation."
            )

    def _ensure_client(self):
        """Ensure the Google GenAI client is initialized.

        Raises:
            ImportError: if the google-genai package is not installed.
        """
        if self._client is None:
            try:
                from google import genai
                self._client = genai.Client(api_key=self.api_key)
                logger.info(f"Initialized image generation with model: {self.model_name}")
            except ImportError:
                raise ImportError(
                    "google-genai package is required for image generation. "
                    "Install with: pip install google-genai"
                )
            except Exception as e:
                logger.error(f"Failed to initialize image generation client: {e}")
                raise

    def _ensure_output_dir(self, research_id: str = "") -> Path:
        """Ensure the output directory exists and return the path."""
        # Use same structure as PDF/DOCX: outputs/images/{research_id}/
        if research_id:
            output_path = self.output_dir / "images" / research_id
        else:
            output_path = self.output_dir / "images"
        output_path.mkdir(parents=True, exist_ok=True)
        return output_path

    def _generate_image_filename(self, prompt: str, index: int = 0) -> str:
        """Generate a unique filename for the image based on prompt hash."""
        prompt_hash = hashlib.md5(prompt.encode()).hexdigest()[:8]
        return f"img_{prompt_hash}_{index}.png"

    def _crop_to_landscape(self, image_bytes: bytes, target_ratio: float = 16/9) -> bytes:
        """Crop a square image to landscape format (16:9 by default).

        This ensures images fit well in article/report layouts.

        Args:
            image_bytes: Raw image bytes.
            target_ratio: Target width/height ratio (default 16:9 ≈ 1.78).

        Returns:
            Cropped image bytes in PNG format, or the original bytes when
            cropping is not possible (no PIL, decode failure, already wide).
        """
        try:
            from PIL import Image
            import io
            # Open the image
            img = Image.open(io.BytesIO(image_bytes))
            width, height = img.size
            # If already landscape or wider, return as-is
            if width / height >= target_ratio:
                return image_bytes
            # Calculate new dimensions for landscape crop
            # Keep full width, reduce height
            new_height = int(width / target_ratio)
            # Center crop vertically
            top = (height - new_height) // 2
            bottom = top + new_height
            # Crop the image
            cropped = img.crop((0, top, width, bottom))
            # Save to bytes
            output = io.BytesIO()
            cropped.save(output, format='PNG', optimize=True)
            output.seek(0)
            logger.info(f"Cropped image from {width}x{height} to {width}x{new_height} (landscape)")
            return output.getvalue()
        except ImportError:
            logger.warning("PIL not available for image cropping, returning original")
            return image_bytes
        except Exception as e:
            logger.warning(f"Failed to crop image to landscape: {e}")
            return image_bytes

    def _build_enhanced_prompt(self, prompt: str, context: str = "", style: str = "dark") -> str:
        """Build an enhanced prompt with explicit styling instructions.

        Args:
            prompt: Base image prompt.
            context: Additional context from research.
            style: Image style - "dark" (matches app theme), "light", or "auto".

        Returns:
            Enhanced prompt string with styling instructions.
        """
        # Style-specific color palettes
        if style == "dark":
            # Dark mode matching the GPT Researcher app theme
            style_instructions = """
STYLE REQUIREMENTS - DARK MODE THEME:
- Dark background (#0d1117 or similar deep charcoal/navy)
- Primary accent color: Teal/Cyan (#14b8a6, #0d9488)
- Secondary colors: Slate grays (#374151, #4b5563), subtle purple accents
- Glowing, neon-like effects for highlights and important elements
- Modern, tech-forward, futuristic aesthetic
- Clean lines with subtle gradients
- High contrast elements that pop against dark background
- Sleek, minimalist design with visual depth
- Icons and diagrams with luminous teal outlines
- Professional infographic style suitable for tech/research context"""
        elif style == "light":
            style_instructions = """
STYLE REQUIREMENTS - LIGHT MODE:
- Clean white or light gray background
- Primary colors: Deep blue (#1e40af), teal (#0d9488)
- Professional, corporate aesthetic
- Subtle shadows for depth
- High readability with dark text elements
- Modern flat design with occasional gradients"""
        else:
            style_instructions = """
STYLE REQUIREMENTS - PROFESSIONAL:
- Sophisticated color palette (teals, blues, grays)
- Clean, modern design
- High contrast for readability
- Professional infographic style"""
        styled_prompt = f"""Create a professional, high-quality illustration for a research report.
SUBJECT: {prompt}
{style_instructions}
TECHNICAL REQUIREMENTS:
- No text, labels, or watermarks in the image
- Clear visual hierarchy
- Well-balanced composition
- Suitable for both digital viewing and printing
- Vector-style or clean photorealistic rendering
- Resolution and detail appropriate for report embedding
AVOID:
- Cartoonish or childish styles
- Cluttered or busy designs
- Bright white backgrounds (for dark mode)
- Low quality or pixelated elements
- Generic stock photo aesthetics"""
        if context:
            styled_prompt += f"\n\nRESEARCH CONTEXT: {context[:300]}"
        return styled_prompt

    async def generate_image(
        self,
        prompt: str,
        context: str = "",
        research_id: str = "",
        aspect_ratio: str = "1:1",
        num_images: int = 1,
        style: str = "dark",
    ) -> List[Dict[str, Any]]:
        """Generate images based on a prompt and optional context.

        Args:
            prompt: The image generation prompt.
            context: Additional context to improve image relevance.
            research_id: Research ID for organizing output.
            aspect_ratio: Aspect ratio for the image (Imagen only).
            num_images: Number of images to generate.
            style: Image style - "dark", "light", or "auto".

        Returns:
            List of dictionaries containing image info with absolute paths.
        """
        if not self.api_key:
            logger.warning("No API key configured for image generation")
            return []
        self._ensure_client()
        output_path = self._ensure_output_dir(research_id)
        # Build enhanced prompt with styling
        logger.info(f"Building image prompt with style: {style}")
        full_prompt = self._build_enhanced_prompt(prompt, context, style)
        logger.debug(f"Full prompt (first 500 chars): {full_prompt[:500]}")
        try:
            if self._is_imagen:
                return await self._generate_with_imagen(full_prompt, output_path, num_images, aspect_ratio, research_id)
            else:
                return await self._generate_with_gemini(full_prompt, output_path, num_images, research_id, prompt)
        except Exception as e:
            logger.error(f"Image generation failed: {e}", exc_info=True)
            return []

    async def _generate_with_gemini(
        self,
        full_prompt: str,
        output_path: Path,
        num_images: int,
        research_id: str,
        original_prompt: str,
    ) -> List[Dict[str, Any]]:
        """Generate images using Gemini models via generate_content."""
        generated_images = []
        for i in range(num_images):
            try:
                # Gemini image models use generate_content
                response = await asyncio.to_thread(
                    self._client.models.generate_content,
                    model=self.model_name,
                    contents=full_prompt,
                )
                # Debug: Log response structure
                if response.candidates:
                    candidate = response.candidates[0]
                    if candidate.content and candidate.content.parts:
                        logger.debug(f"Response has {len(candidate.content.parts)} parts")
                        for idx, part in enumerate(candidate.content.parts):
                            has_inline = hasattr(part, 'inline_data') and part.inline_data
                            has_text = hasattr(part, 'text') and part.text
                            logger.debug(f"Part {idx}: inline_data={has_inline}, text={has_text}")
                # Extract image from response parts
                if response.candidates and response.candidates[0].content.parts:
                    for part in response.candidates[0].content.parts:
                        if hasattr(part, 'inline_data') and part.inline_data:
                            # Found image data
                            image_data = part.inline_data.data
                            filename = self._generate_image_filename(original_prompt, i)
                            filepath = output_path / filename
                            # Write image data (may be base64 encoded)
                            if isinstance(image_data, str):
                                image_bytes = base64.b64decode(image_data)
                            else:
                                image_bytes = image_data
                            # Note: Keeping original square format from Gemini
                            # To enable landscape cropping, uncomment:
                            # image_bytes = self._crop_to_landscape(image_bytes)
                            with open(filepath, 'wb') as f:
                                f.write(image_bytes)
                            # Use both absolute path (for PDF) and web URL (for frontend).
                            # Fixed: the web URL now embeds the saved filename so the
                            # frontend can actually fetch the generated image.
                            absolute_path = filepath.resolve()
                            web_url = (
                                f"/outputs/images/{research_id}/{filename}"
                                if research_id
                                else f"/outputs/images/{filename}"
                            )
                            generated_images.append({
                                "path": str(absolute_path),  # Absolute path for PDF generation
                                "url": web_url,  # Web URL for frontend display
                                "absolute_url": str(absolute_path),  # For PDF compatibility
                                "prompt": original_prompt,
                                "alt_text": self._generate_alt_text(original_prompt),
                            })
                            logger.info(f"Generated image saved to: {filepath}")
                            break  # Only take first image per iteration
                    else:
                        # No inline_data found - check if there's text (model refused)
                        for part in response.candidates[0].content.parts:
                            if hasattr(part, 'text') and part.text:
                                logger.warning(f"Model returned text instead of image: {part.text[:200]}")
                                break
            except Exception as e:
                logger.error(f"Error generating image {i}: {e}", exc_info=True)
                continue
        return generated_images

    async def _generate_with_imagen(
        self,
        full_prompt: str,
        output_path: Path,
        num_images: int,
        aspect_ratio: str,
        research_id: str,
    ) -> List[Dict[str, Any]]:
        """Generate images using Imagen models via generate_images."""
        from google.genai import types
        generated_images = []
        try:
            response = await asyncio.to_thread(
                self._client.models.generate_images,
                model=self.model_name,
                prompt=full_prompt,
                config=types.GenerateImagesConfig(
                    number_of_images=num_images,
                    aspect_ratio=aspect_ratio,
                ),
            )
            if response and response.generated_images:
                for i, gen_image in enumerate(response.generated_images):
                    filename = self._generate_image_filename(full_prompt, i)
                    filepath = output_path / filename
                    # Extract image bytes
                    if hasattr(gen_image, 'image') and hasattr(gen_image.image, 'image_bytes'):
                        image_bytes = gen_image.image.image_bytes
                    elif hasattr(gen_image, 'image_bytes'):
                        image_bytes = gen_image.image_bytes
                    else:
                        logger.warning("Could not extract image bytes")
                        continue
                    with open(filepath, 'wb') as f:
                        f.write(image_bytes)
                    # Use both absolute path (for PDF) and web URL (for frontend).
                    # Fixed: the web URL now embeds the saved filename.
                    absolute_path = filepath.resolve()
                    web_url = (
                        f"/outputs/images/{research_id}/{filename}"
                        if research_id
                        else f"/outputs/images/{filename}"
                    )
                    generated_images.append({
                        "path": str(absolute_path),
                        "url": web_url,
                        "absolute_url": str(absolute_path),
                        "prompt": full_prompt,
                        "alt_text": self._generate_alt_text(full_prompt),
                    })
                    logger.info(f"Generated image saved to: {filepath}")
        except Exception as e:
            logger.error(f"Imagen generation failed: {e}", exc_info=True)
        return generated_images

    def _generate_alt_text(self, prompt: str) -> str:
        """Generate accessible alt text from the prompt."""
        # Clean and truncate for alt text
        clean_prompt = prompt.replace('\n', ' ').strip()
        # Extract just the core subject
        if len(clean_prompt) > 120:
            clean_prompt = clean_prompt[:117] + "..."
        return f"Illustration: {clean_prompt}"

    def is_available(self) -> bool:
        """Check if image generation is available."""
        if not self.api_key:
            return False
        try:
            self._ensure_client()
            return True
        except Exception as e:
            logger.warning(f"Image generation not available: {e}")
            return False

    @classmethod
    def from_config(cls, config) -> Optional["ImageGeneratorProvider"]:
        """Create an ImageGeneratorProvider from a Config object."""
        model = getattr(config, 'image_generation_model', None)
        enabled = getattr(config, 'image_generation_enabled', False)
        if not enabled:
            return None
        return cls(model_name=model or cls.DEFAULT_MODEL)
| {
"repo_id": "assafelovic/gpt-researcher",
"file_path": "gpt_researcher/llm_provider/image/image_generator.py",
"license": "Apache License 2.0",
"lines": 366,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
assafelovic/gpt-researcher:gpt_researcher/skills/image_generator.py | """Image generator skill for GPT Researcher.
This module provides the ImageGenerator class that handles generating
contextually relevant images for research reports using AI image generation.
"""
import asyncio
import json
import logging
import re
from typing import Any, Dict, List, Optional, Tuple
from ..actions.utils import stream_output
from ..llm_provider.image import ImageGeneratorProvider
from ..utils.llm import create_chat_completion
logger = logging.getLogger(__name__)
class ImageGenerator:
"""Generates contextually relevant images for research reports.
This class analyzes report content to identify sections that would
benefit from visual illustrations and generates images using AI
image generation models.
Attributes:
researcher: The parent GPTResearcher instance.
image_provider: The image generation provider.
max_images: Maximum number of images to generate per report.
"""
def __init__(self, researcher):
    """Initialize the ImageGenerator.

    Args:
        researcher: The GPTResearcher instance that owns this generator.
    """
    self.researcher = researcher
    self.cfg = researcher.cfg
    # Provider is created lazily by _init_provider below; stays None when disabled.
    self.image_provider = None
    # Cap on images per report; read from cfg.image_generation_max_images.
    self.max_images = getattr(self.cfg, 'image_generation_max_images', 3)
    self.generated_images: List[Dict[str, Any]] = []
    # Initialize image provider if configured
    self._init_provider()
def _init_provider(self):
    """Initialize the image generation provider from config."""
    model = getattr(self.cfg, 'image_generation_model', None)
    enabled = getattr(self.cfg, 'image_generation_enabled', False)
    # Both a model name and the enabled flag are required to activate the provider.
    if model and enabled:
        try:
            self.image_provider = ImageGeneratorProvider(model_name=model)
            if self.image_provider.is_available():
                logger.info(f"Image generation enabled with model: {model}")
            else:
                # Provider constructed but unusable (e.g. no API key): disable it.
                logger.warning("Image generation provider not available (missing API key?)")
                self.image_provider = None
        except Exception as e:
            logger.error(f"Failed to initialize image provider: {e}")
            self.image_provider = None
def is_enabled(self) -> bool:
    """Check if image generation is enabled and available.

    Returns:
        True if image generation can be used.
    """
    # is_available() re-checks the API key and client setup at call time.
    return self.image_provider is not None and self.image_provider.is_available()
async def plan_and_generate_images(
    self,
    context: str,
    query: str,
    research_id: str = "",
) -> List[Dict[str, Any]]:
    """Plan and pre-generate images based on research context.

    This method analyzes the research context to identify 2-3 concepts
    that would benefit from visual illustrations, then generates them
    in parallel BEFORE report writing begins.

    Args:
        context: The accumulated research context.
        query: The main research query.
        research_id: Optional research ID for file organization.

    Returns:
        List of generated image info dictionaries with URLs ready to embed.
    """
    if not self.is_enabled():
        logger.info("Image generation is not enabled, skipping pre-generation")
        return []
    if self.researcher.verbose:
        await stream_output(
            "logs",
            "image_planning",
            "🎨 Analyzing research context for visualization opportunities...",
            self.researcher.websocket,
        )
    # Step 1: Use LLM to identify best visualization opportunities
    image_concepts = await self._plan_image_concepts(context, query)
    if not image_concepts:
        logger.info("No suitable visualization opportunities identified")
        return []
    if self.researcher.verbose:
        await stream_output(
            "logs",
            "image_concepts_identified",
            f"🖼️ Identified {len(image_concepts)} visualization concepts, generating images...",
            self.researcher.websocket,
        )
    # Step 2: Generate all images in parallel
    image_style = getattr(self.cfg, 'image_generation_style', 'dark')
    async def generate_single_image(concept: Dict[str, Any], index: int) -> Optional[Dict[str, Any]]:
        """Generate a single image from a concept."""
        try:
            if self.researcher.verbose:
                await stream_output(
                    "logs",
                    "image_generating",
                    f"🖼️ Generating image {index + 1}/{len(image_concepts)}: {concept['title'][:50]}...",
                    self.researcher.websocket,
                )
            images = await self.image_provider.generate_image(
                prompt=concept['prompt'],
                context=concept.get('context', ''),
                research_id=research_id,
                num_images=1,
                style=image_style,
            )
            if images:
                # Carry the planning metadata along with the generated image info.
                image_info = images[0]
                image_info['title'] = concept['title']
                image_info['section_hint'] = concept.get('section_hint', '')
                return image_info
        except Exception as e:
            logger.error(f"Failed to generate image for '{concept['title']}': {e}")
        # Falls through to None on failure or when no image was produced.
        return None
    # Generate all images in parallel
    tasks = [generate_single_image(concept, i) for i, concept in enumerate(image_concepts)]
    results = await asyncio.gather(*tasks)
    # Filter out failed generations
    generated_images = [img for img in results if img is not None]
    self.generated_images = generated_images
    if self.researcher.verbose:
        if generated_images:
            await stream_output(
                "logs",
                "images_ready",
                f"✅ {len(generated_images)} images ready for report embedding",
                self.researcher.websocket,
            )
        else:
            await stream_output(
                "logs",
                "images_failed",
                "⚠️ No images could be generated",
                self.researcher.websocket,
            )
    return generated_images
    async def _plan_image_concepts(
        self,
        context: str,
        query: str,
    ) -> List[Dict[str, Any]]:
        """Use LLM to identify the best visualization opportunities from research context.
        Args:
            context: The research context to analyze.
            query: The main research query.
        Returns:
            List of image concept dictionaries with title, prompt, and section_hint.
            Returns [] when the LLM response cannot be parsed or any other error
            occurs (failures are logged, never raised).
        """
        # Truncate context if too long
        # Keeps the planning prompt within a safe token budget for the fast LLM.
        max_context_length = 6000
        truncated_context = context[:max_context_length] if len(context) > max_context_length else context
        planning_prompt = f"""Analyze this research context and identify 2-3 concepts that would significantly benefit from professional diagram/infographic illustrations.
RESEARCH QUERY: {query}
RESEARCH CONTEXT:
{truncated_context}
For each visualization opportunity, provide:
1. title: A short descriptive title (e.g., "System Architecture", "Comparison Chart")
2. prompt: A detailed image generation prompt describing exactly what to visualize, including layout and key elements (minimum 30 words)
3. section_hint: Which section of the report this image relates to
Focus on:
- Architecture/system diagrams
- Process flows and workflows
- Comparison charts
- Data visualizations
- Conceptual illustrations
IMPORTANT: Return ONLY a valid JSON array. No markdown, no explanation.
Example output:
[
{{
"title": "System Architecture Overview",
"prompt": "A layered architecture diagram showing the frontend application on top, connecting to an API gateway in the middle, which routes to microservices at the bottom. Use clean boxes with connecting arrows, modern tech aesthetic.",
"section_hint": "Architecture"
}}
]
Return 2-3 visualization concepts as a JSON array:"""
        try:
            response = await create_chat_completion(
                model=self.cfg.fast_llm_model,
                messages=[
                    {"role": "system", "content": "You are a visualization expert. Return only valid JSON arrays."},
                    {"role": "user", "content": planning_prompt}
                ],
                temperature=0.4,
                llm_provider=self.cfg.fast_llm_provider,
                max_tokens=1000,
                llm_kwargs=self.cfg.llm_kwargs,
                cost_callback=self.researcher.add_costs,
            )
            # Parse JSON response
            response = response.strip()
            # Remove markdown code blocks if present
            # Some models wrap JSON in ``` fences despite the instructions.
            if response.startswith("```"):
                response = re.sub(r'^```(?:json)?\n?', '', response)
                response = re.sub(r'\n?```$', '', response)
            concepts = json.loads(response)
            # Validate and limit to max_images
            # Keep only entries carrying both required keys ('title' and 'prompt').
            valid_concepts = []
            for concept in concepts[:self.max_images]:
                if isinstance(concept, dict) and 'title' in concept and 'prompt' in concept:
                    valid_concepts.append(concept)
            logger.info(f"Planned {len(valid_concepts)} image concepts")
            return valid_concepts
        except json.JSONDecodeError as e:
            logger.error(f"Failed to parse image planning response: {e}")
            return []
        except Exception as e:
            logger.error(f"Error during image planning: {e}")
            return []
    async def analyze_report_for_images(
        self,
        report: str,
        query: str,
    ) -> List[Dict[str, Any]]:
        """Analyze a report to identify sections that would benefit from images.
        Uses LLM to identify 2-3 key concepts or sections in the report
        that would be enhanced by visual illustrations.
        Args:
            report: The markdown report text.
            query: The original research query.
        Returns:
            List of dictionaries with section info and suggested image prompts,
            capped at self.max_images entries. Returns [] when generation is
            disabled, no sections are found, or the LLM call fails (logged).
        """
        if not self.is_enabled():
            return []
        # Extract sections from the report
        sections = self._extract_sections(report)
        if not sections:
            logger.warning("No sections found in report for image analysis")
            return []
        # Use LLM to identify best sections for images
        try:
            analysis_prompt = self._build_analysis_prompt(query, sections)
            response = await create_chat_completion(
                model=self.cfg.fast_llm_model,
                messages=[
                    {"role": "system", "content": "You are an expert at identifying content that would benefit from visual illustrations."},
                    {"role": "user", "content": analysis_prompt},
                ],
                temperature=0.3,
                llm_provider=self.cfg.fast_llm_provider,
                stream=False,
                websocket=None,
                max_tokens=1500,
                llm_kwargs=self.cfg.llm_kwargs,
            )
            # Parse the response
            image_suggestions = self._parse_analysis_response(response, sections)
            return image_suggestions[:self.max_images]
        except Exception as e:
            logger.error(f"Error analyzing report for images: {e}")
            return []
def _extract_sections(self, report: str) -> List[Dict[str, Any]]:
"""Extract sections from a markdown report.
Args:
report: The markdown report text.
Returns:
List of section dictionaries with header, content, and position.
"""
sections = []
lines = report.split('\n')
current_section = None
current_content = []
section_start = 0
for i, line in enumerate(lines):
# Check for headers (## or ###)
header_match = re.match(r'^(#{2,3})\s+(.+)$', line)
if header_match:
# Save previous section
if current_section:
sections.append({
"header": current_section,
"content": '\n'.join(current_content).strip(),
"start_line": section_start,
"end_line": i - 1,
})
# Start new section
current_section = header_match.group(2)
current_content = []
section_start = i
elif current_section:
current_content.append(line)
# Don't forget the last section
if current_section:
sections.append({
"header": current_section,
"content": '\n'.join(current_content).strip(),
"start_line": section_start,
"end_line": len(lines) - 1,
})
return sections
    def _build_analysis_prompt(
        self,
        query: str,
        sections: List[Dict[str, Any]],
    ) -> str:
        """Build prompt for LLM to analyze which sections need images.
        Args:
            query: The research query.
            sections: List of extracted sections.
        Returns:
            The analysis prompt string (asks for JSON output).
        """
        # Preview each section with at most 500 characters of content so the
        # prompt stays compact even for long reports.
        sections_text = "\n\n".join([
            f"### Section {i+1}: {s['header']}\n{s['content'][:500]}..."
            for i, s in enumerate(sections)
        ])
        return f"""Analyze the following research report sections and identify which {self.max_images} sections would benefit MOST from a visual illustration or diagram.
RESEARCH TOPIC: {query}
REPORT SECTIONS:
{sections_text}
For each recommended section, provide:
1. The section number (1-indexed)
2. A specific, detailed image prompt that would create an informative illustration
3. A brief explanation of why this section benefits from visualization
IMPORTANT:
- Choose sections where visual representation would genuinely aid understanding
- Focus on concepts, processes, comparisons, or data that are inherently visual
- Avoid sections that are purely textual analysis or conclusions
- The image prompt should be specific enough to generate a relevant, professional illustration
- Do NOT suggest images for introduction or conclusion sections
Respond in JSON format:
{{
"suggestions": [
{{
"section_number": 1,
"section_header": "Section Title",
"image_prompt": "Detailed prompt for generating an informative illustration...",
"reason": "Why this section benefits from visualization"
}}
]
}}
Return ONLY the JSON, no additional text."""
def _parse_analysis_response(
self,
response: str,
sections: List[Dict[str, Any]],
) -> List[Dict[str, Any]]:
"""Parse the LLM's analysis response.
Args:
response: The LLM response text.
sections: The original sections list.
Returns:
List of image suggestion dictionaries.
"""
try:
# Try to extract JSON from the response
json_match = re.search(r'\{[\s\S]*\}', response)
if not json_match:
logger.warning("No JSON found in analysis response")
return []
data = json.loads(json_match.group())
suggestions = data.get("suggestions", [])
# Enrich with section data
enriched = []
for s in suggestions:
section_num = s.get("section_number", 0) - 1 # Convert to 0-indexed
if 0 <= section_num < len(sections):
section = sections[section_num]
enriched.append({
"section_header": section["header"],
"section_content": section["content"][:1000],
"image_prompt": s.get("image_prompt", ""),
"reason": s.get("reason", ""),
"insert_after_line": section["start_line"],
})
return enriched
except json.JSONDecodeError as e:
logger.error(f"Failed to parse analysis JSON: {e}")
return []
    async def generate_images_for_report(
        self,
        report: str,
        query: str,
        research_id: str = "",
    ) -> Tuple[str, List[Dict[str, Any]]]:
        """Generate images and embed them in the report.
        This is the main method that orchestrates the full image generation
        workflow for a research report.
        Args:
            report: The markdown report text.
            query: The original research query.
            research_id: Optional research ID for file organization.
        Returns:
            Tuple of (modified report with embedded images, list of generated images).
            The report is returned unchanged when generation is disabled or no
            suitable sections are found.
        """
        if not self.is_enabled():
            logger.info("Image generation is not enabled, skipping")
            return report, []
        # Notify about image generation starting
        if self.researcher.verbose:
            await stream_output(
                "logs",
                "image_generation_start",
                "🎨 Analyzing report for image generation opportunities...",
                self.researcher.websocket,
            )
        # Analyze report for image opportunities
        suggestions = await self.analyze_report_for_images(report, query)
        if not suggestions:
            logger.info("No sections identified for image generation")
            if self.researcher.verbose:
                await stream_output(
                    "logs",
                    "image_generation_skip",
                    "📝 No sections identified that would benefit from images",
                    self.researcher.websocket,
                )
            return report, []
        if self.researcher.verbose:
            await stream_output(
                "logs",
                "image_generation_analyzing",
                f"🔍 Found {len(suggestions)} sections that would benefit from images",
                self.researcher.websocket,
            )
        # Generate images for each suggestion
        # Note: generation is sequential; a failure for one suggestion is
        # logged and does not abort the remaining ones.
        generated_images = []
        for i, suggestion in enumerate(suggestions):
            if self.researcher.verbose:
                await stream_output(
                    "logs",
                    "image_generating",
                    f"🖼️ Generating image {i+1}/{len(suggestions)}: {suggestion['section_header'][:50]}...",
                    self.researcher.websocket,
                )
            try:
                images = await self.image_provider.generate_image(
                    prompt=suggestion["image_prompt"],
                    context=suggestion["section_content"],
                    research_id=research_id,
                    num_images=1,
                )
                if images:
                    image_info = images[0]
                    # Tag the image with its target section so embedding can match it.
                    image_info["section_header"] = suggestion["section_header"]
                    generated_images.append(image_info)
                    if self.researcher.verbose:
                        await stream_output(
                            "logs",
                            "image_generated",
                            f"✅ Image generated for: {suggestion['section_header'][:50]}",
                            self.researcher.websocket,
                        )
            except Exception as e:
                logger.error(f"Failed to generate image for section '{suggestion['section_header']}': {e}")
                if self.researcher.verbose:
                    await stream_output(
                        "logs",
                        "image_generation_error",
                        f"⚠️ Failed to generate image: {str(e)[:100]}",
                        self.researcher.websocket,
                    )
        # Embed images in the report
        if generated_images:
            report = self._embed_images_in_report(report, generated_images, suggestions)
            self.generated_images = generated_images
            if self.researcher.verbose:
                await stream_output(
                    "logs",
                    "image_generation_complete",
                    f"🎉 Successfully generated and embedded {len(generated_images)} images",
                    self.researcher.websocket,
                )
                # Send generated images through WebSocket
                await stream_output(
                    "generated_images",
                    "inline_images",
                    json.dumps([{"url": img["url"], "alt": img["alt_text"]} for img in generated_images]),
                    self.researcher.websocket,
                    True,
                    generated_images,
                )
        return report, generated_images
def _embed_images_in_report(
self,
report: str,
images: List[Dict[str, Any]],
suggestions: List[Dict[str, Any]],
) -> str:
"""Embed generated images into the report markdown.
Args:
report: The original report markdown.
images: List of generated image info.
suggestions: Original suggestions with section info.
Returns:
Modified report with embedded images.
"""
lines = report.split('\n')
# Create a mapping of section headers to images
section_to_image = {}
for img, sug in zip(images, suggestions):
section_to_image[sug["section_header"]] = img
# Find section headers and insert images after them
modified_lines = []
i = 0
while i < len(lines):
line = lines[i]
modified_lines.append(line)
# Check if this is a header that needs an image
header_match = re.match(r'^(#{2,3})\s+(.+)$', line)
if header_match:
header_text = header_match.group(2)
if header_text in section_to_image:
img = section_to_image[header_text]
# Insert image after the header with a blank line
image_markdown = f"\n![{img['alt_text']}]({img['url']})\n"
modified_lines.append(image_markdown)
i += 1
return '\n'.join(modified_lines)
def get_generated_images(self) -> List[Dict[str, Any]]:
"""Get the list of generated images.
Returns:
List of generated image info dictionaries.
"""
return self.generated_images
    async def process_image_placeholders(
        self,
        report: str,
        query: str,
        research_id: str = "",
    ) -> Tuple[str, List[Dict[str, Any]]]:
        """Process [IMAGE: description] placeholders in the report and generate images.
        This method finds all image placeholders in the report, generates images
        for each one, and replaces the placeholders with actual markdown images.
        Args:
            report: The markdown report text with [IMAGE: ...] placeholders.
            query: The original research query (used for context).
            research_id: Optional research ID for file organization.
        Returns:
            Tuple of (modified report with images embedded, list of generated images).
            Failed or surplus placeholders are removed from the report.
        """
        if not self.is_enabled():
            # If image generation is not enabled, just remove the placeholders
            report = re.sub(r'\[IMAGE:\s*[^\]]+\]', '', report)
            return report, []
        # Find all image placeholders
        placeholder_pattern = r'\[IMAGE:\s*([^\]]+)\]'
        placeholders = list(re.finditer(placeholder_pattern, report))
        if not placeholders:
            logger.info("No image placeholders found in report")
            return report, []
        # Limit to max_images
        placeholders = placeholders[:self.max_images]
        if self.researcher.verbose:
            await stream_output(
                "logs",
                "image_placeholders_found",
                f"🎨 Found {len(placeholders)} image placeholders to process",
                self.researcher.websocket,
            )
        generated_images = []
        replacements = []  # List of (original_text, replacement_text) tuples
        for i, match in enumerate(placeholders):
            image_description = match.group(1).strip()
            original_text = match.group(0)
            if self.researcher.verbose:
                await stream_output(
                    "logs",
                    "image_generating",
                    f"🖼️ Generating image {i+1}/{len(placeholders)}: {image_description[:60]}...",
                    self.researcher.websocket,
                )
            try:
                # Get image style from config (default to "dark" for app theme)
                image_style = getattr(self.cfg, 'image_generation_style', 'dark')
                logger.info(f"Using image style: {image_style}")
                # Generate the image with dark mode styling
                images = await self.image_provider.generate_image(
                    prompt=image_description,
                    context=query,  # Use query as context
                    research_id=research_id,
                    num_images=1,
                    style=image_style,
                )
                if images:
                    image_info = images[0]
                    image_info["description"] = image_description
                    generated_images.append(image_info)
                    # Create markdown replacement with absolute path for PDF compatibility
                    # Use the absolute URL for proper rendering in PDF/DOCX
                    markdown_image = f"\n\n![{image_info['alt_text']}]({image_info['url']})\n\n"
                    replacements.append((original_text, markdown_image))
                    if self.researcher.verbose:
                        await stream_output(
                            "logs",
                            "image_generated",
                            f"✅ Generated: {image_description[:40]}...",
                            self.researcher.websocket,
                        )
                else:
                    # Remove placeholder if image generation failed
                    replacements.append((original_text, ""))
                    logger.warning(f"No image generated for: {image_description[:50]}")
            except Exception as e:
                logger.error(f"Failed to generate image for '{image_description[:50]}': {e}")
                # Remove the failed placeholder
                replacements.append((original_text, ""))
                if self.researcher.verbose:
                    await stream_output(
                        "logs",
                        "image_generation_error",
                        f"⚠️ Failed to generate: {str(e)[:80]}",
                        self.researcher.websocket,
                    )
        # Apply all replacements
        # replace(..., 1) swaps only the first occurrence, so duplicate
        # placeholder texts each map to their own replacement in order.
        modified_report = report
        for original, replacement in replacements:
            modified_report = modified_report.replace(original, replacement, 1)
        self.generated_images = generated_images
        if generated_images and self.researcher.verbose:
            await stream_output(
                "logs",
                "image_generation_complete",
                f"🎉 Successfully generated {len(generated_images)} inline images",
                self.researcher.websocket,
            )
            # Send generated images through WebSocket
            await stream_output(
                "generated_images",
                "inline_images",
                json.dumps([{"url": img["url"], "alt": img["alt_text"]} for img in generated_images]),
                self.researcher.websocket,
                True,
                generated_images,
            )
        return modified_report, generated_images
| {
"repo_id": "assafelovic/gpt-researcher",
"file_path": "gpt_researcher/skills/image_generator.py",
"license": "Apache License 2.0",
"lines": 634,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
assafelovic/gpt-researcher:tests/test_quick_search.py | import unittest
from unittest.mock import MagicMock, patch, AsyncMock
import asyncio
from gpt_researcher.agent import GPTResearcher
import os
class TestQuickSearch(unittest.TestCase):
    """Tests for GPTResearcher.quick_search with search and LLM layers mocked."""

    @patch('gpt_researcher.agent.get_search_results', new_callable=AsyncMock)
    @patch('gpt_researcher.agent.create_chat_completion', new_callable=AsyncMock)
    @patch('langchain_openai.OpenAIEmbeddings')
    def test_quick_search_no_summary(self, mock_embeddings, mock_create_chat, mock_search):
        # Decorators apply bottom-up: mock_embeddings is OpenAIEmbeddings,
        # mock_create_chat is create_chat_completion, mock_search is
        # get_search_results. mock_embeddings is unused but must be accepted.
        # Setup mocks
        mock_search.return_value = [{'title': 'Test Result', 'content': 'Content', 'url': 'http://test.com'}]
        # Initialize researcher with dummy config to avoid API key issues
        researcher = GPTResearcher(query="test query")
        # Run quick_search without summary
        results = asyncio.run(researcher.quick_search("test query", aggregated_summary=False))
        # Verify
        # Raw search results pass through untouched; no LLM call should happen.
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0]['title'], 'Test Result')
        mock_create_chat.assert_not_called()

    @patch('gpt_researcher.agent.get_search_results', new_callable=AsyncMock)
    @patch('gpt_researcher.agent.create_chat_completion', new_callable=AsyncMock)
    @patch('langchain_openai.OpenAIEmbeddings')
    def test_quick_search_with_summary(self, mock_embeddings, mock_create_chat, mock_search):
        # Setup mocks
        mock_search.return_value = [{'title': 'Test Result', 'content': 'Content', 'url': 'http://test.com'}]
        mock_create_chat.return_value = "This is a summary."
        # Initialize researcher
        researcher = GPTResearcher(query="test query")
        # Run quick_search with summary
        summary = asyncio.run(researcher.quick_search("test query", aggregated_summary=True))
        # Verify
        # With aggregated_summary=True the mocked LLM output is returned verbatim.
        self.assertEqual(summary, "This is a summary.")
        mock_create_chat.assert_called_once()


# Allow running this module directly: `python tests/test_quick_search.py`.
if __name__ == '__main__':
    unittest.main()
| {
"repo_id": "assafelovic/gpt-researcher",
"file_path": "tests/test_quick_search.py",
"license": "Apache License 2.0",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
assafelovic/gpt-researcher:gpt_researcher/utils/rate_limiter.py | """
Global rate limiter for scraper requests.
Ensures that SCRAPER_RATE_LIMIT_DELAY is enforced globally across ALL WorkerPools,
not just per-pool. This prevents multiple concurrent researchers from overwhelming
rate-limited APIs like Firecrawl.
"""
import asyncio
import time
from typing import ClassVar, Optional
class GlobalRateLimiter:
    """
    Singleton global rate limiter.
    Ensures minimum delay between ANY scraper requests across the entire application,
    regardless of how many WorkerPools or GPTResearcher instances are active.
    """

    # Shared singleton instance and the asyncio lock serializing request timing.
    # Annotated Optional: both start as None and are created lazily.
    _instance: ClassVar[Optional['GlobalRateLimiter']] = None
    _lock: ClassVar[Optional[asyncio.Lock]] = None

    def __new__(cls):
        # Classic singleton: every construction returns the one shared instance.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls._instance._initialized = False
        return cls._instance

    def __init__(self):
        """Initialize the global rate limiter (only once).
        Re-running __init__ on the shared instance is a no-op, so repeated
        `GlobalRateLimiter()` calls never reset the configured delay.
        (The previous no-op re-assignment of the class-level lock was removed.)
        """
        if self._initialized:
            return
        self.last_request_time = 0.0  # epoch seconds of the most recent request
        self.rate_limit_delay = 0.0   # minimum seconds between requests (0 = off)
        self._initialized = True

    @classmethod
    def get_lock(cls):
        """Get or create the async lock (must be called from async context).
        The lock is created lazily so it is bound to the running event loop.
        """
        if cls._lock is None:
            cls._lock = asyncio.Lock()
        return cls._lock

    def configure(self, rate_limit_delay: float):
        """
        Configure the global rate limit delay.
        Args:
            rate_limit_delay: Minimum seconds between requests (0 = no limit)
        """
        self.rate_limit_delay = rate_limit_delay

    async def wait_if_needed(self):
        """
        Wait if needed to enforce global rate limiting.
        This method ensures that regardless of how many WorkerPools are active,
        the SCRAPER_RATE_LIMIT_DELAY is respected globally.
        """
        if self.rate_limit_delay <= 0:
            return  # No rate limiting
        lock = self.get_lock()
        async with lock:
            # Sleep only for the remaining portion of the configured delay.
            elapsed = time.time() - self.last_request_time
            if elapsed < self.rate_limit_delay:
                await asyncio.sleep(self.rate_limit_delay - elapsed)
            self.last_request_time = time.time()

    def reset(self):
        """Reset the rate limiter state (useful for testing).
        Note: intentionally leaves rate_limit_delay configured.
        """
        self.last_request_time = 0.0
# Singleton instance
# Module-level handle; GlobalRateLimiter.__new__ guarantees this is the only
# instance, so every caller shares the same timing state.
_global_rate_limiter = GlobalRateLimiter()
def get_global_rate_limiter() -> GlobalRateLimiter:
    """Get the global rate limiter singleton instance."""
    return _global_rate_limiter
| {
"repo_id": "assafelovic/gpt-researcher",
"file_path": "gpt_researcher/utils/rate_limiter.py",
"license": "Apache License 2.0",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
assafelovic/gpt-researcher:gpt_researcher/utils/tools.py | """
Tool-enabled LLM utilities for GPT Researcher
This module provides provider-agnostic tool calling functionality using LangChain's
unified interface. It allows any LLM provider that supports function calling to use
tools seamlessly.
"""
import asyncio
import logging
from typing import Any, Dict, List, Tuple, Callable, Optional
from langchain_core.messages import HumanMessage, SystemMessage, AIMessage
from langchain_core.tools import tool
from .llm import create_chat_completion
logger = logging.getLogger(__name__)
async def create_chat_completion_with_tools(
    messages: List[Dict[str, str]],
    tools: List[Callable],
    model: str | None = None,
    temperature: float | None = 0.4,
    max_tokens: int | None = 4000,
    llm_provider: str | None = None,
    llm_kwargs: Dict[str, Any] | None = None,
    cost_callback: Callable = None,
    websocket: Any | None = None,
    **kwargs
) -> Tuple[str, List[Dict[str, Any]]]:
    """
    Create a chat completion with tool calling support across all LLM providers.
    This function uses LangChain's bind_tools() to enable function calling in a
    provider-agnostic way. The AI decides autonomously when and how to use tools.
    Args:
        messages: List of chat messages with role and content
        tools: List of LangChain tool functions (decorated with @tool)
        model: The model to use (from config)
        temperature: Temperature for generation
        max_tokens: Maximum tokens to generate
        llm_provider: LLM provider name (from config)
        llm_kwargs: Additional LLM keyword arguments
        cost_callback: Callback function for cost tracking
        websocket: Optional websocket for streaming
        **kwargs: Additional arguments
    Returns:
        Tuple of (response_content, tool_calls_metadata)
    Raises:
        Exception: If tool-enabled completion fails, falls back to simple completion
        (so in practice errors are swallowed and a plain completion is returned).
    """
    try:
        from ..llm_provider.generic.base import GenericLLMProvider
        # Create LLM provider using the config
        provider_kwargs = {
            'model': model,
            **(llm_kwargs or {})
        }
        llm_provider_instance = GenericLLMProvider.from_provider(
            llm_provider,
            **provider_kwargs
        )
        # Convert messages to LangChain format
        # Messages with any other role are silently dropped.
        lc_messages = []
        for msg in messages:
            if msg["role"] == "system":
                lc_messages.append(SystemMessage(content=msg["content"]))
            elif msg["role"] == "user":
                lc_messages.append(HumanMessage(content=msg["content"]))
            elif msg["role"] == "assistant":
                lc_messages.append(AIMessage(content=msg["content"]))
        # Bind tools to the LLM - this works across all LangChain providers that support function calling
        llm_with_tools = llm_provider_instance.llm.bind_tools(tools)
        # Invoke the LLM with tools - this will handle the full conversation flow
        logger.info(f"Invoking LLM with {len(tools)} available tools")
        # For tool calling, we need to handle the full conversation including tool responses
        from langchain_core.messages import ToolMessage
        # First call to LLM
        response = await llm_with_tools.ainvoke(lc_messages)
        # Process tool calls if any were made
        tool_calls_metadata = []
        if hasattr(response, 'tool_calls') and response.tool_calls:
            logger.info(f"LLM made {len(response.tool_calls)} tool calls")
            # Add the assistant's response with tool calls to the conversation
            lc_messages.append(response)
            # Execute each tool call and add results to conversation
            for tool_call in response.tool_calls:
                tool_name = tool_call.get('name', 'unknown')
                tool_args = tool_call.get('args', {})
                tool_id = tool_call.get('id', '')
                logger.info(f"Tool called: {tool_name}")
                if tool_args:
                    args_str = ", ".join([f"{k}={v}" for k, v in tool_args.items()])
                    logger.debug(f"Tool arguments: {args_str}")
                # Find and execute the tool
                # If no tool matches tool_name, this default string is reported back.
                tool_result = "Tool execution failed"
                # NOTE(review): loop variable `tool` shadows the `tool` decorator
                # imported at module level (function-local only, but confusing).
                for tool in tools:
                    if tool.name == tool_name:
                        try:
                            # Prefer async invocation; fall back through the
                            # LangChain invoke API to a plain callable.
                            if hasattr(tool, 'ainvoke'):
                                tool_result = await tool.ainvoke(tool_args)
                            elif hasattr(tool, 'invoke'):
                                tool_result = tool.invoke(tool_args)
                            else:
                                tool_result = await tool(**tool_args) if asyncio.iscoroutinefunction(tool) else tool(**tool_args)
                            break
                        except Exception as e:
                            error_type = type(e).__name__
                            error_msg = str(e)
                            logger.error(
                                f"Error executing tool '{tool_name}': {error_type}: {error_msg}",
                                exc_info=True
                            )
                            # Provide user-friendly error message
                            if "timeout" in error_msg.lower() or "timed out" in error_msg.lower():
                                tool_result = f"Tool '{tool_name}' timed out. The operation took too long to complete. Please try again or check your network connection."
                            elif "connection" in error_msg.lower() or "network" in error_msg.lower():
                                tool_result = f"Tool '{tool_name}' failed due to a network issue. Please check your internet connection and try again."
                            elif "permission" in error_msg.lower() or "access" in error_msg.lower():
                                tool_result = f"Tool '{tool_name}' failed due to insufficient permissions. Please check your API keys or access credentials."
                            else:
                                tool_result = f"Tool '{tool_name}' encountered an error: {error_msg}. Please check the logs for more details."
                # Add tool result to conversation
                tool_message = ToolMessage(content=str(tool_result), tool_call_id=tool_id)
                lc_messages.append(tool_message)
                # Add to metadata
                # Result preview is truncated to 200 chars for the metadata record.
                tool_calls_metadata.append({
                    "tool": tool_name,
                    "args": tool_args,
                    "call_id": tool_id,
                    "result": str(tool_result)[:200] + "..." if len(str(tool_result)) > 200 else str(tool_result)
                })
            # Get final response from LLM after tool execution
            logger.info("Getting final response from LLM after tool execution")
            final_response = await llm_with_tools.ainvoke(lc_messages)
            # Track costs if callback provided
            if cost_callback:
                from .costs import estimate_llm_cost
                # Calculate costs for both calls
                llm_costs = estimate_llm_cost(str(lc_messages), final_response.content or "")
                cost_callback(llm_costs)
            return final_response.content, tool_calls_metadata
        else:
            # No tool calls, return regular response
            if cost_callback:
                from .costs import estimate_llm_cost
                llm_costs = estimate_llm_cost(str(messages), response.content or "")
                cost_callback(llm_costs)
            return response.content, []
    except Exception as e:
        error_type = type(e).__name__
        error_msg = str(e)
        logger.error(
            f"Error in tool-enabled chat completion: {error_type}: {error_msg}",
            exc_info=True
        )
        logger.info("Falling back to simple chat completion without tools")
        # Fallback to simple chat completion without tools
        response = await create_chat_completion(
            model=model,
            messages=messages,
            temperature=temperature,
            max_tokens=max_tokens,
            llm_provider=llm_provider,
            llm_kwargs=llm_kwargs,
            cost_callback=cost_callback,
            websocket=websocket,
            **kwargs
        )
        return response, []
def create_search_tool(search_function: Callable[[str], Dict]) -> Callable:
    """
    Create a standardized search tool for use with tool-enabled chat completions.
    Args:
        search_function: Function that takes a query string and returns search
            results; expected to return a dict with a 'results' list whose
            entries carry 'title'/'content'/'url' keys.
    Returns:
        LangChain tool function decorated with @tool
    """
    @tool
    def search_tool(query: str) -> str:
        """Search for current events or online information when you need new knowledge that doesn't exist in the current context"""
        try:
            results = search_function(query)
            if results and 'results' in results:
                # Format at most the top 5 hits as plain text for the LLM.
                search_content = f"Search results for '{query}':\n\n"
                for result in results['results'][:5]:
                    search_content += f"Title: {result.get('title', '')}\n"
                    search_content += f"Content: {result.get('content', '')[:300]}...\n"
                    search_content += f"URL: {result.get('url', '')}\n\n"
                return search_content
            else:
                return f"No search results found for: {query}"
        except Exception as e:
            error_type = type(e).__name__
            error_msg = str(e)
            logger.error(
                f"Search tool error: {error_type}: {error_msg}",
                exc_info=True
            )
            # Provide context-aware error messages
            # (fix: the three constant messages below were f-strings with no
            # placeholders — dropped the pointless f prefix, text unchanged)
            if "api" in error_msg.lower() or "key" in error_msg.lower():
                return "Search failed: API key issue. Please verify your search API credentials are configured correctly."
            elif "timeout" in error_msg.lower() or "timed out" in error_msg.lower():
                return "Search timed out. The search request took too long. Please try again with a different query."
            elif "rate limit" in error_msg.lower() or "quota" in error_msg.lower():
                return "Search rate limit exceeded. Please wait a moment before trying again."
            else:
                return f"Search encountered an error: {error_msg}. Please check your search provider configuration."
    return search_tool
def create_custom_tool(
    name: str,
    description: str,
    function: Callable,
    parameter_schema: Optional[Dict] = None
) -> Callable:
    """
    Create a custom tool for use with tool-enabled chat completions.
    Args:
        name: Name of the tool
        description: Description of what the tool does
        function: The actual function to execute
        parameter_schema: Optional schema for function parameters
            NOTE(review): currently unused by this implementation.
    Returns:
        LangChain tool function decorated with @tool
    """
    @tool
    def custom_tool(*args, **kwargs) -> str:
        # Delegate to the wrapped function and coerce the result to a string,
        # since tool outputs are fed back to the LLM as text.
        try:
            result = function(*args, **kwargs)
            return str(result) if result is not None else "Tool executed successfully"
        except Exception as e:
            error_type = type(e).__name__
            error_msg = str(e)
            logger.error(
                f"Custom tool '{name}' error: {error_type}: {error_msg}",
                exc_info=True
            )
            # Provide informative error message without exposing internal details
            if "validation" in error_msg.lower() or "invalid" in error_msg.lower():
                return f"Tool '{name}' received invalid input. Please check the parameters and try again."
            elif "not found" in error_msg.lower() or "missing" in error_msg.lower():
                return f"Tool '{name}' could not find required resources. Please verify the input data is correct."
            else:
                return f"Tool '{name}' encountered an error: {error_msg}. Please check the tool configuration."
    # Set tool metadata
    # NOTE(review): these attributes are overwritten AFTER the @tool decorator
    # has run — confirm the decorator honors post-hoc name/description changes.
    custom_tool.name = name
    custom_tool.description = description
    return custom_tool
# Utility function for common tool patterns
def get_available_providers_with_tools() -> List[str]:
    """
    Get list of LLM providers that support tool calling.
    Returns:
        List of provider names that support function calling
    """
    # Providers known to expose function calling through LangChain.
    # Note: This list may expand as more providers add function calling support.
    supported = [
        "openai",
        "anthropic",
        "google_genai",
        "azure_openai",
        "fireworks",
        "groq",
    ]
    return supported


def supports_tools(provider: str) -> bool:
    """
    Check if a given provider supports tool calling.
    Args:
        provider: LLM provider name
    Returns:
        True if provider supports tools, False otherwise
    """
    known = get_available_providers_with_tools()
    return provider in known
| {
"repo_id": "assafelovic/gpt-researcher",
"file_path": "gpt_researcher/utils/tools.py",
"license": "Apache License 2.0",
"lines": 268,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
assafelovic/gpt-researcher:backend/run_server.py | #!/usr/bin/env python3
"""
GPT-Researcher Backend Server Startup Script
Run this to start the research API server.
"""
import uvicorn
import os
import sys
# Add the backend directory to Python path
# Resolve the directory containing this script so local imports work
# regardless of the caller's current working directory.
backend_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, backend_dir)
if __name__ == "__main__":
    # Change to backend directory
    os.chdir(backend_dir)
    # Start the server
    # reload=True enables auto-restart on code changes (development setting).
    uvicorn.run(
        "server.app:app",
        host="0.0.0.0",
        port=8000,
        reload=True,
        log_level="info"
    )
| {
"repo_id": "assafelovic/gpt-researcher",
"file_path": "backend/run_server.py",
"license": "Apache License 2.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
assafelovic/gpt-researcher:tests/test_security_fix.py | """
Security tests for path traversal vulnerability fix.
This module tests the security improvements made to file upload and deletion
operations to prevent path traversal attacks.
"""
import pytest
import tempfile
import os
import shutil
from unittest.mock import Mock, MagicMock
from fastapi import HTTPException
from fastapi.responses import JSONResponse
# Import the functions we're testing
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from backend.server.server_utils import (
secure_filename,
validate_file_path,
handle_file_upload,
handle_file_deletion
)
class TestSecureFilename:
    """Exercise secure_filename against benign names and known attack vectors."""

    def test_basic_filename(self):
        """Normal filenames pass through unchanged."""
        for name in ("document.pdf", "report_2024.docx"):
            assert secure_filename(name) == name

    def test_path_traversal_attacks(self):
        """Path traversal attempts are rejected or neutralized."""
        for attack in ("../../../etc/passwd", "..\\..\\windows\\system32\\config\\SAM"):
            with pytest.raises(ValueError):
                secure_filename(attack)
        # Nested traversal sequences collapse into a harmless name
        assert secure_filename("....//....//etc/passwd") == "etcpasswd"

    def test_null_byte_injection(self):
        """Null bytes are stripped out of filenames."""
        cleaned = secure_filename("test\x00.txt")
        assert "\x00" not in cleaned
        assert cleaned == "test.txt"

    def test_control_characters(self):
        """ASCII control characters are removed."""
        assert secure_filename("test\x01\x02\x03file.txt") == "testfile.txt"

    def test_unicode_normalization(self):
        """Unicode direction-override attacks are normalized away."""
        cleaned = secure_filename("test\u202e\u202dfile.txt")  # Right-to-left override
        assert len(cleaned) > 0

    def test_drive_letters_windows(self):
        """Windows drive-letter prefixes are removed."""
        cases = {"C:sensitive.txt": "sensitive.txt", "D:important.doc": "important.doc"}
        for raw, expected in cases.items():
            assert secure_filename(raw) == expected

    def test_reserved_names_windows(self):
        """Windows reserved device names are rejected, case-insensitively."""
        for name in ('CON', 'PRN', 'AUX', 'NUL', 'COM1', 'LPT1'):
            with pytest.raises(ValueError, match="reserved name"):
                secure_filename(f"{name}.txt")
            with pytest.raises(ValueError, match="reserved name"):
                secure_filename(name.lower())

    def test_empty_filename(self):
        """Empty or effectively-empty filenames are rejected."""
        for empty in ("", " ", "..."):
            with pytest.raises(ValueError, match="empty"):
                secure_filename(empty)

    def test_filename_length_limit(self):
        """Filenames longer than 255 bytes are rejected."""
        with pytest.raises(ValueError, match="too long"):
            secure_filename("a" * 300 + ".txt")

    def test_leading_dots_spaces(self):
        """Leading dots and spaces are removed."""
        for raw in ("...file.txt", " file.txt", ". . .file.txt"):
            assert secure_filename(raw) == "file.txt"
class TestValidateFilePath:
    """Tests for validate_file_path directory-containment checks."""

    def test_valid_path(self):
        """Paths inside the base directory are returned absolutized."""
        with tempfile.TemporaryDirectory() as base:
            candidate = os.path.join(base, "test.txt")
            assert validate_file_path(candidate, base) == os.path.abspath(candidate)

    def test_path_traversal_blocked(self):
        """Relative traversal escaping the base directory is rejected."""
        with tempfile.TemporaryDirectory() as base:
            escape = os.path.join(base, "..", "..", "etc", "passwd")
            with pytest.raises(ValueError, match="outside allowed directory"):
                validate_file_path(escape, base)

    def test_symlink_traversal_blocked(self):
        """Symlinks pointing outside the base directory are rejected."""
        with tempfile.TemporaryDirectory() as base:
            link = os.path.join(base, "malicious_link")
            try:
                os.symlink("/tmp/test_target.txt", link)
                nested = os.path.join(base, "malicious_link", "nested")
                with pytest.raises(ValueError, match="outside allowed directory"):
                    validate_file_path(nested, base)
            except OSError:
                # Symlinks may be unavailable (e.g. Windows without admin)
                pytest.skip("Symlinks not supported in this environment")
class TestHandleFileUpload:
    """Test the secure file upload functionality."""

    @pytest.fixture
    def mock_file(self):
        """Create a mock upload-file object with a benign filename."""
        mock_file = Mock()
        mock_file.filename = "test.txt"
        mock_file.file = Mock()
        return mock_file

    @pytest.fixture
    def temp_doc_path(self):
        """Provide an isolated scratch directory, removed after the test."""
        temp_dir = tempfile.mkdtemp()
        yield temp_dir
        shutil.rmtree(temp_dir, ignore_errors=True)

    @pytest.mark.asyncio
    async def test_normal_file_upload(self, mock_file, temp_doc_path):
        """Test that normal file uploads work correctly."""
        # NOTE: Not fully tested with DocumentLoader due to automated environment limits
        # Manual testing recommended for: DocumentLoader integration
        # Swap in a stub DocumentLoader (restored in finally) so the upload
        # path can run without the real document-parsing dependencies.
        import backend.server.server_utils
        original_loader = backend.server.server_utils.DocumentLoader

        class MockDocumentLoader:
            def __init__(self, path):
                self.path = path

            async def load(self):
                pass

        backend.server.server_utils.DocumentLoader = MockDocumentLoader
        try:
            result = await handle_file_upload(mock_file, temp_doc_path)
            # Sanitized name is preserved and the file lands inside the
            # allowed directory.
            assert result["filename"] == "test.txt"
            assert temp_doc_path in result["path"]
            assert os.path.exists(result["path"])
        finally:
            # Restore original loader
            backend.server.server_utils.DocumentLoader = original_loader

    @pytest.mark.asyncio
    async def test_malicious_filename_upload(self, temp_doc_path):
        """Test that malicious (traversal) filenames are rejected with HTTP 400."""
        mock_file = Mock()
        mock_file.filename = "../../../etc/passwd"
        mock_file.file = Mock()
        with pytest.raises(HTTPException) as exc_info:
            await handle_file_upload(mock_file, temp_doc_path)
        assert exc_info.value.status_code == 400
        assert "Invalid file" in str(exc_info.value.detail)

    @pytest.mark.asyncio
    async def test_empty_filename_upload(self, temp_doc_path):
        """Test that empty filenames are rejected with HTTP 400."""
        mock_file = Mock()
        mock_file.filename = ""
        mock_file.file = Mock()
        with pytest.raises(HTTPException) as exc_info:
            await handle_file_upload(mock_file, temp_doc_path)
        assert exc_info.value.status_code == 400

    @pytest.mark.asyncio
    async def test_file_conflict_handling(self, mock_file, temp_doc_path):
        """Test that name collisions are resolved by creating unique names."""
        # Pre-create a file with the same name to force a collision.
        existing_path = os.path.join(temp_doc_path, "test.txt")
        os.makedirs(temp_doc_path, exist_ok=True)
        with open(existing_path, "w") as f:
            f.write("existing content")
        # Mock DocumentLoader (restored in finally)
        import backend.server.server_utils
        original_loader = backend.server.server_utils.DocumentLoader

        class MockDocumentLoader:
            def __init__(self, path):
                pass

            async def load(self):
                pass

        backend.server.server_utils.DocumentLoader = MockDocumentLoader
        try:
            result = await handle_file_upload(mock_file, temp_doc_path)
            # Should create a unique filename rather than overwrite.
            assert result["filename"] == "test_1.txt"
            assert os.path.exists(result["path"])
        finally:
            backend.server.server_utils.DocumentLoader = original_loader
class TestHandleFileDeletion:
    """Tests for the secure file deletion helper."""

    @pytest.fixture
    def temp_doc_path(self):
        """Provide an isolated scratch directory, removed after the test."""
        scratch = tempfile.mkdtemp()
        yield scratch
        shutil.rmtree(scratch, ignore_errors=True)

    @pytest.mark.asyncio
    async def test_normal_file_deletion(self, temp_doc_path):
        """A file inside the allowed directory is deleted successfully."""
        os.makedirs(temp_doc_path, exist_ok=True)
        target = os.path.join(temp_doc_path, "test.txt")
        with open(target, "w") as fh:
            fh.write("test content")
        response = await handle_file_deletion("test.txt", temp_doc_path)
        assert isinstance(response, JSONResponse)
        assert not os.path.exists(target)

    @pytest.mark.asyncio
    async def test_malicious_filename_deletion(self, temp_doc_path):
        """Traversal-style filenames are rejected with HTTP 400."""
        response = await handle_file_deletion("../../../etc/passwd", temp_doc_path)
        assert isinstance(response, JSONResponse)
        assert response.status_code == 400

    @pytest.mark.asyncio
    async def test_nonexistent_file_deletion(self, temp_doc_path):
        """Deleting a missing file yields HTTP 404."""
        response = await handle_file_deletion("nonexistent.txt", temp_doc_path)
        assert isinstance(response, JSONResponse)
        assert response.status_code == 404

    @pytest.mark.asyncio
    async def test_directory_deletion_blocked(self, temp_doc_path):
        """Directories may not be deleted through the file endpoint."""
        os.makedirs(os.path.join(temp_doc_path, "subdir"), exist_ok=True)
        response = await handle_file_deletion("subdir", temp_doc_path)
        assert isinstance(response, JSONResponse)
        assert response.status_code == 400
        assert "not a file" in str(response.body.decode())
class TestSecurityIntegration:
    """End-to-end checks of the filename sanitization security fix."""

    def test_attack_vectors_blocked(self):
        """Known attack inputs must either raise or come back sanitized."""
        attack_vectors = [
            "../../../etc/passwd",
            "..\\..\\..\\windows\\system32\\config\\SAM",
            "test\x00.txt",
            "CON.txt",
            "PRN",
            "C:sensitive.txt",
            "....//....//sensitive",
            "\u202e\u202dmalicious.txt"  # Unicode RLO attack
        ]
        for attack in attack_vectors:
            try:
                sanitized = secure_filename(attack)
            except ValueError:
                # Rejection is an acceptable outcome for malicious input
                continue
            # Accepted names must contain no traversal or control characters
            for forbidden in ("..", "/", "\\", "\x00"):
                assert forbidden not in sanitized
            assert not sanitized.startswith(".")

    def test_legitimate_files_allowed(self):
        """Ordinary filenames must survive sanitization unchanged."""
        for filename in (
            "document.pdf",
            "report_2024.docx",
            "data.csv",
            "image.jpg",
            "script.py",
            "config.json",
            "README.md",
            "file-with-dashes.txt",
            "file_with_underscores.txt",
        ):
            assert secure_filename(filename) == filename
if __name__ == "__main__":
# Run tests if executed directly
pytest.main([__file__, "-v"]) | {
"repo_id": "assafelovic/gpt-researcher",
"file_path": "tests/test_security_fix.py",
"license": "Apache License 2.0",
"lines": 279,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
assafelovic/gpt-researcher:evals/hallucination_eval/evaluate.py | """
Evaluate model outputs for hallucination using the judges library.
"""
import logging
from pathlib import Path
from typing import Dict, List, Optional
from dotenv import load_dotenv
from judges.classifiers.hallucination import HaluEvalDocumentSummaryNonFactual
# Configure logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
class HallucinationEvaluator:
    """Evaluates model outputs for hallucination using the judges library."""

    def __init__(self, model: str = "openai/gpt-4o"):
        """Create the underlying document-summary hallucination judge."""
        self.summary_judge = HaluEvalDocumentSummaryNonFactual(model=model)

    def evaluate_response(self, model_output: str, source_text: str) -> Dict:
        """
        Evaluate a single model response for hallucination against source documents.

        Args:
            model_output: The model's response to evaluate
            source_text: Source text to check summary against

        Returns:
            Dict containing evaluation results
        """
        try:
            # Judge the summary (output) against the source document (input)
            verdict = self.summary_judge.judge(
                input=source_text,
                output=model_output,
            )
            return {
                "output": model_output,
                "source": source_text,
                "is_hallucination": verdict.score,
                "reasoning": verdict.reasoning,
            }
        except Exception as e:
            logger.error(f"Error evaluating response: {str(e)}")
            raise
def main():
    """Run a small worked example of the hallucination evaluator."""
    sample_output = "The capital of France is Paris, a city known for its rich history and culture."
    sample_source = "Paris is the capital and largest city of France, located in the northern part of the country."

    result = HallucinationEvaluator().evaluate_response(
        model_output=sample_output,
        source_text=sample_source,
    )

    # Print results
    print("\nEvaluation Results:")
    print(f"Output: {result['output']}")
    print(f"Source: {result['source']}")
    print(f"Hallucination: {'Yes' if result['is_hallucination'] else 'No'}")
    print(f"Reasoning: {result['reasoning']}")
if __name__ == "__main__":
main() | {
"repo_id": "assafelovic/gpt-researcher",
"file_path": "evals/hallucination_eval/evaluate.py",
"license": "Apache License 2.0",
"lines": 60,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
assafelovic/gpt-researcher:evals/hallucination_eval/run_eval.py | """
Script to run GPT-Researcher queries and evaluate them for hallucination.
"""
import json
import logging
import random
import asyncio
import argparse
import os
from pathlib import Path
from typing import Dict, List, Optional
from dotenv import load_dotenv
from gpt_researcher.agent import GPTResearcher
from gpt_researcher.utils.enum import ReportType, ReportSource, Tone
from gpt_researcher.utils.logging_config import get_json_handler
from .evaluate import HallucinationEvaluator
# Configure logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
# Load environment variables
load_dotenv()
# Default paths
DEFAULT_OUTPUT_DIR = "evals/hallucination_eval/results"
DEFAULT_QUERIES_FILE = "evals/hallucination_eval/inputs/search_queries.jsonl"
class ResearchEvaluator:
    """Runs GPT-Researcher queries and evaluates responses for hallucination."""

    def __init__(self, queries_file: str = DEFAULT_QUERIES_FILE):
        """
        Initialize the research evaluator.

        Args:
            queries_file: Path to JSONL file containing search queries
        """
        self.queries_file = Path(queries_file)
        self.hallucination_evaluator = HallucinationEvaluator()

    def load_queries(self, num_queries: Optional[int] = None) -> List[str]:
        """
        Load and optionally sample queries from the JSONL file.

        Args:
            num_queries: Optional number of queries to randomly sample

        Returns:
            List of query strings
        """
        queries = []
        # Each JSONL line is expected to be an object with a "question" key.
        with open(self.queries_file) as f:
            for line in f:
                data = json.loads(line.strip())
                queries.append(data["question"])
        # Sample without replacement only when fewer queries were requested
        # than are available; otherwise return everything in file order.
        if num_queries and num_queries < len(queries):
            return random.sample(queries, num_queries)
        return queries

    async def run_research(self, query: str) -> Dict:
        """
        Run a single query through GPT-Researcher.

        Args:
            query: The search query to research

        Returns:
            Dict containing research results and context
        """
        researcher = GPTResearcher(
            query=query,
            report_type=ReportType.ResearchReport.value,
            report_format="markdown",
            report_source=ReportSource.Web.value,
            tone=Tone.Objective,
            verbose=True
        )
        # Run research first; the report is written from the gathered context
        research_result = await researcher.conduct_research()
        report = await researcher.write_report()
        return {
            "query": query,
            "report": report,
            "context": research_result,
        }

    def evaluate_research(
        self,
        research_data: Dict,
        output_dir: Optional[str] = None
    ) -> Dict:
        """
        Evaluate research results for hallucination.

        Args:
            research_data: Dict containing research results and context
            output_dir: Optional directory to save evaluation results

        Returns:
            Dict containing evaluation results
        """
        # Use default output directory if none provided
        if output_dir is None:
            output_dir = DEFAULT_OUTPUT_DIR
        # Use the final combined context as source text
        source_text = research_data.get("context", "")
        if not source_text:
            # Without source material the judge cannot verify claims, so
            # record a skipped evaluation instead of guessing.
            logger.warning("No source text found in research results - skipping evaluation")
            eval_result = {
                "input": research_data["query"],
                "output": research_data["report"],
                "source": "No source text available",
                "is_hallucination": None,
                "confidence_score": None,
                "reasoning": "Evaluation skipped - no source text available for verification"
            }
        else:
            # Evaluate the research report for hallucination
            eval_result = self.hallucination_evaluator.evaluate_response(
                model_output=research_data["report"],
                source_text=source_text
            )
        # Save to output directory
        os.makedirs(output_dir, exist_ok=True)
        # Append to evaluation records (JSONL: one evaluation per line)
        records_file = Path(output_dir) / "evaluation_records.jsonl"
        with open(records_file, "a") as f:
            f.write(json.dumps(eval_result) + "\n")
        return eval_result
async def main(num_queries: int = 5, output_dir: str = DEFAULT_OUTPUT_DIR):
    """
    Run evaluation on a sample of queries.

    Args:
        num_queries: Number of queries to evaluate
        output_dir: Directory to save results
    """
    evaluator = ResearchEvaluator()

    # Load and sample queries
    queries = evaluator.load_queries(num_queries)
    logger.info(f"Selected {len(queries)} queries for evaluation")

    # Per-query evaluation records plus aggregate counters
    results = []
    hallucinated = 0
    responses = 0
    evaluated = 0

    for query in queries:
        try:
            logger.info(f"Processing query: {query}")
            research_data = await evaluator.run_research(query)
            record = evaluator.evaluate_research(
                research_data,
                output_dir=output_dir
            )
            results.append(record)
            responses += 1
            # Only responses with source text receive a hallucination verdict
            if record["is_hallucination"] is not None:
                evaluated += 1
                if record["is_hallucination"]:
                    hallucinated += 1
        except Exception as e:
            logger.error(f"Error processing query '{query}': {str(e)}")
            continue

    hallucination_rate = (hallucinated / evaluated) if evaluated > 0 else None

    # Persist aggregate results alongside the per-query records
    aggregate_results = {
        "total_queries": len(queries),
        "successful_queries": len(results),
        "total_responses": responses,
        "total_evaluated": evaluated,
        "total_hallucinated": hallucinated,
        "hallucination_rate": hallucination_rate,
        "results": results
    }
    aggregate_file = Path(output_dir) / "aggregate_results.json"
    with open(aggregate_file, "w") as f:
        json.dump(aggregate_results, f, indent=2)
    logger.info(f"Saved aggregate results to {aggregate_file}")

    # Print summary
    print("\n=== Evaluation Summary ===")
    print(f"Queries processed: {len(queries)}")
    print(f"Responses evaluated: {evaluated}")
    print(f"Responses skipped (no source text): {responses - evaluated}")
    if hallucination_rate is not None:
        print(f"Hallucination rate: {hallucination_rate * 100:.1f}%")
    else:
        print("No responses could be evaluated due to missing source text")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run GPT-Researcher evaluation")
parser.add_argument("-n", "--num-queries", type=int, default=5,
help="Number of queries to evaluate")
parser.add_argument("-o", "--output-dir", type=str, default=DEFAULT_OUTPUT_DIR,
help="Directory to save results")
args = parser.parse_args()
asyncio.run(main(args.num_queries, args.output_dir)) | {
"repo_id": "assafelovic/gpt-researcher",
"file_path": "evals/hallucination_eval/run_eval.py",
"license": "Apache License 2.0",
"lines": 187,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
assafelovic/gpt-researcher:gpt_researcher/mcp/client.py | """
MCP Client Management Module
Handles MCP client creation, configuration conversion, and connection management.
"""
import asyncio
import logging
from typing import List, Dict, Any, Optional
try:
from langchain_mcp_adapters.client import MultiServerMCPClient
HAS_MCP_ADAPTERS = True
except ImportError:
HAS_MCP_ADAPTERS = False
logger = logging.getLogger(__name__)
class MCPClientManager:
    """
    Manages MCP client lifecycle and configuration.

    Responsible for:
    - Converting GPT Researcher MCP configs to langchain format
    - Creating and managing MultiServerMCPClient instances
    - Handling client cleanup and resource management
    """

    def __init__(self, mcp_configs: List[Dict[str, Any]]):
        """
        Initialize the MCP client manager.

        Args:
            mcp_configs: List of MCP server configurations from GPT Researcher
        """
        # Normalize None to an empty list so later iteration is safe.
        self.mcp_configs = mcp_configs or []
        # Client is created lazily; the lock serializes concurrent creation.
        self._client = None
        self._client_lock = asyncio.Lock()

    def convert_configs_to_langchain_format(self) -> Dict[str, Dict[str, Any]]:
        """
        Convert GPT Researcher MCP configs to langchain-mcp-adapters format.

        Returns:
            Dict[str, Dict[str, Any]]: Server configurations for
            MultiServerMCPClient, keyed by server name.
        """
        server_configs = {}
        for i, config in enumerate(self.mcp_configs):
            # Generate server name (positional fallback when unnamed)
            server_name = config.get("name", f"mcp_server_{i+1}")
            # Build the server config
            server_config = {}
            # Auto-detect transport type from URL scheme if provided
            connection_url = config.get("connection_url")
            if connection_url:
                if connection_url.startswith(("wss://", "ws://")):
                    server_config["transport"] = "websocket"
                    server_config["url"] = connection_url
                elif connection_url.startswith(("https://", "http://")):
                    server_config["transport"] = "streamable_http"
                    server_config["url"] = connection_url
                else:
                    # Unrecognized scheme: fall back to the explicitly
                    # specified connection_type (default: stdio)
                    connection_type = config.get("connection_type", "stdio")
                    server_config["transport"] = connection_type
                    if connection_type in ["websocket", "streamable_http", "http"]:
                        server_config["url"] = connection_url
            else:
                # No URL provided, use stdio (default) or specified connection_type
                connection_type = config.get("connection_type", "stdio")
                server_config["transport"] = connection_type
            # Handle stdio transport configuration (command + args + env)
            if server_config.get("transport") == "stdio":
                if config.get("command"):
                    server_config["command"] = config["command"]
                # Handle server args; a plain string is split on whitespace
                server_args = config.get("args", [])
                if isinstance(server_args, str):
                    server_args = server_args.split()
                server_config["args"] = server_args
                # Handle environment variables (only set when non-empty)
                server_env = config.get("env", {})
                if server_env:
                    server_config["env"] = server_env
            # Add authentication if provided
            if config.get("connection_token"):
                server_config["token"] = config["connection_token"]
            server_configs[server_name] = server_config
        return server_configs

    async def get_or_create_client(self) -> Optional[object]:
        """
        Get or create a MultiServerMCPClient with proper lifecycle management.

        Returns:
            MultiServerMCPClient: The client instance or None if creation fails
        """
        async with self._client_lock:
            # Reuse the cached client when one already exists.
            if self._client is not None:
                return self._client
            if not HAS_MCP_ADAPTERS:
                logger.error("langchain-mcp-adapters not installed")
                return None
            if not self.mcp_configs:
                logger.error("No MCP server configurations found")
                return None
            try:
                # Convert configs to langchain format
                server_configs = self.convert_configs_to_langchain_format()
                logger.info(f"Creating MCP client for {len(server_configs)} server(s)")
                # Initialize the MultiServerMCPClient
                self._client = MultiServerMCPClient(server_configs)
                return self._client
            except Exception as e:
                logger.error(f"Error creating MCP client: {e}")
                return None

    async def close_client(self):
        """
        Properly close the MCP client and clean up resources.
        """
        async with self._client_lock:
            if self._client is not None:
                try:
                    # Since MultiServerMCPClient doesn't support context manager
                    # or explicit close methods in langchain-mcp-adapters 0.1.0,
                    # we just clear the reference and let garbage collection handle it
                    logger.debug("Releasing MCP client reference")
                except Exception as e:
                    logger.error(f"Error during MCP client cleanup: {e}")
                finally:
                    # Always clear the reference
                    self._client = None

    async def get_all_tools(self) -> List:
        """
        Get all available tools from MCP servers.

        Returns:
            List: All available MCP tools (empty list on failure)
        """
        client = await self.get_or_create_client()
        if not client:
            return []
        try:
            # Get tools from all servers
            all_tools = await client.get_tools()
            if all_tools:
                logger.info(f"Loaded {len(all_tools)} total tools from MCP servers")
                return all_tools
            else:
                logger.warning("No tools available from MCP servers")
                return []
        except Exception as e:
            logger.error(f"Error getting MCP tools: {e}")
            return []
"repo_id": "assafelovic/gpt-researcher",
"file_path": "gpt_researcher/mcp/client.py",
"license": "Apache License 2.0",
"lines": 141,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
assafelovic/gpt-researcher:gpt_researcher/mcp/research.py | """
MCP Research Execution Skill
Handles research execution using selected MCP tools as a skill component.
"""
import asyncio
import logging
from typing import List, Dict, Any
logger = logging.getLogger(__name__)
class MCPResearchSkill:
    """
    Handles research execution using selected MCP tools.

    Responsible for:
    - Executing research with LLM and bound tools
    - Processing tool results into standard format
    - Managing tool execution and error handling
    """

    def __init__(self, cfg, researcher=None):
        """
        Initialize the MCP research skill.

        Args:
            cfg: Configuration object with LLM settings
            researcher: Researcher instance for cost tracking
        """
        self.cfg = cfg
        self.researcher = researcher

    async def conduct_research_with_tools(self, query: str, selected_tools: List) -> List[Dict[str, str]]:
        """
        Use LLM with bound tools to conduct intelligent research.

        Args:
            query: Research query
            selected_tools: List of selected MCP tools

        Returns:
            List[Dict[str, str]]: Research results in standard format
            (title/href/body dicts); empty list on failure.
        """
        if not selected_tools:
            logger.warning("No tools available for research")
            return []
        logger.info(f"Conducting research using {len(selected_tools)} selected tools")
        try:
            from ..llm_provider.generic.base import GenericLLMProvider
            # Create LLM provider using the config
            provider_kwargs = {
                'model': self.cfg.strategic_llm_model,
                **self.cfg.llm_kwargs
            }
            llm_provider = GenericLLMProvider.from_provider(
                self.cfg.strategic_llm_provider,
                **provider_kwargs
            )
            # Bind tools so the LLM can decide which to call
            llm_with_tools = llm_provider.llm.bind_tools(selected_tools)
            # Import here to avoid circular imports
            from ..prompts import PromptFamily
            research_prompt = PromptFamily.generate_mcp_research_prompt(query, selected_tools)
            messages = [{"role": "user", "content": research_prompt}]
            logger.info("LLM researching with bound tools...")
            response = await llm_with_tools.ainvoke(messages)

            research_results = []
            # Execute every tool call the LLM requested
            if hasattr(response, 'tool_calls') and response.tool_calls:
                logger.info(f"LLM made {len(response.tool_calls)} tool calls")
                for i, tool_call in enumerate(response.tool_calls, 1):
                    tool_name = tool_call.get("name", "unknown")
                    tool_args = tool_call.get("args", {})
                    logger.info(f"Executing tool {i}/{len(response.tool_calls)}: {tool_name}")
                    # Log the tool arguments for transparency
                    if tool_args:
                        args_str = ", ".join([f"{k}={v}" for k, v in tool_args.items()])
                        logger.debug(f"Tool arguments: {args_str}")
                    try:
                        # Find the tool by name
                        tool = next((t for t in selected_tools if t.name == tool_name), None)
                        if not tool:
                            logger.warning(f"Tool {tool_name} not found in selected tools")
                            continue
                        # Prefer async invocation; fall back to sync or direct call
                        if hasattr(tool, 'ainvoke'):
                            result = await tool.ainvoke(tool_args)
                        elif hasattr(tool, 'invoke'):
                            result = tool.invoke(tool_args)
                        else:
                            result = await tool(tool_args) if asyncio.iscoroutinefunction(tool) else tool(tool_args)
                        if result:
                            # Log a truncated preview of the raw response
                            result_preview = str(result)[:500] + "..." if len(str(result)) > 500 else str(result)
                            logger.debug(f"Tool {tool_name} response preview: {result_preview}")
                            formatted_results = self._process_tool_result(tool_name, result)
                            research_results.extend(formatted_results)
                            logger.info(f"Tool {tool_name} returned {len(formatted_results)} formatted results")
                            # Log details of each formatted result
                            for j, formatted_result in enumerate(formatted_results):
                                title = formatted_result.get("title", "No title")
                                body = formatted_result.get("body", "")
                                content_preview = body[:200] + "..." if len(body) > 200 else body
                                logger.debug(f"Result {j+1}: '{title}' - Content: {content_preview}")
                        else:
                            logger.warning(f"Tool {tool_name} returned empty result")
                    except Exception as e:
                        logger.error(f"Error executing tool {tool_name}: {e}")
                        continue

            # Also include the LLM's own analysis/response as a result
            if hasattr(response, 'content') and response.content:
                llm_analysis = {
                    "title": f"LLM Analysis: {query}",
                    "href": "mcp://llm_analysis",
                    "body": response.content
                }
                research_results.append(llm_analysis)
                analysis_preview = response.content[:300] + "..." if len(response.content) > 300 else response.content
                logger.debug(f"LLM Analysis: {analysis_preview}")
                logger.info("Added LLM analysis to results")

            logger.info(f"Research completed with {len(research_results)} total results")
            return research_results
        except Exception as e:
            logger.error(f"Error in LLM research with tools: {e}")
            return []

    def _process_tool_result(self, tool_name: str, result: Any) -> List[Dict[str, str]]:
        """
        Process a tool result into the standard search result format.

        Args:
            tool_name: Name of the tool that produced the result
            result: The tool result (MCP wrapper dict, list, dict, or scalar)

        Returns:
            List[Dict[str, str]]: Formatted search results with
            title/href/body keys.
        """
        search_results: List[Dict[str, str]] = []
        try:
            # 1) MCP result wrapper with structured_content/content
            if isinstance(result, dict) and ("structured_content" in result or "content" in result):
                # Prefer structured_content when present
                structured = result.get("structured_content")
                if isinstance(structured, dict):
                    items = structured.get("results")
                    if isinstance(items, list):
                        for i, item in enumerate(items):
                            if isinstance(item, dict):
                                search_results.append({
                                    "title": item.get("title", f"Result from {tool_name} #{i+1}"),
                                    "href": item.get("href", item.get("url", f"mcp://{tool_name}/{i}")),
                                    "body": item.get("body", item.get("content", str(item)))
                                })
                    else:
                        # BUGFIX: this case was previously written as
                        # `elif isinstance(structured, dict):` following an
                        # identical `if`, making it unreachable. A structured
                        # dict without a "results" list is now treated as a
                        # single result, as the original comment intended.
                        search_results.append({
                            "title": structured.get("title", f"Result from {tool_name}"),
                            "href": structured.get("href", structured.get("url", f"mcp://{tool_name}")),
                            "body": structured.get("body", structured.get("content", str(structured)))
                        })
                # Fallback to content if provided (MCP spec: list of {type: text, text: ...})
                if not search_results:
                    content_field = result.get("content")
                    if isinstance(content_field, list):
                        texts = []
                        for part in content_field:
                            if isinstance(part, dict):
                                if part.get("type") == "text" and isinstance(part.get("text"), str):
                                    texts.append(part["text"])
                                elif "text" in part:
                                    texts.append(str(part.get("text")))
                                else:
                                    # Unknown piece; stringify so nothing is lost
                                    texts.append(str(part))
                            else:
                                texts.append(str(part))
                        body_text = "\n\n".join([t for t in texts if t])
                    elif isinstance(content_field, str):
                        body_text = content_field
                    else:
                        body_text = str(result)
                    search_results.append({
                        "title": f"Result from {tool_name}",
                        "href": f"mcp://{tool_name}",
                        "body": body_text,
                    })
                return search_results
            # 2) A bare list: process each item
            if isinstance(result, list):
                for i, item in enumerate(result):
                    if isinstance(item, dict):
                        # Use the item as-is if it has the required fields
                        if "title" in item and ("content" in item or "body" in item):
                            search_results.append({
                                "title": item.get("title", ""),
                                "href": item.get("href", item.get("url", f"mcp://{tool_name}/{i}")),
                                "body": item.get("body", item.get("content", str(item))),
                            })
                        else:
                            # Create a search result with a generic title
                            search_results.append({
                                "title": f"Result from {tool_name}",
                                "href": f"mcp://{tool_name}/{i}",
                                "body": str(item),
                            })
            # 3) A bare dict (non-MCP wrapper): single search result
            elif isinstance(result, dict):
                search_results.append({
                    "title": result.get("title", f"Result from {tool_name}"),
                    "href": result.get("href", result.get("url", f"mcp://{tool_name}")),
                    "body": result.get("body", result.get("content", str(result))),
                })
            else:
                # 4) Anything else: stringify into a single result
                search_results.append({
                    "title": f"Result from {tool_name}",
                    "href": f"mcp://{tool_name}",
                    "body": str(result),
                })
        except Exception as e:
            logger.error(f"Error processing tool result from {tool_name}: {e}")
            # Fallback: emit a basic result so the pipeline can continue
            search_results.append({
                "title": f"Result from {tool_name}",
                "href": f"mcp://{tool_name}",
                "body": str(result),
            })
        return search_results
"repo_id": "assafelovic/gpt-researcher",
"file_path": "gpt_researcher/mcp/research.py",
"license": "Apache License 2.0",
"lines": 230,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
assafelovic/gpt-researcher:gpt_researcher/mcp/streaming.py | """
MCP Streaming Utilities Module
Handles websocket streaming and logging for MCP operations.
"""
import asyncio
import logging
from typing import Any, Optional
logger = logging.getLogger(__name__)
class MCPStreamer:
"""
Handles streaming output for MCP operations.
Responsible for:
- Streaming logs to websocket
- Synchronous/asynchronous logging
- Error handling in streaming
"""
def __init__(self, websocket=None):
    """Create a streamer bound to an optional websocket.

    Args:
        websocket: Destination for streamed output; when None, every
            stream_* call degrades to plain logger output.
    """
    self.websocket = websocket
async def stream_log(self, message: str, data: Any = None):
    """Record *message* locally and, when a websocket is attached, forward it.

    Args:
        message: Human-readable log line.
        data: Optional metadata object passed through to the stream.
    """
    logger.info(message)
    # Guard clause: without a websocket there is nothing more to do.
    if not self.websocket:
        return
    try:
        from ..actions.utils import stream_output

        await stream_output(
            type="logs",
            content="mcp_retriever",
            output=message,
            websocket=self.websocket,
            metadata=data,
        )
    except Exception as e:
        # Streaming is best-effort; a transport failure must not break research.
        logger.error(f"Error streaming log: {e}")
def stream_log_sync(self, message: str, data: Any = None):
    """Synchronous version of stream_log for use in sync contexts."""
    # Always record the message locally, regardless of websocket state.
    logger.info(message)
    if self.websocket:
        try:
            try:
                # Bridge into async: reuse the thread's loop when one exists.
                # NOTE(review): asyncio.get_event_loop() is deprecated for this
                # use outside a running loop in newer Python — consider
                # get_running_loop()/asyncio.run; confirm target version.
                loop = asyncio.get_event_loop()
                if loop.is_running():
                    # Fire-and-forget: the task is never awaited, so delivery
                    # order relative to later log calls is not guaranteed.
                    asyncio.create_task(self.stream_log(message, data))
                else:
                    loop.run_until_complete(self.stream_log(message, data))
            except RuntimeError:
                # Raised when no event loop is available in this thread.
                logger.debug("Could not stream log: no running event loop")
        except Exception as e:
            # Streaming is best-effort; never let logging break the caller.
            logger.error(f"Error in sync log streaming: {e}")
async def stream_stage_start(self, stage: str, description: str):
    """Announce that a research stage has begun."""
    banner = f"🔧 {stage}: {description}"
    await self.stream_log(banner)
async def stream_stage_complete(self, stage: str, result_count: int = None):
    """Announce stage completion, appending a result count when one is known."""
    if result_count is None:
        await self.stream_log(f"✅ {stage} completed")
    else:
        await self.stream_log(f"✅ {stage} completed: {result_count} results")
async def stream_tool_selection(self, selected_count: int, total_count: int):
    """Report how many tools the LLM will pick from the available pool."""
    msg = f"🧠 Using LLM to select {selected_count} most relevant tools from {total_count} available"
    await self.stream_log(msg)
async def stream_tool_execution(self, tool_name: str, step: int, total: int):
    """Report progress while executing the selected tools one by one."""
    progress = f"🔍 Executing tool {step}/{total}: {tool_name}"
    await self.stream_log(progress)
async def stream_research_results(self, result_count: int, total_chars: int = None):
    """Summarize how much material the MCP research produced.

    Note: ``total_chars`` is tested for truthiness, so both None and 0
    produce the short message — this matches the original behavior.
    """
    if not total_chars:
        await self.stream_log(f"✅ MCP research completed: {result_count} results obtained")
    else:
        await self.stream_log(f"✅ MCP research completed: {result_count} results obtained ({total_chars:,} chars)")
async def stream_error(self, error_msg: str):
    """Report an error through the log stream."""
    formatted = f"❌ {error_msg}"
    await self.stream_log(formatted)
async def stream_warning(self, warning_msg: str):
    """Report a warning through the log stream."""
    formatted = f"⚠️ {warning_msg}"
    await self.stream_log(formatted)
async def stream_info(self, info_msg: str):
"""Stream informational messages."""
await self.stream_log(f"ℹ️ {info_msg}") | {
"repo_id": "assafelovic/gpt-researcher",
"file_path": "gpt_researcher/mcp/streaming.py",
"license": "Apache License 2.0",
"lines": 83,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
assafelovic/gpt-researcher:gpt_researcher/mcp/tool_selector.py | """
MCP Tool Selection Module
Handles intelligent tool selection using LLM analysis.
"""
import asyncio
import json
import logging
from typing import List, Dict, Any, Optional
logger = logging.getLogger(__name__)
class MCPToolSelector:
"""
Handles intelligent selection of MCP tools using LLM analysis.
Responsible for:
- Analyzing available tools with LLM
- Selecting the most relevant tools for a query
- Providing fallback selection mechanisms
"""
def __init__(self, cfg, researcher=None):
    """
    Initialize the tool selector.

    Args:
        cfg: Configuration object with LLM settings (strategic model,
            provider, and kwargs are read in _call_llm_for_tool_selection).
        researcher: Optional researcher instance; when it exposes
            ``add_costs``, LLM call costs are tracked through it.
    """
    self.cfg = cfg
    self.researcher = researcher
async def select_relevant_tools(self, query: str, all_tools: List, max_tools: int = 3) -> List:
    """
    Use LLM to select the most relevant tools for the research query.

    Falls back to pattern-based selection (_fallback_tool_selection)
    whenever the LLM call fails, returns unparseable output, or selects
    nothing valid.

    Args:
        query: Research query
        all_tools: List of all available tools
        max_tools: Maximum number of tools to select (default: 3)

    Returns:
        List: Selected tools most relevant for the query
    """
    if not all_tools:
        return []

    # Clamp the requested count to what is actually available.
    if len(all_tools) < max_tools:
        max_tools = len(all_tools)

    logger.info(f"Using LLM to select {max_tools} most relevant tools from {len(all_tools)} available")

    # Create tool descriptions for LLM analysis
    tools_info = []
    for i, tool in enumerate(all_tools):
        tool_info = {
            "index": i,
            "name": tool.name,
            "description": tool.description or "No description available"
        }
        tools_info.append(tool_info)

    # Import here to avoid circular imports
    from ..prompts import PromptFamily

    # Create prompt for intelligent tool selection
    prompt = PromptFamily.generate_mcp_tool_selection_prompt(query, tools_info, max_tools)

    try:
        # Call LLM for tool selection
        response = await self._call_llm_for_tool_selection(prompt)

        if not response:
            logger.warning("No LLM response for tool selection, using fallback")
            return self._fallback_tool_selection(all_tools, max_tools)

        # Log a preview of the LLM response for debugging
        response_preview = response[:500] + "..." if len(response) > 500 else response
        logger.debug(f"LLM tool selection response: {response_preview}")

        # Parse LLM response
        try:
            selection_result = json.loads(response)
        except json.JSONDecodeError:
            # Try to extract JSON from response — handles the common case
            # where the model wraps the JSON object in prose or fences.
            import re
            json_match = re.search(r"\{.*\}", response, re.DOTALL)
            if json_match:
                try:
                    selection_result = json.loads(json_match.group(0))
                except json.JSONDecodeError:
                    logger.warning("Could not parse extracted JSON, using fallback")
                    return self._fallback_tool_selection(all_tools, max_tools)
            else:
                logger.warning("No JSON found in LLM response, using fallback")
                return self._fallback_tool_selection(all_tools, max_tools)

        selected_tools = []

        # Process selected tools; only entries with an in-range index count.
        for tool_selection in selection_result.get("selected_tools", []):
            tool_index = tool_selection.get("index")
            tool_name = tool_selection.get("name", "")
            reason = tool_selection.get("reason", "")
            relevance_score = tool_selection.get("relevance_score", 0)

            if tool_index is not None and 0 <= tool_index < len(all_tools):
                selected_tools.append(all_tools[tool_index])
                logger.info(f"Selected tool '{tool_name}' (score: {relevance_score}): {reason}")

        if len(selected_tools) == 0:
            logger.warning("No tools selected by LLM, using fallback selection")
            return self._fallback_tool_selection(all_tools, max_tools)

        # Log the overall selection reasoning
        selection_reasoning = selection_result.get("selection_reasoning", "No reasoning provided")
        logger.info(f"LLM selection strategy: {selection_reasoning}")

        logger.info(f"LLM selected {len(selected_tools)} tools for research")
        return selected_tools

    except Exception as e:
        logger.error(f"Error in LLM tool selection: {e}")
        logger.warning("Falling back to pattern-based selection")
        return self._fallback_tool_selection(all_tools, max_tools)
async def _call_llm_for_tool_selection(self, prompt: str) -> str:
    """
    Call the LLM using the existing create_chat_completion function for tool selection.

    Args:
        prompt (str): The prompt to send to the LLM.

    Returns:
        str: The generated text response, or "" when no config is
            available or the LLM call raises.
    """
    if not self.cfg:
        logger.warning("No config available for LLM call")
        return ""

    try:
        from ..utils.llm import create_chat_completion

        # Create messages for the LLM
        messages = [{"role": "user", "content": prompt}]

        # Use the strategic LLM for tool selection (as it's more complex reasoning)
        result = await create_chat_completion(
            model=self.cfg.strategic_llm_model,
            messages=messages,
            temperature=0.0,  # Low temperature for consistent tool selection
            llm_provider=self.cfg.strategic_llm_provider,
            llm_kwargs=self.cfg.llm_kwargs,
            # Track token costs on the researcher only when it exposes add_costs.
            cost_callback=self.researcher.add_costs if self.researcher and hasattr(self.researcher, 'add_costs') else None,
        )
        return result
    except Exception as e:
        logger.error(f"Error calling LLM for tool selection: {e}")
        return ""
def _fallback_tool_selection(self, all_tools: List, max_tools: int) -> List:
    """
    Fallback tool selection using pattern matching if LLM selection fails.

    Args:
        all_tools: List of all available tools
        max_tools: Maximum number of tools to select

    Returns:
        List: Selected tools
    """
    # Define patterns for research-relevant tools
    research_patterns = [
        'search', 'get', 'read', 'fetch', 'find', 'list', 'query',
        'lookup', 'retrieve', 'browse', 'view', 'show', 'describe'
    ]

    scored_tools = []
    for tool in all_tools:
        tool_name = tool.name.lower()
        tool_description = (tool.description or "").lower()

        # Calculate relevance score based on pattern matching: a match in
        # the tool's name weighs three times a match in its description.
        score = 0
        for pattern in research_patterns:
            if pattern in tool_name:
                score += 3
            if pattern in tool_description:
                score += 1

        # Tools matching no pattern at all are excluded entirely.
        if score > 0:
            scored_tools.append((tool, score))

    # Sort by score and take top tools
    scored_tools.sort(key=lambda x: x[1], reverse=True)
    selected_tools = [tool for tool, score in scored_tools[:max_tools]]

    for i, (tool, score) in enumerate(scored_tools[:max_tools]):
        logger.info(f"Fallback selected tool {i+1}: {tool.name} (score: {score})")
return selected_tools | {
"repo_id": "assafelovic/gpt-researcher",
"file_path": "gpt_researcher/mcp/tool_selector.py",
"license": "Apache License 2.0",
"lines": 162,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
assafelovic/gpt-researcher:gpt_researcher/retrievers/mcp/retriever.py | """
MCP-Based Research Retriever
A retriever that uses Model Context Protocol (MCP) tools for intelligent research.
This retriever implements a two-stage approach:
1. Tool Selection: LLM selects 2-3 most relevant tools from all available MCP tools
2. Research Execution: LLM uses the selected tools to conduct intelligent research
"""
import asyncio
import logging
from typing import List, Dict, Any, Optional
try:
from langchain_mcp_adapters.client import MultiServerMCPClient
HAS_MCP_ADAPTERS = True
except ImportError:
HAS_MCP_ADAPTERS = False
from ...mcp.client import MCPClientManager
from ...mcp.tool_selector import MCPToolSelector
from ...mcp.research import MCPResearchSkill
from ...mcp.streaming import MCPStreamer
logger = logging.getLogger(__name__)
class MCPRetriever:
"""
Model Context Protocol (MCP) Retriever for GPT Researcher.
This retriever implements a two-stage approach:
1. Tool Selection: LLM selects 2-3 most relevant tools from all available MCP tools
2. Research Execution: LLM with bound tools conducts intelligent research
This approach is more efficient than calling all tools and provides better,
more targeted research results.
The retriever requires a researcher instance to access:
- mcp_configs: List of MCP server configurations
- cfg: Configuration object with LLM settings and parameters
- add_costs: Method for tracking research costs
"""
def __init__(
    self,
    query: str,
    headers: Optional[Dict[str, str]] = None,
    query_domains: Optional[List[str]] = None,
    websocket=None,
    researcher=None,
    **kwargs
):
    """
    Initialize the MCP Retriever.

    Args:
        query (str): The search query string.
        headers (dict, optional): Headers containing MCP configuration.
        query_domains (list, optional): List of domains to search (not used in MCP).
        websocket: WebSocket for stream logging.
        researcher: Researcher instance containing mcp_configs and cfg.
        **kwargs: Additional arguments (for compatibility).

    Raises:
        ValueError: Propagated from _get_config when the researcher has
            no ``cfg`` attribute.
    """
    self.query = query
    self.headers = headers or {}
    self.query_domains = query_domains or []
    self.websocket = websocket
    self.researcher = researcher

    # Extract mcp_configs and config from the researcher instance
    self.mcp_configs = self._get_mcp_configs()
    self.cfg = self._get_config()

    # Initialize modular components
    self.client_manager = MCPClientManager(self.mcp_configs)
    self.tool_selector = MCPToolSelector(self.cfg, self.researcher)
    self.mcp_researcher = MCPResearchSkill(self.cfg, self.researcher)
    self.streamer = MCPStreamer(self.websocket)

    # Initialize caching — tool list is fetched once per retriever instance.
    self._all_tools_cache = None

    # Log initialization. Missing configs is deliberately not fatal here;
    # search()/search_async() return [] instead of raising.
    if self.mcp_configs:
        self.streamer.stream_log_sync(f"🔧 Initializing MCP retriever for query: {self.query}")
        self.streamer.stream_log_sync(f"🔧 Found {len(self.mcp_configs)} MCP server configurations")
    else:
        logger.error("No MCP server configurations found. The retriever will fail during search.")
        self.streamer.stream_log_sync("❌ CRITICAL: No MCP server configurations found. Please check documentation.")
def _get_mcp_configs(self) -> List[Dict[str, Any]]:
    """Return the researcher's MCP server configurations.

    Returns:
        List[Dict[str, Any]]: The configured MCP servers, or an empty
        list when no researcher (or no ``mcp_configs`` attribute) exists.
    """
    researcher = self.researcher
    if not researcher or not hasattr(researcher, 'mcp_configs'):
        return []
    return researcher.mcp_configs or []
def _get_config(self):
    """Return the researcher's configuration object.

    Returns:
        Config: Configuration object with LLM settings.

    Raises:
        ValueError: If no researcher with a ``cfg`` attribute is available.
    """
    researcher = self.researcher
    if not (researcher and hasattr(researcher, 'cfg')):
        # Missing configuration is unrecoverable for this retriever.
        logger.error("No config found in researcher instance. MCPRetriever requires a researcher instance with cfg attribute.")
        raise ValueError("MCPRetriever requires a researcher instance with cfg attribute containing LLM configuration")
    return researcher.cfg
async def search_async(self, max_results: int = 10) -> List[Dict[str, str]]:
    """
    Perform an async search using MCP tools with intelligent two-stage approach.

    Pipeline: load all tools → LLM-select the best ones → run research with
    the selected tools. Every failure path returns [] so that the overall
    research flow can continue without MCP results.

    Args:
        max_results: Maximum number of results to return.

    Returns:
        List[Dict[str, str]]: The search results.
    """
    # Check if we have any server configurations
    if not self.mcp_configs:
        error_msg = "No MCP server configurations available. Please provide mcp_configs parameter to GPTResearcher."
        logger.error(error_msg)
        await self.streamer.stream_error("MCP retriever cannot proceed without server configurations.")
        return []  # Return empty instead of raising to allow research to continue

    # Log to help debug the integration flow
    logger.info(f"MCPRetriever.search_async called for query: {self.query}")

    try:
        # Stage 1: Get all available tools
        await self.streamer.stream_stage_start("Stage 1", "Getting all available MCP tools")
        all_tools = await self._get_all_tools()

        if not all_tools:
            await self.streamer.stream_warning("No MCP tools available, skipping MCP research")
            return []

        # Stage 2: Select most relevant tools
        await self.streamer.stream_stage_start("Stage 2", "Selecting most relevant tools")
        selected_tools = await self.tool_selector.select_relevant_tools(self.query, all_tools, max_tools=3)

        if not selected_tools:
            await self.streamer.stream_warning("No relevant tools selected, skipping MCP research")
            return []

        # Stage 3: Conduct research with selected tools
        await self.streamer.stream_stage_start("Stage 3", "Conducting research with selected tools")
        results = await self.mcp_researcher.conduct_research_with_tools(self.query, selected_tools)

        # Limit the number of results
        if len(results) > max_results:
            logger.info(f"Limiting {len(results)} MCP results to {max_results}")
            results = results[:max_results]

        # Log result summary with actual content samples
        logger.info(f"MCPRetriever returning {len(results)} results")

        # Calculate total content length for summary
        total_content_length = sum(len(result.get("body", "")) for result in results)
        await self.streamer.stream_research_results(len(results), total_content_length)

        # Log detailed content samples for debugging
        if results:
            # Show samples of the first few results
            for i, result in enumerate(results[:3]):  # Show first 3 results
                title = result.get("title", "No title")
                url = result.get("href", "No URL")
                content = result.get("body", "")
                content_length = len(content)
                content_sample = content[:400] + "..." if len(content) > 400 else content
                logger.debug(f"Result {i+1}/{len(results)}: '{title}'")
                logger.debug(f"URL: {url}")
                logger.debug(f"Content ({content_length:,} chars): {content_sample}")

            if len(results) > 3:
                remaining_results = len(results) - 3
                remaining_content = sum(len(result.get("body", "")) for result in results[3:])
                logger.debug(f"... and {remaining_results} more results ({remaining_content:,} chars)")

        return results

    except Exception as e:
        logger.error(f"Error in MCP search: {e}")
        await self.streamer.stream_error(f"Error in MCP search: {str(e)}")
        return []
    finally:
        # Ensure client cleanup after search completes (success or failure).
        try:
            await self.client_manager.close_client()
        except Exception as e:
            logger.error(f"Error during client cleanup: {e}")
def search(self, max_results: int = 10) -> List[Dict[str, str]]:
    """
    Perform a search using MCP tools with intelligent two-stage approach.

    This is the synchronous interface required by GPT Researcher.
    It wraps the async search_async method. When called from inside a
    running event loop, the coroutine is executed on a fresh loop in a
    worker thread (a running loop cannot be re-entered with
    run_until_complete); otherwise it is run directly via asyncio.run.

    Args:
        max_results: Maximum number of results to return.

    Returns:
        List[Dict[str, str]]: The search results.
    """
    # Check if we have any server configurations
    if not self.mcp_configs:
        error_msg = "No MCP server configurations available. Please provide mcp_configs parameter to GPTResearcher."
        logger.error(error_msg)
        self.streamer.stream_log_sync("❌ MCP retriever cannot proceed without server configurations.")
        return []  # Return empty instead of raising to allow research to continue

    # Log to help debug the integration flow
    logger.info(f"MCPRetriever.search called for query: {self.query}")

    try:
        # Handle the async/sync boundary properly
        try:
            # Try to get the current event loop
            loop = asyncio.get_running_loop()
            # If we're in an async context, we need to schedule the coroutine
            # This is a bit tricky - we'll create a task and let it run
            import concurrent.futures
            import threading
            # NOTE(review): `threading` appears unused here — candidate for removal.

            # Create a new event loop in a separate thread
            def run_in_thread():
                new_loop = asyncio.new_event_loop()
                asyncio.set_event_loop(new_loop)
                try:
                    result = new_loop.run_until_complete(self.search_async(max_results))
                    return result
                finally:
                    # Enhanced cleanup procedure for MCP connections
                    try:
                        # Cancel all pending tasks with a timeout
                        pending = asyncio.all_tasks(new_loop)
                        for task in pending:
                            task.cancel()

                        # Wait for cancelled tasks to complete with timeout
                        if pending:
                            try:
                                new_loop.run_until_complete(
                                    asyncio.wait_for(
                                        asyncio.gather(*pending, return_exceptions=True),
                                        timeout=5.0  # 5 second timeout for cleanup
                                    )
                                )
                            except asyncio.TimeoutError:
                                logger.debug("Timeout during task cleanup, continuing...")
                            except Exception:
                                pass  # Ignore other cleanup errors
                    except Exception:
                        pass  # Ignore cleanup errors
                    finally:
                        try:
                            # Give the loop a moment to finish any final cleanup
                            import time
                            time.sleep(0.1)

                            # Force garbage collection to clean up any remaining references
                            import gc
                            gc.collect()

                            # Additional time for HTTP clients to finish their cleanup
                            time.sleep(0.2)

                            # Close the loop
                            if not new_loop.is_closed():
                                new_loop.close()
                        except Exception:
                            pass  # Ignore close errors

            # Run in a thread pool to avoid blocking the main event loop
            with concurrent.futures.ThreadPoolExecutor() as executor:
                future = executor.submit(run_in_thread)
                results = future.result(timeout=300)  # 5 minute timeout

        except RuntimeError:
            # No event loop is running, we can run directly
            results = asyncio.run(self.search_async(max_results))

        return results

    except Exception as e:
        logger.error(f"Error in MCP search: {e}")
        self.streamer.stream_log_sync(f"❌ Error in MCP search: {str(e)}")
        # Return empty results instead of raising to allow research to continue
        return []
async def _get_all_tools(self) -> List:
    """
    Get all available tools from MCP servers.

    The loaded tool list is cached on the instance; subsequent calls
    return the cache without contacting the servers again.

    Returns:
        List: All available MCP tools
    """
    # Serve from cache once the tools have been loaded successfully.
    if self._all_tools_cache is not None:
        return self._all_tools_cache

    try:
        all_tools = await self.client_manager.get_all_tools()

        if all_tools:
            await self.streamer.stream_log(f"📋 Loaded {len(all_tools)} total tools from MCP servers")
            self._all_tools_cache = all_tools
            return all_tools
        else:
            # Empty results are not cached, so a later call will retry.
            await self.streamer.stream_warning("No tools available from MCP servers")
            return []

    except Exception as e:
        logger.error(f"Error getting MCP tools: {e}")
        await self.streamer.stream_error(f"Error getting MCP tools: {str(e)}")
return [] | {
"repo_id": "assafelovic/gpt-researcher",
"file_path": "gpt_researcher/retrievers/mcp/retriever.py",
"license": "Apache License 2.0",
"lines": 267,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
assafelovic/gpt-researcher:json_schema_generator.py | import json
from typing import Dict, Any
from pydantic import BaseModel
class UserSchema(BaseModel):
    """Pydantic schema describing a user record for structured JSON output."""
    id: int          # unique numeric identifier
    name: str        # display name
    email: str       # contact e-mail address
    age: int         # age in years
    is_active: bool  # whether the account is currently active
def generate_structured_json(schema: "type[BaseModel]", data: Dict[str, Any]) -> str:
    """
    Generate structured JSON output based on provided schema.

    Args:
        schema: Pydantic model *class* (not an instance) defining the schema
            structure. The previous annotation (``BaseModel``) wrongly claimed
            an instance; the call site instantiates it with ``schema(**data)``.
        data: Dictionary containing the data to be structured.

    Returns:
        str: Pretty-printed JSON string with the validated data, or a
        ``"Error generating JSON: ..."`` message when validation or
        serialization fails.
    """
    try:
        # Validate the data by instantiating the schema class with it.
        structured_data = schema(**data)
        # Convert to JSON string (``.dict()`` retained for pydantic v1-style API).
        return json.dumps(structured_data.dict(), indent=2)
    except Exception as e:
        # Deliberately broad: callers receive failures as a string rather
        # than an exception — preserved as the existing interface contract.
        return f"Error generating JSON: {str(e)}"
# Example usage: validate a sample record against UserSchema and print it.
if __name__ == "__main__":
    # Sample payload matching every field UserSchema declares.
    sample_data = {
        "id": 1,
        "name": "John Doe",
        "email": "john@example.com",
        "age": 30,
        "is_active": True
    }
    json_output = generate_structured_json(UserSchema, sample_data)
    print("Structured JSON Output:")
    print(json_output)
| {
"repo_id": "assafelovic/gpt-researcher",
"file_path": "json_schema_generator.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
assafelovic/gpt-researcher:tests/test_mcp.py | #!/usr/bin/env python3
"""
Test script for MCP integration in GPT Researcher
This script tests two MCP integration scenarios:
1. Web Search MCP (Tavily) - News and general web search queries
2. GitHub MCP - Code repository and technical documentation queries
Both tests verify:
- MCP server connection and tool usage
- Research execution with default optimal settings
- Report generation with MCP data
Prerequisites:
1. Install GPT Researcher: pip install gpt-researcher
2. Install MCP servers:
- Web Search: npm install -g tavily-mcp
- GitHub: npm install -g @modelcontextprotocol/server-github
3. Set up environment variables:
- GITHUB_PERSONAL_ACCESS_TOKEN: Your GitHub Personal Access Token
- OPENAI_API_KEY: Your OpenAI API key
- TAVILY_API_KEY: Your Tavily API key
"""
import asyncio
import os
import logging
from typing import Dict, List, Any
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Get API keys from environment variables
GITHUB_TOKEN = os.environ.get("GITHUB_PERSONAL_ACCESS_TOKEN")
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
TAVILY_API_KEY = os.environ.get("TAVILY_API_KEY")
# Test configuration using environment variables
def get_mcp_config():
    """Build the Tavily (web search) MCP server configuration list."""
    tavily_server = {
        "name": "tavily",
        "command": "npx",
        "args": ["-y", "tavily-mcp@0.1.2"],
        # API key is injected from the environment (read at module import).
        "env": {"TAVILY_API_KEY": TAVILY_API_KEY},
    }
    return [tavily_server]
def get_github_mcp_config():
    """Build the GitHub MCP server configuration list."""
    github_server = {
        "name": "github",
        "command": "npx",
        "args": ["-y", "@modelcontextprotocol/server-github"],
        # Token is injected from the environment (read at module import).
        "env": {"GITHUB_PERSONAL_ACCESS_TOKEN": GITHUB_TOKEN},
    }
    return [github_server]
def setup_environment():
    """Validate required environment variables.

    Returns:
        bool: True when every required variable is set, False otherwise
        (after printing guidance for the missing ones).
    """
    required_vars = {
        "GITHUB_PERSONAL_ACCESS_TOKEN": GITHUB_TOKEN,
        "OPENAI_API_KEY": OPENAI_API_KEY,
        "TAVILY_API_KEY": TAVILY_API_KEY
    }

    # Collect every variable whose value is empty or None.
    missing_vars = [name for name, value in required_vars.items() if not value]

    if not missing_vars:
        print("✅ All required environment variables are set")
        return True

    print("❌ Missing required environment variables:")
    for var in missing_vars:
        print(f" • {var}")
    print("\nPlease set these environment variables before running the test:")
    print(" export GITHUB_PERSONAL_ACCESS_TOKEN='your_github_token'")
    print(" export OPENAI_API_KEY='your_openai_key'")
    print(" export TAVILY_API_KEY='your_tavily_key'")
    return False
async def test_web_search_mcp():
    """Test MCP integration with web search (Tavily) for news and general topics.

    Runs a news query through GPTResearcher configured with only the Tavily
    MCP server, generates a report, and saves it to disk.

    Returns:
        bool: True when research and report generation succeed, False otherwise.
    """
    print("\n🌐 Testing Web Search MCP Integration")
    print("=" * 50)

    try:
        from gpt_researcher import GPTResearcher

        # Create web search MCP configuration
        mcp_configs = get_mcp_config()

        # Create researcher with web search query
        query = "What is the latest updates in the NBA playoffs?"
        researcher = GPTResearcher(
            query=query,
            mcp_configs=mcp_configs
        )

        print("✅ GPTResearcher initialized with web search MCP")
        print(f"🔧 MCP servers configured: {len(mcp_configs)} (Tavily)")
        print(f"📝 Query: {query}")

        # Conduct research - should use fast strategy by default
        print("🚀 Starting web search research...")
        context = await researcher.conduct_research()

        print("📊 Web search research completed!")
        print(f"📈 Context collected: {len(str(context)) if context else 0} chars")

        # Generate a brief report
        print("📝 Generating report...")
        report = await researcher.write_report()

        print("✅ Report generated successfully!")
        print(f"📄 Report length: {len(report)} characters")

        # Save test report
        filename = "../test_web_search_mcp_report.md"
        with open(filename, "w", encoding="utf-8") as f:
            f.write("# Test Report: Web Search MCP Integration\n\n")
            f.write(f"**Query:** {researcher.query}\n\n")
            f.write("**MCP Server:** Tavily (Web Search)\n\n")
            f.write("**Generated Report:**\n\n")
            f.write(report)

        # BUG FIX: previously printed the literal text "(unknown)" instead
        # of the destination path the report was just written to.
        print(f"💾 Test report saved to: {filename}")

        # Print summary
        print("\n📋 Web Search MCP Test Summary:")
        print(" • News query processed successfully")
        print(f" • Context gathered: {len(str(context)):,} chars")
        print(f" • Report generated: {len(report):,} chars")
        print(f" • Cost: ${researcher.get_costs():.4f}")
        print(f" • Saved to: {filename}")

        return True

    except Exception as e:
        print(f"❌ Error in web search MCP test: {e}")
        logger.exception("Web search MCP test error:")
        return False
async def test_github_mcp():
    """Test MCP integration with GitHub for code-related queries.

    Runs a code-focused query through GPTResearcher configured with only
    the GitHub MCP server, generates a report, and saves it to disk.

    Returns:
        bool: True when research and report generation succeed, False otherwise.
    """
    print("\n🐙 Testing GitHub MCP Integration")
    print("=" * 50)

    try:
        from gpt_researcher import GPTResearcher

        # Create GitHub MCP configuration
        mcp_configs = get_github_mcp_config()

        # Create researcher with code-related query
        query = "What are the key features and implementation of React's useState hook? How has it evolved in recent versions?"
        researcher = GPTResearcher(
            query=query,
            mcp_configs=mcp_configs
        )

        print("✅ GPTResearcher initialized with GitHub MCP")
        print(f"🔧 MCP servers configured: {len(mcp_configs)} (GitHub)")
        print(f"📝 Query: {query}")

        # Conduct research - should use fast strategy by default
        print("🚀 Starting GitHub code research...")
        context = await researcher.conduct_research()

        print("📊 GitHub research completed!")
        print(f"📈 Context collected: {len(str(context)) if context else 0} chars")

        # Generate a brief report
        print("📝 Generating report...")
        report = await researcher.write_report()

        print("✅ Report generated successfully!")
        print(f"📄 Report length: {len(report)} characters")

        # Save test report
        filename = "../test_github_mcp_report.md"
        with open(filename, "w", encoding="utf-8") as f:
            f.write("# Test Report: GitHub MCP Integration\n\n")
            f.write(f"**Query:** {researcher.query}\n\n")
            f.write("**MCP Server:** GitHub (Code Repository)\n\n")
            f.write("**Generated Report:**\n\n")
            f.write(report)

        # BUG FIX: previously printed the literal text "(unknown)" instead
        # of the destination path the report was just written to.
        print(f"💾 Test report saved to: {filename}")

        # Print summary
        print("\n📋 GitHub MCP Test Summary:")
        print(" • Code query processed successfully")
        print(f" • Context gathered: {len(str(context)):,} chars")
        print(f" • Report generated: {len(report):,} chars")
        print(f" • Cost: ${researcher.get_costs():.4f}")
        print(f" • Saved to: {filename}")

        return True

    except Exception as e:
        print(f"❌ Error in GitHub MCP test: {e}")
        logger.exception("GitHub MCP test error:")
        return False
async def main():
    """Run the MCP integration test suite and print a pass/fail summary."""
    print("🚀 Testing MCP Integration with GPT Researcher")
    print("=" * 50)

    # Verify the environment before running anything.
    if not setup_environment():
        print("\n❌ Environment setup failed. Please check your configuration.")
        return
    print("✅ Environment setup complete")

    # Run each scenario in turn, collecting (name, passed) pairs.
    outcomes = []

    print("\n🌐 Running Web Search MCP Test (Tavily)")
    outcomes.append(("Web Search MCP", await test_web_search_mcp()))

    print("\n🐙 Running GitHub MCP Test")
    outcomes.append(("GitHub MCP", await test_github_mcp()))

    # Summary
    print("\n📊 Test Results Summary")
    print("=" * 30)
    for name, ok in outcomes:
        print(f" {name}: {'✅ PASSED' if ok else '❌ FAILED'}")

    passed = sum(1 for _, ok in outcomes if ok)
    total = len(outcomes)
    print(f"\nOverall: {passed}/{total} tests passed")
    if passed == total:
        print("🎉 All MCP integration tests completed successfully!")
        print("⚡ Both Web Search (news) and GitHub (code) MCP servers work seamlessly!")
    else:
        print("⚠️ Some tests failed. Check the output above for details.")
if __name__ == "__main__":
    print("🔧 MCP Integration Tests")
    print("=" * 30)
    print("Testing Web Search (Tavily) and GitHub MCP integrations with optimal default settings.")
    print()
    # Entry point: run the async test suite to completion.
    # (Removed non-Python residue that was fused onto this line and broke parsing.)
    asyncio.run(main())
"repo_id": "assafelovic/gpt-researcher",
"file_path": "tests/test_mcp.py",
"license": "Apache License 2.0",
"lines": 215,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
# ADS query optimization prompt
# Template (expects a single {query} placeholder) that rewrites a natural-language
# question into a NASA ADS search query, or "none" for off-topic queries.
ADSABS_QUERY_PROMPT = """Analyze and optimize the following query for NASA ADS search.
If the query is not related to astronomy, astrophysics, or physics, return <query>none</query>.
If the query contains non-English terms, translate them to English first.
Query: {query}
Task: Transform the natural language query into an optimized ADS search query.
Always generate English search terms regardless of the input language.
IMPORTANT: Ignore any requirements about journal ranking (CAS, JCR, IF index),
or output format requirements. Focus only on the core research topic for the search query.
Relevant research areas for ADS:
- Astronomy and astrophysics
- Physics (theoretical and experimental)
- Space science and exploration
- Planetary science
- Cosmology
- Astrobiology
- Related instrumentation and methods
Available search fields and filters:
1. Basic fields:
- title: Search in title (title:"term")
- abstract: Search in abstract (abstract:"term")
- author: Search for author names (author:"lastname, firstname")
- year: Filter by year (year:2020-2023)
- bibstem: Search by journal abbreviation (bibstem:ApJ)
2. Boolean operators:
- AND
- OR
- NOT
- (): Group terms
- "": Exact phrase match
3. Special filters:
- citations(identifier:paper): Papers citing a specific paper
- references(identifier:paper): References of a specific paper
- citation_count: Filter by citation count
- database: Filter by database (database:astronomy)
Examples:
1. Query: "Black holes in galaxy centers after 2020"
<query>title:"black hole" AND abstract:"galaxy center" AND year:2020-</query>
2. Query: "Papers by Neil deGrasse Tyson about exoplanets"
<query>author:"Tyson, Neil deGrasse" AND title:exoplanet</query>
3. Query: "Most cited papers about dark matter in ApJ"
<query>title:"dark matter" AND bibstem:ApJ AND citation_count:[100 TO *]</query>
4. Query: "Latest research on diabetes treatment"
<query>none</query>
5. Query: "Machine learning for galaxy classification"
<query>title:("machine learning" OR "deep learning") AND (title:galaxy OR abstract:galaxy) AND abstract:classification</query>
Please analyze the query and respond ONLY with XML tags:
<query>Provide the optimized ADS search query using appropriate fields and operators, or "none" if not relevant</query>"""
# System prompt framing the model as an expert ADS query crafter; instructs it
# to answer "none" for non-astronomy/physics topics.
ADSABS_QUERY_SYSTEM_PROMPT = """You are an expert at crafting NASA ADS search queries.
Your task is to:
1. First determine if the query is relevant to astronomy, astrophysics, or physics research
2. If relevant, optimize the natural language query for the ADS API
3. If not relevant, return "none" to indicate the query should be handled by other databases
Focus on creating precise queries that will return relevant astronomical and physics literature.
Always generate English search terms regardless of the input language.
Consider using field-specific search terms and appropriate filters to improve search accuracy.
Remember: ADS is specifically for astronomy, astrophysics, and physics research.
Medical, biological, or general research queries should return "none"."""
| {
"repo_id": "binary-husky/gpt_academic",
"file_path": "crazy_functions/review_fns/prompts/adsabs_prompts.py",
"license": "GNU General Public License v3.0",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
# Basic type analysis prompt
# Template (expects {query}) that decides whether an arXiv search is needed and,
# if so, which search type applies: 'basic', 'category', or 'none'.
ARXIV_TYPE_PROMPT = """Analyze the research query and determine if arXiv search is needed and its type.
Query: {query}
Task 1: Determine if this query requires arXiv search
- arXiv is suitable for:
* Computer science and AI/ML
* Physics and mathematics
* Quantitative biology and finance
* Electrical engineering
* Recent preprints in these fields
- arXiv is NOT needed for:
* Medical research (unless ML/AI applications)
* Social sciences
* Business studies
* Humanities
* Industry reports
Task 2: If arXiv search is needed, determine the most appropriate search type
Available types:
1. basic: Keyword-based search across all fields
- For specific technical queries
- When looking for particular methods or applications
2. category: Category-based search within specific fields
- For broad topic exploration
- When surveying a research area
3. none: arXiv search not needed for this query
- When topic is outside arXiv's scope
- For non-technical or clinical research
Examples:
1. Query: "BERT transformer architecture"
<search_type>basic</search_type>
2. Query: "latest developments in machine learning"
<search_type>category</search_type>
3. Query: "COVID-19 clinical trials"
<search_type>none</search_type>
4. Query: "psychological effects of social media"
<search_type>none</search_type>
Please analyze the query and respond ONLY with XML tags:
<search_type>Choose either 'basic', 'category', or 'none'</search_type>"""
# Query optimization prompt: rewrites a natural-language query (expects {query})
# into arXiv search syntax with field tags (ti/abs/au/all) and boolean operators.
ARXIV_QUERY_PROMPT = """Optimize the following query for arXiv search.
Query: {query}
Task: Transform the natural language query into an optimized arXiv search query using boolean operators and field tags.
Always generate English search terms regardless of the input language.
IMPORTANT: Ignore any requirements about journal ranking (CAS, JCR, IF index),
or output format requirements. Focus only on the core research topic for the search query.
Available field tags:
- ti: Search in title
- abs: Search in abstract
- au: Search for author
- all: Search in all fields (default)
Boolean operators:
- AND: Both terms must appear
- OR: Either term can appear
- NOT: Exclude terms
- (): Group terms
- "": Exact phrase match
Examples:
1. Natural query: "Recent papers about transformer models by Vaswani"
<query>ti:"transformer model" AND au:Vaswani AND year:[2017 TO 2024]</query>
2. Natural query: "Deep learning for computer vision, excluding surveys"
<query>ti:(deep learning AND "computer vision") NOT (ti:survey OR ti:review)</query>
3. Natural query: "Attention mechanism in language models"
<query>ti:(attention OR "attention mechanism") AND abs:"language model"</query>
4. Natural query: "GANs or generative adversarial networks for image generation"
<query>(ti:GAN OR ti:"generative adversarial network") AND abs:"image generation"</query>
Please analyze the query and respond ONLY with XML tags:
<query>Provide the optimized search query using appropriate operators and tags</query>
Note:
- Use quotes for exact phrases
- Combine multiple conditions with boolean operators
- Consider both title and abstract for important concepts
- Include author names when relevant
- Use parentheses for complex logical groupings"""
# Sort parameters prompt: picks sort field, sort order, and a 10-50 result
# limit for an arXiv search (expects {query}).
ARXIV_SORT_PROMPT = """Determine optimal sorting parameters for the research query.
Query: {query}
Task: Select the most appropriate sorting parameters to help users find the most relevant papers.
Available sorting options:
1. Sort by:
- relevance: Best match to query terms (default)
- lastUpdatedDate: Most recently updated papers
- submittedDate: Most recently submitted papers
2. Sort order:
- descending: Newest/Most relevant first (default)
- ascending: Oldest/Least relevant first
3. Result limit:
- Minimum: 10 papers
- Maximum: 50 papers
- Recommended: 20-30 papers for most queries
Examples:
1. Query: "Latest developments in transformer models"
<sort_by>submittedDate</sort_by>
<sort_order>descending</sort_order>
<limit>30</limit>
2. Query: "Foundational papers about neural networks"
<sort_by>relevance</sort_by>
<sort_order>descending</sort_order>
<limit>20</limit>
3. Query: "Evolution of deep learning since 2012"
<sort_by>submittedDate</sort_by>
<sort_order>ascending</sort_order>
<limit>50</limit>
Please analyze the query and respond ONLY with XML tags:
<sort_by>Choose: relevance, lastUpdatedDate, or submittedDate</sort_by>
<sort_order>Choose: ascending or descending</sort_order>
<limit>Suggest number between 10-50</limit>
Note:
- Choose relevance for specific technical queries
- Use lastUpdatedDate for tracking paper revisions
- Use submittedDate for following recent developments
- Consider query context when setting the limit"""
# System prompts for each task
ARXIV_TYPE_SYSTEM_PROMPT = """You are an expert at analyzing academic queries.
Your task is to determine whether the query is better suited for keyword search or category-based search.
Consider the query's specificity, scope, and intended search area when making your decision.
Always respond in English regardless of the input language."""
ARXIV_QUERY_SYSTEM_PROMPT = """You are an expert at crafting arXiv search queries.
Your task is to optimize natural language queries using boolean operators and field tags.
Focus on creating precise, targeted queries that will return the most relevant results.
Always generate English search terms regardless of the input language."""
ARXIV_CATEGORIES_SYSTEM_PROMPT = """You are an expert at arXiv category classification.
Your task is to select the most relevant categories for the given research query.
Consider both primary and related interdisciplinary categories, while maintaining focus on the main research area.
Always respond in English regardless of the input language."""
ARXIV_SORT_SYSTEM_PROMPT = """You are an expert at optimizing search results.
Your task is to determine the best sorting parameters based on the query context.
Consider the user's likely intent and temporal aspects of the research topic.
Always respond in English regardless of the input language."""
# Consolidated search prompt (translated from Chinese: "add new search prompt"):
# returns query, categories, sort settings, and limit in one <search_criteria> block.
ARXIV_SEARCH_PROMPT = """Analyze and optimize the research query for arXiv search.
Query: {query}
Task: Transform the natural language query into an optimized arXiv search query.
Available search options:
1. Basic search with field tags:
- ti: Search in title
- abs: Search in abstract
- au: Search for author
Example: "ti:transformer AND abs:attention"
2. Category-based search:
- Use specific arXiv categories
Example: "cat:cs.AI AND neural networks"
3. Date range:
- Specify date range using submittedDate
Example: "deep learning AND submittedDate:[20200101 TO 20231231]"
Examples:
1. Query: "Recent papers about transformer models by Vaswani"
<search_criteria>
<query>ti:"transformer model" AND au:Vaswani AND submittedDate:[20170101 TO 99991231]</query>
<categories>cs.CL, cs.AI, cs.LG</categories>
<sort_by>submittedDate</sort_by>
<sort_order>descending</sort_order>
<limit>30</limit>
</search_criteria>
2. Query: "Latest developments in computer vision"
<search_criteria>
<query>cat:cs.CV AND submittedDate:[20220101 TO 99991231]</query>
<categories>cs.CV, cs.AI, cs.LG</categories>
<sort_by>submittedDate</sort_by>
<sort_order>descending</sort_order>
<limit>25</limit>
</search_criteria>
Please analyze the query and respond with XML tags containing search criteria."""
# System prompt for the consolidated search task above.
ARXIV_SEARCH_SYSTEM_PROMPT = """You are an expert at crafting arXiv search queries.
Your task is to analyze research queries and transform them into optimized arXiv search criteria.
Consider query intent, relevant categories, and temporal aspects when creating the search parameters.
Always generate English search terms and respond in English regardless of the input language."""
# Categories selection prompt: maps the query (expects {query}) to 2-4 arXiv
# category codes, ordered by relevance.
ARXIV_CATEGORIES_PROMPT = """Select the most relevant arXiv categories for the research query.
Query: {query}
Task: Choose 2-4 most relevant categories that best match the research topic.
Available Categories:
Computer Science (cs):
- cs.AI: Artificial Intelligence (neural networks, machine learning, NLP)
- cs.CL: Computation and Language (NLP, machine translation)
- cs.CV: Computer Vision and Pattern Recognition
- cs.LG: Machine Learning (deep learning, reinforcement learning)
- cs.NE: Neural and Evolutionary Computing
- cs.RO: Robotics
- cs.IR: Information Retrieval
- cs.SE: Software Engineering
- cs.DB: Databases
- cs.DC: Distributed Computing
- cs.CY: Computers and Society
- cs.HC: Human-Computer Interaction
Mathematics (math):
- math.OC: Optimization and Control
- math.PR: Probability
- math.ST: Statistics
- math.NA: Numerical Analysis
- math.DS: Dynamical Systems
Statistics (stat):
- stat.ML: Machine Learning
- stat.ME: Methodology
- stat.TH: Theory
- stat.AP: Applications
Physics (physics):
- physics.comp-ph: Computational Physics
- physics.data-an: Data Analysis
- physics.soc-ph: Physics and Society
Electrical Engineering (eess):
- eess.SP: Signal Processing
- eess.AS: Audio and Speech Processing
- eess.IV: Image and Video Processing
- eess.SY: Systems and Control
Examples:
1. Query: "Deep learning for computer vision"
<categories>cs.CV, cs.LG, stat.ML</categories>
2. Query: "Natural language processing with transformers"
<categories>cs.CL, cs.AI, cs.LG</categories>
3. Query: "Reinforcement learning for robotics"
<categories>cs.RO, cs.AI, cs.LG</categories>
4. Query: "Statistical methods in machine learning"
<categories>stat.ML, cs.LG, math.ST</categories>
Please analyze the query and respond ONLY with XML tags:
<categories>List 2-4 most relevant categories, comma-separated</categories>
Note:
- Choose primary categories first, then add related ones
- Limit to 2-4 most relevant categories
- Order by relevance (most relevant first)
- Use comma and space between categories (e.g., "cs.AI, cs.LG")"""
# Appended later (translated from Chinese: "add new prompt at end of file"):
# classifies whether the query explicitly asks for *latest* papers *on arXiv*.
ARXIV_LATEST_PROMPT = """Determine if the query is requesting latest papers from arXiv.
Query: {query}
Task: Analyze if the query is specifically asking for recent/latest papers from arXiv.
IMPORTANT RULE:
- The query MUST explicitly mention "arXiv" or "arxiv" to be considered a latest arXiv papers request
- Queries only asking for recent/latest papers WITHOUT mentioning arXiv should return false
Indicators for latest papers request:
1. MUST HAVE keywords about arXiv:
- "arxiv"
- "arXiv"
AND
2. Keywords about recency:
- "latest"
- "recent"
- "new"
- "newest"
- "just published"
- "this week/month"
Examples:
1. Latest papers request (Valid):
Query: "Show me the latest AI papers on arXiv"
<is_latest_request>true</is_latest_request>
2. Latest papers request (Valid):
Query: "What are the recent papers about transformers on arxiv"
<is_latest_request>true</is_latest_request>
3. Not a latest papers request (Invalid - no mention of arXiv):
Query: "Show me the latest papers about BERT"
<is_latest_request>false</is_latest_request>
4. Not a latest papers request (Invalid - no recency):
Query: "Find papers on arxiv about transformers"
<is_latest_request>false</is_latest_request>
Please analyze the query and respond ONLY with XML tags:
<is_latest_request>true/false</is_latest_request>
Note: The response should be true ONLY if both conditions are met:
1. Query explicitly mentions arXiv/arxiv
2. Query asks for recent/latest papers"""
# System prompt for the latest-papers classification task above.
ARXIV_LATEST_SYSTEM_PROMPT = """You are an expert at analyzing academic queries.
Your task is to determine if the query is specifically requesting latest/recent papers from arXiv.
Remember: The query MUST explicitly mention arXiv to be considered valid, even if it asks for recent papers.
Always respond in English regardless of the input language."""
| {
"repo_id": "binary-husky/gpt_academic",
"file_path": "crazy_functions/review_fns/prompts/arxiv_prompts.py",
"license": "GNU General Public License v3.0",
"lines": 261,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
# Crossref query optimization prompt
# Template (expects {query}) that rewrites a natural-language question into a
# Crossref search query using field filters and boolean operators.
CROSSREF_QUERY_PROMPT = """Analyze and optimize the query for Crossref search.
Query: {query}
Task: Transform the natural language query into an optimized Crossref search query.
Always generate English search terms regardless of the input language.
IMPORTANT: Ignore any requirements about journal ranking (CAS, JCR, IF index),
or output format requirements. Focus only on the core research topic for the search query.
Available search fields and filters:
1. Basic fields:
- title: Search in title
- abstract: Search in abstract
- author: Search for author names
- container-title: Search in journal/conference name
- publisher: Search by publisher name
- type: Filter by work type (journal-article, book-chapter, etc.)
- year: Filter by publication year
2. Boolean operators:
- AND: Both terms must appear
- OR: Either term can appear
- NOT: Exclude terms
- "": Exact phrase match
3. Special filters:
- is-referenced-by-count: Filter by citation count
- from-pub-date: Filter by publication date
- has-abstract: Filter papers with abstracts
Examples:
1. Query: "Machine learning in healthcare after 2020"
<query>title:"machine learning" AND title:healthcare AND from-pub-date:2020</query>
2. Query: "Papers by Geoffrey Hinton about deep learning"
<query>author:"Hinton, Geoffrey" AND (title:"deep learning" OR abstract:"deep learning")</query>
3. Query: "Most cited papers about transformers in Nature"
<query>title:transformer AND container-title:Nature AND is-referenced-by-count:[100 TO *]</query>
4. Query: "Recent BERT applications in medical domain"
<query>title:BERT AND abstract:medical AND from-pub-date:2020 AND type:journal-article</query>
Please analyze the query and respond ONLY with XML tags:
<query>Provide the optimized Crossref search query using appropriate fields and operators</query>"""
# System prompt framing the model as an expert Crossref query crafter.
CROSSREF_QUERY_SYSTEM_PROMPT = """You are an expert at crafting Crossref search queries.
Your task is to optimize natural language queries for Crossref's API.
Focus on creating precise queries that will return relevant results.
Always generate English search terms regardless of the input language.
Consider using field-specific search terms and appropriate filters to improve search accuracy."""
| {
"repo_id": "binary-husky/gpt_academic",
"file_path": "crazy_functions/review_fns/prompts/crossref_prompts.py",
"license": "GNU General Public License v3.0",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
# PubMed search type prompt
# PubMed prompt templates; each expects a single {query} placeholder.
# Type classification: basic / author / journal / none (non-biomedical).
PUBMED_TYPE_PROMPT = """Analyze the research query and determine the appropriate PubMed search type.
Query: {query}
Available search types:
1. basic: General keyword search for medical/biomedical topics
2. author: Search by author name
3. journal: Search within specific journals
4. none: Query not related to medical/biomedical research
Examples:
1. Query: "COVID-19 treatment outcomes"
<search_type>basic</search_type>
2. Query: "Papers by Anthony Fauci"
<search_type>author</search_type>
3. Query: "Recent papers in Nature about CRISPR"
<search_type>journal</search_type>
4. Query: "Deep learning for computer vision"
<search_type>none</search_type>
5. Query: "Transformer architecture for NLP"
<search_type>none</search_type>
Please analyze the query and respond ONLY with XML tags:
<search_type>Choose: basic, author, journal, or none</search_type>"""
# PubMed query optimization prompt: rewrites the query using PubMed field tags.
PUBMED_QUERY_PROMPT = """Optimize the following query for PubMed search.
Query: {query}
Task: Transform the natural language query into an optimized PubMed search query.
Requirements:
- Always generate English search terms regardless of input language
- Translate any non-English terms to English before creating the query
- Never include non-English characters in the final query
IMPORTANT: Ignore any requirements about journal ranking (CAS, JCR, IF index),
or output format requirements. Focus only on the core medical/biomedical topic for the search query.
Available field tags:
- [Title] - Search in title
- [Author] - Search for author
- [Journal] - Search in journal name
- [MeSH Terms] - Search using MeSH terms
Boolean operators:
- AND
- OR
- NOT
Examples:
1. Query: "COVID-19 treatment in elderly patients"
<query>COVID-19[Title] AND treatment[Title/Abstract] AND elderly[Title/Abstract]</query>
2. Query: "Cancer immunotherapy review articles"
<query>cancer immunotherapy[Title/Abstract] AND review[Publication Type]</query>
Please analyze the query and respond ONLY with XML tags:
<query>Provide the optimized PubMed search query</query>"""
# PubMed sort parameters prompt: picks sort method and a 10-50 result limit.
PUBMED_SORT_PROMPT = """Determine optimal sorting parameters for PubMed results.
Query: {query}
Task: Select the most appropriate sorting method and result limit.
Available sort options:
- relevance: Best match to query
- date: Most recent first
- journal: Sort by journal name
Examples:
1. Query: "Latest developments in gene therapy"
<sort_by>date</sort_by>
<limit>30</limit>
2. Query: "Classic papers about DNA structure"
<sort_by>relevance</sort_by>
<limit>20</limit>
Please analyze the query and respond ONLY with XML tags:
<sort_by>Choose: relevance, date, or journal</sort_by>
<limit>Suggest number between 10-50</limit>"""
# System prompts for the three PubMed tasks above.
PUBMED_TYPE_SYSTEM_PROMPT = """You are an expert at analyzing medical and scientific queries.
Your task is to determine the most appropriate PubMed search type.
Consider the query's focus and intended search scope.
Always respond in English regardless of the input language."""
PUBMED_QUERY_SYSTEM_PROMPT = """You are an expert at crafting PubMed search queries.
Your task is to optimize natural language queries using PubMed's search syntax.
Focus on creating precise, targeted queries that will return relevant medical literature.
Always generate English search terms regardless of the input language."""
PUBMED_SORT_SYSTEM_PROMPT = """You are an expert at optimizing PubMed search results.
Your task is to determine the best sorting parameters based on the query context.
Consider the balance between relevance and recency.
Always respond in English regardless of the input language."""
| {
"repo_id": "binary-husky/gpt_academic",
"file_path": "crazy_functions/review_fns/prompts/pubmed_prompts.py",
"license": "GNU General Public License v3.0",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
# Search type prompt
# Template (expects {query}) that picks the Semantic Scholar API search type:
# paper / author / paper_details / citations / references / recommendations.
SEMANTIC_TYPE_PROMPT = """Determine the most appropriate search type for Semantic Scholar.
Query: {query}
Task: Analyze the research query and select the most appropriate search type for Semantic Scholar API.
Available search types:
1. paper: General paper search
- Use for broad topic searches
- Looking for specific papers
- Keyword-based searches
Example: "transformer models in NLP"
2. author: Author-based search
- Finding works by specific researchers
- Author profile analysis
Example: "papers by Yoshua Bengio"
3. paper_details: Specific paper lookup
- Getting details about a known paper
- Finding specific versions or citations
Example: "Attention is All You Need paper details"
4. citations: Citation analysis
- Finding papers that cite a specific work
- Impact analysis
Example: "papers citing BERT"
5. references: Reference analysis
- Finding papers cited by a specific work
- Background research
Example: "references in GPT-3 paper"
6. recommendations: Paper recommendations
- Finding similar papers
- Research direction exploration
Example: "papers similar to Transformer"
Examples:
1. Query: "Latest papers about deep learning"
<search_type>paper</search_type>
2. Query: "Works by Geoffrey Hinton since 2020"
<search_type>author</search_type>
3. Query: "Papers citing the original Transformer paper"
<search_type>citations</search_type>
Please analyze the query and respond ONLY with XML tags:
<search_type>Choose the most appropriate search type from the list above</search_type>"""
# Query optimization prompt: rewrites a natural-language question (expects
# {query}) into a Semantic Scholar keyword query with quoted phrases.
SEMANTIC_QUERY_PROMPT = """Optimize the following query for Semantic Scholar search.
Query: {query}
Task: Transform the natural language query into an optimized search query for maximum relevance.
Always generate English search terms regardless of the input language.
IMPORTANT: Ignore any requirements about journal ranking (CAS, JCR, IF index),
or output format requirements. Focus only on the core research topic for the search query.
Query optimization guidelines:
1. Use quotes for exact phrases
- Ensures exact matching
- Reduces irrelevant results
Example: "\"attention mechanism\"" vs attention mechanism
2. Include key technical terms
- Use specific technical terminology
- Include common variations
Example: "transformer architecture" neural networks
3. Author names (if relevant)
- Include full names when known
- Consider common name variations
Example: "Geoffrey Hinton" OR "G. E. Hinton"
Examples:
1. Natural query: "Recent advances in transformer models"
<query>"transformer model" "neural architecture" deep learning</query>
2. Natural query: "BERT applications in text classification"
<query>"BERT" "text classification" "language model" application</query>
3. Natural query: "Deep learning for computer vision by Kaiming He"
<query>"deep learning" "computer vision" author:"Kaiming He"</query>
Please analyze the query and respond ONLY with XML tags:
<query>Provide the optimized search query</query>
Note:
- Balance between specificity and coverage
- Include important technical terms
- Use quotes for key phrases
- Consider synonyms and related terms"""
# Fields selection prompt: chooses which Semantic Scholar paper fields to
# request (title, abstract, authors, citations, tldr, ...) for the query.
SEMANTIC_FIELDS_PROMPT = """Select relevant fields to retrieve from Semantic Scholar.
Query: {query}
Task: Determine which paper fields should be retrieved based on the research needs.
Available fields:
Core fields:
- title: Paper title (always included)
- abstract: Full paper abstract
- authors: Author information
- year: Publication year
- venue: Publication venue
Citation fields:
- citations: Papers citing this work
- references: Papers cited by this work
Additional fields:
- embedding: Paper vector embedding
- tldr: AI-generated summary
- venue: Publication venue/journal
- url: Paper URL
Examples:
1. Query: "Latest developments in NLP"
<fields>title, abstract, authors, year, venue, citations</fields>
2. Query: "Most influential papers in deep learning"
<fields>title, abstract, authors, year, citations, references</fields>
3. Query: "Survey of transformer architectures"
<fields>title, abstract, authors, year, tldr, references</fields>
Please analyze the query and respond ONLY with XML tags:
<fields>List relevant fields, comma-separated</fields>
Note:
- Choose fields based on the query's purpose
- Include citation data for impact analysis
- Consider tldr for quick paper screening
- Balance completeness with API efficiency"""
# Sort parameters prompt: picks sort method (relevance/citations/year) and a
# 10-50 result limit for the query.
SEMANTIC_SORT_PROMPT = """Determine optimal sorting parameters for the query.
Query: {query}
Task: Select the most appropriate sorting method and result limit for the search.
Always generate English search terms regardless of the input language.
Sorting options:
1. relevance (default)
- Best match to query terms
- Recommended for specific technical searches
Example: "specific algorithm implementations"
2. citations
- Sort by citation count
- Best for finding influential papers
Example: "most important papers in deep learning"
3. year
- Sort by publication date
- Best for following recent developments
Example: "latest advances in NLP"
Examples:
1. Query: "Recent breakthroughs in AI"
<sort_by>year</sort_by>
<limit>30</limit>
2. Query: "Most influential papers about GANs"
<sort_by>citations</sort_by>
<limit>20</limit>
3. Query: "Specific papers about BERT fine-tuning"
<sort_by>relevance</sort_by>
<limit>25</limit>
Please analyze the query and respond ONLY with XML tags:
<sort_by>Choose: relevance, citations, or year</sort_by>
<limit>Suggest number between 10-50</limit>
Note:
- Consider the query's temporal aspects
- Balance between comprehensive coverage and information overload
- Use citation sorting for impact analysis
- Use year sorting for tracking developments"""
# System prompts for the four Semantic Scholar tasks above (type, query,
# fields, sort).
SEMANTIC_TYPE_SYSTEM_PROMPT = """You are an expert at analyzing academic queries.
Your task is to determine the most appropriate type of search on Semantic Scholar.
Consider the query's intent, scope, and specific research needs.
Always respond in English regardless of the input language."""
SEMANTIC_QUERY_SYSTEM_PROMPT = """You are an expert at crafting Semantic Scholar search queries.
Your task is to optimize natural language queries for maximum relevance.
Focus on creating precise queries that leverage the platform's search capabilities.
Always generate English search terms regardless of the input language."""
SEMANTIC_FIELDS_SYSTEM_PROMPT = """You are an expert at Semantic Scholar data fields.
Your task is to select the most relevant fields based on the research context.
Consider both essential and supplementary information needs.
Always respond in English regardless of the input language."""
SEMANTIC_SORT_SYSTEM_PROMPT = """You are an expert at optimizing search results.
Your task is to determine the best sorting parameters based on the query context.
Consider the balance between relevance, impact, and recency.
Always respond in English regardless of the input language."""
# Consolidated search prompt (translated from Chinese: "add new comprehensive
# search prompt"): returns full <search_criteria> in a single call.
SEMANTIC_SEARCH_PROMPT = """Analyze and optimize the research query for Semantic Scholar search.
Query: {query}
Task: Transform the natural language query into optimized search criteria for Semantic Scholar.
IMPORTANT: Ignore any requirements about journal ranking (CAS, JCR, IF index),
or output format requirements when generating the search terms. These requirements
should be considered only for post-search filtering, not as part of the core query.
Available search options:
1. Paper search:
- Title and abstract search
- Author search
- Field-specific search
Example: "transformer architecture neural networks"
2. Field tags:
- title: Search in title
- abstract: Search in abstract
- authors: Search by author names
- venue: Search by publication venue
Example: "title:transformer authors:\"Vaswani\""
3. Advanced options:
- Year range filtering
- Citation count filtering
- Venue filtering
Example: "deep learning year>2020 venue:\"NeurIPS\""
Examples:
1. Query: "Recent transformer papers by Vaswani with high impact"
<search_criteria>
<query>title:transformer authors:"Vaswani" year>2017</query>
<search_type>paper</search_type>
<fields>title,abstract,authors,year,citations,venue</fields>
<sort_by>citations</sort_by>
<limit>30</limit>
</search_criteria>
2. Query: "Most cited papers about BERT in top conferences"
<search_criteria>
<query>title:BERT venue:"ACL|EMNLP|NAACL"</query>
<search_type>paper</search_type>
<fields>title,abstract,authors,year,citations,venue,references</fields>
<sort_by>citations</sort_by>
<limit>25</limit>
</search_criteria>
Please analyze the query and respond with XML tags containing complete search criteria."""
# System prompt for the consolidated search task above.
SEMANTIC_SEARCH_SYSTEM_PROMPT = """You are an expert at crafting Semantic Scholar search queries.
Your task is to analyze research queries and transform them into optimized search criteria.
Consider query intent, field relevance, and citation impact when creating the search parameters.
Focus on producing precise and comprehensive search criteria that will yield the most relevant results.
Always generate English search terms and respond in English regardless of the input language."""
| {
"repo_id": "binary-husky/gpt_academic",
"file_path": "crazy_functions/review_fns/prompts/semantic_prompts.py",
"license": "GNU General Public License v3.0",
"lines": 206,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
# Natural Language Toolkit: Corpus & Model Downloader
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Edward Loper <edloper@gmail.com>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
The NLTK corpus and module downloader. This module defines several
interfaces which can be used to download corpora, models, and other
data packages that can be used with NLTK.
Downloading Packages
====================
If called with no arguments, ``download()`` will display an interactive
interface which can be used to download and install new packages.
If Tkinter is available, then a graphical interface will be shown,
otherwise a simple text interface will be provided.
Individual packages can be downloaded by calling the ``download()``
function with a single argument, giving the package identifier for the
package that should be downloaded:
>>> download('treebank') # doctest: +SKIP
[nltk_data] Downloading package 'treebank'...
[nltk_data] Unzipping corpora/treebank.zip.
NLTK also provides a number of \"package collections\", consisting of
a group of related packages. To download all packages in a
collection, simply call ``download()`` with the collection's
identifier:
>>> download('all-corpora') # doctest: +SKIP
[nltk_data] Downloading package 'abc'...
[nltk_data] Unzipping corpora/abc.zip.
[nltk_data] Downloading package 'alpino'...
[nltk_data] Unzipping corpora/alpino.zip.
...
[nltk_data] Downloading package 'words'...
[nltk_data] Unzipping corpora/words.zip.
Download Directory
==================
By default, packages are installed in either a system-wide directory
(if Python has sufficient access to write to it); or in the current
user's home directory. However, the ``download_dir`` argument may be
used to specify a different installation target, if desired.
See ``Downloader.default_download_dir()`` for a more detailed
description of how the default download directory is chosen.
NLTK Download Server
====================
Before downloading any packages, the corpus and module downloader
contacts the NLTK download server, to retrieve an index file
describing the available packages. By default, this index file is
loaded from ``https://raw.githubusercontent.com/nltk/nltk_data/gh-pages/index.xml``.
If necessary, it is possible to create a new ``Downloader`` object,
specifying a different URL for the package index file.
Usage::
python nltk/downloader.py [-d DATADIR] [-q] [-f] [-k] PACKAGE_IDS
or::
python -m nltk.downloader [-d DATADIR] [-q] [-f] [-k] PACKAGE_IDS
"""
# ----------------------------------------------------------------------
"""
0 1 2 3
[label][----][label][----]
[column ][column ]
Notes
=====
Handling data files.. Some questions:
* Should the data files be kept zipped or unzipped? I say zipped.
* Should the data files be kept in svn at all? Advantages: history;
automatic version numbers; 'svn up' could be used rather than the
downloader to update the corpora. Disadvantages: they're big,
which makes working from svn a bit of a pain. And we're planning
to potentially make them much bigger. I don't think we want
people to have to download 400MB corpora just to use nltk from svn.
* Compromise: keep the data files in trunk/data rather than in
trunk/nltk. That way you can check them out in svn if you want
to; but you don't need to, and you can use the downloader instead.
* Also: keep models in mind. When we change the code, we'd
potentially like the models to get updated. This could require a
little thought.
* So.. let's assume we have a trunk/data directory, containing a bunch
of packages. The packages should be kept as zip files, because we
really shouldn't be editing them much (well -- we may edit models
more, but they tend to be binary-ish files anyway, where diffs
aren't that helpful). So we'll have trunk/data, with a bunch of
files like abc.zip and treebank.zip and propbank.zip. For each
package we could also have eg treebank.xml and propbank.xml,
describing the contents of the package (name, copyright, license,
etc). Collections would also have .xml files. Finally, we would
pull all these together to form a single index.xml file. Some
directory structure wouldn't hurt. So how about::
/trunk/data/ ....................... root of data svn
index.xml ........................ main index file
src/ ............................. python scripts
packages/ ........................ dir for packages
corpora/ ....................... zip & xml files for corpora
grammars/ ...................... zip & xml files for grammars
taggers/ ....................... zip & xml files for taggers
tokenizers/ .................... zip & xml files for tokenizers
etc.
collections/ ..................... xml files for collections
Where the root (/trunk/data) would contain a makefile; and src/
would contain a script to update the info.xml file. It could also
contain scripts to rebuild some of the various model files. The
script that builds index.xml should probably check that each zip
file expands entirely into a single subdir, whose name matches the
package's uid.
Changes I need to make:
- in index: change "size" to "filesize" or "compressed-size"
- in index: add "unzipped-size"
- when checking status: check both compressed & uncompressed size.
uncompressed size is important to make sure we detect a problem
if something got partially unzipped. define new status values
to differentiate stale vs corrupt vs corruptly-uncompressed??
(we shouldn't need to re-download the file if the zip file is ok
but it didn't get uncompressed fully.)
- add other fields to the index: author, license, copyright, contact,
etc.
the current grammars/ package would become a single new package (eg
toy-grammars or book-grammars).
xml file should have:
- authorship info
- license info
- copyright info
- contact info
- info about what type of data/annotation it contains?
- recommended corpus reader?
collections can contain other collections. they can also contain
multiple package types (corpora & models). Have a single 'basics'
package that includes everything we talk about in the book?
n.b.: there will have to be a fallback to the punkt tokenizer, in case
they didn't download that model.
default: unzip or not?
"""
import functools
import itertools
import os
import shutil
import subprocess
import sys
import textwrap
import threading
import time
import warnings
import zipfile
from hashlib import md5
from xml.etree import ElementTree
try:
TKINTER = True
from tkinter import Button, Canvas, Entry, Frame, IntVar, Label, Menu, TclError, Tk
from tkinter.messagebox import showerror
from nltk.draw.table import Table
from nltk.draw.util import ShowText
except ImportError:
TKINTER = False
TclError = ValueError
from urllib.error import HTTPError, URLError
from urllib.request import urlopen
import nltk
from loguru import logger
# urllib2 = nltk.internals.import_from_stdlib('urllib2')
######################################################################
# Directory entry objects (from the data server's index file)
######################################################################
class Package:
    """
    A directory entry for a downloadable package.  These entries are
    extracted from the XML index file that is downloaded by
    ``Downloader``.  Each package consists of a single file; but if
    that file is a zip file, then it can be automatically decompressed
    when the package is installed.
    """

    def __init__(
        self,
        id,
        url,
        name=None,
        subdir="",
        size=None,
        unzipped_size=None,
        checksum=None,
        svn_revision=None,
        copyright="Unknown",
        contact="Unknown",
        license="Unknown",
        author="Unknown",
        unzip=True,
        **kw,
    ):
        # Identity: unique id plus a human-readable name (falls back to id).
        self.id = id
        self.name = name or id

        # Where the package lives on the server, and the local install
        # subdirectory (e.g. 'corpora' or 'taggers').
        self.url = url
        self.subdir = subdir

        # Sizes arrive as strings from the XML index.  NOTE(review): int(None)
        # raises, so the index is expected to always supply both values.
        self.size = int(size)
        self.unzipped_size = int(unzipped_size)

        # Integrity / provenance metadata.
        self.checksum = checksum          # MD5 of the package file
        self.svn_revision = svn_revision
        self.copyright = copyright
        self.contact = contact
        self.license = license
        self.author = author

        # Local filename: subdir + id, keeping the URL's file extension.
        extension = os.path.splitext(url.split("/")[-1])[1]
        self.filename = os.path.join(subdir, id + extension)

        # The index encodes the unzip flag as '0' or '1'.
        self.unzip = bool(int(unzip))

        # Any extra XML attributes become plain instance attributes.
        self.__dict__.update(kw)

    @staticmethod
    def fromxml(xml):
        """Build a ``Package`` from a ``<package .../>`` index element
        (or from a path/file containing one)."""
        if isinstance(xml, str):
            xml = ElementTree.parse(xml)
        # Normalise every attribute value to ``str`` in place, then feed
        # the attributes straight into the constructor as keywords.
        for key, value in xml.attrib.items():
            xml.attrib[key] = str(value)
        return Package(**xml.attrib)

    def __lt__(self, other):
        # Packages order by identifier.
        return self.id < other.id

    def __repr__(self):
        return f"<Package {self.id}>"
class Collection:
    """
    A directory entry for a collection of downloadable packages.
    These entries are extracted from the XML index file that is
    downloaded by ``Downloader``.
    """

    def __init__(self, id, children, name=None, **kw):
        self.id = id            # unique identifier for this collection
        self.name = name or id  # display name (defaults to the id)
        # Direct members: Collection/Package records — or, before the
        # index is resolved, their string identifiers.
        self.children = children
        # Flattened package list; populated by Downloader._update_index().
        self.packages = None
        # Any extra XML attributes become plain instance attributes.
        self.__dict__.update(kw)

    @staticmethod
    def fromxml(xml):
        """Build a ``Collection`` from a ``<collection>`` index element
        (or from a path/file containing one)."""
        if isinstance(xml, str):
            xml = ElementTree.parse(xml)
        for key, value in xml.attrib.items():
            xml.attrib[key] = str(value)
        member_ids = [item.get("ref") for item in xml.findall("item")]
        return Collection(children=member_ids, **xml.attrib)

    def __lt__(self, other):
        return self.id < other.id

    def __repr__(self):
        return f"<Collection {self.id}>"
######################################################################
# Message Passing Objects
######################################################################
# Message types yielded by ``Downloader.incr_download`` so callers (CLI or
# GUI) can render progress however they like.  Each subclass is a plain
# record: construction just stores its argument.
class DownloaderMessage:
    """A status message object, used by ``incr_download`` to
    communicate its progress."""


class StartCollectionMessage(DownloaderMessage):
    """Data server has started working on a collection of packages."""

    def __init__(self, collection):
        self.collection = collection


class FinishCollectionMessage(DownloaderMessage):
    """Data server has finished working on a collection of packages."""

    def __init__(self, collection):
        self.collection = collection


class StartPackageMessage(DownloaderMessage):
    """Data server has started working on a package."""

    def __init__(self, package):
        self.package = package


class FinishPackageMessage(DownloaderMessage):
    """Data server has finished working on a package."""

    def __init__(self, package):
        self.package = package


class StartDownloadMessage(DownloaderMessage):
    """Data server has started downloading a package."""

    def __init__(self, package):
        self.package = package


class FinishDownloadMessage(DownloaderMessage):
    """Data server has finished downloading a package."""

    def __init__(self, package):
        self.package = package


class StartUnzipMessage(DownloaderMessage):
    """Data server has started unzipping a package."""

    def __init__(self, package):
        self.package = package


class FinishUnzipMessage(DownloaderMessage):
    """Data server has finished unzipping a package."""

    def __init__(self, package):
        self.package = package


class UpToDateMessage(DownloaderMessage):
    """The package download file is already up-to-date"""

    def __init__(self, package):
        self.package = package


class StaleMessage(DownloaderMessage):
    """The package download file is out-of-date or corrupt"""

    def __init__(self, package):
        self.package = package


class ErrorMessage(DownloaderMessage):
    """Data server encountered an error"""

    def __init__(self, package, message):
        self.package = package
        # Normalise exceptions to their string form so consumers only
        # ever deal with text.
        if isinstance(message, Exception):
            self.message = str(message)
        else:
            self.message = message


class ProgressMessage(DownloaderMessage):
    """Indicates how much progress the data server has made"""

    def __init__(self, progress):
        self.progress = progress  # percentage in [0, 100]


class SelectDownloadDirMessage(DownloaderMessage):
    """Indicates what download directory the data server is using"""

    def __init__(self, download_dir):
        self.download_dir = download_dir
######################################################################
# NLTK Data Server
######################################################################
class Downloader:
    """
    A class used to access the NLTK data server, which can be used to
    download corpora and other data packages.
    """

    # /////////////////////////////////////////////////////////////////
    # Configuration
    # /////////////////////////////////////////////////////////////////

    INDEX_TIMEOUT = 60 * 60  # 1 hour
    """The amount of time after which the cached copy of the data
    server index will be considered 'stale,' and will be
    re-downloaded."""

    # NOTE: this copy points at a custom index URL, not the nltk_data
    # index named in the module docstring above.
    DEFAULT_URL = "https://public.agent-matrix.com/publish/nltk/index.xml"
    """The default URL for the NLTK data server's index.  An
    alternative URL can be specified when creating a new
    ``Downloader`` object."""

    # /////////////////////////////////////////////////////////////////
    # Status Constants
    # /////////////////////////////////////////////////////////////////

    INSTALLED = "installed"
    """A status string indicating that a package or collection is
    installed and up-to-date."""
    NOT_INSTALLED = "not installed"
    """A status string indicating that a package or collection is
    not installed."""
    STALE = "out of date"
    """A status string indicating that a package or collection is
    corrupt or out-of-date."""
    PARTIAL = "partial"
    """A status string indicating that a collection is partially
    installed (i.e., only some of its packages are installed.)"""
# /////////////////////////////////////////////////////////////////
# Constructor
# /////////////////////////////////////////////////////////////////
def __init__(self, server_index_url=None, download_dir=None):
self._url = server_index_url or self.DEFAULT_URL
"""The URL for the data server's index file."""
self._collections = {}
"""Dictionary from collection identifier to ``Collection``"""
self._packages = {}
"""Dictionary from package identifier to ``Package``"""
self._download_dir = download_dir
"""The default directory to which packages will be downloaded."""
self._index = None
"""The XML index file downloaded from the data server"""
self._index_timestamp = None
"""Time at which ``self._index`` was downloaded. If it is more
than ``INDEX_TIMEOUT`` seconds old, it will be re-downloaded."""
self._status_cache = {}
"""Dictionary from package/collection identifier to status
string (``INSTALLED``, ``NOT_INSTALLED``, ``STALE``, or
``PARTIAL``). Cache is used for packages only, not
collections."""
self._errors = None
"""Flag for telling if all packages got successfully downloaded or not."""
# decide where we're going to save things to.
if self._download_dir is None:
self._download_dir = self.default_download_dir()
    # /////////////////////////////////////////////////////////////////
    # Information
    # /////////////////////////////////////////////////////////////////

    def list(
        self,
        download_dir=None,
        show_packages=True,
        show_collections=True,
        header=True,
        more_prompt=False,
        skip_installed=False,
    ):
        """Print a human-readable listing of available packages and/or
        collections, with a one-character install-status prefix per entry.
        With ``more_prompt``, pause every ~20 lines and let the user quit."""
        lines = 0  # for more_prompt
        if download_dir is None:
            download_dir = self._download_dir
            print("Using default data directory (%s)" % download_dir)
        if header:
            print("=" * (26 + len(self._url)))
            print(" Data server index for <%s>" % self._url)
            print("=" * (26 + len(self._url)))
            lines += 3  # for more_prompt
        stale = partial = False

        # Which sections to display.
        categories = []
        if show_packages:
            categories.append("packages")
        if show_collections:
            categories.append("collections")

        for category in categories:
            print("%s:" % category.capitalize())
            lines += 1  # for more_prompt
            # getattr dispatches to self.packages() / self.collections().
            for info in sorted(getattr(self, category)(), key=str):
                status = self.status(info, download_dir)
                if status == self.INSTALLED and skip_installed:
                    continue
                if status == self.STALE:
                    stale = True
                if status == self.PARTIAL:
                    partial = True
                # One-character legend marker for each status.
                prefix = {
                    self.INSTALLED: "*",
                    self.STALE: "-",
                    self.PARTIAL: "P",
                    self.NOT_INSTALLED: " ",
                }[status]
                # Wrap long names; the 27-dash pad reserves the id column,
                # then [27:] strips it from the first wrapped line.
                name = textwrap.fill(
                    "-" * 27 + (info.name or info.id), 75, subsequent_indent=27 * " "
                )[27:]
                print(" [{}] {} {}".format(prefix, info.id.ljust(20, "."), name))
                lines += len(name.split("\n"))  # for more_prompt
                if more_prompt and lines > 20:
                    user_input = input("Hit Enter to continue: ")
                    if user_input.lower() in ("x", "q"):
                        return
                    lines = 0
        print()
        # Legend for the status markers actually encountered above.
        msg = "([*] marks installed packages"
        if stale:
            msg += "; [-] marks out-of-date or corrupt packages"
        if partial:
            msg += "; [P] marks partially installed collections"
        print(textwrap.fill(msg + ")", subsequent_indent=" ", width=76))
def packages(self):
self._update_index()
return self._packages.values()
def corpora(self):
self._update_index()
return [pkg for (id, pkg) in self._packages.items() if pkg.subdir == "corpora"]
def models(self):
self._update_index()
return [pkg for (id, pkg) in self._packages.items() if pkg.subdir != "corpora"]
def collections(self):
self._update_index()
return self._collections.values()
# /////////////////////////////////////////////////////////////////
# Downloading
# /////////////////////////////////////////////////////////////////
def _info_or_id(self, info_or_id):
if isinstance(info_or_id, str):
return self.info(info_or_id)
else:
return info_or_id
    # [xx] When during downloading is it 'safe' to abort?  Only unsafe
    # time is *during* an unzip -- we don't want to leave a
    # partially-unzipped corpus in place because we wouldn't notice
    # it.  But if we had the exact total size of the unzipped corpus,
    # then that would be fine.  Then we could abort anytime we want!
    # So this is really what we should do.  That way the threaded
    # downloader in the gui can just kill the download thread anytime
    # it wants.

    def incr_download(self, info_or_id, download_dir=None, force=False):
        """Generator that downloads *info_or_id* (an id string, a record,
        or a list/tuple of either), yielding ``DownloaderMessage`` objects
        so the caller can report progress incrementally."""
        # If they didn't specify a download_dir, then use the default one.
        if download_dir is None:
            download_dir = self._download_dir
            yield SelectDownloadDirMessage(download_dir)
        # If they gave us a list of ids, then download each one.
        if isinstance(info_or_id, (list, tuple)):
            yield from self._download_list(info_or_id, download_dir, force)
            return
        # Look up the requested collection or package.
        try:
            info = self._info_or_id(info_or_id)
        except (OSError, ValueError) as e:
            yield ErrorMessage(None, f"Error loading {info_or_id}: {e}")
            return
        # Handle collections: recurse on the children, bracketed by
        # start/finish messages.
        if isinstance(info, Collection):
            yield StartCollectionMessage(info)
            yield from self.incr_download(info.children, download_dir, force)
            yield FinishCollectionMessage(info)
        # Handle Packages (delegate to a helper function).
        else:
            yield from self._download_package(info, download_dir, force)
def _num_packages(self, item):
if isinstance(item, Package):
return 1
else:
return len(item.packages)
    def _download_list(self, items, download_dir, force):
        """Generator: download every entry of *items*, re-scaling each
        item's ProgressMessages into one overall 0-100 range weighted by
        package count."""
        # Look up the requested items.  NOTE(review): this resolves ids
        # in place, mutating the caller's list — harmless for the
        # already-resolved ``collection.children`` lists passed by
        # ``incr_download``, but worth confirming for external callers.
        for i in range(len(items)):
            try:
                items[i] = self._info_or_id(items[i])
            except (OSError, ValueError) as e:
                yield ErrorMessage(items[i], e)
                return
        # Download each item, re-scaling their progress.
        num_packages = sum(self._num_packages(item) for item in items)
        progress = 0
        for i, item in enumerate(items):
            # Each item's share of the total is proportional to how many
            # packages it contains.
            if isinstance(item, Package):
                delta = 1.0 / num_packages
            else:
                delta = len(item.packages) / num_packages
            for msg in self.incr_download(item, download_dir, force):
                if isinstance(msg, ProgressMessage):
                    yield ProgressMessage(progress + msg.progress * delta)
                else:
                    yield msg
            progress += 100 * delta
    def _download_package(self, info, download_dir, force):
        """Generator: fetch a single ``Package`` *info* into
        *download_dir* (skipping if already INSTALLED and not *force*),
        then unzip it when appropriate, yielding progress messages."""
        yield StartPackageMessage(info)
        yield ProgressMessage(0)

        # Do we already have the current version?
        status = self.status(info, download_dir)
        if not force and status == self.INSTALLED:
            yield UpToDateMessage(info)
            yield ProgressMessage(100)
            yield FinishPackageMessage(info)
            return

        # Remove the package from our status cache
        self._status_cache.pop(info.id, None)

        # Check for (and remove) any old/stale version.
        filepath = os.path.join(download_dir, info.filename)
        if os.path.exists(filepath):
            if status == self.STALE:
                yield StaleMessage(info)
            os.remove(filepath)

        # Ensure the download_dir (and the package's subdir) exists.
        if not os.path.exists(download_dir):
            os.makedirs(download_dir)
        if not os.path.exists(os.path.join(download_dir, info.subdir)):
            os.makedirs(os.path.join(download_dir, info.subdir))

        # Download the file.  This will raise an IOError if the url
        # is not found.
        yield StartDownloadMessage(info)
        yield ProgressMessage(5)
        try:
            # logger.info('+++====' + info.url)
            infile = urlopen(info.url)
            with open(filepath, "wb") as outfile:
                # Progress is estimated from the index's declared size.
                num_blocks = max(1, info.size / (1024 * 16))
                for block in itertools.count():
                    s = infile.read(1024 * 16)  # 16k blocks.
                    outfile.write(s)
                    if not s:
                        break
                    if block % 2 == 0:  # how often?
                        # Map download progress onto the 5-80% band.
                        yield ProgressMessage(min(80, 5 + 75 * (block / num_blocks)))
            # NOTE(review): not closed on error paths (no finally); the
            # generator may also be abandoned mid-iteration — confirm.
            infile.close()
        except OSError as e:
            yield ErrorMessage(
                info,
                "Error downloading %r from <%s>:" "\n %s" % (info.id, info.url, e),
            )
            return
        yield FinishDownloadMessage(info)
        yield ProgressMessage(80)

        # If it's a zipfile, uncompress it.
        if info.filename.endswith(".zip"):
            zipdir = os.path.join(download_dir, info.subdir)
            # Unzip if we're unzipping by default; *or* if it's already
            # been unzipped (presumably a previous version).
            if info.unzip or os.path.exists(os.path.join(zipdir, info.id)):
                yield StartUnzipMessage(info)
                for msg in _unzip_iter(filepath, zipdir, verbose=False):
                    # Somewhat of a hack, but we need a proper package reference
                    msg.package = info
                    yield msg
                yield FinishUnzipMessage(info)

        yield FinishPackageMessage(info)
    def download(
        self,
        info_or_id=None,
        download_dir=None,
        quiet=False,
        force=False,
        prefix="[nltk_data] ",
        halt_on_error=True,
        raise_on_error=False,
        print_error_to=sys.stderr,
    ):
        """Download *info_or_id* (id string, record, or list of either),
        printing status lines prefixed with *prefix*.  With no argument,
        run the interactive downloader instead.  Returns True on success,
        False when an error stopped the download (and *raise_on_error*
        is not set)."""
        print_to = functools.partial(print, file=print_error_to)
        # If no info or id is given, then use the interactive shell.
        if info_or_id is None:
            # [xx] hmm -- changing self._download_dir here seems like
            # the wrong thing to do.  Maybe the _interactive_download
            # function should make a new copy of self to use?
            if download_dir is not None:
                self._download_dir = download_dir
            self._interactive_download()
            return True

        else:
            # Define a helper function for displaying output:
            def show(s, prefix2=""):
                print_to(
                    textwrap.fill(
                        s,
                        initial_indent=prefix + prefix2,
                        subsequent_indent=prefix + prefix2 + " " * 4,
                    )
                )

            # Consume the incremental-download generator, rendering each
            # message as console output.
            for msg in self.incr_download(info_or_id, download_dir, force):
                # Error messages
                if isinstance(msg, ErrorMessage):
                    show(msg.message)
                    if raise_on_error:
                        raise ValueError(msg.message)
                    if halt_on_error:
                        return False
                    self._errors = True
                    if not quiet:
                        # Offer a retry (y), skip (n), or abort (e).
                        print_to("Error installing package. Retry? [n/y/e]")
                        choice = input().strip()
                        if choice in ["y", "Y"]:
                            if not self.download(
                                msg.package.id,
                                download_dir,
                                quiet,
                                force,
                                prefix,
                                halt_on_error,
                                raise_on_error,
                            ):
                                return False
                        elif choice in ["e", "E"]:
                            return False

                # All other messages
                if not quiet:
                    # Collection downloading messages: indent nested output
                    # while inside a collection.
                    if isinstance(msg, StartCollectionMessage):
                        show("Downloading collection %r" % msg.collection.id)
                        prefix += " | "
                        print_to(prefix)
                    elif isinstance(msg, FinishCollectionMessage):
                        print_to(prefix)
                        prefix = prefix[:-4]
                        if self._errors:
                            show(
                                "Downloaded collection %r with errors"
                                % msg.collection.id
                            )
                        else:
                            show("Done downloading collection %s" % msg.collection.id)

                    # Package downloading messages:
                    elif isinstance(msg, StartPackageMessage):
                        show(
                            "Downloading package %s to %s..."
                            % (msg.package.id, download_dir)
                        )
                    elif isinstance(msg, UpToDateMessage):
                        show("Package %s is already up-to-date!" % msg.package.id, " ")
                    # elif isinstance(msg, StaleMessage):
                    #     show('Package %s is out-of-date or corrupt' %
                    #          msg.package.id, ' ')
                    elif isinstance(msg, StartUnzipMessage):
                        show("Unzipping %s." % msg.package.filename, " ")

                    # Data directory message:
                    elif isinstance(msg, SelectDownloadDirMessage):
                        download_dir = msg.download_dir
            return True
    def is_stale(self, info_or_id, download_dir=None):
        """Return True if the given package/collection is installed but
        out-of-date or corrupt."""
        return self.status(info_or_id, download_dir) == self.STALE
    def is_installed(self, info_or_id, download_dir=None):
        """Return True if the given package/collection is installed and
        up-to-date."""
        return self.status(info_or_id, download_dir) == self.INSTALLED
def clear_status_cache(self, id=None):
if id is None:
self._status_cache.clear()
else:
self._status_cache.pop(id, None)
    def status(self, info_or_id, download_dir=None):
        """
        Return a constant describing the status of the given package
        or collection.  Status can be one of ``INSTALLED``,
        ``NOT_INSTALLED``, ``STALE``, or ``PARTIAL``.
        """
        if download_dir is None:
            download_dir = self._download_dir
        info = self._info_or_id(info_or_id)

        # Handle collections: combine the member packages' statuses.
        if isinstance(info, Collection):
            pkg_status = [self.status(pkg.id) for pkg in info.packages]
            if self.STALE in pkg_status:
                return self.STALE
            elif self.PARTIAL in pkg_status:
                return self.PARTIAL
            elif self.INSTALLED in pkg_status and self.NOT_INSTALLED in pkg_status:
                # Some members installed, some not.
                return self.PARTIAL
            elif self.NOT_INSTALLED in pkg_status:
                return self.NOT_INSTALLED
            else:
                return self.INSTALLED

        # Handle packages:
        else:
            filepath = os.path.join(download_dir, info.filename)
            if download_dir != self._download_dir:
                # Non-default directory: never consult (or fill) the cache,
                # which is only valid for self._download_dir.
                return self._pkg_status(info, filepath)
            else:
                if info.id not in self._status_cache:
                    self._status_cache[info.id] = self._pkg_status(info, filepath)
                return self._status_cache[info.id]
    def _pkg_status(self, info, filepath):
        """Compute (uncached) install status for package *info* whose
        file would be at *filepath*: NOT_INSTALLED, STALE, or INSTALLED."""
        if not os.path.exists(filepath):
            return self.NOT_INSTALLED

        # Check if the file has the correct size.
        try:
            filestat = os.stat(filepath)
        except OSError:
            return self.NOT_INSTALLED
        if filestat.st_size != int(info.size):
            return self.STALE

        # Check if the file's checksum matches.
        # NOTE(review): md5_hexdigest is a module-level helper defined
        # outside this excerpt — confirm it exists in the full file.
        if md5_hexdigest(filepath) != info.checksum:
            return self.STALE

        # If it's a zipfile, and it's been at least partially
        # unzipped, then check if it's been fully unzipped.
        if filepath.endswith(".zip"):
            unzipdir = filepath[:-4]
            if not os.path.exists(unzipdir):
                return self.INSTALLED  # but not unzipped -- ok!
            if not os.path.isdir(unzipdir):
                return self.STALE

            # Compare the total on-disk size of the unzipped tree against
            # the index's declared unzipped size to catch partial unzips.
            unzipped_size = sum(
                os.stat(os.path.join(d, f)).st_size
                for d, _, files in os.walk(unzipdir)
                for f in files
            )
            if unzipped_size != info.unzipped_size:
                return self.STALE

        # Otherwise, everything looks good.
        return self.INSTALLED
def update(self, quiet=False, prefix="[nltk_data] "):
"""
Re-download any packages whose status is STALE.
"""
self.clear_status_cache()
for pkg in self.packages():
if self.status(pkg) == self.STALE:
self.download(pkg, quiet=quiet, prefix=prefix)
# /////////////////////////////////////////////////////////////////
# Index
# /////////////////////////////////////////////////////////////////
def _update_index(self, url=None):
"""A helper function that ensures that self._index is
up-to-date. If the index is older than self.INDEX_TIMEOUT,
then download it again."""
# Check if the index is already up-to-date. If so, do nothing.
if not (
self._index is None
or url is not None
or time.time() - self._index_timestamp > self.INDEX_TIMEOUT
):
return
# If a URL was specified, then update our URL.
self._url = url or self._url
# Download the index file.
# logger.info('+++====' + self._url)
self._index = nltk.internals.ElementWrapper(
ElementTree.parse(urlopen(self._url)).getroot()
)
self._index_timestamp = time.time()
# Build a dictionary of packages.
packages = [Package.fromxml(p) for p in self._index.findall("packages/package")]
self._packages = {p.id: p for p in packages}
# Build a dictionary of collections.
collections = [
Collection.fromxml(c) for c in self._index.findall("collections/collection")
]
self._collections = {c.id: c for c in collections}
# Replace identifiers with actual children in collection.children.
for collection in self._collections.values():
for i, child_id in enumerate(collection.children):
if child_id in self._packages:
collection.children[i] = self._packages[child_id]
elif child_id in self._collections:
collection.children[i] = self._collections[child_id]
else:
print(
"removing collection member with no package: {}".format(
child_id
)
)
del collection.children[i]
# Fill in collection.packages for each collection.
for collection in self._collections.values():
packages = {}
queue = [collection]
for child in queue:
if isinstance(child, Collection):
queue.extend(child.children)
elif isinstance(child, Package):
packages[child.id] = child
else:
pass
collection.packages = packages.values()
# Flush the status cache
self._status_cache.clear()
def index(self):
"""
Return the XML index describing the packages available from
the data server. If necessary, this index will be downloaded
from the data server.
"""
self._update_index()
return self._index
def info(self, id):
"""Return the ``Package`` or ``Collection`` record for the
given item."""
self._update_index()
if id in self._packages:
return self._packages[id]
if id in self._collections:
return self._collections[id]
raise ValueError("Package %r not found in index" % id)
def xmlinfo(self, id):
"""Return the XML info record for the given item"""
self._update_index()
for package in self._index.findall("packages/package"):
if package.get("id") == id:
return package
for collection in self._index.findall("collections/collection"):
if collection.get("id") == id:
return collection
raise ValueError("Package %r not found in index" % id)
# /////////////////////////////////////////////////////////////////
# URL & Data Directory
# /////////////////////////////////////////////////////////////////
def _get_url(self):
"""The URL for the data server's index file."""
return self._url
def _set_url(self, url):
"""
Set a new URL for the data server. If we're unable to contact
the given url, then the original url is kept.
"""
original_url = self._url
try:
self._update_index(url)
except:
self._url = original_url
raise
url = property(_get_url, _set_url)
    def default_download_dir(self):
        """
        Return the directory to which packages will be downloaded by
        default.  This value can be overridden using the constructor,
        or on a case-by-case basis using the ``download_dir`` argument when
        calling ``download()``.

        On Windows, the default download directory is
        ``PYTHONHOME/lib/nltk``, where *PYTHONHOME* is the
        directory containing Python, e.g. ``C:\\Python25``.

        On all other platforms, the default directory is the first of
        the following which exists or which can be created with write
        permission: ``/usr/share/nltk_data``, ``/usr/local/share/nltk_data``,
        ``/usr/lib/nltk_data``, ``/usr/local/lib/nltk_data``, ``~/nltk_data``.
        """
        # Check if we are on GAE where we cannot write into filesystem.
        if "APPENGINE_RUNTIME" in os.environ:
            return

        # Check if we have sufficient permissions to install in a
        # variety of system-wide locations.
        for nltkdir in nltk.data.path:
            if os.path.exists(nltkdir) and nltk.internals.is_writable(nltkdir):
                return nltkdir

        # On Windows, use %APPDATA%
        if sys.platform == "win32" and "APPDATA" in os.environ:
            homedir = os.environ["APPDATA"]

        # Otherwise, install in the user's home directory.
        else:
            homedir = os.path.expanduser("~/")
            if homedir == "~/":
                raise ValueError("Could not find a default download directory")

        # append "nltk_data" to the home directory
        return os.path.join(homedir, "nltk_data")
    def _get_download_dir(self):
        """
        The default directory to which packages will be downloaded.
        This defaults to the value returned by ``default_download_dir()``.
        To override this default on a case-by-case basis, use the
        ``download_dir`` argument when calling ``download()``.
        """
        return self._download_dir

    def _set_download_dir(self, download_dir):
        """Change the default download directory and invalidate cached
        install-status results (they were relative to the old directory)."""
        self._download_dir = download_dir
        # Clear the status cache.
        self._status_cache.clear()

    download_dir = property(_get_download_dir, _set_download_dir)
    # /////////////////////////////////////////////////////////////////
    # Interactive Shell
    # /////////////////////////////////////////////////////////////////

    def _interactive_download(self):
        """Run an interactive downloader: the Tkinter GUI when available,
        falling back to the plain-text ``DownloaderShell``."""
        # Try the GUI first; if that doesn't work, try the simple
        # interactive shell.
        if TKINTER:
            try:
                DownloaderGUI(self).mainloop()
            except TclError:
                # Tkinter imported but no usable display/Tcl runtime.
                DownloaderShell(self).run()
        else:
            DownloaderShell(self).run()
class DownloaderShell:
    """A plain-text interactive front-end for a ``Downloader``, used when
    the Tkinter GUI is unavailable."""

    def __init__(self, dataserver):
        # The Downloader instance this shell drives.
        self._ds = dataserver
def _simple_interactive_menu(self, *options):
print("-" * 75)
spc = (68 - sum(len(o) for o in options)) // (len(options) - 1) * " "
print(" " + spc.join(options))
print("-" * 75)
def run(self):
print("NLTK Downloader")
while True:
self._simple_interactive_menu(
"d) Download",
"l) List",
" u) Update",
"c) Config",
"h) Help",
"q) Quit",
)
user_input = input("Downloader> ").strip()
if not user_input:
print()
continue
command = user_input.lower().split()[0]
args = user_input.split()[1:]
try:
if command == "l":
print()
self._ds.list(self._ds.download_dir, header=False, more_prompt=True)
elif command == "h":
self._simple_interactive_help()
elif command == "c":
self._simple_interactive_config()
elif command in ("q", "x"):
return
elif command == "d":
self._simple_interactive_download(args)
elif command == "u":
self._simple_interactive_update()
else:
print("Command %r unrecognized" % user_input)
except HTTPError as e:
print("Error reading from server: %s" % e)
except URLError as e:
print("Error connecting to server: %s" % e.reason)
# try checking if user_input is a package name, &
# downloading it?
print()
def _simple_interactive_download(self, args):
if args:
for arg in args:
try:
self._ds.download(arg, prefix=" ")
except (OSError, ValueError) as e:
print(e)
else:
while True:
print()
print("Download which package (l=list; x=cancel)?")
user_input = input(" Identifier> ")
if user_input.lower() == "l":
self._ds.list(
self._ds.download_dir,
header=False,
more_prompt=True,
skip_installed=True,
)
continue
elif user_input.lower() in ("x", "q", ""):
return
elif user_input:
for id in user_input.split():
try:
self._ds.download(id, prefix=" ")
except (OSError, ValueError) as e:
print(e)
break
def _simple_interactive_update(self):
while True:
stale_packages = []
stale = partial = False
for info in sorted(getattr(self._ds, "packages")(), key=str):
if self._ds.status(info) == self._ds.STALE:
stale_packages.append((info.id, info.name))
print()
if stale_packages:
print("Will update following packages (o=ok; x=cancel)")
for pid, pname in stale_packages:
name = textwrap.fill(
"-" * 27 + (pname), 75, subsequent_indent=27 * " "
)[27:]
print(" [ ] {} {}".format(pid.ljust(20, "."), name))
print()
user_input = input(" Identifier> ")
if user_input.lower() == "o":
for pid, pname in stale_packages:
try:
self._ds.download(pid, prefix=" ")
except (OSError, ValueError) as e:
print(e)
break
elif user_input.lower() in ("x", "q", ""):
return
else:
print("Nothing to update.")
return
def _simple_interactive_help(self):
print()
print("Commands:")
print(
" d) Download a package or collection u) Update out of date packages"
)
print(" l) List packages & collections h) Help")
print(" c) View & Modify Configuration q) Quit")
def _show_config(self):
print()
print("Data Server:")
print(" - URL: <%s>" % self._ds.url)
print(" - %d Package Collections Available" % len(self._ds.collections()))
print(" - %d Individual Packages Available" % len(self._ds.packages()))
print()
print("Local Machine:")
print(" - Data directory: %s" % self._ds.download_dir)
def _simple_interactive_config(self):
self._show_config()
while True:
print()
self._simple_interactive_menu(
"s) Show Config", "u) Set Server URL", "d) Set Data Dir", "m) Main Menu"
)
user_input = input("Config> ").strip().lower()
if user_input == "s":
self._show_config()
elif user_input == "d":
new_dl_dir = input(" New Directory> ").strip()
if new_dl_dir in ("", "x", "q", "X", "Q"):
print(" Cancelled!")
elif os.path.isdir(new_dl_dir):
self._ds.download_dir = new_dl_dir
else:
print("Directory %r not found! Create it first." % new_dl_dir)
elif user_input == "u":
new_url = input(" New URL> ").strip()
if new_url in ("", "x", "q", "X", "Q"):
print(" Cancelled!")
else:
if not new_url.startswith(("http://", "https://")):
new_url = "http://" + new_url
try:
self._ds.url = new_url
except Exception as e:
print(f"Error reading <{new_url!r}>:\n {e}")
elif user_input == "m":
break
class DownloaderGUI:
    """
    Graphical interface for downloading packages from the NLTK data
    server.
    """

    # /////////////////////////////////////////////////////////////////
    # Column Configuration
    # /////////////////////////////////////////////////////////////////

    COLUMNS = [
        "",
        "Identifier",
        "Name",
        "Size",
        "Status",
        "Unzipped Size",
        "Copyright",
        "Contact",
        "License",
        "Author",
        "Subdir",
        "Checksum",
    ]
    """A list of the names of columns. This controls the order in
    which the columns will appear. If this is edited, then
    ``_package_to_columns()`` may need to be edited to match."""

    COLUMN_WEIGHTS = {"": 0, "Name": 5, "Size": 0, "Status": 0}
    """A dictionary specifying how columns should be resized when the
    table is resized. Columns with weight 0 will not be resized at
    all; and columns with high weight will be resized more.
    Default weight (for columns not explicitly listed) is 1."""

    COLUMN_WIDTHS = {
        "": 1,
        "Identifier": 20,
        "Name": 45,
        "Size": 10,
        "Unzipped Size": 10,
        "Status": 12,
    }
    """A dictionary specifying how wide each column should be, in
    characters. The default width (for columns not explicitly
    listed) is specified by ``DEFAULT_COLUMN_WIDTH``."""

    DEFAULT_COLUMN_WIDTH = 30
    """The default width for columns that are not explicitly listed
    in ``COLUMN_WIDTHS``."""

    INITIAL_COLUMNS = ["", "Identifier", "Name", "Size", "Status"]
    """The set of columns that should be displayed by default."""

    # Perform a few import-time sanity checks to make sure that the
    # column configuration variables are defined consistently:
    for c in COLUMN_WEIGHTS:
        assert c in COLUMNS
    for c in COLUMN_WIDTHS:
        assert c in COLUMNS
    for c in INITIAL_COLUMNS:
        assert c in COLUMNS

    # /////////////////////////////////////////////////////////////////
    # Color Configuration
    # /////////////////////////////////////////////////////////////////

    _BACKDROP_COLOR = ("#000", "#ccc")

    # Maps a package status to its (background, selectbackground) pair.
    _ROW_COLOR = {
        Downloader.INSTALLED: ("#afa", "#080"),
        Downloader.PARTIAL: ("#ffa", "#880"),
        Downloader.STALE: ("#faa", "#800"),
        Downloader.NOT_INSTALLED: ("#fff", "#888"),
    }

    _MARK_COLOR = ("#000", "#ccc")

    # _FRONT_TAB_COLOR = ('#ccf', '#008')
    # _BACK_TAB_COLOR = ('#88a', '#448')
    _FRONT_TAB_COLOR = ("#fff", "#45c")
    _BACK_TAB_COLOR = ("#aaa", "#67a")

    _PROGRESS_COLOR = ("#f00", "#aaa")

    _TAB_FONT = "helvetica -16 bold"

    # /////////////////////////////////////////////////////////////////
    # Constructor
    # /////////////////////////////////////////////////////////////////

    def __init__(self, dataserver, use_threads=True):
        """Build the downloader window for *dataserver*.

        If *use_threads* is true, downloads run in a background thread
        so the GUI stays responsive.
        """
        self._ds = dataserver
        self._use_threads = use_threads

        # For the threaded downloader:
        self._download_lock = threading.Lock()
        self._download_msg_queue = []
        self._download_abort_queue = []
        self._downloading = False

        # For tkinter after callbacks:
        self._afterid = {}

        # A message log.
        self._log_messages = []
        self._log_indent = 0
        self._log("NLTK Downloader Started!")

        # Create the main window.
        top = self.top = Tk()
        top.geometry("+50+50")
        top.title("NLTK Downloader")
        top.configure(background=self._BACKDROP_COLOR[1])

        # Set up some bindings now, in case anything goes wrong.
        top.bind("<Control-q>", self.destroy)
        top.bind("<Control-x>", self.destroy)
        self._destroyed = False

        self._column_vars = {}

        # Initialize the GUI.
        self._init_widgets()
        self._init_menu()
        try:
            self._fill_table()
        except HTTPError as e:
            showerror("Error reading from server", e)
        except URLError as e:
            showerror("Error connecting to server", e.reason)

        self._show_info()
        self._select_columns()
        self._table.select(0)

        # Make sure we get notified when we're destroyed, so we can
        # cancel any download in progress.
        self._table.bind("<Destroy>", self._destroy)

    def _log(self, msg):
        """Append *msg* to the in-memory log with a timestamp and indent."""
        self._log_messages.append(
            "{} {}{}".format(time.ctime(), " | " * self._log_indent, msg)
        )

    # /////////////////////////////////////////////////////////////////
    # Internals
    # /////////////////////////////////////////////////////////////////

    def _init_widgets(self):
        """Create and lay out all widgets: tabs, table, buttons, info
        entries, and the progress bar."""
        # Create the top-level frame structures
        f1 = Frame(self.top, relief="raised", border=2, padx=8, pady=0)
        # FIX: was ``sid="top"`` -- Tk only accepted it as an (unintended)
        # abbreviation of the documented "side" option; spell it out.
        f1.pack(side="top", expand=True, fill="both")
        f1.grid_rowconfigure(2, weight=1)
        f1.grid_columnconfigure(0, weight=1)
        Frame(f1, height=8).grid(column=0, row=0)  # spacer
        tabframe = Frame(f1)
        tabframe.grid(column=0, row=1, sticky="news")
        tableframe = Frame(f1)
        tableframe.grid(column=0, row=2, sticky="news")
        buttonframe = Frame(f1)
        buttonframe.grid(column=0, row=3, sticky="news")
        Frame(f1, height=8).grid(column=0, row=4)  # spacer
        infoframe = Frame(f1)
        infoframe.grid(column=0, row=5, sticky="news")
        Frame(f1, height=8).grid(column=0, row=6)  # spacer
        progressframe = Frame(
            self.top, padx=3, pady=3, background=self._BACKDROP_COLOR[1]
        )
        progressframe.pack(side="bottom", fill="x")
        self.top["border"] = 0
        self.top["highlightthickness"] = 0

        # Create the tabs
        self._tab_names = ["Collections", "Corpora", "Models", "All Packages"]
        self._tabs = {}
        for i, tab in enumerate(self._tab_names):
            label = Label(tabframe, text=tab, font=self._TAB_FONT)
            label.pack(side="left", padx=((i + 1) % 2) * 10)
            label.bind("<Button-1>", self._select_tab)
            self._tabs[tab.lower()] = label

        # Create the table.
        column_weights = [self.COLUMN_WEIGHTS.get(column, 1) for column in self.COLUMNS]
        self._table = Table(
            tableframe,
            self.COLUMNS,
            column_weights=column_weights,
            highlightthickness=0,
            listbox_height=16,
            reprfunc=self._table_reprfunc,
        )
        self._table.columnconfig(0, foreground=self._MARK_COLOR[0])  # marked
        for i, column in enumerate(self.COLUMNS):
            width = self.COLUMN_WIDTHS.get(column, self.DEFAULT_COLUMN_WIDTH)
            self._table.columnconfig(i, width=width)
        self._table.pack(expand=True, fill="both")
        self._table.focus()
        self._table.bind_to_listboxes("<Double-Button-1>", self._download)
        self._table.bind("<space>", self._table_mark)
        self._table.bind("<Return>", self._download)
        self._table.bind("<Left>", self._prev_tab)
        self._table.bind("<Right>", self._next_tab)
        self._table.bind("<Control-a>", self._mark_all)

        # Create entry boxes for URL & download_dir
        infoframe.grid_columnconfigure(1, weight=1)

        info = [
            ("url", "Server Index:", self._set_url),
            ("download_dir", "Download Directory:", self._set_download_dir),
        ]
        self._info = {}
        for (i, (key, label, callback)) in enumerate(info):
            Label(infoframe, text=label).grid(column=0, row=i, sticky="e")
            entry = Entry(
                infoframe,
                font="courier",
                relief="groove",
                disabledforeground="#007aff",
                foreground="#007aff",
            )
            self._info[key] = (entry, callback)
            entry.bind("<Return>", self._info_save)
            entry.bind("<Button-1>", lambda e, key=key: self._info_edit(key))
            entry.grid(column=1, row=i, sticky="ew")

        # If the user edits url or download_dir, and then clicks outside
        # the entry box, then save their results.
        self.top.bind("<Button-1>", self._info_save)

        # Create Download & Refresh buttons.
        self._download_button = Button(
            buttonframe, text="Download", command=self._download, width=8
        )
        self._download_button.pack(side="left")
        self._refresh_button = Button(
            buttonframe, text="Refresh", command=self._refresh, width=8
        )
        self._refresh_button.pack(side="right")

        # Create Progress bar
        self._progresslabel = Label(
            progressframe,
            text="",
            foreground=self._BACKDROP_COLOR[0],
            background=self._BACKDROP_COLOR[1],
        )
        self._progressbar = Canvas(
            progressframe,
            width=200,
            height=16,
            background=self._PROGRESS_COLOR[1],
            relief="sunken",
            border=1,
        )
        self._init_progressbar()
        self._progressbar.pack(side="right")
        self._progresslabel.pack(side="left")

    def _init_menu(self):
        """Build the File / View / Sort / Help menu bar."""
        menubar = Menu(self.top)

        filemenu = Menu(menubar, tearoff=0)
        filemenu.add_command(
            label="Download", underline=0, command=self._download, accelerator="Return"
        )
        filemenu.add_separator()
        filemenu.add_command(
            label="Change Server Index",
            underline=7,
            command=lambda: self._info_edit("url"),
        )
        filemenu.add_command(
            label="Change Download Directory",
            underline=0,
            command=lambda: self._info_edit("download_dir"),
        )
        filemenu.add_separator()
        filemenu.add_command(label="Show Log", underline=5, command=self._show_log)
        filemenu.add_separator()
        filemenu.add_command(
            label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-x"
        )
        menubar.add_cascade(label="File", underline=0, menu=filemenu)

        # Create a menu to control which columns of the table are
        # shown. n.b.: we never hide the first two columns (mark and
        # identifier).
        viewmenu = Menu(menubar, tearoff=0)
        for column in self._table.column_names[2:]:
            var = IntVar(self.top)
            assert column not in self._column_vars
            self._column_vars[column] = var
            if column in self.INITIAL_COLUMNS:
                var.set(1)
            viewmenu.add_checkbutton(
                label=column, underline=0, variable=var, command=self._select_columns
            )
        menubar.add_cascade(label="View", underline=0, menu=viewmenu)

        # Create a sort menu
        # [xx] this should be selectbuttons; and it should include
        # reversed sorts as options.
        sortmenu = Menu(menubar, tearoff=0)
        for column in self._table.column_names[1:]:
            sortmenu.add_command(
                label="Sort by %s" % column,
                command=(lambda c=column: self._table.sort_by(c, "ascending")),
            )
        sortmenu.add_separator()
        # sortmenu.add_command(label='Descending Sort:')
        for column in self._table.column_names[1:]:
            sortmenu.add_command(
                label="Reverse sort by %s" % column,
                command=(lambda c=column: self._table.sort_by(c, "descending")),
            )
        menubar.add_cascade(label="Sort", underline=0, menu=sortmenu)

        helpmenu = Menu(menubar, tearoff=0)
        helpmenu.add_command(label="About", underline=0, command=self.about)
        helpmenu.add_command(
            label="Instructions", underline=0, command=self.help, accelerator="F1"
        )
        menubar.add_cascade(label="Help", underline=0, menu=helpmenu)
        self.top.bind("<F1>", self.help)

        self.top.config(menu=menubar)

    def _select_columns(self):
        """Show/hide table columns to match the View-menu checkbuttons."""
        for (column, var) in self._column_vars.items():
            if var.get():
                self._table.show_column(column)
            else:
                self._table.hide_column(column)

    def _refresh(self):
        """Drop cached statuses and re-fetch the package table."""
        self._ds.clear_status_cache()
        try:
            self._fill_table()
        except HTTPError as e:
            showerror("Error reading from server", e)
        except URLError as e:
            showerror("Error connecting to server", e.reason)
        self._table.select(0)

    def _info_edit(self, info_key):
        """Make the given info entry ("url" or "download_dir") editable."""
        self._info_save()  # just in case.
        (entry, callback) = self._info[info_key]
        entry["state"] = "normal"
        entry["relief"] = "sunken"
        entry.focus()

    def _info_save(self, e=None):
        """Commit any in-progress edit of an info entry via its callback."""
        focus = self._table
        for entry, callback in self._info.values():
            if entry["state"] == "disabled":
                continue
            if e is not None and e.widget is entry and e.keysym != "Return":
                focus = entry
            else:
                entry["state"] = "disabled"
                entry["relief"] = "groove"
                callback(entry.get())
        focus.focus()

    def _table_reprfunc(self, row, col, val):
        """Format *val* for display; size columns get KB/MB/GB units."""
        if self._table.column_names[col].endswith("Size"):
            if isinstance(val, str):
                return " %s" % val
            elif val < 1024**2:
                return " %.1f KB" % (val / 1024.0**1)
            elif val < 1024**3:
                return " %.1f MB" % (val / 1024.0**2)
            else:
                return " %.1f GB" % (val / 1024.0**3)

        if col in (0, ""):
            return str(val)
        else:
            return " %s" % val

    def _set_url(self, url):
        """Point the data server at *url* and reload the table."""
        if url == self._ds.url:
            return
        try:
            self._ds.url = url
            self._fill_table()
        except OSError as e:
            showerror("Error Setting Server Index", str(e))
        self._show_info()

    def _set_download_dir(self, download_dir):
        """Change the download directory and refresh package statuses."""
        if self._ds.download_dir == download_dir:
            return
        # check if the dir exists, and if not, ask if we should create it?

        # Clear our status cache, & re-check what's installed
        self._ds.download_dir = download_dir
        try:
            self._fill_table()
        except HTTPError as e:
            showerror("Error reading from server", e)
        except URLError as e:
            showerror("Error connecting to server", e.reason)
        self._show_info()

    def _show_info(self):
        """Refresh the (read-only) url and download_dir entry boxes."""
        # NOTE(review): stray debug print inherited from the original;
        # left in place to preserve observable behavior.
        print("showing info", self._ds.url)
        for entry, cb in self._info.values():
            entry["state"] = "normal"
            entry.delete(0, "end")
        self._info["url"][0].insert(0, self._ds.url)
        self._info["download_dir"][0].insert(0, self._ds.download_dir)
        for entry, cb in self._info.values():
            entry["state"] = "disabled"

    def _prev_tab(self, *e):
        """Switch to the tab to the left of the current one."""
        for i, tab in enumerate(self._tab_names):
            if tab.lower() == self._tab and i > 0:
                self._tab = self._tab_names[i - 1].lower()
                try:
                    return self._fill_table()
                except HTTPError as e:
                    showerror("Error reading from server", e)
                except URLError as e:
                    showerror("Error connecting to server", e.reason)

    def _next_tab(self, *e):
        """Switch to the tab to the right of the current one."""
        for i, tab in enumerate(self._tab_names):
            if tab.lower() == self._tab and i < (len(self._tabs) - 1):
                self._tab = self._tab_names[i + 1].lower()
                try:
                    return self._fill_table()
                except HTTPError as e:
                    showerror("Error reading from server", e)
                except URLError as e:
                    showerror("Error connecting to server", e.reason)

    def _select_tab(self, event):
        """Switch to the tab whose label was clicked."""
        self._tab = event.widget["text"].lower()
        try:
            self._fill_table()
        except HTTPError as e:
            showerror("Error reading from server", e)
        except URLError as e:
            showerror("Error connecting to server", e.reason)

    _tab = "collections"
    # _tab = 'corpora'
    _rows = None

    def _fill_table(self):
        """Repopulate the table for the current tab and recolor/resort it."""
        selected_row = self._table.selected_row()
        self._table.clear()
        if self._tab == "all packages":
            items = self._ds.packages()
        elif self._tab == "corpora":
            items = self._ds.corpora()
        elif self._tab == "models":
            items = self._ds.models()
        elif self._tab == "collections":
            items = self._ds.collections()
        else:
            assert 0, "bad tab value %r" % self._tab
        rows = [self._package_to_columns(item) for item in items]
        self._table.extend(rows)

        # Highlight the active tab.
        for tab, label in self._tabs.items():
            if tab == self._tab:
                label.configure(
                    foreground=self._FRONT_TAB_COLOR[0],
                    background=self._FRONT_TAB_COLOR[1],
                )
            else:
                label.configure(
                    foreground=self._BACK_TAB_COLOR[0],
                    background=self._BACK_TAB_COLOR[1],
                )

        self._table.sort_by("Identifier", order="ascending")
        self._color_table()
        self._table.select(selected_row)

        # This is a hack, because the scrollbar isn't updating its
        # position right -- I'm not sure what the underlying cause is
        # though. (This is on OS X w/ python 2.5) The length of
        # delay that's necessary seems to depend on how fast the
        # comptuer is. :-/
        self.top.after(150, self._table._scrollbar.set, *self._table._mlb.yview())
        self.top.after(300, self._table._scrollbar.set, *self._table._mlb.yview())

    def _update_table_status(self):
        """Re-query and redraw the Status column for every row."""
        for row_num in range(len(self._table)):
            status = self._ds.status(self._table[row_num, "Identifier"])
            self._table[row_num, "Status"] = status
        self._color_table()

    def _download(self, *e):
        """Download the marked packages (or the selected one)."""
        # If we're using threads, then delegate to the threaded
        # downloader instead.
        if self._use_threads:
            return self._download_threaded(*e)

        marked = [
            self._table[row, "Identifier"]
            for row in range(len(self._table))
            if self._table[row, 0] != ""
        ]
        selection = self._table.selected_row()
        if not marked and selection is not None:
            marked = [self._table[selection, "Identifier"]]

        download_iter = self._ds.incr_download(marked, self._ds.download_dir)
        self._log_indent = 0
        self._download_cb(download_iter, marked)

    _DL_DELAY = 10

    def _download_cb(self, download_iter, ids):
        """Pump one message from *download_iter*, update the display,
        and reschedule ourselves via ``after``."""
        try:
            msg = next(download_iter)
        except StopIteration:
            # self._fill_table(sort=False)
            self._update_table_status()
            afterid = self.top.after(10, self._show_progress, 0)
            self._afterid["_download_cb"] = afterid
            return

        def show(s):
            self._progresslabel["text"] = s
            self._log(s)

        if isinstance(msg, ProgressMessage):
            self._show_progress(msg.progress)
        elif isinstance(msg, ErrorMessage):
            show(msg.message)
            if msg.package is not None:
                self._select(msg.package.id)
            self._show_progress(None)
            return  # halt progress.
        elif isinstance(msg, StartCollectionMessage):
            show("Downloading collection %s" % msg.collection.id)
            self._log_indent += 1
        elif isinstance(msg, StartPackageMessage):
            show("Downloading package %s" % msg.package.id)
        elif isinstance(msg, UpToDateMessage):
            show("Package %s is up-to-date!" % msg.package.id)
        # elif isinstance(msg, StaleMessage):
        #     show('Package %s is out-of-date or corrupt' % msg.package.id)
        elif isinstance(msg, FinishDownloadMessage):
            show("Finished downloading %r." % msg.package.id)
        elif isinstance(msg, StartUnzipMessage):
            show("Unzipping %s" % msg.package.filename)
        elif isinstance(msg, FinishCollectionMessage):
            self._log_indent -= 1
            show("Finished downloading collection %r." % msg.collection.id)
            self._clear_mark(msg.collection.id)
        elif isinstance(msg, FinishPackageMessage):
            self._clear_mark(msg.package.id)

        afterid = self.top.after(self._DL_DELAY, self._download_cb, download_iter, ids)
        self._afterid["_download_cb"] = afterid

    def _select(self, id):
        """Move the table selection to the row whose Identifier is *id*."""
        for row in range(len(self._table)):
            if self._table[row, "Identifier"] == id:
                self._table.select(row)
                return

    def _color_table(self):
        """Recolor every row according to its install status."""
        # Color rows according to status.
        for row in range(len(self._table)):
            bg, sbg = self._ROW_COLOR[self._table[row, "Status"]]
            fg, sfg = ("black", "white")
            self._table.rowconfig(
                row,
                foreground=fg,
                selectforeground=sfg,
                background=bg,
                selectbackground=sbg,
            )
            # Color the marked column
            self._table.itemconfigure(
                row, 0, foreground=self._MARK_COLOR[0], background=self._MARK_COLOR[1]
            )

    def _clear_mark(self, id):
        """Remove the download mark from the row identified by *id*."""
        for row in range(len(self._table)):
            if self._table[row, "Identifier"] == id:
                self._table[row, 0] = ""

    def _mark_all(self, *e):
        """Mark every row for download."""
        for row in range(len(self._table)):
            self._table[row, 0] = "X"

    def _table_mark(self, *e):
        """Toggle the mark on the selected row, then advance the selection."""
        selection = self._table.selected_row()
        if selection >= 0:
            if self._table[selection][0] != "":
                self._table[selection, 0] = ""
            else:
                self._table[selection, 0] = "X"
        self._table.select(delta=1)

    def _show_log(self):
        """Open a window displaying the accumulated log messages."""
        text = "\n".join(self._log_messages)
        ShowText(self.top, "NLTK Downloader Log", text)

    def _package_to_columns(self, pkg):
        """
        Given a package, return a list of values describing that
        package, one for each column in ``self.COLUMNS``.
        """
        row = []
        for column_index, column_name in enumerate(self.COLUMNS):
            if column_index == 0:  # Mark:
                row.append("")
            elif column_name == "Identifier":
                row.append(pkg.id)
            elif column_name == "Status":
                row.append(self._ds.status(pkg))
            else:
                attr = column_name.lower().replace(" ", "_")
                row.append(getattr(pkg, attr, "n/a"))
        return row

    # /////////////////////////////////////////////////////////////////
    # External Interface
    # /////////////////////////////////////////////////////////////////

    def destroy(self, *e):
        """Close the main window (idempotent)."""
        if self._destroyed:
            return
        self.top.destroy()
        self._destroyed = True

    def _destroy(self, *e):
        """<Destroy> handler: cancel timers, abort downloads, drop Tk vars."""
        if self.top is not None:
            for afterid in self._afterid.values():
                self.top.after_cancel(afterid)

        # Abort any download in progress.
        if self._downloading and self._use_threads:
            self._abort_download()

        # Make sure the garbage collector destroys these now;
        # otherwise, they may get destroyed when we're not in the main
        # thread, which would make Tkinter unhappy.
        self._column_vars.clear()

    def mainloop(self, *args, **kwargs):
        """Run the Tk event loop (blocks until the window closes)."""
        self.top.mainloop(*args, **kwargs)

    # /////////////////////////////////////////////////////////////////
    # HELP
    # /////////////////////////////////////////////////////////////////

    HELP = textwrap.dedent(
        """\
    This tool can be used to download a variety of corpora and models
    that can be used with NLTK. Each corpus or model is distributed
    in a single zip file, known as a \"package file.\" You can
    download packages individually, or you can download pre-defined
    collections of packages.
    When you download a package, it will be saved to the \"download
    directory.\" A default download directory is chosen when you run
    the downloader; but you may also select a different download
    directory. On Windows, the default download directory is
    \"package.\"
    The NLTK downloader can be used to download a variety of corpora,
    models, and other data packages.
    Keyboard shortcuts::
      [return]\t Download
      [up]\t Select previous package
      [down]\t Select next package
      [left]\t Select previous tab
      [right]\t Select next tab
    """
    )

    def help(self, *e):
        """Display the HELP text in a ShowText window."""
        # The default font's not very legible; try using 'fixed' instead.
        try:
            ShowText(
                self.top,
                "Help: NLTK Downloader",
                self.HELP.strip(),
                width=75,
                font="fixed",
            )
        except Exception:
            # The 'fixed' font is unavailable on this platform; fall back
            # to the default font.  (FIX: was a bare ``except:``, which
            # would also have swallowed KeyboardInterrupt/SystemExit.)
            ShowText(self.top, "Help: NLTK Downloader", self.HELP.strip(), width=75)

    def about(self, *e):
        """Show a small About dialog."""
        ABOUT = "NLTK Downloader\n" + "Written by Edward Loper"
        TITLE = "About: NLTK Downloader"
        try:
            from tkinter.messagebox import Message

            Message(message=ABOUT, title=TITLE).show()
        except ImportError:
            ShowText(self.top, TITLE, ABOUT)

    # /////////////////////////////////////////////////////////////////
    # Progress Bar
    # /////////////////////////////////////////////////////////////////

    _gradient_width = 5

    def _init_progressbar(self):
        """Draw the striped 'busy' gradient and the solid progress box."""
        c = self._progressbar
        width, height = int(c["width"]), int(c["height"])
        # (Use the already-computed ``width`` instead of re-reading the
        # canvas option inside the range expression.)
        for i in range(0, (width * 2) // self._gradient_width):
            c.create_line(
                i * self._gradient_width + 20,
                -20,
                i * self._gradient_width - height - 20,
                height + 20,
                width=self._gradient_width,
                fill="#%02x0000" % (80 + abs(i % 6 - 3) * 12),
            )
        c.addtag_all("gradient")
        c.itemconfig("gradient", state="hidden")

        # This is used to display progress
        c.addtag_withtag(
            "redbox", c.create_rectangle(0, 0, 0, 0, fill=self._PROGRESS_COLOR[0])
        )

    def _show_progress(self, percent):
        """Fill the progress box to *percent*; hide it when percent is None."""
        c = self._progressbar
        if percent is None:
            c.coords("redbox", 0, 0, 0, 0)
            c.itemconfig("gradient", state="hidden")
        else:
            width, height = int(c["width"]), int(c["height"])
            x = percent * width // 100 + 1
            c.coords("redbox", 0, 0, x, height + 1)

    def _progress_alive(self):
        """Animate the gradient while downloading; self-reschedules."""
        c = self._progressbar
        if not self._downloading:
            c.itemconfig("gradient", state="hidden")
        else:
            c.itemconfig("gradient", state="normal")
            x1, y1, x2, y2 = c.bbox("gradient")
            if x1 <= -100:
                c.move("gradient", (self._gradient_width * 6) - 4, 0)
            else:
                c.move("gradient", -4, 0)
            afterid = self.top.after(200, self._progress_alive)
            self._afterid["_progress_alive"] = afterid

    # /////////////////////////////////////////////////////////////////
    # Threaded downloader
    # /////////////////////////////////////////////////////////////////

    def _download_threaded(self, *e):
        """Start a download in a background thread (or cancel a running one)."""
        # If the user tries to start a new download while we're already
        # downloading something, then abort the current download instead.
        if self._downloading:
            self._abort_download()
            return

        # Change the 'download' button to an 'abort' button.
        self._download_button["text"] = "Cancel"

        marked = [
            self._table[row, "Identifier"]
            for row in range(len(self._table))
            if self._table[row, 0] != ""
        ]
        selection = self._table.selected_row()
        if not marked and selection is not None:
            marked = [self._table[selection, "Identifier"]]

        # Create a new data server object for the download operation,
        # just in case the user modifies our data server during the
        # download (e.g., clicking 'refresh' or editing the index url).
        ds = Downloader(self._ds.url, self._ds.download_dir)

        # Start downloading in a separate thread.
        assert self._download_msg_queue == []
        assert self._download_abort_queue == []
        self._DownloadThread(
            ds,
            marked,
            self._download_lock,
            self._download_msg_queue,
            self._download_abort_queue,
        ).start()

        # Monitor the download message queue & display its progress.
        self._log_indent = 0
        self._downloading = True
        self._monitor_message_queue()

        # Display an indication that we're still alive and well by
        # cycling the progress bar.
        self._progress_alive()

    def _abort_download(self):
        """Ask the download thread to stop at the next safe point."""
        if self._downloading:
            self._download_lock.acquire()
            self._download_abort_queue.append("abort")
            self._download_lock.release()

    class _DownloadThread(threading.Thread):
        """Worker thread: runs ``incr_download`` and relays its messages
        to the GUI thread through a lock-protected list."""

        def __init__(self, data_server, items, lock, message_queue, abort):
            self.data_server = data_server
            self.items = items
            self.lock = lock
            self.message_queue = message_queue
            self.abort = abort
            threading.Thread.__init__(self)

        def run(self):
            for msg in self.data_server.incr_download(self.items):
                self.lock.acquire()
                self.message_queue.append(msg)
                # Check if we've been told to kill ourselves:
                if self.abort:
                    self.message_queue.append("aborted")
                    self.lock.release()
                    return
                self.lock.release()
            self.lock.acquire()
            self.message_queue.append("finished")
            self.lock.release()

    _MONITOR_QUEUE_DELAY = 100

    def _monitor_message_queue(self):
        """Drain the worker thread's message queue and update the display.

        Runs on the GUI thread; reschedules itself via ``after`` until
        the download finishes, aborts, or fails.
        """

        def show(s):
            self._progresslabel["text"] = s
            self._log(s)

        # Try to acquire the lock; if it's busy, then just try again later.
        # BUG FIX: the original called ``acquire()`` with no arguments,
        # which blocks and always returns True -- the early return could
        # never fire and the GUI thread could stall on a busy lock.
        if not self._download_lock.acquire(blocking=False):
            return
        for msg in self._download_msg_queue:

            # Done downloading?
            if msg == "finished" or msg == "aborted":
                # self._fill_table(sort=False)
                self._update_table_status()
                self._downloading = False
                self._download_button["text"] = "Download"
                del self._download_msg_queue[:]
                del self._download_abort_queue[:]
                self._download_lock.release()
                if msg == "aborted":
                    show("Download aborted!")
                    self._show_progress(None)
                else:
                    afterid = self.top.after(100, self._show_progress, None)
                    self._afterid["_monitor_message_queue"] = afterid
                return

            # All other messages
            elif isinstance(msg, ProgressMessage):
                self._show_progress(msg.progress)
            elif isinstance(msg, ErrorMessage):
                show(msg.message)
                if msg.package is not None:
                    self._select(msg.package.id)
                self._show_progress(None)
                self._downloading = False
                # BUG FIX: release the lock before halting, otherwise it
                # stayed held forever and a later abort would deadlock.
                self._download_lock.release()
                return  # halt progress.
            elif isinstance(msg, StartCollectionMessage):
                show("Downloading collection %r" % msg.collection.id)
                self._log_indent += 1
            elif isinstance(msg, StartPackageMessage):
                self._ds.clear_status_cache(msg.package.id)
                show("Downloading package %r" % msg.package.id)
            elif isinstance(msg, UpToDateMessage):
                show("Package %s is up-to-date!" % msg.package.id)
            # elif isinstance(msg, StaleMessage):
            #     show('Package %s is out-of-date or corrupt; updating it' %
            #          msg.package.id)
            elif isinstance(msg, FinishDownloadMessage):
                show("Finished downloading %r." % msg.package.id)
            elif isinstance(msg, StartUnzipMessage):
                show("Unzipping %s" % msg.package.filename)
            elif isinstance(msg, FinishUnzipMessage):
                show("Finished installing %s" % msg.package.id)
            elif isinstance(msg, FinishCollectionMessage):
                self._log_indent -= 1
                show("Finished downloading collection %r." % msg.collection.id)
                self._clear_mark(msg.collection.id)
            elif isinstance(msg, FinishPackageMessage):
                self._update_table_status()
                self._clear_mark(msg.package.id)

        # Let the user know when we're aborting a download (but
        # waiting for a good point to abort it, so we don't end up
        # with a partially unzipped package or anything like that).
        if self._download_abort_queue:
            self._progresslabel["text"] = "Aborting download..."

        # Clear the message queue and then release the lock
        del self._download_msg_queue[:]
        self._download_lock.release()

        # Check the queue again after MONITOR_QUEUE_DELAY msec.
        afterid = self.top.after(self._MONITOR_QUEUE_DELAY, self._monitor_message_queue)
        self._afterid["_monitor_message_queue"] = afterid
######################################################################
# Helper Functions
######################################################################
# [xx] It may make sense to move these to nltk.internals.
def md5_hexdigest(file):
    """Return the MD5 checksum of *file* as a hex string.

    ``file`` may be either a filename or an open binary stream.
    """
    if isinstance(file, str):
        with open(file, "rb") as stream:
            return _md5_hexdigest(stream)
    return _md5_hexdigest(file)


def _md5_hexdigest(fp):
    """Feed *fp* through MD5 in 16 KB chunks and return the hex digest."""
    digest = md5()
    for chunk in iter(lambda: fp.read(1024 * 16), b""):
        digest.update(chunk)
    return digest.hexdigest()
# change this to periodically yield progress messages?
# [xx] get rid of topdir parameter -- we should be checking
# this when we build the index, anyway.
def unzip(filename, root, verbose=True):
    """Extract the zip file ``filename`` into the directory ``root``.

    Unlike ``_unzip_iter``, failures are raised (as ``Exception``)
    rather than yielded as ``ErrorMessage`` objects.
    """
    for msg in _unzip_iter(filename, root, verbose):
        if isinstance(msg, ErrorMessage):
            raise Exception(msg)
def _unzip_iter(filename, root, verbose=True):
if verbose:
sys.stdout.write("Unzipping %s" % os.path.split(filename)[1])
sys.stdout.flush()
try:
zf = zipfile.ZipFile(filename)
except zipfile.error as e:
yield ErrorMessage(filename, "Error with downloaded zip file")
return
except Exception as e:
yield ErrorMessage(filename, e)
return
zf.extractall(root)
if verbose:
print()
######################################################################
# Index Builder
######################################################################
# This may move to a different file sometime.
def build_index(root, base_url):
    """
    Build and return a new ``<nltk_data>`` index element by combining the
    xml description files for the packages and collections under ``root``.

    ``root`` must contain a ``packages/`` subdirectory (zip + xml file
    pairs, possibly nested in subdirectories such as ``corpora/`` or
    ``grammars/``) and a ``collections/`` subdirectory (one xml file per
    collection).  For each package, size/checksum/url attributes are
    computed and filled into its xml element.  All identifiers across
    packages and collections must be unique, otherwise ValueError is
    raised.
    """
    # Collect package descriptions, annotating each with computed metadata.
    packages = []
    for pkg_xml, zf, subdir in _find_packages(os.path.join(root, "packages")):
        zip_name = os.path.split(zf.filename)[1]
        stat_info = os.stat(zf.filename)
        total_uncompressed = sum(info.file_size for info in zf.infolist())

        pkg_xml.set("unzipped_size", "%s" % total_uncompressed)
        pkg_xml.set("size", "%s" % stat_info.st_size)
        pkg_xml.set("checksum", "%s" % md5_hexdigest(zf.filename))
        pkg_xml.set("subdir", subdir)
        # Only fill in the url when the xml didn't specify one explicitly.
        if not pkg_xml.get("url"):
            pkg_xml.set("url", f"{base_url}/{subdir}/{zip_name}")
        packages.append(pkg_xml)

    # Find all collections.
    collections = list(_find_collections(os.path.join(root, "collections")))

    # Reject duplicate identifiers across packages and collections.
    seen_uids = set()
    for item in packages + collections:
        uid = item.get("id")
        if uid in seen_uids:
            raise ValueError("Duplicate UID: %s" % uid)
        seen_uids.add(uid)

    # Assemble <nltk_data>: sorted <packages>, then sorted <collections>.
    top_elt = ElementTree.Element("nltk_data")
    pkgs_elt = ElementTree.Element("packages")
    pkgs_elt.extend(sorted(packages, key=lambda elt: elt.get("id")))
    top_elt.append(pkgs_elt)
    colls_elt = ElementTree.Element("collections")
    colls_elt.extend(sorted(collections, key=lambda elt: elt.get("id")))
    top_elt.append(colls_elt)

    _indent_xml(top_elt)
    return top_elt
def _indent_xml(xml, prefix=""):
"""
Helper for ``build_index()``: Given an XML ``ElementTree``, modify it
(and its descendents) ``text`` and ``tail`` attributes to generate
an indented tree, where each nested element is indented by 2
spaces with respect to its parent.
"""
if len(xml) > 0:
xml.text = (xml.text or "").strip() + "\n" + prefix + " "
for child in xml:
_indent_xml(child, prefix + " ")
for child in xml[:-1]:
child.tail = (child.tail or "").strip() + "\n" + prefix + " "
xml[-1].tail = (xml[-1].tail or "").strip() + "\n" + prefix
def _check_package(pkg_xml, zipfilename, zf):
"""
Helper for ``build_index()``: Perform some checks to make sure that
the given package is consistent.
"""
# The filename must patch the id given in the XML file.
uid = os.path.splitext(os.path.split(zipfilename)[1])[0]
if pkg_xml.get("id") != uid:
raise ValueError(
"package identifier mismatch ({} vs {})".format(pkg_xml.get("id"), uid)
)
# Zip file must expand to a subdir whose name matches uid.
if sum((name != uid and not name.startswith(uid + "/")) for name in zf.namelist()):
raise ValueError(
"Zipfile %s.zip does not expand to a single "
"subdirectory %s/" % (uid, uid)
)
# update for git?
def _svn_revision(filename):
    """
    Helper for ``build_index()``: Calculate the subversion revision
    number for a given file (by using ``subprocess`` to run ``svn``).

    Returns the revision as a ``str``.  Raises ValueError if ``svn
    status`` fails, writes to stderr, or produces no output.
    """
    p = subprocess.Popen(
        ["svn", "status", "-v", filename],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        # Decode output to str: the error path below passes ``stderr`` to
        # ``textwrap.fill`` (which requires str, and previously received
        # bytes), and callers expect a str revision rather than bytes.
        text=True,
    )
    (stdout, stderr) = p.communicate()
    if p.returncode != 0 or stderr or not stdout:
        raise ValueError(
            "Error determining svn_revision for %s: %s"
            % (os.path.split(filename)[1], textwrap.fill(stderr))
        )
    return stdout.split()[2]
def _find_collections(root):
"""
Helper for ``build_index()``: Yield a list of ElementTree.Element
objects, each holding the xml for a single package collection.
"""
for dirname, _subdirs, files in os.walk(root):
for filename in files:
if filename.endswith(".xml"):
xmlfile = os.path.join(dirname, filename)
yield ElementTree.parse(xmlfile).getroot()
def _find_packages(root):
    """
    Helper for ``build_index()``: Yield a list of tuples
    ``(pkg_xml, zf, subdir)``, where:
      - ``pkg_xml`` is an ``ElementTree.Element`` holding the xml for a
        package
      - ``zf`` is a ``zipfile.ZipFile`` for the package's contents.
      - ``subdir`` is the subdirectory (relative to ``root``) where
        the package was found (e.g. 'corpora' or 'grammars').

    Raises ValueError when a package's zip/xml pair is unreadable or
    inconsistent; warns when a zip has no matching xml description.
    """
    from nltk.corpus.reader.util import _path_from

    # Find all packages.
    for dirname, subdirs, files in os.walk(root):
        relpath = "/".join(_path_from(root, dirname))
        for filename in files:
            if filename.endswith(".xml"):
                xmlfilename = os.path.join(dirname, filename)
                zipfilename = xmlfilename[:-4] + ".zip"
                try:
                    zf = zipfile.ZipFile(zipfilename)
                except Exception as e:
                    raise ValueError(f"Error reading file {zipfilename!r}!\n{e}") from e
                try:
                    pkg_xml = ElementTree.parse(xmlfilename).getroot()
                except Exception as e:
                    raise ValueError(f"Error reading file {xmlfilename!r}!\n{e}") from e

                # Check that the UID matches the filename
                uid = os.path.split(xmlfilename[:-4])[1]
                if pkg_xml.get("id") != uid:
                    raise ValueError(
                        "package identifier mismatch (%s "
                        "vs %s)" % (pkg_xml.get("id"), uid)
                    )

                # Check that the zipfile expands to a subdir whose
                # name matches the uid.
                if sum(
                    (name != uid and not name.startswith(uid + "/"))
                    for name in zf.namelist()
                ):
                    raise ValueError(
                        "Zipfile %s.zip does not expand to a "
                        "single subdirectory %s/" % (uid, uid)
                    )

                yield pkg_xml, zf, relpath
            elif filename.endswith(".zip"):
                # Warn user in case a .xml does not exist for a .zip.
                # (Fixed: this message previously printed the literal
                # placeholder "(unknown)" instead of the zip file's name.)
                resourcename = os.path.splitext(filename)[0]
                xmlfilename = os.path.join(dirname, resourcename + ".xml")
                if not os.path.exists(xmlfilename):
                    warnings.warn(
                        f"{filename} exists, but {resourcename + '.xml'} cannot be found! "
                        f"This could mean that {resourcename} can not be downloaded.",
                        stacklevel=2,
                    )

        # Don't recurse into svn subdirectories:
        try:
            subdirs.remove(".svn")
        except ValueError:
            pass
######################################################################
# Main:
######################################################################
# There should be a command-line interface
# Aliases
# Module-level singleton used by the convenience entry points below.
_downloader = Downloader()
# Convenience alias so callers can use ``download(...)`` directly.
download = _downloader.download
def download_shell():
    """Run the interactive text-mode download shell on the shared downloader."""
    shell = DownloaderShell(_downloader)
    shell.run()
def download_gui():
    """Open the Tk-based download GUI on the shared downloader and block until it closes."""
    gui = DownloaderGUI(_downloader)
    gui.mainloop()
def update():
    """Refresh the shared downloader's package index."""
    _downloader.update()
if __name__ == "__main__":
    from optparse import OptionParser

    # Command-line interface: ``python -m ... [options] [package-id ...]``.
    parser = OptionParser()
    parser.add_option(
        "-d",
        "--dir",
        dest="dir",
        help="download package to directory DIR",
        metavar="DIR",
    )
    parser.add_option(
        "-q",
        "--quiet",
        dest="quiet",
        action="store_true",
        default=False,
        help="work quietly",
    )
    parser.add_option(
        "-f",
        "--force",
        dest="force",
        action="store_true",
        default=False,
        help="download even if already installed",
    )
    parser.add_option(
        "-e",
        "--exit-on-error",
        dest="halt_on_error",
        action="store_true",
        default=False,
        help="exit if an error occurs",
    )
    parser.add_option(
        "-u",
        "--url",
        dest="server_index_url",
        default=os.environ.get("NLTK_DOWNLOAD_URL"),
        help="download server index url",
    )

    (options, args) = parser.parse_args()

    downloader = Downloader(server_index_url=options.server_index_url)

    if args:
        # Download each package named on the command line.
        for pkg_id in args:
            rv = downloader.download(
                info_or_id=pkg_id,
                download_dir=options.dir,
                quiet=options.quiet,
                force=options.force,
                halt_on_error=options.halt_on_error,
            )
            # ``download()`` signals failure by returning False; use an
            # identity test so a None return does not also halt the loop.
            if rv is False and options.halt_on_error:
                break
    else:
        # No packages named: fall back to the interactive downloader.
        downloader.download(
            download_dir=options.dir,
            quiet=options.quiet,
            force=options.force,
            halt_on_error=options.halt_on_error,
        )
| {
"repo_id": "binary-husky/gpt_academic",
"file_path": "shared_utils/nltk_downloader.py",
"license": "GNU General Public License v3.0",
"lines": 2169,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
black-forest-labs/flux:src/flux/cli_kontext.py | import os
import re
import time
from dataclasses import dataclass
from glob import iglob
import torch
from fire import Fire
from flux.content_filters import PixtralContentFilter
from flux.sampling import denoise, get_schedule, prepare_kontext, unpack
from flux.util import (
aspect_ratio_to_height_width,
check_onnx_access_for_trt,
load_ae,
load_clip,
load_flow_model,
load_t5,
save_image,
)
@dataclass
class SamplingOptions:
    """Parameters for one FLUX.1 Kontext sampling run.

    A ``width``/``height`` of None means "derive from the conditioning
    image"; a ``seed`` of None means "draw a fresh random seed".
    """

    prompt: str  # instruction prompt for the edit
    width: int | None  # target width in pixels, or None
    height: int | None  # target height in pixels, or None
    num_steps: int  # number of denoising steps
    guidance: float  # guidance value
    seed: int | None  # RNG seed, or None for random
    img_cond_path: str  # path to the conditioning image (jpg/jpeg/png/webp)
def parse_prompt(options: SamplingOptions) -> SamplingOptions | None:
    """Interactively update ``options`` from user input.

    Commands: '/ar' (aspect ratio or 'auto'), '/g' (guidance), '/s' (seed),
    '/n' (steps), '/h' (help), '/q' (quit).  Any other non-empty input
    becomes the new prompt; empty input keeps the previous prompt.

    Returns the updated options, or None when the user quits.
    """
    user_question = "Next prompt (write /h for help, /q to quit and leave empty to repeat):\n"
    usage = (
        "Usage: Either write your prompt directly, leave this field empty "
        "to repeat the prompt or write a command starting with a slash:\n"
        "- '/ar <width>:<height>' will set the aspect ratio of the generated image\n"
        "- '/s <seed>' sets the next seed\n"
        "- '/g <guidance>' sets the guidance (flux-dev only)\n"
        "- '/n <steps>' sets the number of steps\n"
        "- '/q' to quit"
    )

    while (prompt := input(user_question)).startswith("/"):
        if prompt.startswith("/ar"):
            if prompt.count(" ") != 1:
                print(f"Got invalid command '{prompt}'\n{usage}")
                continue
            _, ratio_prompt = prompt.split()
            if ratio_prompt == "auto":
                options.width = None
                options.height = None
                print("Setting resolution to input image resolution.")
            else:
                options.width, options.height = aspect_ratio_to_height_width(ratio_prompt)
                print(f"Setting resolution to {options.width} x {options.height}.")
        elif prompt.startswith("/g"):
            if prompt.count(" ") != 1:
                print(f"Got invalid command '{prompt}'\n{usage}")
                continue
            _, guidance = prompt.split()
            options.guidance = float(guidance)
            print(f"Setting guidance to {options.guidance}")
        elif prompt.startswith("/s"):
            if prompt.count(" ") != 1:
                print(f"Got invalid command '{prompt}'\n{usage}")
                continue
            _, seed = prompt.split()
            options.seed = int(seed)
            print(f"Setting seed to {options.seed}")
        elif prompt.startswith("/n"):
            if prompt.count(" ") != 1:
                print(f"Got invalid command '{prompt}'\n{usage}")
                continue
            _, steps = prompt.split()
            options.num_steps = int(steps)
            print(f"Setting number of steps to {options.num_steps}")
        elif prompt.startswith("/q"):
            print("Quitting")
            return None
        else:
            # '/h' and any unrecognized command land here and print the
            # usage text.  (Fixed: a leftover branch from the plain CLI
            # treated '/h <height>' as a height setter, contradicting the
            # documented "write /h for help".)
            if not prompt.startswith("/h"):
                print(f"Got invalid command '{prompt}'\n{usage}")
            print(usage)
    if prompt != "":
        options.prompt = prompt
    return options
def parse_img_cond_path(options: SamplingOptions | None) -> SamplingOptions | None:
    """Interactively update ``options.img_cond_path`` from user input.

    Accepts a path to an existing jpg/jpeg/png/webp file, empty input to
    keep the previous image, or '/q' to quit (returns None).  A None
    ``options`` (user already quit) is passed straight through.
    """
    if options is None:
        return None

    user_question = "Next input image (write /h for help, /q to quit and leave empty to repeat):\n"
    usage = (
        "Usage: Either write a path to an image directly, leave this field empty "
        "to repeat the last input image or write a command starting with a slash:\n"
        "- '/q' to quit\n\n"
        # Fixed: these two fragments previously concatenated to "basedon".
        "The input image will be edited by FLUX.1 Kontext creating a new image based "
        "on your instruction prompt."
    )

    while True:
        img_cond_path = input(user_question)

        if img_cond_path.startswith("/"):
            if img_cond_path.startswith("/q"):
                print("Quitting")
                return None
            else:
                if not img_cond_path.startswith("/h"):
                    print(f"Got invalid command '{img_cond_path}'\n{usage}")
                print(usage)
            continue

        if img_cond_path == "":
            break

        if not os.path.isfile(img_cond_path) or not img_cond_path.lower().endswith(
            (".jpg", ".jpeg", ".png", ".webp")
        ):
            print(f"File '{img_cond_path}' does not exist or is not a valid image file")
            continue

        options.img_cond_path = img_cond_path
        break

    return options
@torch.inference_mode()
def main(
    name: str = "flux-dev-kontext",
    aspect_ratio: str | None = None,
    seed: int | None = None,
    prompt: str = "replace the logo with the text 'Black Forest Labs'",
    device: str = "cuda" if torch.cuda.is_available() else "cpu",
    num_steps: int = 30,
    loop: bool = False,
    guidance: float = 2.5,
    offload: bool = False,
    output_dir: str = "output",
    add_sampling_metadata: bool = True,
    img_cond_path: str = "assets/cup.png",
    trt: bool = False,
    trt_transformer_precision: str = "bf16",
    track_usage: bool = False,
):
    """
    Sample the flux model. Either interactively (set `--loop`) or run for a
    single image.

    Args:
        name: Name of the model to load (only "flux-dev-kontext" is supported)
        aspect_ratio: Aspect ratio "<width>:<height>" of the sample; None
            defaults to the aspect ratio of the conditioning image
        seed: Set a seed for sampling
        prompt: Prompt used for sampling
        device: Pytorch device
        num_steps: number of sampling steps (default 30)
        loop: start an interactive session and sample multiple times
        guidance: guidance value used for guidance distillation
        offload: keep models on CPU and move them to the device only while in use
        output_dir: directory where samples are written as `img_{idx}.jpg`
        add_sampling_metadata: Add the prompt to the image Exif metadata
        img_cond_path: path to conditioning image (jpeg/png/webp)
        trt: use TensorRT backend for optimized inference
        trt_transformer_precision: precision of the TRT transformer engine
        track_usage: track usage of the model for licensing purposes
    """
    assert name == "flux-dev-kontext", f"Got unknown model name: {name}"

    torch_device = torch.device(device)

    # Continue numbering from existing img_{idx}.jpg files in output_dir.
    output_name = os.path.join(output_dir, "img_{idx}.jpg")
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
        idx = 0
    else:
        fns = [fn for fn in iglob(output_name.format(idx="*")) if re.search(r"img_[0-9]+\.jpg$", fn)]
        if len(fns) > 0:
            idx = max(int(fn.split("_")[-1].split(".")[0]) for fn in fns) + 1
        else:
            idx = 0

    if aspect_ratio is None:
        width = None
        height = None
    else:
        width, height = aspect_ratio_to_height_width(aspect_ratio)

    if not trt:
        t5 = load_t5(torch_device, max_length=512)
        clip = load_clip(torch_device)
        model = load_flow_model(name, device="cpu" if offload else torch_device)
    else:
        # lazy import to make install optional
        from flux.trt.trt_manager import ModuleName, TRTManager

        # Check if we need ONNX model access (which requires authentication for FLUX models)
        onnx_dir = check_onnx_access_for_trt(name, trt_transformer_precision)

        trt_ctx_manager = TRTManager(
            trt_transformer_precision=trt_transformer_precision,
            trt_t5_precision=os.environ.get("TRT_T5_PRECISION", "bf16"),
        )
        engines = trt_ctx_manager.load_engines(
            model_name=name,
            module_names={
                ModuleName.CLIP,
                ModuleName.TRANSFORMER,
                ModuleName.T5,
            },
            engine_dir=os.environ.get("TRT_ENGINE_DIR", "./engines"),
            custom_onnx_paths=onnx_dir or os.environ.get("CUSTOM_ONNX_PATHS", ""),
            trt_image_height=height,
            trt_image_width=width,
            trt_batch_size=1,
            trt_timing_cache=os.getenv("TRT_TIMING_CACHE_FILE", None),
            trt_static_batch=False,
            trt_static_shape=False,
        )
        model = engines[ModuleName.TRANSFORMER].to(device="cpu" if offload else torch_device)
        clip = engines[ModuleName.CLIP].to(torch_device)
        t5 = engines[ModuleName.T5].to(device="cpu" if offload else torch_device)

    ae = load_ae(name, device="cpu" if offload else torch_device)
    content_filter = PixtralContentFilter(torch.device("cpu"))

    rng = torch.Generator(device="cpu")
    opts = SamplingOptions(
        prompt=prompt,
        width=width,
        height=height,
        num_steps=num_steps,
        guidance=guidance,
        seed=seed,
        img_cond_path=img_cond_path,
    )

    if loop:
        opts = parse_prompt(opts)
        opts = parse_img_cond_path(opts)

    while opts is not None:
        if opts.seed is None:
            opts.seed = rng.seed()
        print(f"Generating with seed {opts.seed}:\n{opts.prompt}")
        t0 = time.perf_counter()

        if content_filter.test_txt(opts.prompt):
            print("Your prompt has been automatically flagged. Please choose another prompt.")
            if loop:
                print("-" * 80)
                opts = parse_prompt(opts)
                opts = parse_img_cond_path(opts)
            else:
                opts = None
            continue
        if content_filter.test_image(opts.img_cond_path):
            print("Your input image has been automatically flagged. Please choose another image.")
            if loop:
                print("-" * 80)
                opts = parse_prompt(opts)
                opts = parse_img_cond_path(opts)
            else:
                opts = None
            continue

        if offload:
            t5, clip, ae = t5.to(torch_device), clip.to(torch_device), ae.to(torch_device)

        inp, height, width = prepare_kontext(
            t5=t5,
            clip=clip,
            prompt=opts.prompt,
            ae=ae,
            img_cond_path=opts.img_cond_path,
            target_width=opts.width,
            target_height=opts.height,
            bs=1,
            seed=opts.seed,
            device=torch_device,
        )
        # (Fixed: removed a debug leftover that dumped the prepared inputs
        # to "output/noise.sft" via safetensors on every iteration.)
        inp.pop("img_cond_orig")
        opts.seed = None
        timesteps = get_schedule(opts.num_steps, inp["img"].shape[1], shift=(name != "flux-schnell"))

        # offload TEs and AE to CPU, load model to gpu
        if offload:
            t5, clip, ae = t5.cpu(), clip.cpu(), ae.cpu()
            torch.cuda.empty_cache()
            model = model.to(torch_device)

        # denoise initial noise
        t00 = time.time()
        x = denoise(model, **inp, timesteps=timesteps, guidance=opts.guidance)
        torch.cuda.synchronize()
        t01 = time.time()
        print(f"Denoising took {t01 - t00:.3f}s")

        # offload model, load autoencoder to gpu
        if offload:
            model.cpu()
            torch.cuda.empty_cache()
            ae.decoder.to(x.device)

        # decode latents to pixel space
        x = unpack(x.float(), height, width)
        with torch.autocast(device_type=torch_device.type, dtype=torch.bfloat16):
            ae_dev_t0 = time.perf_counter()
            x = ae.decode(x)
            torch.cuda.synchronize()
            ae_dev_t1 = time.perf_counter()
            print(f"AE decode took {ae_dev_t1 - ae_dev_t0:.3f}s")

        if content_filter.test_image(x.cpu()):
            print(
                "Your output image has been automatically flagged. Choose another prompt/image or try again."
            )
            if loop:
                print("-" * 80)
                opts = parse_prompt(opts)
                opts = parse_img_cond_path(opts)
            else:
                opts = None
            continue

        if torch.cuda.is_available():
            torch.cuda.synchronize()
        t1 = time.perf_counter()
        print(f"Done in {t1 - t0:.1f}s")

        idx = save_image(
            None, name, output_name, idx, x, add_sampling_metadata, prompt, track_usage=track_usage
        )

        if loop:
            print("-" * 80)
            opts = parse_prompt(opts)
            opts = parse_img_cond_path(opts)
        else:
            opts = None
if __name__ == "__main__":
    # Expose ``main`` as a command-line interface via python-fire
    # (each keyword argument becomes a ``--flag``).
    Fire(main)
| {
"repo_id": "black-forest-labs/flux",
"file_path": "src/flux/cli_kontext.py",
"license": "Apache License 2.0",
"lines": 325,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
black-forest-labs/flux:src/flux/content_filters.py | import torch
from einops import rearrange
from PIL import Image
from transformers import AutoProcessor, LlavaForConditionalGeneration, pipeline
PROMPT_IMAGE_INTEGRITY = """
Task: Analyze an image to identify potential copyright concerns or depictions of public figures.
Output: Respond with only "yes" or "no"
Criteria for "yes":
- The image contains a recognizable character from copyrighted media (movies, TV, comics, games, etc.)
- The image displays a trademarked logo or brand
- The image depicts a recognizable public figure (celebrities, politicians, athletes, influencers, historical figures, etc.)
Criteria for "no":
- All other cases
- When you cannot identify the specific copyrighted work or named individual
Critical Requirements:
1. You must be able to name the exact copyrighted work or specific person depicted
2. General references to demographics or characteristics are not sufficient
3. Base your decision solely on visual content, not interpretation
4. Provide only the one-word answer: "yes" or "no"
""".strip()
PROMPT_IMAGE_INTEGRITY_FOLLOW_UP = "Does this image have copyright concerns or includes public figures?"
PROMPT_TEXT_INTEGRITY = """
Task: Analyze a text prompt to identify potential copyright concerns or requests to depict living public figures.
Output: Respond with only "yes" or "no"
Criteria for "Yes":
- The prompt explicitly names a character from copyrighted media (movies, TV, comics, games, etc.)
- The prompt explicitly mentions a trademarked logo or brand
- The prompt names or describes a specific living public figure (celebrities, politicians, athletes, influencers, etc.)
Criteria for "No":
- All other cases
- When you cannot identify the specific copyrighted work or named individual
Critical Requirements:
1. You must be able to name the exact copyrighted work or specific person referenced
2. General demographic descriptions or characteristics are not sufficient
3. Analyze only the prompt text, not potential image outcomes
4. Provide only the one-word answer: "yes" or "no"
The prompt to check is:
-----
{prompt}
-----
Does this prompt have copyright concerns or includes public figures?
""".strip()
class PixtralContentFilter(torch.nn.Module):
def __init__(
self,
device: torch.device = torch.device("cpu"),
nsfw_threshold: float = 0.85,
):
super().__init__()
model_id = "mistral-community/pixtral-12b"
self.processor = AutoProcessor.from_pretrained(model_id)
self.model = LlavaForConditionalGeneration.from_pretrained(model_id, device_map=device)
self.yes_token, self.no_token = self.processor.tokenizer.encode(["yes", "no"])
self.nsfw_classifier = pipeline(
"image-classification", model="Falconsai/nsfw_image_detection", device=device
)
self.nsfw_threshold = nsfw_threshold
def yes_no_logit_processor(
self, input_ids: torch.LongTensor, scores: torch.FloatTensor
) -> torch.FloatTensor:
"""
Sets all tokens but yes/no to the minimum.
"""
scores_yes_token = scores[:, self.yes_token].clone()
scores_no_token = scores[:, self.no_token].clone()
scores_min = scores.min()
scores[:, :] = scores_min - 1
scores[:, self.yes_token] = scores_yes_token
scores[:, self.no_token] = scores_no_token
return scores
def test_image(self, image: Image.Image | str | torch.Tensor) -> bool:
if isinstance(image, torch.Tensor):
image = rearrange(image[0].clamp(-1.0, 1.0), "c h w -> h w c")
image = Image.fromarray((127.5 * (image + 1.0)).cpu().byte().numpy())
elif isinstance(image, str):
image = Image.open(image)
classification = next(c for c in self.nsfw_classifier(image) if c["label"] == "nsfw")
if classification["score"] > self.nsfw_threshold:
return True
# 512^2 pixels are enough for checking
w, h = image.size
f = (512**2 / (w * h)) ** 0.5
image = image.resize((int(f * w), int(f * h)))
chat = [
{
"role": "user",
"content": [
{
"type": "text",
"content": PROMPT_IMAGE_INTEGRITY,
},
{
"type": "image",
"image": image,
},
{
"type": "text",
"content": PROMPT_IMAGE_INTEGRITY_FOLLOW_UP,
},
],
}
]
inputs = self.processor.apply_chat_template(
chat,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
return_tensors="pt",
).to(self.model.device)
generate_ids = self.model.generate(
**inputs,
max_new_tokens=1,
logits_processor=[self.yes_no_logit_processor],
do_sample=False,
)
return generate_ids[0, -1].item() == self.yes_token
def test_txt(self, txt: str) -> bool:
chat = [
{
"role": "user",
"content": [
{
"type": "text",
"content": PROMPT_TEXT_INTEGRITY.format(prompt=txt),
},
],
}
]
inputs = self.processor.apply_chat_template(
chat,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
return_tensors="pt",
).to(self.model.device)
generate_ids = self.model.generate(
**inputs,
max_new_tokens=1,
logits_processor=[self.yes_no_logit_processor],
do_sample=False,
)
return generate_ids[0, -1].item() == self.yes_token
| {
"repo_id": "black-forest-labs/flux",
"file_path": "src/flux/content_filters.py",
"license": "Apache License 2.0",
"lines": 142,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
black-forest-labs/flux:src/flux/trt/trt_config/base_trt_config.py | #
# SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
from abc import abstractmethod
from collections import defaultdict
from dataclasses import dataclass, field
from enum import Enum
from typing import Any
from colored import fore, style
from huggingface_hub import snapshot_download
from tensorrt import __version__ as trt_version
class ModuleName(Enum):
    """Pipeline modules for which TensorRT engines can be built; the value is
    the subdirectory/file stem used in the ONNX snapshots and engine names."""

    CLIP = "clip"
    T5 = "t5"
    TRANSFORMER = "transformer"
    VAE = "vae"
    VAE_ENCODER = "vae_encoder"
# Global mapping from "module=<name>_dtype=<precision>" keys to config
# classes; populated by ``register_config`` and queried by ``get_config``.
registry = {}
@dataclass
class TRTBaseConfig:
    """Base configuration for building/locating a TensorRT engine for one
    pipeline module.  ``onnx_path`` and ``engine_path`` are derived in
    ``__post_init__`` rather than passed by the caller."""

    engine_dir: str  # directory under which engine plan files are stored
    precision: str  # precision tag used in the engine filename
    trt_verbose: bool
    trt_static_batch: bool
    trt_static_shape: bool
    model_name: str  # e.g. "flux-dev"; mapped to an HF onnx repo below
    module_name: ModuleName
    # Derived fields, filled in by __post_init__ (excluded from __init__).
    onnx_path: str = field(init=False)
    engine_path: str = field(init=False)
    trt_tf32: bool
    trt_bf16: bool
    trt_fp8: bool
    trt_fp4: bool
    trt_build_strongly_typed: bool
    custom_onnx_path: str | None = None  # overrides the HF snapshot download
    trt_update_output_names: list[str] | None = None
    trt_enable_all_tactics: bool = False
    trt_timing_cache: str | None = None
    trt_native_instancenorm: bool = True
    trt_builder_optimization_level: int = 3
    trt_precision_constraints: str = "none"
    min_batch: int = 1
    max_batch: int = 4

    @staticmethod
    def build_trt_engine(
        engine_path: str,
        onnx_path: str,
        strongly_typed=False,
        tf32=True,
        bf16=False,
        fp8=False,
        fp4=False,
        input_profile: dict[str, Any] | None = None,
        update_output_names: list[str] | None = None,
        enable_refit=False,
        enable_all_tactics=False,
        timing_cache: str | None = None,
        native_instancenorm=True,
        builder_optimization_level=3,
        precision_constraints="none",
        verbose=False,
    ):
        """
        Method used to build a TRT engine from a given set of flags or configurations using polygraphy.

        Args:
            engine_path (str): Output path used to store the built engine.
            onnx_path (str): Path containing an onnx model used to generate the engine.
            strongly_typed (bool): Flag indicating if the engine should be strongly typed.
            tf32 (bool): Whether to build the engine with TF32 precision enabled.
            bf16 (bool): Whether to build the engine with BF16 precision enabled.
            fp8 (bool): Whether to build the engine with FP8 precision enabled. Refers to the plain datatype and does not interfere with quantization introduced by modelopt.
            fp4 (bool): Whether to build the engine with FP4 precision enabled. Refers to the plain datatype and does not interfere with quantization introduced by modelopt.
            input_profile (dict[str, Any]): A set of optimization profiles to add to the configuration. Only needed for networks with dynamic input shapes. Each entry maps an input name to (min, opt, max) shape tuples.
            update_output_names (list[str]): List of output names to use in the trt engines.
            enable_refit (bool): Enables the engine to be refitted with new weights after it is built.
            enable_all_tactics (bool): Enables TRT to leverage all tactics or not.
            timing_cache (str): A path or file-like object from which to load a tactic timing cache.
            native_instancenorm (bool): support of instancenorm plugin.
            builder_optimization_level (int): The builder optimization level.
            precision_constraints (str): If set to "obey", require that layers execute in specified precisions. If set to "prefer", prefer that layers execute in specified precisions but allow TRT to fall back to other precisions if no implementation exists for the requested precision. Otherwise, precision constraints are ignored.
            verbose (bool): Whether to emit verbose build output.

        Returns:
            None.  Raises RuntimeError when the polygraphy build command fails.
        """
        print(f"Building TensorRT engine for {onnx_path}: {engine_path}")

        # Base command
        build_command = [f"polygraphy convert {onnx_path} --convert-to trt --output {engine_path}"]

        # Precision flags
        build_args = [
            "--bf16" if bf16 else "",
            "--tf32" if tf32 else "",
            "--fp8" if fp8 else "",
            "--fp4" if fp4 else "",
            "--strongly-typed" if strongly_typed else "",
        ]

        # Additional arguments
        build_args.extend(
            [
                "--refittable" if enable_refit else "",
                "--tactic-sources" if not enable_all_tactics else "",
                "--onnx-flags native_instancenorm" if native_instancenorm else "",
                f"--builder-optimization-level {builder_optimization_level}",
                f"--precision-constraints {precision_constraints}",
            ]
        )

        # Timing cache (load and save through the same file)
        if timing_cache:
            build_args.extend([f"--load-timing-cache {timing_cache}", f"--save-timing-cache {timing_cache}"])

        # Verbosity setting
        verbosity = "extra_verbose" if verbose else "error"
        build_args.append(f"--verbosity {verbosity}")

        # Output names
        if update_output_names:
            print(f"Updating network outputs to {update_output_names}")
            build_args.append(f"--trt-outputs {' '.join(update_output_names)}")

        # Input profiles: one min/opt/max shape triple per named input.
        if input_profile:
            profile_args = defaultdict(str)
            for name, dims in input_profile.items():
                assert len(dims) == 3
                profile_args["--trt-min-shapes"] += f"{name}:{str(list(dims[0])).replace(' ', '')} "
                profile_args["--trt-opt-shapes"] += f"{name}:{str(list(dims[1])).replace(' ', '')} "
                profile_args["--trt-max-shapes"] += f"{name}:{str(list(dims[2])).replace(' ', '')} "

            build_args.extend(f"{k} {v}" for k, v in profile_args.items())

        # Filter out empty strings and join command
        build_args = [arg for arg in build_args if arg]
        final_command = " \\\n".join(build_command + build_args)

        # Execute command with improved error handling.
        # NOTE(review): the command runs with shell=True from concatenated
        # strings; paths appear to be internal, but confirm none of them are
        # user-controlled before exposing this externally.
        try:
            print(f"Engine build command:{fore('yellow')}\n{final_command}\n{style('reset')}")
            subprocess.run(final_command, check=True, shell=True)
        except subprocess.CalledProcessError as exc:
            error_msg = f"Failed to build TensorRT engine. Error details:\nCommand: {exc.cmd}\n"
            raise RuntimeError(error_msg) from exc

    @classmethod
    @abstractmethod
    def from_args(cls, model_name: str, *args, **kwargs) -> Any:
        # Factory constructor; each concrete config builds itself from CLI-ish kwargs.
        raise NotImplementedError("Factory method is missing")

    @abstractmethod
    def get_input_profile(
        self,
        batch_size: int,
        image_height: int | None,
        image_width: int | None,
    ) -> dict[str, Any]:
        """
        Generate max and min shape that each input of a TRT engine can have.

        Subclasses must implement this method to return a dictionary that defines
        the input profile based on the provided parameters. The input profile typically
        includes details such as the expected shape of input tensors, whether the batch size
        or image dimensions are fixed, and any additional configuration required by the
        data processing or model inference pipeline.

        Args:
            batch_size (int): The number of images per batch.
            image_height (int | None): Default height of each image in pixels.
            image_width (int | None): Default width of each image in pixels.

        Returns:
            dict[str, Any]: A dictionary representing the input profile configuration.

        Raises:
            NotImplementedError: If the subclass does not override this abstract method.
        """
        pass

    @abstractmethod
    def check_dims(self, *args, **kwargs) -> None | tuple[int, int] | int:
        """Helper function that checks the dimensions associated with each input of a TRT engine."""
        pass

    def _check_batch(self, batch_size):
        # Guard against requesting a batch size outside the profiled range.
        assert (
            self.min_batch <= batch_size <= self.max_batch
        ), f"Batch size {batch_size} must be between {self.min_batch} and {self.max_batch}"

    def __post_init__(self):
        # Resolve the derived paths declared with field(init=False) above.
        self.onnx_path = self._get_onnx_path()
        self.engine_path = self._get_engine_path()
        assert os.path.isfile(self.onnx_path), "onnx_path do not exists: {}".format(self.onnx_path)

    def _get_onnx_path(self) -> str:
        """Return the local onnx model path, downloading the HF snapshot
        unless a custom path was supplied."""
        if self.custom_onnx_path:
            return self.custom_onnx_path

        repo_id = self._get_repo_id(self.model_name)
        snapshot_path = snapshot_download(repo_id, allow_patterns=[f"{self.module_name.value}.opt/*"])
        onnx_model_path = os.path.join(snapshot_path, f"{self.module_name.value}.opt/model.onnx")
        return onnx_model_path

    def _get_engine_path(self) -> str:
        # Engine plan filename encodes module, precision, and TRT version.
        return os.path.join(
            self.engine_dir,
            self.model_name,
            f"{self.module_name.value}_{self.precision}.trt_{trt_version}.plan",
        )

    @staticmethod
    def _get_repo_id(model_name: str) -> str:
        """Map a flux model name to its HuggingFace ONNX repository id."""
        if model_name == "flux-dev":
            return "black-forest-labs/FLUX.1-dev-onnx"
        elif model_name == "flux-schnell":
            return "black-forest-labs/FLUX.1-schnell-onnx"
        elif model_name == "flux-dev-canny":
            return "black-forest-labs/FLUX.1-Canny-dev-onnx"
        elif model_name == "flux-dev-depth":
            return "black-forest-labs/FLUX.1-Depth-dev-onnx"
        elif model_name == "flux-dev-kontext":
            return "black-forest-labs/FLUX.1-Kontext-dev-onnx"
        else:
            raise ValueError(f"Unknown model name: {model_name}")
def register_config(module_name: ModuleName, precision: str):
"""Decorator to register a configuration class with specific flag conditions."""
def decorator(cls):
key = f"module={module_name.value}_dtype={precision}"
registry[key] = cls
return cls
return decorator
def get_config(module_name: ModuleName, precision: str) -> TRTBaseConfig:
"""Retrieve the appropriate configuration instance based on current flags."""
key = f"module={module_name.value}_dtype={precision}"
return registry[key]
| {
"repo_id": "black-forest-labs/flux",
"file_path": "src/flux/trt/trt_config/base_trt_config.py",
"license": "Apache License 2.0",
"lines": 228,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
black-forest-labs/flux:src/flux/trt/trt_config/clip_trt_config.py | #
# SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from flux.trt.trt_config.base_trt_config import ModuleName, TRTBaseConfig, register_config
from flux.util import configs
@register_config(module_name=ModuleName.CLIP, precision="bf16")
@dataclass
class ClipConfig(TRTBaseConfig):
text_maxlen: int | None = None
hidden_size: int | None = None
trt_tf32: bool = True
trt_bf16: bool = False
trt_fp8: bool = False
trt_fp4: bool = False
trt_build_strongly_typed: bool = True
@classmethod
def from_args(
cls,
model_name: str,
**kwargs,
):
return cls(
text_maxlen=77,
hidden_size=configs[model_name].params.vec_in_dim,
model_name=model_name,
module_name=ModuleName.CLIP,
**kwargs,
)
def check_dims(self, batch_size: int) -> None:
self._check_batch(batch_size)
def get_input_profile(
self,
batch_size: int,
image_height=None,
image_width=None,
):
min_batch = batch_size if self.trt_static_batch else self.min_batch
max_batch = batch_size if self.trt_static_batch else self.max_batch
self.check_dims(batch_size)
return {
"input_ids": [
(min_batch, self.text_maxlen),
(batch_size, self.text_maxlen),
(max_batch, self.text_maxlen),
]
}
| {
"repo_id": "black-forest-labs/flux",
"file_path": "src/flux/trt/trt_config/clip_trt_config.py",
"license": "Apache License 2.0",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
black-forest-labs/flux:src/flux/trt/trt_config/t5_trt_config.py | #
# SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from dataclasses import dataclass
from huggingface_hub import snapshot_download
from flux.trt.trt_config.base_trt_config import ModuleName, TRTBaseConfig, register_config
from flux.util import configs
@register_config(module_name=ModuleName.T5, precision="bf16")
@register_config(module_name=ModuleName.T5, precision="fp8")
@dataclass
class T5Config(TRTBaseConfig):
text_maxlen: int | None = None
hidden_size: int | None = None
trt_tf32: bool = True
trt_bf16: bool = False
trt_fp8: bool = False
trt_fp4: bool = False
trt_build_strongly_typed: bool = True
@classmethod
def from_args(
cls,
model_name: str,
**kwargs,
):
return cls(
text_maxlen=256 if model_name == "flux-schnell" else 512,
hidden_size=configs[model_name].params.context_in_dim,
model_name=model_name,
module_name=ModuleName.T5,
**kwargs,
)
def check_dims(self, batch_size: int) -> None:
self._check_batch(batch_size)
def get_input_profile(
self,
batch_size: int,
image_height=None,
image_width=None,
):
min_batch = batch_size if self.trt_static_batch else self.min_batch
max_batch = batch_size if self.trt_static_batch else self.max_batch
self.check_dims(batch_size)
return {
"input_ids": [
(min_batch, self.text_maxlen),
(batch_size, self.text_maxlen),
(max_batch, self.text_maxlen),
]
}
def _get_onnx_path(self) -> str:
if self.custom_onnx_path:
return self.custom_onnx_path
if self.precision == "fp8":
repo_id = self._get_repo_id(self.model_name)
snapshot_path = snapshot_download(repo_id, allow_patterns=["t5-fp8.opt/*"])
onnx_model_path = os.path.join(snapshot_path, "t5-fp8.opt/model.onnx")
return onnx_model_path
else:
return super()._get_onnx_path()
| {
"repo_id": "black-forest-labs/flux",
"file_path": "src/flux/trt/trt_config/t5_trt_config.py",
"license": "Apache License 2.0",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
black-forest-labs/flux:src/flux/trt/trt_config/transformer_trt_config.py | #
# SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
from dataclasses import dataclass, field
from math import ceil
from huggingface_hub import snapshot_download
from flux.trt.trt_config.base_trt_config import ModuleName, TRTBaseConfig, register_config
from flux.util import PREFERED_KONTEXT_RESOLUTIONS, configs
@register_config(module_name=ModuleName.TRANSFORMER, precision="bf16")
@register_config(module_name=ModuleName.TRANSFORMER, precision="fp8")
@register_config(module_name=ModuleName.TRANSFORMER, precision="fp4")
@dataclass
class TransformerConfig(TRTBaseConfig):
guidance_embed: bool | None = None
vec_in_dim: int | None = None
context_in_dim: int | None = None
in_channels: int | None = None
out_channels: int | None = None
min_image_shape: int | None = None
max_image_shape: int | None = None
default_image_shape: int = 1024
compression_factor: int = 8
text_maxlen: int | None = None
min_latent_dim: int = field(init=False)
max_latent_dim: int = field(init=False)
min_context_latent_dim: int = field(init=False)
max_context_latent_dim: int = field(init=False)
trt_tf32: bool = True
trt_bf16: bool = False
trt_fp8: bool = False
trt_fp4: bool = False
trt_build_strongly_typed: bool = True
@classmethod
def from_args(
cls,
model_name,
**kwargs,
):
if model_name == "flux-dev-kontext" and kwargs["trt_static_shape"]:
warnings.warn("Flux-dev-Kontext does not support static shapes for the encoder.")
kwargs["trt_static_shape"] = False
if model_name == "flux-dev-kontext":
min_image_shape = 1008
max_image_shape = 1040
else:
min_image_shape = 768
max_image_shape = 1360
return cls(
model_name=model_name,
module_name=ModuleName.TRANSFORMER,
guidance_embed=configs[model_name].params.guidance_embed,
vec_in_dim=configs[model_name].params.vec_in_dim,
context_in_dim=configs[model_name].params.context_in_dim,
in_channels=configs[model_name].params.in_channels,
out_channels=configs[model_name].params.out_channels,
text_maxlen=256 if model_name == "flux-schnell" else 512,
min_image_shape=min_image_shape,
max_image_shape=max_image_shape,
**kwargs,
)
def _get_onnx_path(self) -> str:
if self.custom_onnx_path:
return self.custom_onnx_path
repo_id = self._get_repo_id(self.model_name)
typed_model_path = os.path.join(f"{self.module_name.value}.opt", self.precision)
snapshot_path = snapshot_download(repo_id, allow_patterns=[f"{typed_model_path}/*"])
onnx_model_path = os.path.join(snapshot_path, typed_model_path, "model.onnx")
return onnx_model_path
@staticmethod
def _get_latent(image_dim: int, compression_factor: int) -> int:
return ceil(image_dim / (2 * compression_factor))
@staticmethod
def _get_context_dim(
image_height: int,
image_width: int,
compression_factor: int,
) -> int:
seq_len = TransformerConfig._get_latent(
image_dim=image_height,
compression_factor=compression_factor,
) * TransformerConfig._get_latent(
image_dim=image_width,
compression_factor=compression_factor,
)
return seq_len
def __post_init__(self):
min_latent_dim = TransformerConfig._get_context_dim(
image_height=self.min_image_shape,
image_width=self.min_image_shape,
compression_factor=self.compression_factor,
)
max_latent_dim = TransformerConfig._get_context_dim(
image_height=self.max_image_shape,
image_width=self.max_image_shape,
compression_factor=self.compression_factor,
)
if self.model_name == "flux-dev-kontext":
# get min context size
_, min_context_height, min_context_width = min(
(w * h, w, h) for w, h in PREFERED_KONTEXT_RESOLUTIONS
)
self.min_context_latent_dim = TransformerConfig._get_context_dim(
image_height=min_context_height,
image_width=min_context_width,
compression_factor=self.compression_factor,
)
# get max context size
_, max_context_height, max_context_width = max(
(w * h, w, h) for w, h in PREFERED_KONTEXT_RESOLUTIONS
)
self.max_context_latent_dim = TransformerConfig._get_context_dim(
image_height=max_context_height,
image_width=max_context_width,
compression_factor=self.compression_factor,
)
else:
self.min_context_latent_dim = 0
self.max_context_latent_dim = 0
self.min_latent_dim = min_latent_dim + self.min_context_latent_dim
self.max_latent_dim = max_latent_dim + self.max_context_latent_dim
super().__post_init__()
def get_minmax_dims(
self,
batch_size: int,
image_height: int,
image_width: int,
):
min_batch = batch_size if self.trt_static_batch else self.min_batch
max_batch = batch_size if self.trt_static_batch else self.max_batch
# if a model has context: it is always dynamic. target image can be static
# or dynamic for every-model
min_latent_dim = (
self._get_context_dim(
image_height=image_height,
image_width=image_width,
compression_factor=self.compression_factor,
)
+ self.min_context_latent_dim
)
max_latent_dim = (
self._get_context_dim(
image_height=image_height,
image_width=image_width,
compression_factor=self.compression_factor,
)
+ self.max_context_latent_dim
)
# static-shape affects only the target image size
min_latent_dim = min_latent_dim if self.trt_static_shape else self.min_latent_dim
max_latent_dim = max_latent_dim if self.trt_static_shape else self.max_latent_dim
return (min_batch, max_batch, min_latent_dim, max_latent_dim)
def check_dims(
self,
batch_size: int,
image_height: int,
image_width: int,
) -> int:
self._check_batch(batch_size)
assert (
image_height % self.compression_factor == 0 or image_width % self.compression_factor == 0
), f"Image dimensions must be divisible by compression factor {self.compression_factor}"
latent_dim = self._get_context_dim(
image_height=image_height,
image_width=image_width,
compression_factor=self.compression_factor,
)
if self.model_name == "flux-dev-kontext":
# for context models, it is assumed that the optimal context image shape is the same
# as target image shape
latent_dim = 2 * latent_dim
assert self.min_latent_dim <= latent_dim <= self.max_latent_dim, "Image resolution out of boundaries."
return latent_dim
def get_input_profile(
self,
batch_size: int,
image_height: int | None,
image_width: int | None,
) -> dict[str, list[tuple]]:
if self.model_name == "flux-dev-kontext":
assert not self.trt_static_shape, "If Flux-dev-kontext then static_shape must be False."
else:
assert isinstance(image_height, int) and isinstance(
image_width, int
), "Only Flux-dev-kontext allows None image shape"
image_height = self.default_image_shape if image_height is None else image_height
image_width = self.default_image_shape if image_width is None else image_width
opt_latent_dim = self.check_dims(
batch_size=batch_size,
image_height=image_height,
image_width=image_width,
)
(
min_batch,
max_batch,
min_latent_dim,
max_latent_dim,
) = self.get_minmax_dims(
batch_size=batch_size,
image_height=image_height,
image_width=image_width,
)
input_profile = {
"hidden_states": [
(min_batch, min_latent_dim, self.in_channels),
(batch_size, opt_latent_dim, self.in_channels),
(max_batch, max_latent_dim, self.in_channels),
],
"encoder_hidden_states": [
(min_batch, self.text_maxlen, self.context_in_dim),
(batch_size, self.text_maxlen, self.context_in_dim),
(max_batch, self.text_maxlen, self.context_in_dim),
],
"pooled_projections": [
(min_batch, self.vec_in_dim),
(batch_size, self.vec_in_dim),
(max_batch, self.vec_in_dim),
],
"img_ids": [
(min_latent_dim, 3),
(opt_latent_dim, 3),
(max_latent_dim, 3),
],
"txt_ids": [
(self.text_maxlen, 3),
(self.text_maxlen, 3),
(self.text_maxlen, 3),
],
"timestep": [(min_batch,), (batch_size,), (max_batch,)],
}
if self.guidance_embed:
input_profile["guidance"] = [(min_batch,), (batch_size,), (max_batch,)]
return input_profile
| {
"repo_id": "black-forest-labs/flux",
"file_path": "src/flux/trt/trt_config/transformer_trt_config.py",
"license": "Apache License 2.0",
"lines": 246,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
black-forest-labs/flux:src/flux/trt/trt_config/vae_trt_config.py | #
# SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from dataclasses import dataclass, field
from math import ceil
from flux.trt.trt_config.base_trt_config import ModuleName, TRTBaseConfig, register_config
from flux.util import configs
@dataclass
class VAEBaseConfig(TRTBaseConfig):
z_channels: int | None = None
scale_factor: float | None = None
shift_factor: float | None = None
default_image_shape: int = 1024
compression_factor: int = 8
min_image_shape: int | None = None
max_image_shape: int | None = None
min_latent_shape: int = field(init=False)
max_latent_shape: int = field(init=False)
def _get_latent_dim(self, image_dim: int) -> int:
return 2 * ceil(image_dim / (2 * self.compression_factor))
def __post_init__(self):
self.min_latent_shape = self._get_latent_dim(self.min_image_shape)
self.max_latent_shape = self._get_latent_dim(self.max_image_shape)
super().__post_init__()
def check_dims(
self,
batch_size: int,
image_height: int,
image_width: int,
) -> tuple[int, int]:
self._check_batch(batch_size)
assert (
image_height % self.compression_factor == 0 or image_width % self.compression_factor == 0
), f"Image dimensions must be divisible by compression factor {self.compression_factor}"
latent_height = self._get_latent_dim(image_height)
latent_width = self._get_latent_dim(image_width)
assert (
self.min_latent_shape <= latent_height <= self.max_latent_shape
), f"Latent height {latent_height} must be between {self.min_latent_shape} and {self.max_latent_shape}"
assert (
self.min_latent_shape <= latent_width <= self.max_latent_shape
), f"Latent width {latent_width} must be between {self.min_latent_shape} and {self.max_latent_shape}"
return latent_height, latent_width
@register_config(module_name=ModuleName.VAE, precision="bf16")
@dataclass
class VAEDecoderConfig(VAEBaseConfig):
trt_tf32: bool = True
trt_bf16: bool = True
trt_fp8: bool = False
trt_fp4: bool = False
trt_build_strongly_typed: bool = False
@classmethod
def from_args(
cls,
model_name: str,
**kwargs,
):
if model_name == "flux-dev-kontext":
min_image_shape = 672
max_image_shape = 1568
else:
min_image_shape = 768
max_image_shape = 1360
return cls(
model_name=model_name,
module_name=ModuleName.VAE,
z_channels=configs[model_name].ae_params.z_channels,
scale_factor=configs[model_name].ae_params.scale_factor,
shift_factor=configs[model_name].ae_params.shift_factor,
min_image_shape=min_image_shape,
max_image_shape=max_image_shape,
**kwargs,
)
def get_minmax_dims(
self,
batch_size: int,
image_height: int,
image_width: int,
):
min_batch = batch_size if self.trt_static_batch else self.min_batch
max_batch = batch_size if self.trt_static_batch else self.max_batch
latent_height = self._get_latent_dim(image_height)
latent_width = self._get_latent_dim(image_width)
min_latent_height = latent_height if self.trt_static_shape else self.min_latent_shape
max_latent_height = latent_height if self.trt_static_shape else self.max_latent_shape
min_latent_width = latent_width if self.trt_static_shape else self.min_latent_shape
max_latent_width = latent_width if self.trt_static_shape else self.max_latent_shape
return (
min_batch,
max_batch,
min_latent_height,
max_latent_height,
min_latent_width,
max_latent_width,
)
def get_input_profile(
self,
batch_size: int,
image_height: int | None,
image_width: int | None,
):
assert self.model_name == "flux-dev-kontext" or (
image_height is not None and image_width is not None
), "Only Flux-dev-kontext allows None image shape"
assert not self.trt_static_shape or (
image_height is not None and image_width is not None
), "If static_shape is True, image_height and image_width must be not None"
image_height = self.default_image_shape if image_height is None else image_height
image_width = self.default_image_shape if image_width is None else image_width
latent_height, latent_width = self.check_dims(
batch_size=batch_size,
image_height=image_height,
image_width=image_width,
)
(
min_batch,
max_batch,
min_latent_height,
max_latent_height,
min_latent_width,
max_latent_width,
) = self.get_minmax_dims(
batch_size=batch_size,
image_height=image_height,
image_width=image_width,
)
return {
"latent": [
(min_batch, self.z_channels, min_latent_height, min_latent_width),
(batch_size, self.z_channels, latent_height, latent_width),
(max_batch, self.z_channels, max_latent_height, max_latent_width),
]
}
@register_config(module_name=ModuleName.VAE_ENCODER, precision="bf16")
@dataclass
class VAEEncoderConfig(VAEBaseConfig):
trt_tf32: bool = True
trt_bf16: bool = True
trt_fp8: bool = False
trt_fp4: bool = False
trt_build_strongly_typed: bool = False
@classmethod
def from_args(cls, model_name: str, **kwargs):
if model_name == "flux-dev-kontext" and kwargs["trt_static_shape"]:
warnings.warn("Flux-dev-Kontext does not support static shapes for the encoder.")
kwargs["trt_static_shape"] = False
if model_name == "flux-dev-kontext":
min_image_shape = 672
max_image_shape = 1568
else:
min_image_shape = 768
max_image_shape = 1360
return cls(
model_name=model_name,
module_name=ModuleName.VAE_ENCODER,
z_channels=configs[model_name].ae_params.z_channels,
scale_factor=configs[model_name].ae_params.scale_factor,
shift_factor=configs[model_name].ae_params.shift_factor,
min_image_shape=min_image_shape,
max_image_shape=max_image_shape,
**kwargs,
)
def get_minmax_dims(
self,
batch_size: int,
image_height: int,
image_width: int,
):
min_batch = batch_size if self.trt_static_batch else self.min_batch
max_batch = batch_size if self.trt_static_batch else self.max_batch
min_image_height = image_height if self.trt_static_shape else self.min_image_shape
max_image_height = image_height if self.trt_static_shape else self.max_image_shape
min_image_width = image_width if self.trt_static_shape else self.min_image_shape
max_image_width = image_width if self.trt_static_shape else self.max_image_shape
return (
min_batch,
max_batch,
min_image_height,
max_image_height,
min_image_width,
max_image_width,
)
def get_input_profile(
self,
batch_size: int,
image_height: int | None,
image_width: int | None,
):
if self.model_name == "flux-dev-kontext":
assert (
not self.trt_static_shape
), "Flux-dev-kontext does not support dynamic shapes for the encoder."
else:
assert isinstance(image_height, int) and isinstance(
image_width, int
), "Only Flux-dev-kontext allows None image shape"
image_height = self.default_image_shape if image_height is None else image_height
image_width = self.default_image_shape if image_width is None else image_width
self.check_dims(
batch_size=batch_size,
image_height=image_height,
image_width=image_width,
)
(
min_batch,
max_batch,
min_image_height,
max_image_height,
min_image_width,
max_image_width,
) = self.get_minmax_dims(
batch_size=batch_size,
image_height=image_height,
image_width=image_width,
)
return {
"images": [
(min_batch, 3, min_image_height, min_image_width),
(batch_size, 3, image_height, image_width),
(max_batch, 3, max_image_height, max_image_width),
],
}
| {
"repo_id": "black-forest-labs/flux",
"file_path": "src/flux/trt/trt_config/vae_trt_config.py",
"license": "Apache License 2.0",
"lines": 235,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
browser-use/browser-use:browser_use/browser/watchdogs/captcha_watchdog.py | """Captcha solver watchdog — monitors captcha events from the browser proxy.
Listens for BrowserUse.captchaSolverStarted/Finished CDP events and exposes a
wait_if_captcha_solving() method that the agent step loop uses to block until
a captcha is resolved (with a configurable timeout).
NOTE: Only a single captcha solve is tracked at a time. If multiple captchas
overlap (e.g. rapid successive navigations), only the latest one is tracked and
earlier in-flight waits may return prematurely.
"""
import asyncio
from dataclasses import dataclass
from typing import Any, ClassVar, Literal
from bubus import BaseEvent
from cdp_use.cdp.browseruse.events import CaptchaSolverFinishedEvent as CDPCaptchaSolverFinishedEvent
from cdp_use.cdp.browseruse.events import CaptchaSolverStartedEvent as CDPCaptchaSolverStartedEvent
from pydantic import PrivateAttr
from browser_use.browser.events import (
BrowserConnectedEvent,
BrowserStoppedEvent,
CaptchaSolverFinishedEvent,
CaptchaSolverStartedEvent,
_get_timeout,
)
from browser_use.browser.watchdog_base import BaseWatchdog
CaptchaResultType = Literal['success', 'failed', 'timeout', 'unknown']
@dataclass
class CaptchaWaitResult:
"""Result returned by wait_if_captcha_solving() when the agent had to wait."""
waited: bool
vendor: str
url: str
duration_ms: int
result: CaptchaResultType
class CaptchaWatchdog(BaseWatchdog):
"""Monitors captcha solver events from the browser proxy.
When the proxy detects a CAPTCHA and starts solving it, a CDP event
``BrowserUse.captchaSolverStarted`` is sent over the WebSocket. This
watchdog catches that event and blocks the agent's step loop (via
``wait_if_captcha_solving``) until ``BrowserUse.captchaSolverFinished``
arrives or the configurable timeout expires.
"""
# Event contracts
LISTENS_TO: ClassVar[list[type[BaseEvent]]] = [
BrowserConnectedEvent,
BrowserStoppedEvent,
]
EMITS: ClassVar[list[type[BaseEvent]]] = [
CaptchaSolverStartedEvent,
CaptchaSolverFinishedEvent,
]
# --- private state ---
_captcha_solving: bool = PrivateAttr(default=False)
_captcha_solved_event: asyncio.Event = PrivateAttr(default_factory=asyncio.Event)
_captcha_info: dict[str, Any] = PrivateAttr(default_factory=dict)
_captcha_result: CaptchaResultType = PrivateAttr(default='unknown')
_captcha_duration_ms: int = PrivateAttr(default=0)
_cdp_handlers_registered: bool = PrivateAttr(default=False)
def model_post_init(self, __context: Any) -> None:
# Start in "not blocked" state so callers never wait when there is no captcha.
self._captcha_solved_event.set()
# ------------------------------------------------------------------
# Event handlers
# ------------------------------------------------------------------
async def on_BrowserConnectedEvent(self, event: BrowserConnectedEvent) -> None:
"""Register CDP event handlers for BrowserUse captcha solver events."""
if self._cdp_handlers_registered:
self.logger.debug('CaptchaWatchdog: CDP handlers already registered, skipping')
return
cdp_client = self.browser_session.cdp_client
def _on_captcha_started(event_data: CDPCaptchaSolverStartedEvent, session_id: str | None) -> None:
try:
self._captcha_solving = True
self._captcha_result = 'unknown'
self._captcha_duration_ms = 0
self._captcha_info = {
'vendor': event_data.get('vendor', 'unknown'),
'url': event_data.get('url', ''),
'targetId': event_data.get('targetId', ''),
'startedAt': event_data.get('startedAt', 0),
}
# Block any waiter
self._captcha_solved_event.clear()
vendor = self._captcha_info['vendor']
url = self._captcha_info['url']
self.logger.info(f'🔒 Captcha solving started: {vendor} on {url}')
self.event_bus.dispatch(
CaptchaSolverStartedEvent(
target_id=event_data.get('targetId', ''),
vendor=vendor,
url=url,
started_at=event_data.get('startedAt', 0),
)
)
except Exception:
self.logger.exception('Error handling captchaSolverStarted CDP event')
# Ensure consistent state: unblock any waiter
self._captcha_solving = False
self._captcha_solved_event.set()
def _on_captcha_finished(event_data: CDPCaptchaSolverFinishedEvent, session_id: str | None) -> None:
try:
success = event_data.get('success', False)
self._captcha_solving = False
self._captcha_duration_ms = event_data.get('durationMs', 0)
self._captcha_result = 'success' if success else 'failed'
vendor = event_data.get('vendor', self._captcha_info.get('vendor', 'unknown'))
url = event_data.get('url', self._captcha_info.get('url', ''))
duration_s = self._captcha_duration_ms / 1000
self.logger.info(f'🔓 Captcha solving finished: {self._captcha_result} — {vendor} on {url} ({duration_s:.1f}s)')
# Unblock any waiter
self._captcha_solved_event.set()
self.event_bus.dispatch(
CaptchaSolverFinishedEvent(
target_id=event_data.get('targetId', ''),
vendor=vendor,
url=url,
duration_ms=self._captcha_duration_ms,
finished_at=event_data.get('finishedAt', 0),
success=success,
)
)
except Exception:
self.logger.exception('Error handling captchaSolverFinished CDP event')
# Ensure consistent state: unblock any waiter
self._captcha_solving = False
self._captcha_solved_event.set()
cdp_client.register.BrowserUse.captchaSolverStarted(_on_captcha_started)
cdp_client.register.BrowserUse.captchaSolverFinished(_on_captcha_finished)
self._cdp_handlers_registered = True
self.logger.debug('🔒 CaptchaWatchdog: registered CDP event handlers for BrowserUse captcha events')
async def on_BrowserStoppedEvent(self, event: BrowserStoppedEvent) -> None:
"""Clear captcha state when the browser disconnects so nothing hangs."""
self._captcha_solving = False
self._captcha_result = 'unknown'
self._captcha_duration_ms = 0
self._captcha_info = {}
self._captcha_solved_event.set()
self._cdp_handlers_registered = False
# ------------------------------------------------------------------
# Public API
# ------------------------------------------------------------------
async def wait_if_captcha_solving(self, timeout: float | None = None) -> CaptchaWaitResult | None:
"""Wait if a captcha is currently being solved.
Returns:
``None`` if no captcha was in progress.
A ``CaptchaWaitResult`` with the outcome otherwise.
"""
if not self._captcha_solving:
return None
if timeout is None:
timeout = _get_timeout('TIMEOUT_CaptchaSolverWait', 120.0)
assert timeout is not None
vendor = self._captcha_info.get('vendor', 'unknown')
url = self._captcha_info.get('url', '')
self.logger.info(f'⏳ Waiting for {vendor} captcha to be solved on {url} (timeout={timeout}s)...')
try:
await asyncio.wait_for(self._captcha_solved_event.wait(), timeout=timeout)
return CaptchaWaitResult(
waited=True,
vendor=vendor,
url=url,
duration_ms=self._captcha_duration_ms,
result=self._captcha_result,
)
except TimeoutError:
# Timed out — unblock and report
self._captcha_solving = False
self._captcha_solved_event.set()
self.logger.warning(f'⏰ Captcha wait timed out after {timeout}s for {vendor} on {url}')
return CaptchaWaitResult(
waited=True,
vendor=vendor,
url=url,
duration_ms=int(timeout * 1000),
result='timeout',
)
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/browser/watchdogs/captcha_watchdog.py",
"license": "MIT License",
"lines": 174,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/browser/watchdogs/har_recording_watchdog.py | """HAR Recording Watchdog for Browser-Use sessions.
Captures HTTPS network activity via CDP Network domain and writes a HAR 1.2
file on browser shutdown. Respects `record_har_content` (omit/embed/attach)
and `record_har_mode` (full/minimal).
"""
from __future__ import annotations
import base64
import hashlib
import json
from dataclasses import dataclass, field
from importlib import metadata as importlib_metadata
from pathlib import Path
from typing import ClassVar
from bubus import BaseEvent
from cdp_use.cdp.network.events import (
DataReceivedEvent,
LoadingFailedEvent,
LoadingFinishedEvent,
RequestWillBeSentEvent,
ResponseReceivedEvent,
)
from cdp_use.cdp.page.events import FrameNavigatedEvent, LifecycleEventEvent
from browser_use.browser.events import BrowserConnectedEvent, BrowserStopEvent
from browser_use.browser.watchdog_base import BaseWatchdog
@dataclass
class _HarContent:
mime_type: str | None = None
text_b64: str | None = None # for embed
file_rel: str | None = None # for attach
size: int | None = None
@dataclass
class _HarEntryBuilder:
request_id: str = ''
frame_id: str | None = None
document_url: str | None = None
url: str | None = None
method: str | None = None
request_headers: dict = field(default_factory=dict)
request_body: bytes | None = None
post_data: str | None = None # CDP postData field
status: int | None = None
status_text: str | None = None
response_headers: dict = field(default_factory=dict)
mime_type: str | None = None
encoded_data: bytearray = field(default_factory=bytearray)
failed: bool = False
# timing info (CDP timestamps are monotonic seconds); wallTime is epoch seconds
ts_request: float | None = None
wall_time_request: float | None = None
ts_response: float | None = None
ts_finished: float | None = None
encoded_data_length: int | None = None
response_body: bytes | None = None
content_length: int | None = None # From Content-Length header
protocol: str | None = None
server_ip_address: str | None = None
server_port: int | None = None
security_details: dict | None = None
transfer_size: int | None = None
def _is_https(url: str | None) -> bool:
return bool(url and url.lower().startswith('https://'))
def _origin(url: str) -> str:
# Very small origin extractor, assumes https URLs
# https://host[:port]/...
if not url:
return ''
try:
without_scheme = url.split('://', 1)[1]
host_port = without_scheme.split('/', 1)[0]
return f'https://{host_port}'
except Exception:
return ''
def _mime_to_extension(mime_type: str | None) -> str:
"""Map MIME type to file extension, matching Playwright's behavior."""
if not mime_type:
return 'bin'
mime_lower = mime_type.lower().split(';')[0].strip()
# Common MIME type to extension mapping
mime_map = {
'text/html': 'html',
'text/css': 'css',
'text/javascript': 'js',
'application/javascript': 'js',
'application/x-javascript': 'js',
'application/json': 'json',
'application/xml': 'xml',
'text/xml': 'xml',
'text/plain': 'txt',
'image/png': 'png',
'image/jpeg': 'jpg',
'image/jpg': 'jpg',
'image/gif': 'gif',
'image/webp': 'webp',
'image/svg+xml': 'svg',
'image/x-icon': 'ico',
'font/woff': 'woff',
'font/woff2': 'woff2',
'application/font-woff': 'woff',
'application/font-woff2': 'woff2',
'application/x-font-woff': 'woff',
'application/x-font-woff2': 'woff2',
'font/ttf': 'ttf',
'application/x-font-ttf': 'ttf',
'font/otf': 'otf',
'application/x-font-opentype': 'otf',
'application/pdf': 'pdf',
'application/zip': 'zip',
'application/x-zip-compressed': 'zip',
'video/mp4': 'mp4',
'video/webm': 'webm',
'audio/mpeg': 'mp3',
'audio/mp3': 'mp3',
'audio/wav': 'wav',
'audio/ogg': 'ogg',
}
return mime_map.get(mime_lower, 'bin')
def _generate_har_filename(content: bytes, mime_type: str | None) -> str:
	"""Build a Playwright-compatible sidecar filename: '<sha1-of-content>.<ext>'."""
	digest = hashlib.sha1(content).hexdigest()
	return f'{digest}.{_mime_to_extension(mime_type)}'
class HarRecordingWatchdog(BaseWatchdog):
	"""Collects HTTPS requests/responses and writes a HAR 1.2 file on stop."""

	# Events this watchdog subscribes to on the session event bus.
	LISTENS_TO: ClassVar[list[type[BaseEvent]]] = [BrowserConnectedEvent, BrowserStopEvent]
	EMITS: ClassVar[list[type[BaseEvent]]] = []

	def __init__(self, *args, **kwargs) -> None:
		"""Initialize recording state; actual capture starts on BrowserConnectedEvent."""
		super().__init__(*args, **kwargs)
		# True only after CDP listeners were registered successfully.
		self._enabled: bool = False
		# requestId -> accumulating entry builder.
		self._entries: dict[str, _HarEntryBuilder] = {}
		self._top_level_pages: dict[
			str, dict
		] = {}  # frameId -> {url, title, startedDateTime, monotonic_start, onContentLoad, onLoad}
	async def on_BrowserConnectedEvent(self, event: BrowserConnectedEvent) -> None:
		"""Start recording if the profile requests it: enable CDP domains and hook listeners.

		No-op when the profile has no ``record_har_path``. On any setup failure
		recording is disabled rather than failing the browser start.
		"""
		profile = self.browser_session.browser_profile
		if not profile.record_har_path:
			return
		# Normalize config
		self._content_mode = (profile.record_har_content or 'embed').lower()
		self._mode = (profile.record_har_mode or 'full').lower()
		self._har_path = Path(str(profile.record_har_path)).expanduser().resolve()
		self._har_dir = self._har_path.parent
		self._har_dir.mkdir(parents=True, exist_ok=True)
		try:
			# Enable Network and Page domains for events
			cdp_session = await self.browser_session.get_or_create_cdp_session()
			await cdp_session.cdp_client.send.Network.enable(session_id=cdp_session.session_id)
			await cdp_session.cdp_client.send.Page.enable(session_id=cdp_session.session_id)
			# Query browser version for HAR log.browser
			try:
				version_info = await self.browser_session.cdp_client.send.Browser.getVersion()
				self._browser_name = version_info.get('product') or 'Chromium'
				# NOTE(review): 'jsVersion' is the V8 engine version, not the browser
				# version — confirm this is the intended value for log.browser.version.
				self._browser_version = version_info.get('jsVersion') or ''
			except Exception:
				self._browser_name = 'Chromium'
				self._browser_version = ''
			cdp = self.browser_session.cdp_client.register
			cdp.Network.requestWillBeSent(self._on_request_will_be_sent)
			cdp.Network.responseReceived(self._on_response_received)
			cdp.Network.dataReceived(self._on_data_received)
			cdp.Network.loadingFinished(self._on_loading_finished)
			cdp.Network.loadingFailed(self._on_loading_failed)
			cdp.Page.lifecycleEvent(self._on_lifecycle_event)
			cdp.Page.frameNavigated(self._on_frame_navigated)
			self._enabled = True
			self.logger.info(f'📊 Starting HAR recording to {self._har_path}')
		except Exception as e:
			self.logger.warning(f'Failed to enable HAR recording: {e}')
			self._enabled = False
	async def on_BrowserStopEvent(self, event: BrowserStopEvent) -> None:
		"""Flush collected traffic to the HAR file when the browser is stopping."""
		if not self._enabled:
			return
		try:
			await self._write_har()
			self.logger.info(f'📊 HAR file saved: {self._har_path}')
		except Exception as e:
			# Best-effort: a failed write must not block browser shutdown.
			self.logger.warning(f'Failed to write HAR: {e}')
# =============== CDP Event Handlers (sync) ==================
def _on_request_will_be_sent(self, params: RequestWillBeSentEvent, session_id: str | None) -> None:
try:
req = params.get('request', {}) if hasattr(params, 'get') else getattr(params, 'request', {})
url = req.get('url') if isinstance(req, dict) else getattr(req, 'url', None)
if not _is_https(url):
return # HTTPS-only requirement (only HTTPS requests are recorded for now)
request_id = params.get('requestId') if hasattr(params, 'get') else getattr(params, 'requestId', None)
if not request_id:
return
entry = self._entries.setdefault(request_id, _HarEntryBuilder(request_id=request_id))
entry.url = url
entry.method = req.get('method') if isinstance(req, dict) else getattr(req, 'method', None)
entry.post_data = req.get('postData') if isinstance(req, dict) else getattr(req, 'postData', None)
# Convert headers to plain dict, handling various formats
headers_raw = req.get('headers') if isinstance(req, dict) else getattr(req, 'headers', None)
if headers_raw is None:
entry.request_headers = {}
elif isinstance(headers_raw, dict):
entry.request_headers = {k.lower(): str(v) for k, v in headers_raw.items()}
elif isinstance(headers_raw, list):
entry.request_headers = {
h.get('name', '').lower(): str(h.get('value') or '') for h in headers_raw if isinstance(h, dict)
}
else:
# Handle Headers type or other formats - convert to dict
try:
headers_dict = dict(headers_raw) if hasattr(headers_raw, '__iter__') else {}
entry.request_headers = {k.lower(): str(v) for k, v in headers_dict.items()}
except Exception:
entry.request_headers = {}
entry.frame_id = params.get('frameId') if hasattr(params, 'get') else getattr(params, 'frameId', None)
entry.document_url = (
params.get('documentURL')
if hasattr(params, 'get')
else getattr(params, 'documentURL', None) or entry.document_url
)
# Timing anchors
entry.ts_request = params.get('timestamp') if hasattr(params, 'get') else getattr(params, 'timestamp', None)
entry.wall_time_request = params.get('wallTime') if hasattr(params, 'get') else getattr(params, 'wallTime', None)
# Track top-level navigations for page context
req_type = params.get('type') if hasattr(params, 'get') else getattr(params, 'type', None)
is_same_doc = (
params.get('isSameDocument', False) if hasattr(params, 'get') else getattr(params, 'isSameDocument', False)
)
if req_type == 'Document' and not is_same_doc:
# best-effort: consider as navigation
if entry.frame_id and url:
if entry.frame_id not in self._top_level_pages:
self._top_level_pages[entry.frame_id] = {
'url': str(url),
'title': str(url), # Default to URL, will be updated from DOM
'startedDateTime': entry.wall_time_request,
'monotonic_start': entry.ts_request, # Track monotonic start time for timing calculations
'onContentLoad': -1,
'onLoad': -1,
}
else:
# Update startedDateTime and monotonic_start if this is earlier
page_info = self._top_level_pages[entry.frame_id]
if entry.wall_time_request and (
page_info['startedDateTime'] is None or entry.wall_time_request < page_info['startedDateTime']
):
page_info['startedDateTime'] = entry.wall_time_request
page_info['monotonic_start'] = entry.ts_request
except Exception as e:
self.logger.debug(f'requestWillBeSent handling error: {e}')
	def _on_response_received(self, params: ResponseReceivedEvent, session_id: str | None) -> None:
		"""Capture status, headers, MIME type, protocol and TLS details for a tracked request."""
		try:
			request_id = params.get('requestId') if hasattr(params, 'get') else getattr(params, 'requestId', None)
			if not request_id or request_id not in self._entries:
				return
			response = params.get('response', {}) if hasattr(params, 'get') else getattr(params, 'response', {})
			entry = self._entries[request_id]
			entry.status = response.get('status') if isinstance(response, dict) else getattr(response, 'status', None)
			entry.status_text = (
				response.get('statusText') if isinstance(response, dict) else getattr(response, 'statusText', None)
			)
			# Extract Content-Length for compression calculation (before converting headers)
			headers_raw = response.get('headers') if isinstance(response, dict) else getattr(response, 'headers', None)
			if headers_raw:
				if isinstance(headers_raw, dict):
					cl_str = headers_raw.get('content-length') or headers_raw.get('Content-Length')
				elif isinstance(headers_raw, list):
					cl_header = next(
						(h for h in headers_raw if isinstance(h, dict) and h.get('name', '').lower() == 'content-length'), None
					)
					cl_str = cl_header.get('value') if cl_header else None
				else:
					cl_str = None
				if cl_str:
					try:
						entry.content_length = int(cl_str)
					except Exception:
						pass
			# Convert headers to plain dict, handling various formats
			if headers_raw is None:
				entry.response_headers = {}
			elif isinstance(headers_raw, dict):
				entry.response_headers = {k.lower(): str(v) for k, v in headers_raw.items()}
			elif isinstance(headers_raw, list):
				entry.response_headers = {
					h.get('name', '').lower(): str(h.get('value') or '') for h in headers_raw if isinstance(h, dict)
				}
			else:
				# Handle Headers type or other formats - convert to dict
				try:
					headers_dict = dict(headers_raw) if hasattr(headers_raw, '__iter__') else {}
					entry.response_headers = {k.lower(): str(v) for k, v in headers_dict.items()}
				except Exception:
					entry.response_headers = {}
			entry.mime_type = response.get('mimeType') if isinstance(response, dict) else getattr(response, 'mimeType', None)
			entry.ts_response = params.get('timestamp') if hasattr(params, 'get') else getattr(params, 'timestamp', None)
			# Normalize CDP protocol strings (e.g. 'h2') to HAR httpVersion values.
			protocol_raw = response.get('protocol') if isinstance(response, dict) else getattr(response, 'protocol', None)
			if protocol_raw:
				protocol_lower = str(protocol_raw).lower()
				if protocol_lower == 'h2' or protocol_lower.startswith('http/2'):
					entry.protocol = 'HTTP/2.0'
				elif protocol_lower.startswith('http/1.1'):
					entry.protocol = 'HTTP/1.1'
				elif protocol_lower.startswith('http/1.0'):
					entry.protocol = 'HTTP/1.0'
				else:
					entry.protocol = str(protocol_raw).upper()
			entry.server_ip_address = (
				response.get('remoteIPAddress') if isinstance(response, dict) else getattr(response, 'remoteIPAddress', None)
			)
			server_port_raw = response.get('remotePort') if isinstance(response, dict) else getattr(response, 'remotePort', None)
			if server_port_raw is not None:
				try:
					entry.server_port = int(server_port_raw)
				except (ValueError, TypeError):
					pass
			# Extract security details (TLS info)
			security_details_raw = (
				response.get('securityDetails') if isinstance(response, dict) else getattr(response, 'securityDetails', None)
			)
			if security_details_raw:
				try:
					entry.security_details = dict(security_details_raw)
				except Exception:
					pass
		except Exception as e:
			self.logger.debug(f'responseReceived handling error: {e}')
def _on_data_received(self, params: DataReceivedEvent, session_id: str | None) -> None:
try:
request_id = params.get('requestId') if hasattr(params, 'get') else getattr(params, 'requestId', None)
if not request_id or request_id not in self._entries:
return
data = params.get('data') if hasattr(params, 'get') else getattr(params, 'data', None)
if isinstance(data, str):
try:
self._entries[request_id].encoded_data.extend(data.encode('latin1'))
except Exception:
pass
except Exception as e:
self.logger.debug(f'dataReceived handling error: {e}')
def _on_loading_finished(self, params: LoadingFinishedEvent, session_id: str | None) -> None:
try:
request_id = params.get('requestId') if hasattr(params, 'get') else getattr(params, 'requestId', None)
if not request_id or request_id not in self._entries:
return
entry = self._entries[request_id]
entry.ts_finished = params.get('timestamp')
# Fetch response body via CDP as dataReceived may be incomplete
import asyncio as _asyncio
async def _fetch_body(self_ref, req_id, sess_id):
try:
resp = await self_ref.browser_session.cdp_client.send.Network.getResponseBody(
params={'requestId': req_id}, session_id=sess_id
)
data = resp.get('body', b'')
if resp.get('base64Encoded'):
import base64 as _b64
data = _b64.b64decode(data)
else:
# Ensure data is bytes even if CDP returns a string
if isinstance(data, str):
data = data.encode('utf-8', errors='replace')
# Ensure we always have bytes
if not isinstance(data, bytes):
data = bytes(data) if data else b''
entry.response_body = data
except Exception:
pass
# Always schedule the response body fetch task
_asyncio.create_task(_fetch_body(self, request_id, session_id))
encoded_length = (
params.get('encodedDataLength') if hasattr(params, 'get') else getattr(params, 'encodedDataLength', None)
)
if encoded_length is not None:
try:
entry.encoded_data_length = int(encoded_length)
entry.transfer_size = entry.encoded_data_length
except Exception:
entry.encoded_data_length = None
except Exception as e:
self.logger.debug(f'loadingFinished handling error: {e}')
def _on_loading_failed(self, params: LoadingFailedEvent, session_id: str | None) -> None:
try:
request_id = params.get('requestId') if hasattr(params, 'get') else getattr(params, 'requestId', None)
if request_id and request_id in self._entries:
self._entries[request_id].failed = True
except Exception as e:
self.logger.debug(f'loadingFailed handling error: {e}')
	# ================ Page Lifecycle Handlers ===================
def _on_lifecycle_event(self, params: LifecycleEventEvent, session_id: str | None) -> None:
"""Handle Page.lifecycleEvent for tracking page load timings."""
try:
frame_id = params.get('frameId') if hasattr(params, 'get') else getattr(params, 'frameId', None)
name = params.get('name') if hasattr(params, 'get') else getattr(params, 'name', None)
timestamp = params.get('timestamp') if hasattr(params, 'get') else getattr(params, 'timestamp', None)
if not frame_id or not name or frame_id not in self._top_level_pages:
return
page_info = self._top_level_pages[frame_id]
# Use monotonic_start instead of startedDateTime (wall-clock) for timing calculations
monotonic_start = page_info.get('monotonic_start')
if name == 'DOMContentLoaded' and monotonic_start is not None:
# Calculate milliseconds since page start using monotonic timestamps
try:
elapsed_ms = int(round((timestamp - monotonic_start) * 1000))
page_info['onContentLoad'] = max(0, elapsed_ms)
except Exception:
pass
elif name == 'load' and monotonic_start is not None:
try:
elapsed_ms = int(round((timestamp - monotonic_start) * 1000))
page_info['onLoad'] = max(0, elapsed_ms)
except Exception:
pass
except Exception as e:
self.logger.debug(f'lifecycleEvent handling error: {e}')
def _on_frame_navigated(self, params: FrameNavigatedEvent, session_id: str | None) -> None:
"""Handle Page.frameNavigated to update page title from DOM."""
try:
frame = params.get('frame') if hasattr(params, 'get') else getattr(params, 'frame', None)
if not frame:
return
frame_id = frame.get('id') if isinstance(frame, dict) else getattr(frame, 'id', None)
title = (
frame.get('name') or frame.get('url')
if isinstance(frame, dict)
else getattr(frame, 'name', None) or getattr(frame, 'url', None)
)
if frame_id and frame_id in self._top_level_pages:
# Try to get actual page title via Runtime.evaluate if possible
# For now, use frame name or URL as fallback
if title:
self._top_level_pages[frame_id]['title'] = str(title)
except Exception as e:
self.logger.debug(f'frameNavigated handling error: {e}')
# ===================== HAR Writing ==========================
	async def _write_har(self) -> None:
		"""Assemble the HAR 1.2 object from collected entries and atomically write it to disk.

		Honors ``record_har_content`` (embed/attach/omit) for body handling and
		``record_har_mode`` (full/minimal) via ``_include_entry``.
		"""
		# Filter by mode and HTTPS already respected at collection time
		entries = [e for e in self._entries.values() if self._include_entry(e)]
		har_entries = []
		sidecar_dir: Path | None = None
		if self._content_mode == 'attach':
			sidecar_dir = self._har_dir / f'{self._har_path.stem}_har_parts'
			sidecar_dir.mkdir(parents=True, exist_ok=True)
		for e in entries:
			content_obj: dict = {'mimeType': e.mime_type or ''}
			# Get body data, preferring response_body over encoded_data
			if e.response_body is not None:
				body_data = e.response_body
			else:
				body_data = e.encoded_data
			# Defensive conversion: ensure body_data is always bytes
			if isinstance(body_data, str):
				body_bytes = body_data.encode('utf-8', errors='replace')
			elif isinstance(body_data, bytearray):
				body_bytes = bytes(body_data)
			elif isinstance(body_data, bytes):
				body_bytes = body_data
			else:
				# Fallback: try to convert to bytes
				try:
					body_bytes = bytes(body_data) if body_data else b''
				except (TypeError, ValueError):
					body_bytes = b''
			content_size = len(body_bytes)
			# Calculate compression (bytes saved by compression)
			compression = 0
			if e.content_length is not None and e.encoded_data_length is not None:
				compression = max(0, e.content_length - e.encoded_data_length)
			if self._content_mode == 'embed' and content_size > 0:
				# Prefer plain text; fallback to base64 only if decoding fails
				try:
					text_decoded = body_bytes.decode('utf-8')
					content_obj['text'] = text_decoded
					content_obj['size'] = content_size
					content_obj['compression'] = compression
				except UnicodeDecodeError:
					content_obj['text'] = base64.b64encode(body_bytes).decode('ascii')
					content_obj['encoding'] = 'base64'
					content_obj['size'] = content_size
					content_obj['compression'] = compression
			elif self._content_mode == 'attach' and content_size > 0 and sidecar_dir is not None:
				# Body is written to a hash-named sidecar file next to the HAR.
				filename = _generate_har_filename(body_bytes, e.mime_type)
				(sidecar_dir / filename).write_bytes(body_bytes)
				content_obj['_file'] = filename
				content_obj['size'] = content_size
				content_obj['compression'] = compression
			else:
				# omit or empty
				content_obj['size'] = content_size
				if content_size > 0:
					content_obj['compression'] = compression
			started_date_time, total_time_ms, timings = self._compute_timings(e)
			req_headers_list = [{'name': k, 'value': str(v)} for k, v in (e.request_headers or {}).items()]
			resp_headers_list = [{'name': k, 'value': str(v)} for k, v in (e.response_headers or {}).items()]
			request_headers_size = self._calc_headers_size(e.method or 'GET', e.url or '', req_headers_list)
			response_headers_size = self._calc_headers_size(None, None, resp_headers_list)
			request_body_size = self._calc_request_body_size(e)
			request_post_data = None
			if e.post_data and self._content_mode != 'omit':
				if self._content_mode == 'embed':
					request_post_data = {'mimeType': e.request_headers.get('content-type', ''), 'text': e.post_data}
				elif self._content_mode == 'attach' and sidecar_dir is not None:
					post_data_bytes = e.post_data.encode('utf-8')
					req_mime_type = e.request_headers.get('content-type', 'text/plain')
					req_filename = _generate_har_filename(post_data_bytes, req_mime_type)
					(sidecar_dir / req_filename).write_bytes(post_data_bytes)
					request_post_data = {
						'mimeType': req_mime_type,
						'_file': req_filename,
					}
			http_version = e.protocol if e.protocol else 'HTTP/1.1'
			# Wire size preference: transferSize, then encodedDataLength, then body length, else -1 (unknown).
			response_body_size = e.transfer_size
			if response_body_size is None:
				response_body_size = e.encoded_data_length
			if response_body_size is None:
				response_body_size = content_size if content_size > 0 else -1
			entry_dict = {
				'startedDateTime': started_date_time,
				'time': total_time_ms,
				'request': {
					'method': e.method or 'GET',
					'url': e.url or '',
					'httpVersion': http_version,
					'headers': req_headers_list,
					'queryString': [],
					'cookies': [],
					'headersSize': request_headers_size,
					'bodySize': request_body_size,
					'postData': request_post_data,
				},
				'response': {
					'status': e.status or 0,
					'statusText': e.status_text or '',
					'httpVersion': http_version,
					'headers': resp_headers_list,
					'cookies': [],
					'content': content_obj,
					'redirectURL': '',
					'headersSize': response_headers_size,
					'bodySize': response_body_size,
				},
				'cache': {},
				'timings': timings,
				'pageref': self._page_ref_for_entry(e),
			}
			# Add security/TLS details if available
			if e.server_ip_address:
				entry_dict['serverIPAddress'] = e.server_ip_address
			if e.server_port is not None:
				entry_dict['_serverPort'] = e.server_port
			if e.security_details:
				# Filter to match Playwright's minimal security details set
				security_filtered = {}
				if 'protocol' in e.security_details:
					security_filtered['protocol'] = e.security_details['protocol']
				if 'subjectName' in e.security_details:
					security_filtered['subjectName'] = e.security_details['subjectName']
				if 'issuer' in e.security_details:
					security_filtered['issuer'] = e.security_details['issuer']
				if 'validFrom' in e.security_details:
					security_filtered['validFrom'] = e.security_details['validFrom']
				if 'validTo' in e.security_details:
					security_filtered['validTo'] = e.security_details['validTo']
				if security_filtered:
					entry_dict['_securityDetails'] = security_filtered
			if e.transfer_size is not None:
				entry_dict['response']['_transferSize'] = e.transfer_size
			har_entries.append(entry_dict)
		# Try to include our library version in creator
		try:
			bu_version = importlib_metadata.version('browser-use')
		except Exception:
			# Fallback when running from source without installed package metadata
			bu_version = 'dev'
		har_obj = {
			'log': {
				'version': '1.2',
				'creator': {'name': 'browser-use', 'version': bu_version},
				'browser': {'name': self._browser_name, 'version': self._browser_version},
				'pages': [
					{
						'id': f'page@{pid}',  # Use Playwright format: "page@{frame_id}"
						'title': page_info.get('title', page_info.get('url', '')),
						'startedDateTime': self._format_page_started_datetime(page_info.get('startedDateTime')),
						# Drop timings that never fired (-1 sentinel) instead of emitting them.
						'pageTimings': (
							(lambda _ocl, _ol: ({k: v for k, v in (('onContentLoad', _ocl), ('onLoad', _ol)) if v is not None}))(
								(page_info.get('onContentLoad') if page_info.get('onContentLoad', -1) >= 0 else None),
								(page_info.get('onLoad') if page_info.get('onLoad', -1) >= 0 else None),
							)
						),
					}
					for pid, page_info in self._top_level_pages.items()
				],
				'entries': har_entries,
			}
		}
		# Write to a temp file then rename, so readers never see a half-written HAR.
		tmp_path = self._har_path.with_suffix(self._har_path.suffix + '.tmp')
		# Write as bytes explicitly to avoid any text/binary mode confusion in different environments
		tmp_path.write_bytes(json.dumps(har_obj, indent=2, ensure_ascii=False).encode('utf-8'))
		tmp_path.replace(self._har_path)
def _format_page_started_datetime(self, timestamp: float | None) -> str:
"""Format page startedDateTime from timestamp."""
if timestamp is None:
return ''
try:
from datetime import datetime, timezone
return datetime.fromtimestamp(timestamp, tz=timezone.utc).isoformat().replace('+00:00', 'Z')
except Exception:
return ''
def _page_ref_for_entry(self, e: _HarEntryBuilder) -> str | None:
# Use Playwright format: "page@{frame_id}" if frame_id is known
if e.frame_id and e.frame_id in self._top_level_pages:
return f'page@{e.frame_id}'
return None
def _include_entry(self, e: _HarEntryBuilder) -> bool:
if not _is_https(e.url):
return False
# Filter out favicon requests (matching Playwright behavior)
if e.url and '/favicon.ico' in e.url.lower():
return False
if getattr(self, '_mode', 'full') == 'full':
return True
# minimal: include main document and same-origin subresources
if e.frame_id and e.frame_id in self._top_level_pages:
page_info = self._top_level_pages[e.frame_id]
page_url = page_info.get('url') if isinstance(page_info, dict) else page_info
return _origin(e.url or '') == _origin(page_url or '')
return False
# ===================== Helpers ==============================
def _compute_timings(self, e: _HarEntryBuilder) -> tuple[str, int, dict]:
# startedDateTime from wall_time_request in ISO8601 Z
started = ''
try:
if e.wall_time_request is not None:
from datetime import datetime, timezone
started = datetime.fromtimestamp(e.wall_time_request, tz=timezone.utc).isoformat().replace('+00:00', 'Z')
except Exception:
started = ''
# Calculate timings - CDP doesn't always provide DNS/connect/SSL breakdown
# Default to 0 for unavailable timings, calculate what we can from timestamps
dns_ms = 0
connect_ms = 0
ssl_ms = 0
send_ms = 0
wait_ms = 0
receive_ms = 0
if e.ts_request is not None and e.ts_response is not None:
wait_ms = max(0, int(round((e.ts_response - e.ts_request) * 1000)))
if e.ts_response is not None and e.ts_finished is not None:
receive_ms = max(0, int(round((e.ts_finished - e.ts_response) * 1000)))
# Note: DNS, connect, and SSL timings would require additional CDP events or ResourceTiming API
# For now, we structure the timings dict to match Playwright format
# but leave DNS/connect/SSL as 0 since CDP doesn't provide this breakdown directly
total = dns_ms + connect_ms + ssl_ms + send_ms + wait_ms + receive_ms
return (
started,
total,
{
'dns': dns_ms,
'connect': connect_ms,
'ssl': ssl_ms,
'send': send_ms,
'wait': wait_ms,
'receive': receive_ms,
},
)
def _calc_headers_size(self, method: str | None, url: str | None, headers_list: list[dict]) -> int:
try:
# Approximate per RFC: sum of header lines + CRLF; include request/status line only for request
size = 0
if method and url:
# Use HTTP/1.1 request line approximation
size += len(f'{method} {url} HTTP/1.1\r\n'.encode('latin1'))
for h in headers_list:
size += len(f'{h.get("name", "")}: {h.get("value", "")}\r\n'.encode('latin1'))
size += len(b'\r\n')
return size
except Exception:
return -1
def _calc_request_body_size(self, e: _HarEntryBuilder) -> int:
# Try Content-Length header first; else post_data; else request_body; else 0 for GET/HEAD, -1 if unknown
try:
cl = None
if e.request_headers:
cl = e.request_headers.get('content-length') or e.request_headers.get('Content-Length')
if cl is not None:
return int(cl)
if e.post_data:
return len(e.post_data.encode('utf-8'))
if e.request_body is not None:
return len(e.request_body)
# GET/HEAD requests typically have no body
if e.method and e.method.upper() in ('GET', 'HEAD'):
return 0
except Exception:
pass
return -1
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/browser/watchdogs/har_recording_watchdog.py",
"license": "MIT License",
"lines": 690,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/skill_cli/__main__.py | """Entry point for: python -m browser_use.skill_cli"""
import sys
from browser_use.skill_cli.main import main
# Run the CLI when invoked as `python -m browser_use.skill_cli`,
# propagating main()'s return value as the process exit code.
if __name__ == '__main__':
	sys.exit(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/skill_cli/__main__.py",
"license": "MIT License",
"lines": 5,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:browser_use/skill_cli/api_key.py | """API key management for browser-use CLI."""
import json
import os
import sys
from pathlib import Path
class APIKeyRequired(Exception):
	"""Signals that an API key was needed but could not be obtained."""
def get_config_path() -> Path:
	"""Return the browser-use config file path for this platform.

	Windows honors %APPDATA%; other platforms honor $XDG_CONFIG_HOME, falling
	back to the conventional defaults under the user's home directory.
	"""
	if sys.platform == 'win32':
		env_var, fallback = 'APPDATA', Path.home()
	else:
		env_var, fallback = 'XDG_CONFIG_HOME', Path.home() / '.config'
	base = Path(os.environ.get(env_var, fallback))
	return base / 'browser-use' / 'config.json'
def require_api_key(feature: str = 'this feature') -> str:
	"""Get API key or raise helpful error.

	Checks in order:
	1. BROWSER_USE_API_KEY environment variable
	2. Config file (~/.config/browser-use/config.json)
	3. Interactive prompt (if TTY)
	4. Raises APIKeyRequired with helpful message
	"""
	# 1. Check environment
	key = os.environ.get('BROWSER_USE_API_KEY')
	if key:
		return key
	# 2. Check config file
	config_path = get_config_path()
	if config_path.exists():
		try:
			config = json.loads(config_path.read_text())
			if key := config.get('api_key'):
				return key
		except Exception:
			# Unreadable/invalid config is treated as "no key configured".
			pass
	# 3. Interactive prompt (if TTY)
	# Both stdin and stdout must be TTYs so piped invocations never block on input.
	if sys.stdin.isatty() and sys.stdout.isatty():
		return prompt_for_api_key(feature)
	# 4. Error with helpful message
	raise APIKeyRequired(
		f"""
╭─────────────────────────────────────────────────────────────╮
│ 🔑 Browser-Use API Key Required │
│ │
│ {feature} requires an API key. │
│ │
│ Get yours at: https://browser-use.com/new-api-key │
│ │
│ Then set it via: │
│ export BROWSER_USE_API_KEY=your_key_here │
│ │
│ Or add to {config_path}: │
│ {{"api_key": "your_key_here"}} │
╰─────────────────────────────────────────────────────────────╯
"""
	)
def prompt_for_api_key(feature: str) -> str:
	"""Interactive prompt for API key.

	Raises APIKeyRequired when the user aborts or enters nothing; optionally
	persists the key via save_api_key().
	"""
	print(
		f"""
╭─────────────────────────────────────────────────────────────╮
│ 🔑 Browser-Use API Key Required │
│ │
│ {feature} requires an API key. │
│ Get yours at: https://browser-use.com/new-api-key │
╰─────────────────────────────────────────────────────────────╯
"""
	)
	try:
		key = input('Enter API key: ').strip()
	except (EOFError, KeyboardInterrupt):
		# Ctrl-C / Ctrl-D at the prompt counts as declining to provide a key.
		raise APIKeyRequired('No API key provided')
	if not key:
		raise APIKeyRequired('No API key provided')
	try:
		save = input('Save to config? [y/N]: ').strip().lower()
		if save == 'y':
			save_api_key(key)
	except (EOFError, KeyboardInterrupt):
		# Declining to answer the save prompt still returns the entered key.
		pass
	return key
def save_api_key(key: str) -> None:
	"""Persist *key* into the browser-use config file, preserving other settings."""
	config_path = get_config_path()
	config_path.parent.mkdir(parents=True, exist_ok=True)
	existing: dict = {}
	if config_path.exists():
		try:
			existing = json.loads(config_path.read_text())
		except Exception:
			# Corrupt config: start fresh rather than fail the save.
			pass
	existing['api_key'] = key
	config_path.write_text(json.dumps(existing, indent=2))
	# Restrict permissions to owner only (0600)
	config_path.chmod(0o600)
	print(f'Saved to {config_path}')
def get_api_key() -> str | None:
	"""Return the configured API key, or None when none is available."""
	try:
		key = require_api_key('API key check')
	except APIKeyRequired:
		return None
	return key
def check_api_key() -> dict[str, bool | str | None]:
	"""Report API key availability without any interactive prompting.

	Returns a dict with:
	- 'available': whether a key was found
	- 'source': 'env' or 'config' (None when unavailable)
	- 'key_prefix': first 8 characters of the key, for display
	"""

	def _found(source: str, key: str) -> dict[str, bool | str | None]:
		return {
			'available': True,
			'source': source,
			'key_prefix': key[:8] if len(key) >= 8 else key,
		}

	env_key = os.environ.get('BROWSER_USE_API_KEY')
	if env_key:
		return _found('env', env_key)
	config_path = get_config_path()
	if config_path.exists():
		try:
			config = json.loads(config_path.read_text())
			if key := config.get('api_key'):
				return _found('config', key)
		except Exception:
			# Unreadable/invalid config counts as "no key".
			pass
	return {'available': False, 'source': None, 'key_prefix': None}
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/skill_cli/api_key.py",
"license": "MIT License",
"lines": 138,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/skill_cli/commands/agent.py | """Agent task command handler."""
import logging
import os
from typing import Any
from browser_use.skill_cli.api_key import APIKeyRequired, require_api_key
from browser_use.skill_cli.sessions import SessionInfo
# Module logger for the agent command handler.
logger = logging.getLogger(__name__)
# Cloud-only flags that only work in remote mode; handle() rejects any of
# these when the session runs with a local browser.
CLOUD_ONLY_FLAGS = [
	'session_id',
	'proxy_country',
	'wait',
	'stream',
	'flash',
	'keep_alive',
	'thinking',
	'start_url',
	'metadata',
	'secret',
	'allowed_domain',
	'skill_id',
	'structured_output',
	'judge',
	'judge_ground_truth',
]
async def handle(session: SessionInfo, params: dict[str, Any]) -> Any:
	"""Handle agent run command.

	Routes based on browser mode:
	- Remote mode (--browser remote): Uses Cloud API with US proxy by default
	- Local mode (default): Uses local browser-use agent

	Returns a result dict with at least 'success'; errors carry an 'error' message.
	"""
	task = params.get('task')
	if not task:
		return {'success': False, 'error': 'No task provided'}
	# Route based on browser mode
	if session.browser_mode == 'remote':
		# Remote mode requires Browser-Use API key
		try:
			require_api_key('Cloud agent tasks')
		except APIKeyRequired as e:
			return {'success': False, 'error': str(e)}
		return await _handle_cloud_task(params)
	else:
		# Check if user tried to use cloud-only flags in local mode
		used_cloud_flags = [f for f in CLOUD_ONLY_FLAGS if params.get(f)]
		if used_cloud_flags:
			# NOTE(review): local import — presumably deferred to keep module
			# import light or avoid a cycle; confirm.
			from browser_use.skill_cli.install_config import is_mode_available
			flags_str = ', '.join(f'--{f.replace("_", "-")}' for f in used_cloud_flags)
			if is_mode_available('remote'):
				# Remote is available, user just needs to use it
				return {
					'success': False,
					'error': f'Cloud-only flags used in local mode: {flags_str}\nUse --browser remote to enable cloud features.',
				}
			else:
				# Remote not installed (--local-only install)
				return {
					'success': False,
					'error': f'Cloud-only flags require remote mode: {flags_str}\n'
					f'Remote mode is not installed. Reinstall to enable:\n'
					f' curl -fsSL https://browser-use.com/cli/install.sh | bash -s -- --remote-only\n'
					f' curl -fsSL https://browser-use.com/cli/install.sh | bash -s -- --full',
				}
		return await _handle_local_task(session, params)
async def _handle_cloud_task(params: dict[str, Any]) -> Any:
    """Handle task execution via Cloud API.

    By default uses US proxy for all cloud tasks.

    Args:
        params: Parsed CLI params. 'task' is required; session/profile/proxy,
            model, vision, wait/stream and judge options are all optional and
            only forwarded when the user explicitly set them.

    Returns:
        Dict with 'success' plus either the created task/session IDs
        (fire-and-forget default) or, when --wait is given, the final
        status/output/cost. Any exception is returned as an error dict.
    """
    from browser_use.skill_cli.commands import cloud_session, cloud_task
    task = params['task']
    # Handle vision flag (--vision vs --no-vision); None leaves the API default.
    vision: bool | None = None
    if params.get('vision'):
        vision = True
    elif params.get('no_vision'):
        vision = False
    # Parse key=value list params
    metadata = _parse_key_value_list(params.get('metadata'))
    secrets = _parse_key_value_list(params.get('secret'))
    # Build session params - only include what user explicitly set
    session_id = params.get('session_id')
    profile_id = params.get('profile')
    proxy_country = params.get('proxy_country')
    try:
        logger.info(f'Creating cloud task: {task}')
        # Create session first if profile or proxy specified and no session_id
        if (profile_id or proxy_country) and not session_id:
            session = cloud_session.create_session(
                profile_id=profile_id,
                proxy_country=proxy_country,
                keep_alive=params.get('keep_alive'),
            )
            session_id = session.id
            logger.info(f'Created cloud session: {session_id}')
        # Create cloud task - only pass what user explicitly set
        task_response = cloud_task.create_task(
            task=task,
            llm=params.get('llm'),
            session_id=session_id,
            max_steps=params.get('max_steps'),
            flash_mode=params.get('flash'),
            thinking=params.get('thinking'),
            vision=vision,
            start_url=params.get('start_url'),
            metadata=metadata,
            secrets=secrets,
            allowed_domains=params.get('allowed_domain'),
            skill_ids=params.get('skill_id'),
            structured_output=params.get('structured_output'),
            judge=params.get('judge'),
            judge_ground_truth=params.get('judge_ground_truth'),
        )
        task_id = task_response.id
        response_session_id = task_response.session_id
        if not task_id:
            return {
                'success': False,
                'error': 'Cloud API did not return a task ID',
                'task': task,
            }
        logger.info(f'Cloud task created: {task_id}')
        # Return immediately unless --wait is specified
        if not params.get('wait'):
            return {
                'success': True,
                'task_id': task_id,
                'session_id': response_session_id,
                'message': 'Task started. Use "browser-use task status <task_id>" to check progress.',
            }
        # Poll until complete
        logger.info('Waiting for task completion...')
        result = await cloud_task.poll_until_complete(task_id, stream=params.get('stream', False))
        return {
            'success': True,
            'task': task,
            'task_id': task_id,
            'session_id': response_session_id,
            'status': result.status,
            'output': result.output,
            'cost': result.cost,
            'done': result.status == 'finished',
        }
    except Exception as e:
        logger.exception(f'Cloud task failed: {e}')
        return {
            'success': False,
            'error': str(e),
            'task': task,
        }
def _parse_key_value_list(items: list[str] | None) -> dict[str, str | None] | None:
"""Parse a list of 'key=value' strings into a dict."""
if not items:
return None
result: dict[str, str | None] = {}
for item in items:
if '=' in item:
key, value = item.split('=', 1)
result[key] = value
return result if result else None
async def _handle_local_task(session: SessionInfo, params: dict[str, Any]) -> Any:
    """Handle task execution locally with browser-use agent.

    Args:
        session: Active CLI session holding the local browser session.
        params: Parsed CLI params; 'task' is required, 'max_steps' and
            'llm' (model name override) are optional.

    Returns:
        Result dict with 'success', step count and final output, or an
        error dict (all exceptions are caught and reported, not raised).
    """
    import inspect

    task = params['task']
    max_steps = params.get('max_steps')
    model = params.get('llm')  # Optional model override
    try:
        # Import agent and LLM
        from browser_use.agent.service import Agent
        # FIX: get_llm() is a plain (synchronous) function in this module, so
        # the previous `await get_llm(...)` raised TypeError, which the broad
        # except below turned into an error result — every local task failed.
        # Call it directly, but stay compatible with an async implementation.
        llm = get_llm(model=model)
        if inspect.isawaitable(llm):
            llm = await llm
        if llm is None:
            if model:
                return {
                    'success': False,
                    'error': f'Could not initialize model "{model}". '
                    f'Make sure the appropriate API key is set (OPENAI_API_KEY, ANTHROPIC_API_KEY, or GOOGLE_API_KEY).',
                }
            return {
                'success': False,
                'error': 'No LLM configured. Set BROWSER_USE_API_KEY, OPENAI_API_KEY, ANTHROPIC_API_KEY, or GOOGLE_API_KEY',
            }
        # Create and run agent against the session's local browser
        agent = Agent(
            task=task,
            llm=llm,
            browser_session=session.browser_session,
        )
        logger.info(f'Running local agent task: {task}')
        run_kwargs = {}
        if max_steps is not None:
            run_kwargs['max_steps'] = max_steps
        result = await agent.run(**run_kwargs)
        # Extract result info
        final_result = result.final_result() if result else None
        return {
            'success': True,
            'task': task,
            'steps': len(result) if result else 0,
            'result': str(final_result) if final_result else None,
            'done': result.is_done() if result else False,
        }
    except Exception as e:
        logger.exception(f'Local agent task failed: {e}')
        return {
            'success': False,
            'error': str(e),
            'task': task,
        }
def _get_verified_models() -> dict[str, set[str]]:
    """Extract verified model names from SDK sources of truth."""
    import typing
    from anthropic.types.model_param import ModelParam
    from openai.types.shared.chat_model import ChatModel
    from browser_use.llm.google.chat import VerifiedGeminiModels
    # OpenAI publishes ChatModel as a plain Literal of model names.
    # Anthropic's ModelParam is Union[Literal[...], str]; arm 0 is the Literal.
    # Google's VerifiedGeminiModels is likewise a Literal.
    anthropic_literal = typing.get_args(ModelParam)[0]
    return {
        'openai': set(typing.get_args(ChatModel)),
        'anthropic': set(typing.get_args(anthropic_literal)),
        'google': set(typing.get_args(VerifiedGeminiModels)),
        # Browser-Use cloud models are a fixed, hand-maintained set.
        'browser-use': {'bu-latest', 'bu-1-0', 'bu-2-0'},
    }
# Cache of provider -> verified model names, built lazily on first lookup.
_VERIFIED_MODELS: dict[str, set[str]] | None = None


def _get_provider_for_model(model: str) -> str | None:
    """Return the provider whose verified model list contains *model*, else None."""
    global _VERIFIED_MODELS
    if _VERIFIED_MODELS is None:
        _VERIFIED_MODELS = _get_verified_models()
    return next(
        (provider for provider, names in _VERIFIED_MODELS.items() if model in names),
        None,
    )
def get_llm(model: str | None = None) -> Any:
    """Get LLM instance from environment configuration.

    Args:
        model: Optional model name to use. If provided, will instantiate
            the appropriate provider for that model. If not provided,
            auto-detects from available API keys.

    Supported providers: OpenAI, Anthropic, Google, Browser-Use.
    Model names are validated against each SDK's verified model list.
    Returns None when the model is unknown or no provider key is set.
    """
    from browser_use.llm import ChatAnthropic, ChatBrowserUse, ChatGoogle, ChatOpenAI

    if model:
        # Explicit model: look up its provider and build the matching chat class.
        factories = {
            'openai': ChatOpenAI,
            'anthropic': ChatAnthropic,
            'google': ChatGoogle,
            'browser-use': ChatBrowserUse,
        }
        factory = factories.get(_get_provider_for_model(model))
        if factory is None:
            logger.warning(f'Unknown model: {model}. Not in any verified model list.')
            return None
        return factory(model=model)
    # No model specified - auto-detect from available API keys,
    # preferring Browser-Use's own cloud models.
    env = os.environ
    if env.get('BROWSER_USE_API_KEY'):
        return ChatBrowserUse()
    if env.get('OPENAI_API_KEY'):
        return ChatOpenAI(model='o3')
    if env.get('ANTHROPIC_API_KEY'):
        return ChatAnthropic(model='claude-sonnet-4-0')
    if env.get('GOOGLE_API_KEY'):
        return ChatGoogle(model='gemini-flash-latest')
    return None
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/skill_cli/commands/agent.py",
"license": "MIT License",
"lines": 271,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/skill_cli/commands/browser.py | """Browser control commands."""
import asyncio
import base64
import logging
from pathlib import Path
from typing import Any
from browser_use.skill_cli.sessions import SessionInfo
logger = logging.getLogger(__name__)
# Browser action names; these match the `action` values dispatched in handle().
# NOTE(review): COMMANDS is not referenced elsewhere in this module —
# presumably the CLI layer imports it to validate subcommands; confirm there.
COMMANDS = {
    'open',
    'click',
    'type',
    'input',
    'scroll',
    'back',
    'screenshot',
    'state',
    'switch',
    'close-tab',
    'keys',
    'select',
    'eval',
    'extract',
    'cookies',
    'wait',
    'hover',
    'dblclick',
    'rightclick',
    'get',
}
async def _execute_js(session: SessionInfo, js: str) -> Any:
    """Evaluate a JavaScript expression in the focused page and return its value."""
    browser = session.browser_session
    # Attach to the currently focused target without stealing focus.
    cdp = await browser.get_or_create_cdp_session(target_id=None, focus=False)
    if not cdp:
        raise RuntimeError('No active browser session')
    evaluation = await cdp.cdp_client.send.Runtime.evaluate(
        params={'expression': js, 'returnByValue': True},
        session_id=cdp.session_id,
    )
    return evaluation.get('result', {}).get('value')
async def _get_element_center(session: SessionInfo, node: Any) -> tuple[float, float] | None:
    """Return the center (x, y) of *node*'s bounding rect, or None on failure."""
    browser = session.browser_session
    try:
        cdp = await browser.cdp_client_for_node(node)
        backend_id = node.backend_node_id
        # Best effort: bring the element into view before measuring.
        try:
            await cdp.cdp_client.send.DOM.scrollIntoViewIfNeeded(
                params={'backendNodeId': backend_id}, session_id=cdp.session_id
            )
            await asyncio.sleep(0.05)
        except Exception:
            pass
        rect = await browser.get_element_coordinates(backend_id, cdp)
        if not rect:
            return None
        return rect.x + rect.width / 2, rect.y + rect.height / 2
    except Exception as e:
        logger.error(f'Failed to get element center: {e}')
        return None
async def handle(action: str, session: SessionInfo, params: dict[str, Any]) -> Any:
    """Handle browser control command.

    Dispatches `action` (one of the names in COMMANDS) against the session's
    live browser: navigation, clicking/typing, tab management, screenshots,
    state dumps, cookies, waiting, and element inspection. Each branch returns
    a small result dict (with an 'error' key on recoverable failures);
    an unknown action raises ValueError.
    """
    bs = session.browser_session
    if action == 'open':
        url = params['url']
        # Ensure URL has scheme
        if not url.startswith(('http://', 'https://', 'file://')):
            url = 'https://' + url
        from browser_use.browser.events import NavigateToUrlEvent
        await bs.event_bus.dispatch(NavigateToUrlEvent(url=url))
        result: dict[str, Any] = {'url': url}
        # Add live preview URL for cloud browsers
        if bs.browser_profile.use_cloud and bs.cdp_url:
            from urllib.parse import quote
            result['live_url'] = f'https://live.browser-use.com/?wss={quote(bs.cdp_url, safe="")}'
        return result
    elif action == 'click':
        args = params.get('args', [])
        if len(args) == 2:
            # Coordinate click: browser-use click <x> <y>
            from browser_use.browser.events import ClickCoordinateEvent
            x, y = args
            await bs.event_bus.dispatch(ClickCoordinateEvent(coordinate_x=x, coordinate_y=y))
            return {'clicked_coordinate': {'x': x, 'y': y}}
        elif len(args) == 1:
            # Index click: browser-use click <index>
            from browser_use.browser.events import ClickElementEvent
            index = args[0]
            node = await bs.get_element_by_index(index)
            if node is None:
                return {'error': f'Element index {index} not found - page may have changed'}
            await bs.event_bus.dispatch(ClickElementEvent(node=node))
            return {'clicked': index}
        else:
            return {'error': 'Usage: click <index> or click <x> <y>'}
    elif action == 'type':
        # Type into currently focused element using CDP directly
        text = params['text']
        cdp_session = await bs.get_or_create_cdp_session(target_id=None, focus=False)
        if not cdp_session:
            return {'error': 'No active browser session'}
        await cdp_session.cdp_client.send.Input.insertText(
            params={'text': text},
            session_id=cdp_session.session_id,
        )
        return {'typed': text}
    elif action == 'input':
        from browser_use.browser.events import ClickElementEvent, TypeTextEvent
        index = params['index']
        text = params['text']
        # Look up node from selector map
        node = await bs.get_element_by_index(index)
        if node is None:
            return {'error': f'Element index {index} not found - page may have changed'}
        # Click first to focus the field, then type into it.
        await bs.event_bus.dispatch(ClickElementEvent(node=node))
        await bs.event_bus.dispatch(TypeTextEvent(node=node, text=text))
        return {'input': text, 'element': index}
    elif action == 'scroll':
        from browser_use.browser.events import ScrollEvent
        direction = params.get('direction', 'down')
        amount = params.get('amount', 500)
        await bs.event_bus.dispatch(ScrollEvent(direction=direction, amount=amount))
        return {'scrolled': direction, 'amount': amount}
    elif action == 'back':
        from browser_use.browser.events import GoBackEvent
        await bs.event_bus.dispatch(GoBackEvent())
        return {'back': True}
    elif action == 'screenshot':
        data = await bs.take_screenshot(full_page=params.get('full', False))
        if params.get('path'):
            path = Path(params['path'])
            path.write_bytes(data)
            return {'saved': str(path), 'size': len(data)}
        # Return base64 encoded
        return {'screenshot': base64.b64encode(data).decode(), 'size': len(data)}
    elif action == 'state':
        # Return the LLM representation with viewport info for coordinate clicking
        state = await bs.get_browser_state_summary()
        assert state.dom_state is not None
        state_text = state.dom_state.llm_representation()
        # Prepend viewport dimensions so LLMs know the coordinate space
        if state.page_info:
            pi = state.page_info
            viewport_text = f'viewport: {pi.viewport_width}x{pi.viewport_height}\n'
            viewport_text += f'page: {pi.page_width}x{pi.page_height}\n'
            viewport_text += f'scroll: ({pi.scroll_x}, {pi.scroll_y})\n'
            state_text = viewport_text + state_text
        return {'_raw_text': state_text}
    elif action == 'switch':
        from browser_use.browser.events import SwitchTabEvent
        tab_index = params['tab']
        # Get target_id from tab index
        page_targets = bs.session_manager.get_all_page_targets() if bs.session_manager else []
        if tab_index < 0 or tab_index >= len(page_targets):
            return {'error': f'Invalid tab index {tab_index}. Available: 0-{len(page_targets) - 1}'}
        target_id = page_targets[tab_index].target_id
        await bs.event_bus.dispatch(SwitchTabEvent(target_id=target_id))
        return {'switched': tab_index}
    elif action == 'close-tab':
        from browser_use.browser.events import CloseTabEvent
        tab_index = params.get('tab')
        # Get target_id from tab index
        page_targets = bs.session_manager.get_all_page_targets() if bs.session_manager else []
        if tab_index is not None:
            if tab_index < 0 or tab_index >= len(page_targets):
                return {'error': f'Invalid tab index {tab_index}. Available: 0-{len(page_targets) - 1}'}
            target_id = page_targets[tab_index].target_id
        else:
            # Close current/focused tab
            target_id = bs.session_manager.get_focused_target().target_id if bs.session_manager else None
            if not target_id:
                return {'error': 'No focused tab to close'}
        await bs.event_bus.dispatch(CloseTabEvent(target_id=target_id))
        # NOTE: tab_index is None here when the focused tab was closed.
        return {'closed': tab_index}
    elif action == 'keys':
        from browser_use.browser.events import SendKeysEvent
        keys = params['keys']
        await bs.event_bus.dispatch(SendKeysEvent(keys=keys))
        return {'sent': keys}
    elif action == 'select':
        from browser_use.browser.events import SelectDropdownOptionEvent
        index = params['index']
        value = params['value']
        # Look up node from selector map
        node = await bs.get_element_by_index(index)
        if node is None:
            return {'error': f'Element index {index} not found - page may have changed'}
        await bs.event_bus.dispatch(SelectDropdownOptionEvent(node=node, text=value))
        return {'selected': value, 'element': index}
    elif action == 'eval':
        js = params['js']
        # Execute JavaScript via CDP
        result = await _execute_js(session, js)
        return {'result': result}
    elif action == 'extract':
        query = params['query']
        # This requires LLM integration
        # For now, return a placeholder
        return {'query': query, 'error': 'extract requires agent mode - use: browser-use run "extract ..."'}
    elif action == 'hover':
        index = params['index']
        node = await bs.get_element_by_index(index)
        if node is None:
            return {'error': f'Element index {index} not found - page may have changed'}
        coords = await _get_element_center(session, node)
        if not coords:
            return {'error': 'Could not get element coordinates for hover'}
        center_x, center_y = coords
        # Synthesize a mouse-move to the element center to trigger hover state.
        cdp_session = await bs.cdp_client_for_node(node)
        await cdp_session.cdp_client.send.Input.dispatchMouseEvent(
            params={'type': 'mouseMoved', 'x': center_x, 'y': center_y},
            session_id=cdp_session.session_id,
        )
        return {'hovered': index}
    elif action == 'dblclick':
        index = params['index']
        node = await bs.get_element_by_index(index)
        if node is None:
            return {'error': f'Element index {index} not found - page may have changed'}
        coords = await _get_element_center(session, node)
        if not coords:
            return {'error': 'Could not get element coordinates for double-click'}
        center_x, center_y = coords
        cdp_session = await bs.cdp_client_for_node(node)
        session_id = cdp_session.session_id
        # Move mouse to element
        await cdp_session.cdp_client.send.Input.dispatchMouseEvent(
            params={'type': 'mouseMoved', 'x': center_x, 'y': center_y},
            session_id=session_id,
        )
        await asyncio.sleep(0.05)
        # Double click (clickCount: 2)
        await cdp_session.cdp_client.send.Input.dispatchMouseEvent(
            params={
                'type': 'mousePressed',
                'x': center_x,
                'y': center_y,
                'button': 'left',
                'clickCount': 2,
            },
            session_id=session_id,
        )
        await asyncio.sleep(0.05)
        await cdp_session.cdp_client.send.Input.dispatchMouseEvent(
            params={
                'type': 'mouseReleased',
                'x': center_x,
                'y': center_y,
                'button': 'left',
                'clickCount': 2,
            },
            session_id=session_id,
        )
        return {'double_clicked': index}
    elif action == 'rightclick':
        index = params['index']
        node = await bs.get_element_by_index(index)
        if node is None:
            return {'error': f'Element index {index} not found - page may have changed'}
        coords = await _get_element_center(session, node)
        if not coords:
            return {'error': 'Could not get element coordinates for right-click'}
        center_x, center_y = coords
        cdp_session = await bs.cdp_client_for_node(node)
        session_id = cdp_session.session_id
        # Move mouse to element
        await cdp_session.cdp_client.send.Input.dispatchMouseEvent(
            params={'type': 'mouseMoved', 'x': center_x, 'y': center_y},
            session_id=session_id,
        )
        await asyncio.sleep(0.05)
        # Right click (button: 'right')
        await cdp_session.cdp_client.send.Input.dispatchMouseEvent(
            params={
                'type': 'mousePressed',
                'x': center_x,
                'y': center_y,
                'button': 'right',
                'clickCount': 1,
            },
            session_id=session_id,
        )
        await asyncio.sleep(0.05)
        await cdp_session.cdp_client.send.Input.dispatchMouseEvent(
            params={
                'type': 'mouseReleased',
                'x': center_x,
                'y': center_y,
                'button': 'right',
                'clickCount': 1,
            },
            session_id=session_id,
        )
        return {'right_clicked': index}
    elif action == 'cookies':
        # Cookie subcommands: get / set / clear / export / import.
        cookies_command = params.get('cookies_command')
        if cookies_command == 'get':
            # Get cookies via direct CDP
            cookies = await bs._cdp_get_cookies()
            # Convert Cookie objects to dicts
            cookie_list: list[dict[str, Any]] = []
            for c in cookies:
                cookie_dict: dict[str, Any] = {
                    'name': c.get('name', ''),
                    'value': c.get('value', ''),
                    'domain': c.get('domain', ''),
                    'path': c.get('path', '/'),
                    'secure': c.get('secure', False),
                    'httpOnly': c.get('httpOnly', False),
                }
                if 'sameSite' in c:
                    cookie_dict['sameSite'] = c.get('sameSite')
                if 'expires' in c:
                    cookie_dict['expires'] = c.get('expires')
                cookie_list.append(cookie_dict)
            # Filter by URL if provided
            url = params.get('url')
            if url:
                from urllib.parse import urlparse
                parsed = urlparse(url)
                domain = parsed.netloc
                # Keep cookies whose domain matches the URL host in either
                # direction (host is a subdomain of the cookie domain or
                # vice versa), ignoring a leading '.' on the cookie domain.
                cookie_list = [
                    c
                    for c in cookie_list
                    if domain.endswith(str(c.get('domain', '')).lstrip('.'))
                    or str(c.get('domain', '')).lstrip('.').endswith(domain)
                ]
            return {'cookies': cookie_list}
        elif cookies_command == 'set':
            from cdp_use.cdp.network import Cookie
            cookie_dict: dict[str, Any] = {
                'name': params['name'],
                'value': params['value'],
                'path': params.get('path', '/'),
                'secure': params.get('secure', False),
                'httpOnly': params.get('http_only', False),
            }
            if params.get('domain'):
                cookie_dict['domain'] = params['domain']
            if params.get('same_site'):
                cookie_dict['sameSite'] = params['same_site']
            if params.get('expires'):
                cookie_dict['expires'] = params['expires']
            # If no domain specified, get current URL's domain
            if not params.get('domain'):
                hostname = await _execute_js(session, 'window.location.hostname')
                if hostname:
                    cookie_dict['domain'] = hostname
            try:
                cookie_obj = Cookie(**cookie_dict)
                await bs._cdp_set_cookies([cookie_obj])
                return {'set': params['name'], 'success': True}
            except Exception as e:
                logger.error(f'Failed to set cookie: {e}')
                return {'set': params['name'], 'success': False, 'error': str(e)}
        elif cookies_command == 'clear':
            url = params.get('url')
            if url:
                # Clear cookies only for specific URL domain
                from urllib.parse import urlparse
                cookies = await bs._cdp_get_cookies()
                parsed = urlparse(url)
                domain = parsed.netloc
                cdp_session = await bs.get_or_create_cdp_session(target_id=None, focus=False)
                if cdp_session:
                    for cookie in cookies:
                        cookie_domain = str(cookie.get('domain', '')).lstrip('.')
                        if domain.endswith(cookie_domain) or cookie_domain.endswith(domain):
                            await cdp_session.cdp_client.send.Network.deleteCookies(
                                params={
                                    'name': cookie.get('name', ''),
                                    'domain': cookie.get('domain'),
                                    'path': cookie.get('path', '/'),
                                },
                                session_id=cdp_session.session_id,
                            )
            else:
                # Clear all cookies
                await bs._cdp_clear_cookies()
            return {'cleared': True, 'url': url}
        elif cookies_command == 'export':
            import json
            # Get cookies via direct CDP
            cookies = await bs._cdp_get_cookies()
            # Convert to list of dicts
            cookie_list: list[dict[str, Any]] = []
            for c in cookies:
                cookie_dict: dict[str, Any] = {
                    'name': c.get('name', ''),
                    'value': c.get('value', ''),
                    'domain': c.get('domain', ''),
                    'path': c.get('path', '/'),
                    'secure': c.get('secure', False),
                    'httpOnly': c.get('httpOnly', False),
                }
                if 'sameSite' in c:
                    cookie_dict['sameSite'] = c.get('sameSite')
                if 'expires' in c:
                    cookie_dict['expires'] = c.get('expires')
                cookie_list.append(cookie_dict)
            # Filter by URL if provided
            url = params.get('url')
            if url:
                from urllib.parse import urlparse
                parsed = urlparse(url)
                domain = parsed.netloc
                cookie_list = [
                    c
                    for c in cookie_list
                    if domain.endswith(str(c.get('domain', '')).lstrip('.'))
                    or str(c.get('domain', '')).lstrip('.').endswith(domain)
                ]
            file_path = Path(params['file'])
            file_path.write_text(json.dumps(cookie_list, indent=2, ensure_ascii=False), encoding='utf-8')
            return {'exported': len(cookie_list), 'file': str(file_path)}
        elif cookies_command == 'import':
            import json
            file_path = Path(params['file'])
            if not file_path.exists():
                return {'error': f'File not found: {file_path}'}
            cookies = json.loads(file_path.read_text())
            # Get CDP session for bulk cookie setting
            cdp_session = await bs.get_or_create_cdp_session(target_id=None, focus=False)
            if not cdp_session:
                return {'error': 'No active browser session'}
            # Build cookie list for bulk set
            cookie_list = []
            for c in cookies:
                cookie_params = {
                    'name': c['name'],
                    'value': c['value'],
                    'domain': c.get('domain'),
                    'path': c.get('path', '/'),
                    'secure': c.get('secure', False),
                    'httpOnly': c.get('httpOnly', False),
                }
                if c.get('sameSite'):
                    cookie_params['sameSite'] = c['sameSite']
                if c.get('expires'):
                    cookie_params['expires'] = c['expires']
                cookie_list.append(cookie_params)
            # Set all cookies in one call
            try:
                await cdp_session.cdp_client.send.Network.setCookies(
                    params={'cookies': cookie_list},  # type: ignore[arg-type]
                    session_id=cdp_session.session_id,
                )
                return {'imported': len(cookie_list), 'file': str(file_path)}
            except Exception as e:
                return {'error': f'Failed to import cookies: {e}'}
        return {'error': 'Invalid cookies command. Use: get, set, clear, export, import'}
    elif action == 'wait':
        import json as json_module
        wait_command = params.get('wait_command')
        if wait_command == 'selector':
            # Poll every 100ms until the selector reaches the requested state
            # or the timeout (milliseconds, default 30s) elapses.
            timeout_seconds = params.get('timeout', 30000) / 1000.0
            state = params.get('state', 'visible')
            selector = params['selector']
            poll_interval = 0.1
            elapsed = 0.0
            while elapsed < timeout_seconds:
                # Build JS check based on state
                if state == 'attached':
                    js = f'document.querySelector({json_module.dumps(selector)}) !== null'
                elif state == 'detached':
                    js = f'document.querySelector({json_module.dumps(selector)}) === null'
                elif state == 'visible':
                    js = f"""
                    (function() {{
                        const el = document.querySelector({json_module.dumps(selector)});
                        if (!el) return false;
                        const style = window.getComputedStyle(el);
                        const rect = el.getBoundingClientRect();
                        return style.display !== 'none' &&
                               style.visibility !== 'hidden' &&
                               style.opacity !== '0' &&
                               rect.width > 0 &&
                               rect.height > 0;
                    }})()
                    """
                elif state == 'hidden':
                    js = f"""
                    (function() {{
                        const el = document.querySelector({json_module.dumps(selector)});
                        if (!el) return true;
                        const style = window.getComputedStyle(el);
                        const rect = el.getBoundingClientRect();
                        return style.display === 'none' ||
                               style.visibility === 'hidden' ||
                               style.opacity === '0' ||
                               rect.width === 0 ||
                               rect.height === 0;
                    }})()
                    """
                else:
                    js = f'document.querySelector({json_module.dumps(selector)}) !== null'
                result = await _execute_js(session, js)
                if result:
                    return {'selector': selector, 'found': True}
                await asyncio.sleep(poll_interval)
                elapsed += poll_interval
            return {'selector': selector, 'found': False}
        elif wait_command == 'text':
            import json as json_module
            # Poll for the literal text appearing anywhere in the page body.
            timeout_seconds = params.get('timeout', 30000) / 1000.0
            text = params['text']
            poll_interval = 0.1
            elapsed = 0.0
            while elapsed < timeout_seconds:
                js = f"""
                (function() {{
                    const text = {json_module.dumps(text)};
                    return document.body.innerText.includes(text);
                }})()
                """
                result = await _execute_js(session, js)
                if result:
                    return {'text': text, 'found': True}
                await asyncio.sleep(poll_interval)
                elapsed += poll_interval
            return {'text': text, 'found': False}
        return {'error': 'Invalid wait command. Use: selector, text'}
    elif action == 'get':
        import json as json_module
        get_command = params.get('get_command')
        if get_command == 'title':
            title = await _execute_js(session, 'document.title')
            return {'title': title or ''}
        elif get_command == 'html':
            selector = params.get('selector')
            if selector:
                js = f'(function(){{ const el = document.querySelector({json_module.dumps(selector)}); return el ? el.outerHTML : null; }})()'
            else:
                js = 'document.documentElement.outerHTML'
            html = await _execute_js(session, js)
            return {'html': html or ''}
        elif get_command == 'text':
            index = params['index']
            node = await bs.get_element_by_index(index)
            if node is None:
                return {'error': f'Element index {index} not found - page may have changed'}
            # Use the node's text from our model
            text = node.get_all_children_text(max_depth=10) if node else ''
            return {'index': index, 'text': text}
        elif get_command == 'value':
            index = params['index']
            node = await bs.get_element_by_index(index)
            if node is None:
                return {'error': f'Element index {index} not found - page may have changed'}
            try:
                # Resolve the backend node to a JS object, then read .value on it.
                cdp_session = await bs.cdp_client_for_node(node)
                resolve_result = await cdp_session.cdp_client.send.DOM.resolveNode(
                    params={'backendNodeId': node.backend_node_id},
                    session_id=cdp_session.session_id,
                )
                object_id = resolve_result['object'].get('objectId')  # type: ignore[union-attr]
                if object_id:
                    value_result = await cdp_session.cdp_client.send.Runtime.callFunctionOn(
                        params={
                            'objectId': object_id,
                            'functionDeclaration': 'function() { return this.value; }',
                            'returnByValue': True,
                        },
                        session_id=cdp_session.session_id,
                    )
                    value = value_result.get('result', {}).get('value')
                    return {'index': index, 'value': value or ''}
                else:
                    return {'index': index, 'value': ''}
            except Exception as e:
                logger.error(f'Failed to get element value: {e}')
                return {'index': index, 'value': ''}
        elif get_command == 'attributes':
            index = params['index']
            node = await bs.get_element_by_index(index)
            if node is None:
                return {'error': f'Element index {index} not found - page may have changed'}
            # Use the attributes from the node model
            attrs = node.attributes or {}
            return {'index': index, 'attributes': dict(attrs)}
        elif get_command == 'bbox':
            index = params['index']
            node = await bs.get_element_by_index(index)
            if node is None:
                return {'error': f'Element index {index} not found - page may have changed'}
            try:
                cdp_session = await bs.cdp_client_for_node(node)
                box_result = await cdp_session.cdp_client.send.DOM.getBoxModel(
                    params={'backendNodeId': node.backend_node_id},
                    session_id=cdp_session.session_id,
                )
                model = box_result['model']  # type: ignore[index]
                content = model.get('content', [])  # type: ignore[union-attr]
                if len(content) >= 8:
                    # content is [x1, y1, x2, y2, x3, y3, x4, y4] - corners of the quad
                    x = min(content[0], content[2], content[4], content[6])
                    y = min(content[1], content[3], content[5], content[7])
                    width = max(content[0], content[2], content[4], content[6]) - x
                    height = max(content[1], content[3], content[5], content[7]) - y
                    return {'index': index, 'bbox': {'x': x, 'y': y, 'width': width, 'height': height}}
                else:
                    return {'index': index, 'bbox': {}}
            except Exception as e:
                logger.error(f'Failed to get element bbox: {e}')
                return {'index': index, 'bbox': {}}
        return {'error': 'Invalid get command. Use: title, html, text, value, attributes, bbox'}
    raise ValueError(f'Unknown browser action: {action}')
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/skill_cli/commands/browser.py",
"license": "MIT License",
"lines": 614,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/skill_cli/commands/cloud_session.py | """Cloud session SDK wrappers and CLI handlers.
This module provides:
- SDK wrapper functions for the Browser-Use Cloud Session API
- CLI command handlers for `browser-use session <command>`
"""
import argparse
import json
import logging
import sys
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Any
from browser_use_sdk.types.session_item_view import SessionItemView
from browser_use_sdk.types.session_view import SessionView
from browser_use_sdk.types.share_view import ShareView
from browser_use.skill_cli.commands.utils import format_duration, get_sdk_client
logger = logging.getLogger(__name__)
# ============ SDK Wrappers ============
def create_session(**kwargs: Any) -> SessionItemView:
    """Create a cloud browser session.

    Args:
        profile_id: Cloud profile ID for persistent auth/cookies
        proxy_country: Proxy country code (us, gb, de, etc.)
        keep_alive: Keep session alive after task completes
        persist_memory: Share memory between tasks in session
        start_url: URL to navigate to when session starts
        screen_width: Browser screen width in pixels
        screen_height: Browser screen height in pixels

    Returns:
        SessionItemView with session details
    """
    # CLI param name -> SDK param name; None values are never forwarded.
    renames = {
        'proxy_country': 'proxy_country_code',
        'screen_width': 'browser_screen_width',
        'screen_height': 'browser_screen_height',
    }
    sdk_params = {renames.get(name, name): value for name, value in kwargs.items() if value is not None}
    return get_sdk_client().sessions.create_session(**sdk_params)
def list_sessions(limit: int = 10, status: str | None = None) -> list[SessionItemView]:
    """Return up to *limit* cloud browser sessions, optionally filtered by status."""
    response = get_sdk_client().sessions.list_sessions(
        page_size=min(limit, 100),  # API page size is capped at 100
        filter_by=status,
    )
    items = response.items
    return list(items) if items else []
def get_session(session_id: str) -> SessionView:
    """Fetch full details for one cloud session by its ID."""
    client = get_sdk_client()
    return client.sessions.get_session(session_id)
def stop_session(session_id: str) -> SessionView:
    """Request that the given cloud session be stopped."""
    client = get_sdk_client()
    return client.sessions.update_session(session_id, action='stop')
def delete_session(session_id: str) -> None:
    """Delete a cloud session and all its tasks."""
    client = get_sdk_client()
    client.sessions.delete_session(session_id)
def create_public_share(session_id: str) -> ShareView:
    """Create (and return) a public share URL for a session."""
    client = get_sdk_client()
    return client.sessions.create_session_public_share(session_id)
def delete_public_share(session_id: str) -> None:
    """Remove the public share for a session."""
    client = get_sdk_client()
    client.sessions.delete_session_public_share(session_id)
def stop_sessions_parallel(session_ids: list[str]) -> tuple[list[str], list[dict[str, Any]]]:
    """Stop many cloud sessions concurrently.

    Returns:
        (stopped, errors): IDs stopped successfully, plus a list of
        {'id': ..., 'error': ...} dicts for those that failed.
    """
    client = get_sdk_client()

    def _stop_one(session_id: str) -> tuple[str, str | None]:
        # Returns (id, error message or None) so results can be partitioned.
        try:
            client.sessions.update_session(session_id, action='stop')
        except Exception as exc:
            return session_id, str(exc)
        return session_id, None

    stopped: list[str] = []
    errors: list[dict[str, Any]] = []
    with ThreadPoolExecutor(max_workers=10) as pool:
        pending = [pool.submit(_stop_one, sid) for sid in session_ids]
        for completed in as_completed(pending):
            sid, err = completed.result()
            if err:
                errors.append({'id': sid, 'error': err})
            else:
                stopped.append(sid)
    return stopped, errors
# ============ CLI Handlers ============
def handle_session_command(args: argparse.Namespace) -> int:
    """Dispatch `browser-use session <command>` to its handler.

    Session commands always talk to the cloud API, so remote-mode
    availability and the API key are both validated before dispatching.

    Args:
        args: Parsed command-line arguments.

    Returns:
        Exit code (0 for success, 1 for error).
    """
    from browser_use.skill_cli.api_key import APIKeyRequired, require_api_key
    from browser_use.skill_cli.install_config import is_mode_available

    # Remote mode must be installed for any session command.
    if not is_mode_available('remote'):
        print(
            'Error: Session management requires remote mode.\n'
            'Remote mode is not installed. Reinstall to enable:\n'
            ' curl -fsSL https://browser-use.com/cli/install.sh | bash -s -- --remote-only\n'
            ' curl -fsSL https://browser-use.com/cli/install.sh | bash -s -- --full',
            file=sys.stderr,
        )
        return 1

    # An API key is required for all cloud session operations.
    try:
        require_api_key('Cloud sessions')
    except APIKeyRequired as e:
        print(f'Error: {e}', file=sys.stderr)
        return 1

    dispatch = {
        'list': _handle_list,
        'get': _handle_get,
        'stop': _handle_stop,
        'create': _handle_create,
        'share': _handle_share,
    }
    handler = dispatch.get(args.session_command)
    if handler is None:
        print('Usage: browser-use session <command>')
        print('Commands: list, get <id>, stop <id>, create, share <id>')
        return 1
    return handler(args)
# ============ CLI Helper Functions ============
def _session_to_dict(session: Any) -> dict[str, Any]:
"""Convert SDK session object to dict for JSON output."""
return {
'id': session.id,
'status': session.status,
'liveUrl': session.live_url,
'startedAt': session.started_at.isoformat() if session.started_at else None,
'finishedAt': session.finished_at.isoformat() if session.finished_at else None,
'keepAlive': session.keep_alive,
'persistMemory': getattr(session, 'persist_memory', None),
'proxyCost': session.proxy_cost,
'publicShareUrl': getattr(session, 'public_share_url', None),
}
def _handle_list(args: argparse.Namespace) -> int:
    """Handle 'session list' command."""
    status_filter = getattr(args, 'status', None)
    try:
        sessions = list_sessions(limit=args.limit, status=status_filter)
    except Exception as e:
        print(f'Error: {e}', file=sys.stderr)
        return 1

    if getattr(args, 'json', False):
        print(json.dumps([_session_to_dict(s) for s in sessions]))
        return 0

    if not sessions:
        status_msg = f' with status "{status_filter}"' if status_filter else ''
        print(f'No sessions found{status_msg}')
        return 0

    if status_filter:
        header = f'{status_filter.capitalize()} sessions ({len(sessions)})'
    else:
        header = f'Sessions ({len(sessions)})'
    print(f'{header}:')

    for s in sessions:
        session_id = s.id or 'unknown'
        status = s.status or 'unknown'
        emoji = {'active': '🟢', 'stopped': '⏹️'}.get(status, '❓')
        # Shorten long IDs for display.
        short_id = session_id if len(session_id) <= 8 else session_id[:8] + '...'
        pieces = [f' {emoji} {short_id} [{status}]']
        duration = format_duration(s.started_at, s.finished_at)
        if duration:
            pieces.append(duration)
        if s.keep_alive:
            pieces.append('🔄')
        line = ' '.join(pieces)
        # Only surface the live URL for sessions that are still active.
        if s.live_url and status == 'active':
            line += f'\n live: {s.live_url}'
        print(line)
    return 0
def _handle_get(args: argparse.Namespace) -> int:
    """Handle 'session get <session_id>' command."""
    try:
        session = get_session(args.session_id)
    except Exception as e:
        print(f'Error: {e}', file=sys.stderr)
        return 1

    if getattr(args, 'json', False):
        print(json.dumps(_session_to_dict(session)))
        return 0

    sid = session.id or args.session_id
    status = session.status or 'unknown'
    emoji = {'active': '🟢', 'stopped': '⏹️'}.get(status, '❓')

    # Header: emoji, shortened id, status, optional duration and proxy cost.
    header = [f'{emoji} {sid[:8]}... [{status}]']
    duration = format_duration(session.started_at, session.finished_at)
    if duration:
        header.append(duration)
    proxy_cost = session.proxy_cost
    if proxy_cost:
        try:
            header.append(f'${float(proxy_cost):.2f}')
        except (ValueError, TypeError):
            # Fall back to raw value if it isn't numeric.
            header.append(f'${proxy_cost}')
    print(' '.join(header))

    if session.keep_alive:
        print(' Keep Alive: Yes')
    if session.live_url:
        print(f' Live URL: {session.live_url}')
    public_share_url = getattr(session, 'public_share_url', None)
    if public_share_url:
        print(f' Public Share: {public_share_url}')
    return 0
def _handle_stop(args: argparse.Namespace) -> int:
    """Handle 'session stop <session_id>' command."""
    # --all delegates to the bulk-stop path.
    if getattr(args, 'all', False):
        return _handle_stop_all(args)

    try:
        stop_session(args.session_id)
    except Exception as e:
        print(f'Error: {e}', file=sys.stderr)
        return 1

    if getattr(args, 'json', False):
        print(json.dumps({'stopped': args.session_id}))
        return 0
    print(f'Stopped session: {args.session_id}')
    return 0
def _handle_stop_all(args: argparse.Namespace) -> int:
    """Handle 'session stop --all' command."""
    try:
        active = list_sessions(limit=100, status='active')
    except Exception as e:
        print(f'Error listing sessions: {e}', file=sys.stderr)
        return 1

    # Sessions without an ID cannot be stopped; skip them.
    session_ids = [s.id for s in active if s.id]
    if not session_ids:
        print('No active sessions to stop')
        return 0

    stopped, errors = stop_sessions_parallel(session_ids)

    if getattr(args, 'json', False):
        print(json.dumps({'stopped': stopped, 'errors': errors}))
    else:
        if stopped:
            print(f'Stopped {len(stopped)} session(s):')
            for sid in stopped:
                print(f' ✓ {sid[:8]}...')
        if errors:
            print(f'Failed to stop {len(errors)} session(s):')
            for failure in errors:
                print(f' ✗ {failure["id"][:8]}...: {failure["error"]}')
    # Non-zero exit if any stop failed.
    return 1 if errors else 0
def _handle_create(args: argparse.Namespace) -> int:
"""Handle 'session create' command."""
# Parse screen size if provided
screen_width = None
screen_height = None
if hasattr(args, 'screen_size') and args.screen_size:
try:
w, h = args.screen_size.lower().split('x')
screen_width = int(w)
screen_height = int(h)
except ValueError:
print('Error: Invalid screen size format. Use WxH (e.g., 1920x1080)', file=sys.stderr)
return 1
try:
session = create_session(
profile_id=getattr(args, 'profile', None),
proxy_country=getattr(args, 'proxy_country', None),
keep_alive=getattr(args, 'keep_alive', None),
persist_memory=getattr(args, 'persist_memory', None),
start_url=getattr(args, 'start_url', None),
screen_width=screen_width,
screen_height=screen_height,
)
except Exception as e:
print(f'Error: {e}', file=sys.stderr)
return 1
if getattr(args, 'json', False):
print(json.dumps(_session_to_dict(session)))
else:
print(f'Created session: {session.id}')
if session.live_url:
print(f' Live URL: {session.live_url}')
return 0
def _handle_share(args: argparse.Namespace) -> int:
    """Handle 'session share <session_id>' command."""
    sid = args.session_id

    # --delete removes the existing share instead of creating one.
    if getattr(args, 'delete', False):
        try:
            delete_public_share(sid)
        except Exception as e:
            print(f'Error: {e}', file=sys.stderr)
            return 1
        if getattr(args, 'json', False):
            print(json.dumps({'deleted': sid}))
        else:
            print(f'Deleted public share for session: {sid}')
        return 0

    try:
        share = create_public_share(sid)
    except Exception as e:
        print(f'Error: {e}', file=sys.stderr)
        return 1

    if getattr(args, 'json', False):
        payload = {
            'sessionId': sid,
            'url': share.share_url,
            'shareToken': share.share_token,
            'viewCount': share.view_count,
        }
        print(json.dumps(payload))
    else:
        print(f'Public share created for session: {sid}')
        if share.share_url:
            print(f' URL: {share.share_url}')
    return 0
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/skill_cli/commands/cloud_session.py",
"license": "MIT License",
"lines": 341,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/skill_cli/commands/cloud_task.py | """Cloud task SDK wrappers and CLI handlers.
This module provides:
- SDK wrapper functions for the Browser-Use Cloud Task API
- CLI command handlers for `browser-use task <command>`
"""
import argparse
import json
import logging
import sys
from typing import Any
from browser_use_sdk.types.task_created_response import TaskCreatedResponse
from browser_use_sdk.types.task_item_view import TaskItemView
from browser_use_sdk.types.task_log_file_response import TaskLogFileResponse
from browser_use_sdk.types.task_view import TaskView
from browser_use.skill_cli.commands.utils import format_duration, get_sdk_client
logger = logging.getLogger(__name__)
def _filter_none(kwargs: dict[str, Any]) -> dict[str, Any]:
"""Filter out None values from kwargs (SDK passes them as null, API rejects)."""
return {k: v for k, v in kwargs.items() if v is not None}
# ============ SDK Wrappers ============
def create_task(task: str, **kwargs: Any) -> TaskCreatedResponse:
    """Create a cloud task via the API.

    Args:
        task: Task description for the agent.
        **kwargs: Optional task settings forwarded to the SDK, including
            llm, session_id, max_steps, flash_mode, thinking, vision,
            start_url, metadata, secrets, allowed_domains, skill_ids,
            structured_output, judge and judge_ground_truth. None values
            are stripped before the call.

    Returns:
        TaskCreatedResponse with task ID and session ID.
    """
    payload = _filter_none(kwargs)
    payload['task'] = task
    return get_sdk_client().tasks.create_task(**payload)
def get_task(task_id: str) -> TaskView:
    """Fetch full task details (including steps) by ID."""
    client = get_sdk_client()
    return client.tasks.get_task(task_id)
def list_tasks(
    limit: int = 10,
    status: str | None = None,
    session_id: str | None = None,
) -> list[TaskItemView]:
    """Return recent cloud tasks, optionally filtered by status and/or session."""
    extra = _filter_none({'filter_by': status, 'session_id': session_id})
    response = get_sdk_client().tasks.list_tasks(page_size=limit, **extra)
    items = response.items
    return list(items) if items else []
def stop_task(task_id: str) -> TaskView:
    """Request that a running task be stopped."""
    client = get_sdk_client()
    return client.tasks.update_task(task_id, action='stop')
def get_task_logs(task_id: str) -> TaskLogFileResponse:
    """Fetch the execution-log descriptor for a task."""
    client = get_sdk_client()
    return client.tasks.get_task_logs(task_id)
async def poll_until_complete(
    task_id: str,
    stream: bool = False,
    poll_interval: float = 1.0,
) -> TaskView:
    """Poll a task until it reaches a terminal state.

    Args:
        task_id: ID of the task to poll.
        stream: When True, print each status transition as it happens.
        poll_interval: Seconds to sleep between polls.

    Returns:
        The final TaskView once status is finished, stopped or failed.
    """
    import asyncio

    client = get_sdk_client()
    previous_status = None
    while True:
        # SDK calls are blocking; run them off the event loop.
        task = await asyncio.to_thread(client.tasks.get_task, task_id)
        status = task.status
        if stream and status != previous_status:
            print(f'Status: {status}')
            previous_status = status
        if status in ('finished', 'stopped', 'failed'):
            return task
        await asyncio.sleep(poll_interval)
# ============ CLI Handlers ============
def handle_task_command(args: argparse.Namespace) -> int:
    """Dispatch `browser-use task <command>` to its handler.

    Task commands always talk to the cloud API, so remote-mode availability
    and the API key are both validated before dispatching.

    Args:
        args: Parsed command-line arguments.

    Returns:
        Exit code (0 for success, 1 for error).
    """
    from browser_use.skill_cli.api_key import APIKeyRequired, require_api_key
    from browser_use.skill_cli.install_config import is_mode_available

    # Remote mode must be installed for any task command.
    if not is_mode_available('remote'):
        print(
            'Error: Task management requires remote mode.\n'
            'Remote mode is not installed. Reinstall to enable:\n'
            ' curl -fsSL https://browser-use.com/cli/install.sh | bash -s -- --remote-only\n'
            ' curl -fsSL https://browser-use.com/cli/install.sh | bash -s -- --full',
            file=sys.stderr,
        )
        return 1

    # An API key is required for all cloud task operations.
    try:
        require_api_key('Cloud tasks')
    except APIKeyRequired as e:
        print(f'Error: {e}', file=sys.stderr)
        return 1

    dispatch = {
        'list': _handle_list,
        'status': _handle_status,
        'stop': _handle_stop,
        'logs': _handle_logs,
    }
    handler = dispatch.get(args.task_command)
    if handler is None:
        print('Usage: browser-use task <command>')
        print('Commands: list, status <task_id>, stop <task_id>, logs <task_id>')
        return 1
    return handler(args)
# ============ CLI Helper Functions ============
def _task_item_to_dict(task: Any) -> dict[str, Any]:
"""Convert SDK TaskItemView to dict for JSON output."""
return {
'id': task.id,
'status': task.status,
'task': task.task,
'sessionId': task.session_id,
}
def _task_to_dict(task: Any) -> dict[str, Any]:
"""Convert SDK TaskView to dict for JSON output."""
return {
'id': task.id,
'status': task.status,
'task': task.task,
'output': task.output,
'cost': task.cost,
'sessionId': task.session_id,
'startedAt': task.started_at.isoformat() if task.started_at else None,
'finishedAt': task.finished_at.isoformat() if task.finished_at else None,
'steps': [_step_to_dict(s) for s in (task.steps or [])],
}
def _step_to_dict(step: Any) -> dict[str, Any]:
"""Convert SDK step to dict for JSON output."""
return {
'number': step.number,
'url': step.url,
'memory': step.memory,
'actions': step.actions,
}
def _handle_list(args: argparse.Namespace) -> int:
    """Handle 'task list' command."""
    status_filter = getattr(args, 'status', None)
    session_filter = getattr(args, 'session', None)
    try:
        tasks = list_tasks(
            limit=args.limit,
            status=status_filter,
            session_id=session_filter,
        )
    except Exception as e:
        print(f'Error: {e}', file=sys.stderr)
        return 1

    if getattr(args, 'json', False):
        print(json.dumps([_task_item_to_dict(t) for t in tasks]))
        return 0

    if not tasks:
        status_msg = f' with status "{status_filter}"' if status_filter else ''
        session_msg = f' in session "{session_filter}"' if session_filter else ''
        print(f'No tasks found{status_msg}{session_msg}')
        return 0

    if status_filter:
        print(f'{status_filter.capitalize()} tasks ({len(tasks)}):')
    else:
        print(f'Tasks ({len(tasks)}):')
    for item in tasks:
        tid = item.id or 'unknown'
        status = item.status or 'unknown'
        desc = item.task or ''
        # Keep descriptions to at most 50 characters.
        if len(desc) > 50:
            desc = desc[:47] + '...'
        emoji = {
            'started': '🔄',
            'running': '🔄',
            'finished': '✅',
            'stopped': '⏹️',
            'failed': '❌',
        }.get(status, '❓')
        print(f' {emoji} {tid[:8]}... [{status}] {desc}')
    return 0
def _handle_status(args: argparse.Namespace) -> int:
    """Handle 'task status <task_id>' command.

    Prints a one-line header (status emoji, shortened ID, cost, duration)
    followed by agent steps. Verbosity is controlled by optional flags on
    the namespace: --compact / --verbose show all steps, --last N keeps the
    last N, --reverse flips order, --step N shows a single step, and --json
    emits the full task as JSON instead.

    Returns:
        0 on success, 1 if the task could not be fetched.
    """
    try:
        # Use get_task() for full details including steps
        task = get_task(args.task_id)
    except Exception as e:
        print(f'Error: {e}', file=sys.stderr)
        return 1
    if getattr(args, 'json', False):
        print(json.dumps(_task_to_dict(task)))
    else:
        # Defensive defaults: SDK fields may be None.
        task_id = task.id or args.task_id
        task_status = task.status or 'unknown'
        output = task.output
        cost = task.cost
        steps = task.steps or []
        started_at = task.started_at
        finished_at = task.finished_at
        # All display flags are optional attributes on the namespace.
        compact = getattr(args, 'compact', False)
        verbose = getattr(args, 'verbose', False)
        last_n = getattr(args, 'last', None)
        reverse = getattr(args, 'reverse', False)
        specific_step = getattr(args, 'step', None)
        # Determine display mode:
        # - Default: show only latest step
        # - --compact: show all steps with reasoning
        # - --verbose: show all steps with full details
        show_all_steps = compact or verbose
        # Status emoji
        status_emoji = {
            'started': '🔄',
            'running': '🔄',
            'finished': '✅',
            'stopped': '⏹️',
            'failed': '❌',
        }.get(task_status, '❓')
        # Build header line: status, cost, duration
        parts = [f'{status_emoji} {task_id[:8]}... [{task_status}]']
        if cost is not None:
            parts.append(f'${cost}')
        duration = format_duration(started_at, finished_at)
        if duration:
            parts.append(duration)
        print(' '.join(parts))
        # Show steps
        if steps:
            total_steps = len(steps)
            # Filter to specific step if requested
            if specific_step is not None:
                steps = [s for s in steps if s.number == specific_step]
                if not steps:
                    print(f' Step {specific_step} not found (task has {total_steps} steps)')
                else:
                    print(f' (showing step {specific_step} of {total_steps})')
                    # Display the specific step
                    for step in steps:
                        _print_step(step, verbose)
            elif not show_all_steps:
                # Default mode: show only the latest step
                latest_step = steps[-1]
                earlier_count = total_steps - 1
                if earlier_count > 0:
                    print(f' ... {earlier_count} earlier steps')
                _print_step(latest_step, verbose=False)
            else:
                # --compact or --verbose: show all steps (with optional filters)
                skipped_earlier = 0
                if last_n is not None and last_n < total_steps:
                    skipped_earlier = total_steps - last_n
                    steps = steps[-last_n:]
                # Apply --reverse
                if reverse:
                    steps = list(reversed(steps))
                # Show count info
                if skipped_earlier > 0:
                    print(f' ... {skipped_earlier} earlier steps')
                # Display steps
                for step in steps:
                    _print_step(step, verbose)
        if output:
            print(f'\nOutput: {output}')
    return 0
def _print_step(step: Any, verbose: bool) -> None:
"""Print a single step in compact or verbose format."""
step_num = step.number if step.number is not None else '?'
memory = step.memory or ''
if verbose:
url = step.url or ''
actions = step.actions or []
# Truncate URL for display
short_url = url[:60] + '...' if len(url) > 60 else url
print(f' [{step_num}] {short_url}')
if memory:
# Truncate memory/reasoning for display
short_memory = memory[:100] + '...' if len(memory) > 100 else memory
print(f' Reasoning: {short_memory}')
if actions:
for action in actions[:2]: # Show max 2 actions per step
# Truncate action for display
short_action = action[:70] + '...' if len(action) > 70 else action
print(f' Action: {short_action}')
if len(actions) > 2:
print(f' ... and {len(actions) - 2} more actions')
else:
# Compact mode: just step number and reasoning
if memory:
# Truncate reasoning for compact display
short_memory = memory[:80] + '...' if len(memory) > 80 else memory
print(f' {step_num}. {short_memory}')
else:
print(f' {step_num}. (no reasoning)')
def _handle_stop(args: argparse.Namespace) -> int:
    """Handle 'task stop <task_id>' command."""
    try:
        stop_task(args.task_id)
    except Exception as e:
        print(f'Error: {e}', file=sys.stderr)
        return 1

    if getattr(args, 'json', False):
        print(json.dumps({'stopped': args.task_id}))
    else:
        print(f'Stopped task: {args.task_id}')
    return 0
def _handle_logs(args: argparse.Namespace) -> int:
    """Handle 'task logs <task_id>' command."""
    try:
        result = get_task_logs(args.task_id)
    except Exception as e:
        print(f'Error: {e}', file=sys.stderr)
        return 1

    url = result.download_url
    if getattr(args, 'json', False):
        print(json.dumps({'downloadUrl': url}))
        return 0
    if url:
        print(f'Download logs: {url}')
    else:
        print('No logs available for this task')
    return 0
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/skill_cli/commands/cloud_task.py",
"license": "MIT License",
"lines": 334,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/skill_cli/commands/doctor.py | """Doctor command - check installation and dependencies.
Validates that browser-use is properly installed and all dependencies
are available. Provides helpful diagnostic information and fixes.
"""
import logging
from typing import Any
# Module-level logger, namespaced by module path.
logger = logging.getLogger(__name__)

# Subcommand names this module handles — presumably consumed by the CLI
# dispatcher; confirm against the skill_cli command registry.
COMMANDS = {'doctor'}
async def handle() -> dict[str, Any]:
    """Run all health checks and return an aggregate report.

    Returns:
        Dict with overall 'status' ('healthy' or 'issues_found'), the
        per-check results under 'checks', and a one-line 'summary'.
    """
    # Dict literals evaluate in order, so checks run in the same sequence
    # as before: package, browser, api_key, cloudflared, network.
    checks: dict[str, dict[str, Any]] = {
        'package': _check_package(),
        'browser': _check_browser(),
        'api_key': _check_api_key_config(),
        'cloudflared': _check_cloudflared(),
        'network': await _check_network(),
    }
    healthy = all(c.get('status') == 'ok' for c in checks.values())
    return {
        'status': 'healthy' if healthy else 'issues_found',
        'checks': checks,
        'summary': _summarize_checks(checks),
    }
def _check_package() -> dict[str, Any]:
"""Check if browser-use is installed."""
try:
import browser_use
version = getattr(browser_use, '__version__', 'unknown')
return {
'status': 'ok',
'message': f'browser-use {version}',
}
except ImportError:
return {
'status': 'error',
'message': 'browser-use not installed',
'fix': 'pip install browser-use',
}
def _check_browser() -> dict[str, Any]:
"""Check if browser is available."""
try:
from browser_use.browser.profile import BrowserProfile
# Just check if we can import and create a profile
profile = BrowserProfile(headless=True)
return {
'status': 'ok',
'message': 'Browser profile available',
}
except Exception as e:
return {
'status': 'warning',
'message': f'Browser may not be available: {e}',
'note': 'Will be installed on first use',
}
def _check_api_key_config() -> dict[str, Any]:
    """Report whether an API key is configured."""
    from browser_use.skill_cli.api_key import check_api_key

    info = check_api_key()
    if not info['available']:
        return {
            'status': 'missing',
            'message': 'No API key configured',
            'note': 'Required for remote browser. Get one at https://browser-use.com/new-api-key',
        }
    return {
        'status': 'ok',
        'message': f'API key configured ({info["source"]})',
    }
def _check_cloudflared() -> dict[str, Any]:
    """Report cloudflared availability via the tunnel manager."""
    from browser_use.skill_cli.tunnel import get_tunnel_manager

    info = get_tunnel_manager().get_status()
    if not info['available']:
        return {
            'status': 'missing',
            'message': 'Cloudflared not available',
            'note': 'Will be auto-installed on first tunnel use',
        }
    return {
        'status': 'ok',
        'message': f'Cloudflared available ({info["source"]})',
        'note': info.get('note'),
    }
async def _check_network() -> dict[str, Any]:
    """Best-effort network connectivity probe; never hard-fails."""
    try:
        import httpx

        async with httpx.AsyncClient(timeout=5.0) as client:
            # HEAD a reliable public endpoint; anything below 500 counts
            # as reachable.
            response = await client.head('https://api.github.com', follow_redirects=True)
            if response.status_code < 500:
                return {
                    'status': 'ok',
                    'message': 'Network connectivity OK',
                }
    except Exception as e:
        logger.debug(f'Network check failed: {e}')
    # Reached on exception or on a 5xx response — report inconclusive.
    return {
        'status': 'warning',
        'message': 'Network connectivity check inconclusive',
        'note': 'Some features may not work offline',
    }
def _summarize_checks(checks: dict[str, dict[str, Any]]) -> str:
"""Generate a summary of check results."""
ok = sum(1 for c in checks.values() if c.get('status') == 'ok')
warning = sum(1 for c in checks.values() if c.get('status') == 'warning')
error = sum(1 for c in checks.values() if c.get('status') == 'error')
missing = sum(1 for c in checks.values() if c.get('status') == 'missing')
total = len(checks)
parts = [f'{ok}/{total} checks passed']
if warning > 0:
parts.append(f'{warning} warnings')
if error > 0:
parts.append(f'{error} errors')
if missing > 0:
parts.append(f'{missing} missing')
return ', '.join(parts)
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/skill_cli/commands/doctor.py",
"license": "MIT License",
"lines": 125,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/skill_cli/commands/profile.py | """Profile management command handlers.
Unified profile management that works with both local Chrome profiles and cloud profiles.
The behavior is determined by the browser mode (-b real or -b remote).
"""
import argparse
import json
import logging
import sys
import tempfile
from pathlib import Path
from typing import Any, Literal
from browser_use.skill_cli.commands.utils import get_sdk_client
# Module-level logger, namespaced by module path.
logger = logging.getLogger(__name__)

# The two profile backends: 'real' = local Chrome profiles,
# 'remote' = cloud profiles (see module docstring).
ProfileMode = Literal['real', 'remote']
class ProfileModeError(Exception):
    """Raised when profile mode cannot be determined or is invalid."""
def get_profile_mode(args: argparse.Namespace) -> ProfileMode:
    """Resolve the profile mode from the -b flag or the install config.

    Args:
        args: Parsed command-line arguments (may carry a 'browser' attribute).

    Returns:
        'real' for local Chrome profiles, 'remote' for cloud profiles.

    Raises:
        ProfileModeError: For chromium mode, ambiguous installs, or when no
            mode is available.
    """
    from browser_use.skill_cli.install_config import is_mode_available

    browser_mode = getattr(args, 'browser', None)

    # Explicit mode wins; chromium is explicitly unsupported.
    if browser_mode == 'chromium':
        raise ProfileModeError(
            'Profile commands are not supported in chromium mode.\n'
            'Use -b real for local Chrome profiles or -b remote for cloud profiles.'
        )
    if browser_mode in ('real', 'remote'):
        return browser_mode

    # No explicit mode — infer from what is installed.
    has_local = is_mode_available('real')
    has_remote = is_mode_available('remote')
    if has_local and has_remote:
        raise ProfileModeError(
            'Both local and remote modes are available.\n'
            'Specify -b real for local Chrome profiles or -b remote for cloud profiles.'
        )
    if has_local:
        return 'real'
    if has_remote:
        return 'remote'
    raise ProfileModeError('No profile modes available. Run browser-use setup first.')
def handle_profile_command(args: argparse.Namespace) -> int:
    """Route profile subcommands to the local or cloud implementation."""
    command = args.profile_command

    if command is None:
        _print_usage()
        return 1

    # sync is special (local → cloud), so it skips mode inference.
    if command == 'sync':
        return _handle_sync(args)

    try:
        mode = get_profile_mode(args)
    except ProfileModeError as e:
        print(f'Error: {e}', file=sys.stderr)
        return 1

    dispatch = {
        'list': _handle_list,
        'get': _handle_get,
        'create': _handle_create,
        'update': _handle_update,
        'delete': _handle_delete,
        'cookies': _handle_cookies,
    }
    handler = dispatch.get(command)
    if handler is None:
        _print_usage()
        return 1
    return handler(args, mode)
def _print_usage() -> None:
"""Print profile command usage."""
print('Usage: browser-use [-b real|remote] profile <command>')
print()
print('Commands:')
print(' list List profiles')
print(' get <id> Get profile details')
print(' create Create a new profile (remote only)')
print(' update <id> Update profile')
print(' delete <id> Delete profile')
print(' cookies <id> Show cookies by domain (real only)')
print(' sync Sync local profile to cloud')
print()
print('The -b flag determines which profile system to use:')
print(' -b real Local Chrome profiles')
print(' -b remote Cloud profiles (requires API key)')
# -----------------------------------------------------------------------------
# List profiles
# -----------------------------------------------------------------------------
def _handle_list(args: argparse.Namespace, mode: ProfileMode) -> int:
    """Handle 'profile list' by delegating to the mode-specific lister."""
    if mode == 'real':
        return _list_local_profiles(args)
    return _list_cloud_profiles(args)
def _list_local_profiles(args: argparse.Namespace) -> int:
    """Print local Chrome profiles, as JSON or human-readable text."""
    profiles = list_local_chrome_profiles()

    if getattr(args, 'json', False):
        print(json.dumps({'profiles': profiles}))
        return 0

    if not profiles:
        print('No Chrome profiles found')
        return 0

    print('Local Chrome profiles:')
    for entry in profiles:
        print(f' {entry["id"]}: {entry["name"]} ({entry["email"]})')
    return 0
def _list_cloud_profiles(args: argparse.Namespace) -> int:
    """List cloud profiles via the SDK, honoring pagination flags."""
    from browser_use.skill_cli.api_key import APIKeyRequired

    page = getattr(args, 'page', 1)
    page_size = getattr(args, 'page_size', 20)
    try:
        client = get_sdk_client()
        response = client.profiles.list_profiles(page_number=page, page_size=page_size)
    except APIKeyRequired as e:
        print(f'Error: {e}', file=sys.stderr)
        return 1
    except Exception as e:
        print(f'Error: {e}', file=sys.stderr)
        return 1

    if getattr(args, 'json', False):
        # Flatten SDK objects into a JSON-serializable structure.
        print(
            json.dumps(
                {
                    'items': [{'id': p.id, 'name': p.name} for p in response.items],
                    'totalItems': response.total_items,
                    'pageNumber': response.page_number,
                    'pageSize': response.page_size,
                }
            )
        )
        return 0

    if not response.items:
        print('No cloud profiles found')
        return 0

    print(f'Cloud profiles ({len(response.items)}/{response.total_items}):')
    for p in response.items:
        print(f' {p.id}: {p.name or "Unnamed"}')
    return 0
# -----------------------------------------------------------------------------
# Get profile
# -----------------------------------------------------------------------------
def _handle_get(args: argparse.Namespace, mode: ProfileMode) -> int:
    """Handle 'profile get <id>' by delegating to the mode-specific getter."""
    if mode == 'real':
        return _get_local_profile(args)
    return _get_cloud_profile(args)
def _get_local_profile(args: argparse.Namespace) -> int:
    """Look up a local Chrome profile by id or display name."""
    wanted = args.id
    # First profile whose id or name matches wins, preserving scan order.
    match = next(
        (p for p in list_local_chrome_profiles() if wanted in (p['id'], p['name'])),
        None,
    )
    if match is None:
        print(f'Error: Profile "{wanted}" not found', file=sys.stderr)
        return 1

    if getattr(args, 'json', False):
        print(json.dumps(match))
        return 0

    print(f'Profile: {match["id"]}')
    print(f' Name: {match["name"]}')
    print(f' Email: {match["email"]}')
    return 0
def _get_cloud_profile(args: argparse.Namespace) -> int:
    """Fetch and display one cloud profile."""
    from browser_use.skill_cli.api_key import APIKeyRequired

    try:
        client = get_sdk_client()
        profile = client.profiles.get_profile(args.id)
    except APIKeyRequired as e:
        print(f'Error: {e}', file=sys.stderr)
        return 1
    except Exception as e:
        print(f'Error: {e}', file=sys.stderr)
        return 1

    if getattr(args, 'json', False):
        created = profile.created_at
        updated = profile.updated_at
        print(
            json.dumps(
                {
                    'id': profile.id,
                    'name': profile.name,
                    'createdAt': created.isoformat() if created else None,
                    'updatedAt': updated.isoformat() if updated else None,
                }
            )
        )
        return 0

    print(f'Profile: {profile.id}')
    if profile.name:
        print(f' Name: {profile.name}')
    if profile.created_at:
        print(f' Created: {profile.created_at.isoformat()}')
    if profile.updated_at:
        print(f' Updated: {profile.updated_at.isoformat()}')
    return 0
# -----------------------------------------------------------------------------
# Create profile
# -----------------------------------------------------------------------------
def _handle_create(args: argparse.Namespace, mode: ProfileMode) -> int:
"""Handle 'profile create' command."""
if mode == 'real':
print('Error: Cannot create local Chrome profiles via CLI.', file=sys.stderr)
print('Use Chrome browser to create new profiles.', file=sys.stderr)
return 1
return _create_cloud_profile(args)
def _create_cloud_profile(args: argparse.Namespace) -> int:
    """Create a cloud profile via the SDK."""
    from browser_use.skill_cli.api_key import APIKeyRequired

    payload = {'name': args.name} if args.name else {}
    try:
        profile = get_sdk_client().profiles.create_profile(**payload)
    except APIKeyRequired as e:
        print(f'Error: {e}', file=sys.stderr)
        return 1
    except Exception as e:
        print(f'Error: {e}', file=sys.stderr)
        return 1

    if getattr(args, 'json', False):
        print(json.dumps({'id': profile.id, 'name': profile.name}))
    else:
        print(f'Created profile: {profile.id}')
    return 0
# -----------------------------------------------------------------------------
# Update profile
# -----------------------------------------------------------------------------
def _handle_update(args: argparse.Namespace, mode: ProfileMode) -> int:
    """Handle 'profile update <id>' command.

    Updates only apply to cloud profiles; local Chrome profiles are edited
    through Chrome's own settings UI.
    """
    if mode != 'real':
        return _update_cloud_profile(args)
    print('Error: Cannot update local Chrome profiles via CLI.', file=sys.stderr)
    print('Use Chrome browser settings to update profiles.', file=sys.stderr)
    return 1
def _update_cloud_profile(args: argparse.Namespace) -> int:
    """Update a cloud profile.

    Returns 0 on success, 1 when authentication or the API call fails.
    """
    from browser_use.skill_cli.api_key import APIKeyRequired

    try:
        client = get_sdk_client()
        # Only send the fields the user actually provided.
        changes = {'name': args.name} if args.name else {}
        profile = client.profiles.update_profile(args.id, **changes)
    except APIKeyRequired as e:
        print(f'Error: {e}', file=sys.stderr)
        return 1
    except Exception as e:
        print(f'Error: {e}', file=sys.stderr)
        return 1
    if getattr(args, 'json', False):
        print(json.dumps({'id': profile.id, 'name': profile.name}))
        return 0
    print(f'Updated profile: {profile.id}')
    return 0
# -----------------------------------------------------------------------------
# Delete profile
# -----------------------------------------------------------------------------
def _handle_delete(args: argparse.Namespace, mode: ProfileMode) -> int:
    """Handle 'profile delete <id>' command.

    Deletion only applies to cloud profiles; removing local Chrome profiles
    is left to Chrome's settings UI.
    """
    if mode != 'real':
        return _delete_cloud_profile(args)
    print('Error: Cannot delete local Chrome profiles via CLI.', file=sys.stderr)
    print('Use Chrome browser settings to remove profiles.', file=sys.stderr)
    return 1
def _delete_cloud_profile(args: argparse.Namespace) -> int:
    """Delete a cloud profile.

    Returns 0 on success, 1 when authentication or the API call fails.
    """
    from browser_use.skill_cli.api_key import APIKeyRequired

    try:
        get_sdk_client().profiles.delete_browser_profile(args.id)
    except APIKeyRequired as e:
        print(f'Error: {e}', file=sys.stderr)
        return 1
    except Exception as e:
        print(f'Error: {e}', file=sys.stderr)
        return 1
    if getattr(args, 'json', False):
        print(json.dumps({'deleted': args.id}))
        return 0
    print(f'Deleted profile: {args.id}')
    return 0
# -----------------------------------------------------------------------------
# Cookies (local only)
# -----------------------------------------------------------------------------
def _handle_cookies(args: argparse.Namespace, mode: ProfileMode) -> int:
    """Handle 'profile cookies <id>' command.

    Cookie listing works only against local Chrome profiles; cloud sessions
    do not expose their cookie jar here.
    """
    if mode != 'remote':
        return _list_profile_cookies(args)
    print('Error: Cookie listing is only available for local Chrome profiles.', file=sys.stderr)
    print('Use -b real to access local profile cookies.', file=sys.stderr)
    return 1
def _list_profile_cookies(args: argparse.Namespace) -> int:
    """List cookies by domain in a local Chrome profile.

    Resolves args.id against local Chrome profiles (by id or display name),
    reads cookies via a headless CDP session, and prints per-domain counts
    (or JSON with --json). Returns 0 on success, 1 on any lookup failure.
    """
    import asyncio
    from browser_use.skill_cli.sessions import create_browser_session
    # Get local profiles
    local_profiles = list_local_chrome_profiles()
    if not local_profiles:
        print('Error: No local Chrome profiles found', file=sys.stderr)
        return 1
    # Find the matching profile — accept either the profile id or its display name
    profile_arg = args.id
    selected_profile = None
    for p in local_profiles:
        if p['id'] == profile_arg or p['name'] == profile_arg:
            selected_profile = p
            break
    if not selected_profile:
        print(f'Error: Profile "{profile_arg}" not found', file=sys.stderr)
        print('Available profiles:')
        for p in local_profiles:
            print(f' {p["id"]}: {p["name"]}')
        return 1
    profile_id = selected_profile['id']
    print(f'Loading cookies from: {selected_profile["name"]} ({selected_profile["email"]})')
    async def get_cookies():
        # Headless session against the local profile gives CDP cookie access.
        local_session = await create_browser_session('real', headed=False, profile=profile_id)
        await local_session.start()
        try:
            return await local_session._cdp_get_cookies()
        finally:
            await local_session.kill()
    # This sync CLI entry point never runs inside an event loop, so
    # asyncio.run() is the correct (and non-deprecated) way to drive the
    # coroutine — it replaces the old get_event_loop()/run_until_complete
    # pattern, whose RuntimeError fallback called asyncio.run() anyway.
    cookies = asyncio.run(get_cookies())
    # Group cookies by domain
    domains: dict[str, int] = {}
    for cookie in cookies:
        domain = cookie.get('domain', 'unknown')
        # Normalize domain (remove leading dot)
        if domain.startswith('.'):
            domain = domain[1:]
        domains[domain] = domains.get(domain, 0) + 1
    # Sort by count descending
    sorted_domains = sorted(domains.items(), key=lambda x: x[1], reverse=True)
    if getattr(args, 'json', False):
        print(json.dumps({'domains': dict(sorted_domains), 'total_cookies': len(cookies)}))
    else:
        print(f'\nCookies by domain ({len(cookies)} total):')
        for domain, count in sorted_domains[:20]:  # Show top 20
            print(f' {domain}: {count}')
        if len(sorted_domains) > 20:
            print(f' ... and {len(sorted_domains) - 20} more domains')
        print('\nTo sync cookies to cloud:')
        print(f' browser-use profile sync --from "{profile_id}" --domain <domain>')
    return 0
# -----------------------------------------------------------------------------
# Sync (local → cloud)
# -----------------------------------------------------------------------------
def _handle_sync(args: argparse.Namespace) -> int:
    """Handle 'profile sync' command - sync local profile to cloud.

    Pipeline: validate API key, resolve the local Chrome profile named by
    --from, create a new cloud profile, export cookies from the local
    profile via CDP (optionally filtered by --domain), import them into the
    cloud profile, then remove the temp cookie file. On any failure after
    creation, the cloud profile is deleted so nothing half-synced remains.
    Returns 0 on success, 1 on failure.
    """
    import asyncio
    from browser_use.skill_cli.api_key import APIKeyRequired
    from browser_use.skill_cli.sessions import create_browser_session
    # Get SDK client (validates API key)
    try:
        client = get_sdk_client()
    except APIKeyRequired as e:
        print(f'Error: {e}', file=sys.stderr)
        return 1
    except Exception as e:
        print(f'Error: {e}', file=sys.stderr)
        return 1
    # Get local profiles
    local_profiles = list_local_chrome_profiles()
    if not local_profiles:
        print('Error: No local Chrome profiles found', file=sys.stderr)
        return 1
    # Determine which profile to sync
    from_profile = args.from_profile
    if not from_profile:
        # Show available profiles and ask user to specify
        print('Available local profiles:')
        for p in local_profiles:
            print(f' {p["id"]}: {p["name"]} ({p["email"]})')
        print()
        print('Use --from to specify a profile:')
        print(' browser-use profile sync --from "Default"')
        print(' browser-use profile sync --from "Profile 1"')
        return 1
    # Find the matching profile (accepts either the profile id or display name)
    selected_profile = None
    for p in local_profiles:
        if p['id'] == from_profile or p['name'] == from_profile:
            selected_profile = p
            break
    if not selected_profile:
        print(f'Error: Profile "{from_profile}" not found', file=sys.stderr)
        print('Available profiles:')
        for p in local_profiles:
            print(f' {p["id"]}: {p["name"]}')
        return 1
    profile_id = selected_profile['id']
    profile_name = selected_profile['name']
    domain_filter = getattr(args, 'domain', None)
    # Generate cloud profile name (user-provided --name wins)
    cloud_name = args.name if args.name else None
    if not cloud_name:
        if domain_filter:
            cloud_name = f'Chrome - {profile_name} ({domain_filter})'
        else:
            cloud_name = f'Chrome - {profile_name}'
    # Use stderr for progress when JSON output is requested
    json_output = getattr(args, 'json', False)
    out = sys.stderr if json_output else sys.stdout
    def log(msg: str) -> None:
        # Progress goes to stderr in --json mode so stdout stays machine-readable.
        print(msg, file=out)
    if domain_filter:
        log(f'Syncing: {profile_name} → {domain_filter} cookies only')
    else:
        log(f'Syncing: {profile_name} ({selected_profile["email"]})')
    # Step 1: Create cloud profile
    log(' Creating cloud profile...')
    try:
        cloud_profile = client.profiles.create_profile(name=cloud_name)
        cloud_profile_id = cloud_profile.id
    except Exception as e:
        print(f'Error creating cloud profile: {e}', file=sys.stderr)
        return 1
    log(f' ✓ Created: {cloud_profile_id}')
    def cleanup_cloud_profile() -> None:
        """Delete the cloud profile on failure."""
        # Best effort — a failed delete must not mask the original error.
        try:
            client.profiles.delete_browser_profile(cloud_profile_id)
        except Exception:
            pass
    # Step 2: Export cookies from local profile
    async def sync_cookies():
        # Returns (count, temp_file_path) on success or (0, error_message).
        log(' Exporting cookies from local profile...')
        local_session = await create_browser_session('real', headed=False, profile=profile_id)
        await local_session.start()
        try:
            cookies = await local_session._cdp_get_cookies()
            if not cookies:
                return 0, 'No cookies found in local profile'
            # Filter by domain if specified
            if domain_filter:
                cookies = [c for c in cookies if domain_filter in c.get('domain', '')]
                if not cookies:
                    return 0, f'No cookies found for domain: {domain_filter}'
            log(f' ✓ Found {len(cookies)} cookies')
            # Save to temp file - convert Cookie objects to dicts for JSON serialization
            cookies_file = Path(tempfile.gettempdir()) / f'browser-use-sync-{cloud_profile_id}.json'
            cookies_data = [dict(c) if hasattr(c, '__dict__') else c for c in cookies]
            cookies_file.write_text(json.dumps(cookies_data))
            return len(cookies), str(cookies_file)
        finally:
            await local_session.kill()
    # NOTE(review): asyncio.get_event_loop() without a running loop is
    # deprecated; the RuntimeError fallback below covers that path.
    try:
        loop = asyncio.get_event_loop()
        if loop.is_running():
            # Already inside a loop (embedded use) — run in a worker thread.
            import concurrent.futures
            with concurrent.futures.ThreadPoolExecutor() as executor:
                future = executor.submit(asyncio.run, sync_cookies())
                cookie_count, cookies_file = future.result()
        else:
            cookie_count, cookies_file = loop.run_until_complete(sync_cookies())
    except RuntimeError:
        cookie_count, cookies_file = asyncio.run(sync_cookies())
    if cookie_count == 0:
        log(f' ⚠ {cookies_file}')  # cookies_file contains error message
        cleanup_cloud_profile()
        return 1
    # Step 3: Import cookies to cloud profile
    async def import_to_cloud():
        log(' Importing cookies to cloud profile...')
        remote_session = await create_browser_session('remote', headed=False, profile=cloud_profile_id)
        await remote_session.start()
        try:
            cookies = json.loads(Path(cookies_file).read_text())
            await remote_session._cdp_set_cookies(cookies)
            return True
        finally:
            await remote_session.kill()
    try:
        loop = asyncio.get_event_loop()
        if loop.is_running():
            import concurrent.futures
            with concurrent.futures.ThreadPoolExecutor() as executor:
                future = executor.submit(asyncio.run, import_to_cloud())
                future.result()
        else:
            loop.run_until_complete(import_to_cloud())
    except RuntimeError:
        asyncio.run(import_to_cloud())
    except Exception as e:
        log(f' ⚠ Failed to import cookies: {e}')
        cleanup_cloud_profile()
        return 1
    # Cleanup temp file
    try:
        Path(cookies_file).unlink()
    except Exception:
        pass
    log('✓ Profile synced successfully!')
    log(f' Cloud profile ID: {cloud_profile_id}')
    log('')
    log('To use this profile:')
    log(f' browser-use -b remote --profile {cloud_profile_id} open <url>')
    if json_output:
        print(
            json.dumps(
                {
                    'success': True,
                    'profile_id': cloud_profile_id,
                    'cookies_synced': cookie_count,
                }
            )
        )
    return 0
# -----------------------------------------------------------------------------
# Helpers
# -----------------------------------------------------------------------------
def list_local_chrome_profiles() -> list[dict[str, Any]]:
    """List local Chrome profiles from the Local State file.

    Returns a list of {'id', 'name', 'email'} dicts, or [] when the file is
    missing or unreadable.
    """
    import platform

    # Chrome keeps profile metadata in a per-OS "Local State" JSON file.
    per_os = {
        'Darwin': 'Library/Application Support/Google/Chrome/Local State',
        'Windows': 'AppData/Local/Google/Chrome/User Data/Local State',
    }
    relative = per_os.get(platform.system(), '.config/google-chrome/Local State')
    local_state = Path.home() / relative
    if not local_state.exists():
        return []
    try:
        info_cache = json.loads(local_state.read_text()).get('profile', {}).get('info_cache', {})
        return [
            {
                'id': profile_id,
                'name': info.get('name', profile_id),
                'email': info.get('user_name', ''),
            }
            for profile_id, info in info_cache.items()
        ]
    except Exception:
        # Malformed Local State — treat as "no profiles" rather than crash.
        return []
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/skill_cli/commands/profile.py",
"license": "MIT License",
"lines": 558,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/skill_cli/commands/python_exec.py | """Python execution command handler."""
import asyncio
import logging
from pathlib import Path
from typing import Any
from browser_use.skill_cli.sessions import SessionInfo
logger = logging.getLogger(__name__)
async def handle(session: SessionInfo, params: dict[str, Any]) -> Any:
    """Handle python command.

    Supports:
    - python "<code>" - Execute Python code
    - python --file script.py - Execute Python file
    - python --reset - Reset namespace
    - python --vars - Show defined variables
    """
    python_session = session.python_session
    browser_session = session.browser_session
    # --reset: drop every user-defined name from the persistent namespace.
    if params.get('reset'):
        python_session.reset()
        return {'reset': True, 'message': 'Python namespace cleared'}
    # --vars: report the currently defined variables.
    if params.get('vars'):
        defined = python_session.get_variables()
        return {'variables': defined, 'count': len(defined)}
    source = params.get('code')
    file_arg = params.get('file')
    if file_arg:
        script = Path(file_arg)
        if not script.exists():
            return {'success': False, 'error': f'File not found: {script}'}
        if script.is_dir():
            return {'success': False, 'error': f'Path is a directory, not a file: {script}'}
        source = script.read_text()
    if not source:
        return {'success': False, 'error': 'No code provided. Use: python "<code>" or --file script.py'}
    # Run in a worker thread so browser operations can schedule back onto the loop.
    loop = asyncio.get_running_loop()
    result = await loop.run_in_executor(None, python_session.execute, source, browser_session, loop)
    if not result.success:
        return {'error': result.error or 'Unknown error'}
    # Raw text output renders cleanly in the CLI; empty output becomes {}.
    return {'_raw_text': result.output} if result.output else {}
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/skill_cli/commands/python_exec.py",
"license": "MIT License",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:browser_use/skill_cli/commands/session.py | """Session management command handlers."""
import logging
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from browser_use.skill_cli.sessions import SessionRegistry
logger = logging.getLogger(__name__)
# Top-level command names this module's handle() dispatches on.
COMMANDS = {'sessions', 'close'}
async def handle(action: str, session_name: str, registry: 'SessionRegistry', params: dict[str, Any]) -> Any:
    """Handle session management command.

    'sessions' lists active sessions; 'close' shuts down either this
    server's session or (with --all) every registered session. Any other
    action raises ValueError.
    """
    if action == 'sessions':
        active = registry.list_sessions()
        return {'sessions': active, 'count': len(active)}
    if action == 'close':
        if not params.get('all'):
            # Close only this server's session, then signal shutdown.
            await registry.close_session(session_name)
            return {'closed': session_name, '_shutdown': True}
        # Snapshot names first, then close everything and stop the server.
        active = registry.list_sessions()
        await registry.close_all()
        return {
            'closed': [entry['name'] for entry in active],
            'count': len(active),
            '_shutdown': True,  # Signal to stop server
        }
    raise ValueError(f'Unknown session action: {action}')
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/skill_cli/commands/session.py",
"license": "MIT License",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:browser_use/skill_cli/commands/utils.py | """Shared utilities for CLI command handlers."""
from datetime import datetime, timezone
from browser_use_sdk import BrowserUse
# Module-level singleton so repeated CLI calls reuse one authenticated client.
_client: BrowserUse | None = None
def get_sdk_client() -> BrowserUse:
    """Get authenticated SDK client (singleton).

    Lazily constructs the client on first use; require_api_key raises if no
    API key is configured.
    """
    global _client
    if _client is not None:
        return _client
    from browser_use.skill_cli.api_key import require_api_key

    _client = BrowserUse(api_key=require_api_key('Cloud API'))
    return _client
def format_duration(started_at: datetime | None, finished_at: datetime | None) -> str:
"""Format duration between two timestamps, or elapsed time if still running."""
if not started_at:
return ''
try:
if finished_at:
end = finished_at
else:
end = datetime.now(timezone.utc)
delta = end - started_at
total_seconds = int(delta.total_seconds())
if total_seconds < 60:
return f'{total_seconds}s'
elif total_seconds < 3600:
minutes = total_seconds // 60
seconds = total_seconds % 60
return f'{minutes}m {seconds}s'
else:
hours = total_seconds // 3600
minutes = (total_seconds % 3600) // 60
return f'{hours}h {minutes}m'
except Exception:
return ''
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/skill_cli/commands/utils.py",
"license": "MIT License",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:browser_use/skill_cli/direct.py | """Serverless CLI for browser-use - runs commands directly without a session server.
Each command reconnects to the browser via CDP WebSocket URL saved to a state file.
The browser process stays alive between commands; only the Python process exits.
Two-tier reconnection:
Tier 1 (Lightweight CDP, ~200ms): Most commands use raw CDPClient + Target.attachToTarget.
No BrowserSession, no watchdogs, no event bus.
Tier 2 (Full BrowserSession, ~3s): Only for `state` (needs DOMWatchdog) and first-time
`open` (needs to launch browser).
Usage:
python -m browser_use.skill_cli.direct open https://example.com
python -m browser_use.skill_cli.direct state
python -m browser_use.skill_cli.direct click 200 400
python -m browser_use.skill_cli.direct screenshot ./shot.png
python -m browser_use.skill_cli.direct close
"""
import asyncio
import base64
import json
import sys
import tempfile
from contextlib import asynccontextmanager
from dataclasses import dataclass
from pathlib import Path
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from cdp_use import CDPClient
from browser_use.browser.session import BrowserSession
# Cross-invocation state (CDP URL, target id, selector cache) lives in a
# well-known temp file so each serverless command can reconnect.
STATE_FILE = Path(tempfile.gettempdir()) / 'browser-use-direct.json'
# ---------------------------------------------------------------------------
# State persistence
# ---------------------------------------------------------------------------
def _load_state() -> dict[str, Any]:
    """Read persisted direct-mode state; return {} if missing or corrupt."""
    try:
        return json.loads(STATE_FILE.read_text())
    except (FileNotFoundError, json.JSONDecodeError, OSError):
        # Missing or unreadable state is equivalent to "no saved session".
        return {}
def _save_state(state: dict[str, Any]) -> None:
    """Persist direct-mode state as JSON, replacing any previous contents."""
    payload = json.dumps(state)
    STATE_FILE.write_text(payload)
def _clear_state() -> None:
    """Remove the persisted state file; a missing file is not an error."""
    try:
        STATE_FILE.unlink()
    except FileNotFoundError:
        pass
# ---------------------------------------------------------------------------
# Selector map cache (persisted in state file under "selector_map" key)
# ---------------------------------------------------------------------------
def _save_selector_cache(selector_map: dict[int, Any]) -> None:
    """Cache element positions from the selector map into the state file.

    Stores absolute_position (document coordinates) so click-by-index can
    convert to viewport coords at click time using current scroll offset.
    """
    entries: dict[str, dict[str, Any]] = {}
    for index, node in selector_map.items():
        position = getattr(node, 'absolute_position', None)
        if position is None:
            # Elements without geometry cannot be clicked by index — skip.
            continue
        # Prefer the accessibility name; fall back to the raw node value.
        label = ''
        ax = getattr(node, 'ax_node', None)
        if ax and ax.name:
            label = ax.name
        elif getattr(node, 'node_value', None):
            label = node.node_value
        tag_name = (getattr(node, 'node_name', '') or '').lower()
        entries[str(index)] = {
            'x': position.x,
            'y': position.y,
            'w': position.width,
            'h': position.height,
            'tag': tag_name,
            'text': label[:80],
        }
    state = _load_state()
    state['selector_map'] = entries
    _save_state(state)
def _load_selector_cache() -> dict[int, dict[str, Any]]:
    """Load cached element positions. Returns {index: {x, y, w, h, tag, text}}."""
    # JSON keys are strings; convert back to the integer element indices.
    cached = _load_state().get('selector_map', {})
    return {int(index): entry for index, entry in cached.items()}
# ---------------------------------------------------------------------------
# Tier 1: Lightweight CDP connection (~200ms)
# ---------------------------------------------------------------------------
@dataclass
class LightCDP:
    """Minimal CDP connection — no BrowserSession, no watchdogs.

    Yielded by _lightweight_cdp(); holds just enough handles to issue CDP
    commands against one attached page target.
    """
    client: 'CDPClient'  # raw CDP websocket client connected to the browser
    session_id: str  # flat-mode session id returned by Target.attachToTarget
    target_id: str  # id of the page target the session is attached to
@asynccontextmanager
async def _lightweight_cdp():
    """Connect to the browser via raw CDP. ~200ms total.

    Yields a LightCDP attached to a page target with the Page and Runtime
    domains enabled. The websocket client is always stopped on exit; the
    browser process itself is left running.

    Raises RuntimeError if no saved state or browser is dead.
    """
    from cdp_use import CDPClient
    state = _load_state()
    cdp_url = state.get('cdp_url')
    if not cdp_url:
        raise RuntimeError('No active browser session')
    client = CDPClient(cdp_url)
    try:
        await client.start()
    except Exception as e:
        raise RuntimeError(f'Cannot connect to browser at {cdp_url}: {e}') from e
    target_id = state.get('target_id')
    # If no saved target, discover one — first http(s) page target wins
    if not target_id:
        targets = await client.send.Target.getTargets()
        for t in targets.get('targetInfos', []):
            if t.get('type') == 'page' and t.get('url', '').startswith(('http://', 'https://')):
                target_id = t['targetId']
                break
    if not target_id:
        await client.stop()
        raise RuntimeError('No page target found in browser')
    # Attach to the target (flatten=True gives flat session-id addressing)
    attach_result = await client.send.Target.attachToTarget(params={'targetId': target_id, 'flatten': True})
    session_id = attach_result.get('sessionId')
    if not session_id:
        await client.stop()
        raise RuntimeError(f'Failed to attach to target {target_id}')
    # Enable required domains
    await client.send.Page.enable(session_id=session_id)
    await client.send.Runtime.enable(session_id=session_id)
    try:
        yield LightCDP(client=client, session_id=session_id, target_id=target_id)
    finally:
        # Always drop the websocket, even if the body raised.
        try:
            await client.stop()
        except Exception:
            pass
# ---------------------------------------------------------------------------
# Tier 2: Full BrowserSession (for state + first-time open)
# ---------------------------------------------------------------------------
async def _activate_content_target(session: 'BrowserSession', saved_target_id: str | None) -> None:
"""After reconnection, ensure the session focuses on the actual page, not about:blank."""
current_url = await session.get_current_page_url()
if current_url and current_url.startswith(('http://', 'https://')):
return
if saved_target_id and session.session_manager:
target = session.session_manager.get_target(saved_target_id)
if target and target.url and target.url.startswith(('http://', 'https://')):
try:
await session.get_or_create_cdp_session(saved_target_id, focus=True)
return
except (ValueError, Exception):
pass
if session._cdp_client_root:
targets_result = await session._cdp_client_root.send.Target.getTargets()
for t in targets_result.get('targetInfos', []):
if t.get('type') == 'page' and t.get('url', '').startswith(('http://', 'https://')):
try:
await session.get_or_create_cdp_session(t['targetId'], focus=True)
return
except (ValueError, Exception):
pass
@asynccontextmanager
async def browser(use_remote: bool = False):
    """Connect to existing browser or launch a new one. Disconnects CDP on exit.

    Reattaches via the CDP URL persisted in the state file when possible;
    on failure the stale state is cleared and a fresh browser is launched
    (cloud when use_remote=True) and its CDP URL saved. On exit, the focused
    target id is persisted and the CDP socket dropped — the browser process
    itself keeps running so later commands can reconnect.
    """
    from browser_use.browser.session import BrowserSession
    state = _load_state()
    cdp_url = state.get('cdp_url')
    session = None
    if cdp_url:
        # Try to reattach to the browser left behind by a previous command.
        session = BrowserSession(cdp_url=cdp_url)
        try:
            await session.start()
            await _activate_content_target(session, state.get('target_id'))
        except Exception:
            # Browser is gone — clear stale state and fall through to launch.
            _clear_state()
            session = None
    if session is None:
        if use_remote:
            session = BrowserSession(use_cloud=True)
        else:
            session = BrowserSession(headless=False)
        await session.start()
        assert session.cdp_url is not None
        _save_state({'cdp_url': session.cdp_url, 'remote': use_remote})
    try:
        yield session
    finally:
        # Persist the focused target so the next command lands on the same tab.
        if session.agent_focus_target_id:
            current_state = _load_state()
            current_state['target_id'] = session.agent_focus_target_id
            _save_state(current_state)
        # Drop the CDP socket without killing the browser process.
        if session._cdp_client_root:
            try:
                await session._cdp_client_root.stop()
            except Exception:
                pass
        await session.event_bus.stop(clear=True, timeout=2)
# ---------------------------------------------------------------------------
# Lightweight CDP command functions (Tier 1)
# ---------------------------------------------------------------------------
async def _cdp_navigate(cdp: LightCDP, url: str) -> None:
    """Navigate to URL and invalidate selector cache."""
    await cdp.client.send.Page.navigate(params={'url': url}, session_id=cdp.session_id)
    # Navigation destroys the DOM, so cached element positions are stale.
    current = _load_state()
    if 'selector_map' in current:
        del current['selector_map']
    _save_state(current)
async def _cdp_screenshot(cdp: LightCDP, path: str | None) -> None:
    """Take screenshot, save to file or print base64+dimensions."""
    capture = await cdp.client.send.Page.captureScreenshot(params={'format': 'png'}, session_id=cdp.session_id)
    raw = base64.b64decode(capture['data'])
    if not path:
        # No destination: emit JSON carrying the base64 payload plus viewport size.
        metrics = await cdp.client.send.Page.getLayoutMetrics(session_id=cdp.session_id)
        visual = metrics.get('visualViewport', {})
        payload: dict[str, Any] = {
            'screenshot': capture['data'],
            'size_bytes': len(raw),
        }
        if visual:
            payload['viewport'] = {
                'width': int(visual.get('clientWidth', 0)),
                'height': int(visual.get('clientHeight', 0)),
            }
        print(json.dumps(payload))
        return
    destination = Path(path)
    destination.write_bytes(raw)  # noqa: ASYNC240
    print(f'Screenshot saved to {destination} ({len(raw)} bytes)')
async def _cdp_click_coordinate(cdp: LightCDP, x: int, y: int) -> None:
    """Click at viewport coordinates using CDP Input.dispatchMouseEvent."""
    sid = cdp.session_id
    point = {'x': x, 'y': y}
    button = {'button': 'left', 'clickCount': 1}
    # Move, press, release — short pauses let the page react like a real click.
    await cdp.client.send.Input.dispatchMouseEvent(
        params={'type': 'mouseMoved', **point},
        session_id=sid,
    )
    await asyncio.sleep(0.05)
    await cdp.client.send.Input.dispatchMouseEvent(
        params={'type': 'mousePressed', **point, **button},
        session_id=sid,
    )
    await asyncio.sleep(0.05)
    await cdp.client.send.Input.dispatchMouseEvent(
        params={'type': 'mouseReleased', **point, **button},
        session_id=sid,
    )
async def _get_scroll_offset(cdp: LightCDP) -> tuple[float, float]:
    """Get current scroll position via JS."""
    response = await cdp.client.send.Runtime.evaluate(
        params={
            'expression': 'JSON.stringify({x:window.scrollX,y:window.scrollY})',
            'returnByValue': True,
        },
        session_id=cdp.session_id,
    )
    # Default to the origin when evaluation returned nothing usable.
    raw = response.get('result', {}).get('value', '{"x":0,"y":0}')
    offsets = json.loads(raw)
    return (offsets['x'], offsets['y'])
async def _cdp_click_index(cdp: LightCDP, index: int) -> None:
    """Click element by cached index. Converts document coords to viewport coords."""
    entry = _load_selector_cache().get(index)
    if entry is None:
        print(f'Error: Element index {index} not in cache. Run "state" first.', file=sys.stderr)
        sys.exit(1)
    scroll_x, scroll_y = await _get_scroll_offset(cdp)
    # The cache stores document coordinates; subtract the scroll offset to
    # land on the element's center in viewport space.
    target_x = int(entry['x'] + entry['w'] / 2 - scroll_x)
    target_y = int(entry['y'] + entry['h'] / 2 - scroll_y)
    await _cdp_click_coordinate(cdp, target_x, target_y)
    tag = entry.get('tag', '')
    text = entry.get('text', '')
    label = tag + (f' "{text}"' if text else '')
    print(f'Clicked element [{index}] {label} at ({target_x}, {target_y})')
async def _cdp_type(cdp: LightCDP, text: str) -> None:
    """Type text into focused element.

    Uses Input.insertText, which inserts the whole string at once rather
    than simulating per-character key events.
    """
    await cdp.client.send.Input.insertText(params={'text': text}, session_id=cdp.session_id)
async def _cdp_input(cdp: LightCDP, index: int, text: str) -> None:
    """Click element by index then type text."""
    await _cdp_click_index(cdp, index)
    # Brief pause so the click's focus change settles before typing.
    await asyncio.sleep(0.1)
    await _cdp_type(cdp, text)
    print(f'Typed "{text}" into element [{index}]')
async def _cdp_scroll(cdp: LightCDP, direction: str) -> None:
    """Scroll page up or down by 500px."""
    # Any direction other than 'up' scrolls down, matching the CLI default.
    delta = -500 if direction == 'up' else 500
    await cdp.client.send.Runtime.evaluate(
        params={'expression': f'window.scrollBy(0, {delta})', 'returnByValue': True},
        session_id=cdp.session_id,
    )
async def _cdp_back(cdp: LightCDP) -> None:
    """Go back in browser history."""
    history = await cdp.client.send.Page.getNavigationHistory(session_id=cdp.session_id)
    position = history.get('currentIndex', 0)
    if position <= 0:
        print('Already at the beginning of history', file=sys.stderr)
        return
    previous = history.get('entries', [])[position - 1]
    await cdp.client.send.Page.navigateToHistoryEntry(params={'entryId': previous['id']}, session_id=cdp.session_id)
    # Navigation invalidates the cached element positions.
    current = _load_state()
    current.pop('selector_map', None)
    _save_state(current)
async def _cdp_keys(cdp: LightCDP, keys_str: str) -> None:
    """Send keyboard keys/shortcuts via CDP.

    Accepts a single key ('Enter'), a modifier combo ('ctrl+a'), or plain
    text, which is inserted character by character. Aliases such as 'cmd'
    or 'esc' are normalized to their CDP key names first.
    """
    from browser_use.actor.utils import get_key_info
    # Key alias normalization (same as default_action_watchdog)
    key_aliases = {
        'ctrl': 'Control',
        'control': 'Control',
        'alt': 'Alt',
        'option': 'Alt',
        'meta': 'Meta',
        'cmd': 'Meta',
        'command': 'Meta',
        'shift': 'Shift',
        'enter': 'Enter',
        'return': 'Enter',
        'tab': 'Tab',
        'delete': 'Delete',
        'backspace': 'Backspace',
        'escape': 'Escape',
        'esc': 'Escape',
        'space': ' ',
        'up': 'ArrowUp',
        'down': 'ArrowDown',
        'left': 'ArrowLeft',
        'right': 'ArrowRight',
        'pageup': 'PageUp',
        'pagedown': 'PageDown',
        'home': 'Home',
        'end': 'End',
    }
    sid = cdp.session_id
    async def dispatch_key(event_type: str, key: str, modifiers: int = 0) -> None:
        # Build a minimal key event; code/virtual-key info comes from get_key_info.
        from cdp_use.cdp.input.commands import DispatchKeyEventParameters
        code, vk_code = get_key_info(key)
        params: DispatchKeyEventParameters = {'type': event_type, 'key': key, 'code': code}
        if modifiers:
            params['modifiers'] = modifiers
        if vk_code is not None:
            params['windowsVirtualKeyCode'] = vk_code
        await cdp.client.send.Input.dispatchKeyEvent(params=params, session_id=sid)
    # Normalize
    if '+' in keys_str:
        # Combo: hold each modifier, press the main key with the combined
        # bitmask, then release modifiers in reverse order.
        parts = [key_aliases.get(p.strip().lower(), p.strip()) for p in keys_str.split('+')]
        modifiers_list = parts[:-1]
        main_key = parts[-1]
        # CDP modifier bitmask: Alt=1, Control=2, Meta=4, Shift=8
        modifier_map = {'Alt': 1, 'Control': 2, 'Meta': 4, 'Shift': 8}
        modifier_value = 0
        for mod in modifiers_list:
            modifier_value |= modifier_map.get(mod, 0)
        for mod in modifiers_list:
            await dispatch_key('keyDown', mod)
        await dispatch_key('keyDown', main_key, modifier_value)
        await dispatch_key('keyUp', main_key, modifier_value)
        for mod in reversed(modifiers_list):
            await dispatch_key('keyUp', mod)
    else:
        normalized = key_aliases.get(keys_str.strip().lower(), keys_str)
        special_keys = {
            'Enter',
            'Tab',
            'Delete',
            'Backspace',
            'Escape',
            'ArrowUp',
            'ArrowDown',
            'ArrowLeft',
            'ArrowRight',
            'PageUp',
            'PageDown',
            'Home',
            'End',
            'Control',
            'Alt',
            'Meta',
            'Shift',
            'F1',
            'F2',
            'F3',
            'F4',
            'F5',
            'F6',
            'F7',
            'F8',
            'F9',
            'F10',
            'F11',
            'F12',
        }
        if normalized in special_keys:
            await dispatch_key('keyDown', normalized)
            if normalized == 'Enter':
                # Also emit the 'char' carriage return event for Enter.
                await cdp.client.send.Input.dispatchKeyEvent(
                    params={'type': 'char', 'text': '\r', 'key': 'Enter'},
                    session_id=sid,
                )
            await dispatch_key('keyUp', normalized)
        else:
            # Plain text — use insertText for each character
            for char in normalized:
                await cdp.client.send.Input.insertText(
                    params={'text': char},
                    session_id=sid,
                )
async def _cdp_html(cdp: LightCDP, selector: str | None) -> None:
    """Get raw HTML of the page or a CSS selector."""
    if selector:
        # json.dumps safely quotes the selector for embedding in JS.
        escaped = json.dumps(selector)
        expression = f'(function(){{ const el = document.querySelector({escaped}); return el ? el.outerHTML : null; }})()'
    else:
        expression = 'document.documentElement.outerHTML'
    evaluated = await cdp.client.send.Runtime.evaluate(params={'expression': expression, 'returnByValue': True}, session_id=cdp.session_id)
    markup = evaluated.get('result', {}).get('value')
    if not markup:
        # Distinguish a missing element from a general evaluation failure.
        message = f'No element found for selector: {selector}' if selector else 'Error: Could not get HTML'
        print(message, file=sys.stderr)
        sys.exit(1)
    print(markup)
async def _cdp_eval(cdp: LightCDP, js: str) -> None:
    """Execute JavaScript and print result."""
    evaluated = await cdp.client.send.Runtime.evaluate(params={'expression': js, 'returnByValue': True}, session_id=cdp.session_id)
    value = evaluated.get('result', {}).get('value')
    if value is None:
        # Mirror JS semantics: a missing value prints as 'undefined'.
        print('undefined')
    else:
        print(json.dumps(value))
# ---------------------------------------------------------------------------
# Command routing
# ---------------------------------------------------------------------------
# Commands that always use lightweight CDP (Tier 1)
# NOTE: 'open', 'state', and 'close' are intentionally absent — they are
# routed separately in main() and may need a full BrowserSession (Tier 2).
_LIGHTWEIGHT_COMMANDS = frozenset(
    {
        'screenshot',
        'click',
        'type',
        'input',
        'scroll',
        'back',
        'keys',
        'html',
        'eval',
    }
)
async def main() -> int:
    """CLI entry point: parse argv, dispatch one command, return an exit code.

    Routing tiers:
      * 'close' — lightweight CDP kill, with a full-session fallback.
      * 'open'  — lightweight CDP navigate when reconnecting to an existing
        browser; full session launch on first use.
      * 'state' — always a full session (needs DOMWatchdog for DOM building).
      * anything in _LIGHTWEIGHT_COMMANDS — lightweight CDP only.
    """
    args = sys.argv[1:]
    # Help text. Exit 0 on an explicit help request, 1 when run with no args.
    if not args or args[0] in ('help', '--help', '-h'):
        print("""Usage: python -m browser_use.skill_cli.direct <command> [args]

Commands:
  open <url>            Navigate to URL
  state                 Get DOM state with viewport info
  click <index>         Click element by index (uses cached positions)
  click <x> <y>         Click at viewport coordinates
  type <text>           Type into focused element
  input <index> <text>  Click element then type
  screenshot [path]     Take screenshot (saves to file or prints base64+dimensions)
  scroll [up|down]      Scroll page (default: down)
  back                  Go back in history
  keys <keys>           Send keyboard keys
  html [selector]       Get raw HTML (full page or CSS selector)
  eval <js>             Execute JavaScript
  close                 Kill browser and clean up

Flags:
  --remote              Use browser-use cloud browser (requires BROWSER_USE_API_KEY)""")
        return 0 if args else 1

    # Extract --remote flag
    use_remote = '--remote' in args
    args = [a for a in args if a != '--remote']
    if not args:
        print('Error: No command specified', file=sys.stderr)
        return 1
    command = args[0]

    # ── close: lightweight CDP kill ──────────────────────────────────────
    if command == 'close':
        state = _load_state()
        cdp_url = state.get('cdp_url')
        if not cdp_url:
            print('No active browser session')
        else:
            closed = False
            # First attempt: raw CDP Browser.close (cheapest path).
            try:
                from cdp_use import CDPClient

                client = CDPClient(cdp_url)
                await client.start()
                await client.send.Browser.close()
                await client.stop()
                closed = True
            except Exception:
                pass
            # Fallback: full BrowserSession kill if the raw CDP close failed.
            if not closed:
                try:
                    from browser_use.browser.session import BrowserSession

                    session = BrowserSession(cdp_url=cdp_url)
                    await session.start()
                    await session.kill()
                except Exception:
                    pass
        # State is cleared even on best-effort failure so the next command
        # starts fresh.
        _clear_state()
        print('Browser closed')
        return 0

    # ── open: lightweight if reconnecting, full session if first launch ──
    if command == 'open' and len(args) >= 2:
        url = args[1]
        # Default to https:// when no scheme was given.
        if not url.startswith(('http://', 'https://', 'file://')):
            url = 'https://' + url
        state = _load_state()
        if state.get('cdp_url'):
            # Reconnect — lightweight CDP navigate
            try:
                async with _lightweight_cdp() as cdp:
                    await _cdp_navigate(cdp, url)
                    # Update target_id in state
                    current_state = _load_state()
                    current_state['target_id'] = cdp.target_id
                    _save_state(current_state)
                    print(f'Navigated to: {url}')
                    return 0
            except RuntimeError:
                # Browser died — fall through to full session launch
                _clear_state()
        # First launch — needs full session
        async with browser(use_remote=use_remote) as session:
            from browser_use.browser.events import NavigateToUrlEvent

            await session.event_bus.dispatch(NavigateToUrlEvent(url=url))
            if session.agent_focus_target_id:
                current_state = _load_state()
                current_state['target_id'] = session.agent_focus_target_id
                _save_state(current_state)
            print(f'Navigated to: {url}')
            return 0

    # ── state: full session (needs DOMWatchdog for DOM tree building) ────
    if command == 'state':
        async with browser(use_remote=use_remote) as session:
            state_summary = await session.get_browser_state_summary()
            assert state_summary.dom_state is not None
            text = state_summary.dom_state.llm_representation()
            if state_summary.page_info:
                pi = state_summary.page_info
                header = f'viewport: {pi.viewport_width}x{pi.viewport_height}\n'
                header += f'page: {pi.page_width}x{pi.page_height}\n'
                header += f'scroll: ({pi.scroll_x}, {pi.scroll_y})\n'
                text = header + text
            print(text)
            # Cache selector map for subsequent click-by-index
            selector_map = await session.get_selector_map()
            if selector_map:
                _save_selector_cache(selector_map)
            return 0

    # ── Lightweight commands (Tier 1) ────────────────────────────────────
    if command in _LIGHTWEIGHT_COMMANDS:
        try:
            async with _lightweight_cdp() as cdp:
                if command == 'screenshot':
                    path = args[1] if len(args) >= 2 else None
                    await _cdp_screenshot(cdp, path)
                elif command == 'click' and len(args) >= 2:
                    # One int arg = element index; two int args = x/y coords.
                    int_args = [int(a) for a in args[1:]]
                    if len(int_args) == 2:
                        x, y = int_args
                        await _cdp_click_coordinate(cdp, x, y)
                        print(f'Clicked at ({x}, {y})')
                    elif len(int_args) == 1:
                        await _cdp_click_index(cdp, int_args[0])
                    else:
                        print('Usage: click <index> or click <x> <y>', file=sys.stderr)
                        return 1
                elif command == 'type' and len(args) >= 2:
                    text = ' '.join(args[1:])
                    await _cdp_type(cdp, text)
                    print(f'Typed: {text}')
                elif command == 'input' and len(args) >= 3:
                    index = int(args[1])
                    text = ' '.join(args[2:])
                    await _cdp_input(cdp, index, text)
                elif command == 'scroll':
                    direction = args[1] if len(args) >= 2 else 'down'
                    await _cdp_scroll(cdp, direction)
                    print(f'Scrolled {direction}')
                elif command == 'back':
                    await _cdp_back(cdp)
                    print('Navigated back')
                elif command == 'keys' and len(args) >= 2:
                    await _cdp_keys(cdp, ' '.join(args[1:]))
                    print(f'Sent keys: {" ".join(args[1:])}')
                elif command == 'html':
                    selector = args[1] if len(args) >= 2 else None
                    await _cdp_html(cdp, selector)
                elif command == 'eval' and len(args) >= 2:
                    js = ' '.join(args[1:])
                    await _cdp_eval(cdp, js)
                else:
                    # Known command but too few arguments.
                    print(f'Missing arguments for: {command}', file=sys.stderr)
                    return 1
        except RuntimeError as e:
            print(f'Error: {e}', file=sys.stderr)
            return 1
        return 0

    print(f'Unknown command: {command}', file=sys.stderr)
    return 1
# Script entry point: run the async CLI driver and propagate its exit code.
if __name__ == '__main__':
    sys.exit(asyncio.run(main()))
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/skill_cli/direct.py",
"license": "MIT License",
"lines": 594,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/skill_cli/install_config.py | """Install configuration - tracks which browser modes are available.
This module manages the installation configuration that determines which browser
modes (chromium, real, remote) are available based on how browser-use was installed.
Config file: ~/.browser-use/install-config.json
When no config file exists (e.g., pip install users), all modes are available by default.
"""
import json
from pathlib import Path
from typing import Literal

# Location of the persisted install configuration (absent for pip installs).
CONFIG_PATH = Path.home() / '.browser-use' / 'install-config.json'

# The closed set of valid browser mode names.
ModeType = Literal['chromium', 'real', 'remote']

# Local modes (both require Chromium to be installed)
LOCAL_MODES: set[str] = {'chromium', 'real'}
def _default_config() -> dict:
    """Return a fresh default config enabling every mode.

    Used both when no config file exists (pip install users) and when the
    existing file is unreadable or corrupt.
    """
    return {
        'installed_modes': ['chromium', 'real', 'remote'],
        'default_mode': 'chromium',
    }


def get_config() -> dict:
    """Read install config. Returns default if not found.

    Default config enables all modes (for pip install users).

    Returns:
        The parsed config dict, or the default when the file is missing,
        corrupt, or unreadable.
    """
    if not CONFIG_PATH.exists():
        return _default_config()
    try:
        return json.loads(CONFIG_PATH.read_text())
    except (json.JSONDecodeError, OSError):
        # Config file corrupt or unreadable — fall back to the default
        # rather than crashing the CLI.
        return _default_config()
def save_config(installed_modes: list[str], default_mode: str) -> None:
    """Persist the install config to CONFIG_PATH, creating parent dirs as needed."""
    payload = {
        'installed_modes': installed_modes,
        'default_mode': default_mode,
    }
    CONFIG_PATH.parent.mkdir(parents=True, exist_ok=True)
    CONFIG_PATH.write_text(json.dumps(payload, indent=2))
def is_mode_available(mode: str) -> bool:
    """Check if a browser mode is available based on installation config.

    Args:
        mode: The browser mode to check ('chromium', 'real', or 'remote')

    Returns:
        True if the mode is available, False otherwise
    """
    installed = set(get_config().get('installed_modes', []))
    # 'chromium' and 'real' share one local install: having either one
    # installed makes both local modes available.
    if mode in LOCAL_MODES:
        return not LOCAL_MODES.isdisjoint(installed)
    return mode in installed
def get_default_mode() -> str:
    """Return the configured default browser mode ('chromium' when unset)."""
    config = get_config()
    return config.get('default_mode', 'chromium')
def get_available_modes() -> list[str]:
    """Return the list of installed browser modes (all modes when unset)."""
    config = get_config()
    return config.get('installed_modes', ['chromium', 'real', 'remote'])
def get_mode_unavailable_error(mode: str) -> str:
    """Generate a helpful error message when a mode is not available.

    Args:
        mode: The unavailable mode that was requested

    Returns:
        A formatted error message with instructions for reinstalling
    """
    available = get_available_modes()
    # Both branches of the old if/else assigned the identical flag, so only
    # the human-readable description actually varies by mode.
    install_flag = '--full'
    mode_desc = 'Local browser mode' if mode in LOCAL_MODES else 'Remote browser mode'
    return (
        f"Error: {mode_desc} '{mode}' not installed.\n"
        f'Available modes: {", ".join(available)}\n\n'
        f'To install all modes, reinstall with:\n'
        f' curl -fsSL https://browser-use.com/cli/install.sh | bash -s -- {install_flag}'
    )
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/skill_cli/install_config.py",
"license": "MIT License",
"lines": 82,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:browser_use/skill_cli/main.py | #!/usr/bin/env python3
"""Fast CLI for browser-use. STDLIB ONLY - must start in <50ms.
This is the main entry point for the browser-use CLI. It uses only stdlib
imports to ensure fast startup, delegating heavy operations to the session
server which loads once and stays running.
"""
import argparse
import asyncio
import hashlib
import json
import os
import socket
import subprocess
import sys
import tempfile
import time
from pathlib import Path
# =============================================================================
# Early command interception (before heavy imports)
# These commands don't need the session server infrastructure
# =============================================================================
# Handle --mcp flag early to prevent logging initialization
if '--mcp' in sys.argv:
    import logging

    # Silence all logging before the MCP server imports anything noisy:
    # stdout/stderr must stay clean for the JSON-RPC transport.
    os.environ['BROWSER_USE_LOGGING_LEVEL'] = 'critical'
    os.environ['BROWSER_USE_SETUP_LOGGING'] = 'false'
    logging.disable(logging.CRITICAL)

    import asyncio

    from browser_use.mcp.server import main as mcp_main

    asyncio.run(mcp_main())
    sys.exit(0)
# Helper to find the subcommand (first non-flag argument)
def _get_subcommand() -> str | None:
"""Get the first non-flag argument (the subcommand)."""
for arg in sys.argv[1:]:
if not arg.startswith('-'):
return arg
return None
# Handle 'install' command - installs Chromium browser + system dependencies
if _get_subcommand() == 'install':
    import platform

    print('📦 Installing Chromium browser + system dependencies...')
    print('⏳ This may take a few minutes...\n')

    # Build command - only use --with-deps on Linux (it fails on Windows/macOS)
    cmd = ['uvx', 'playwright', 'install', 'chromium']
    if platform.system() == 'Linux':
        cmd.append('--with-deps')
    cmd.append('--no-shell')

    result = subprocess.run(cmd)
    if result.returncode == 0:
        print('\n✅ Installation complete!')
        print('🚀 Ready to use! Run: uvx browser-use')
    else:
        # Non-zero playwright exit: report failure and propagate exit code 1.
        print('\n❌ Installation failed')
        sys.exit(1)
    sys.exit(0)
# Handle 'init' command - generate template files
# Uses _get_subcommand() to check if 'init' is the actual subcommand,
# not just anywhere in argv (prevents hijacking: browser-use run "init something")
if _get_subcommand() == 'init':
    from browser_use.init_cmd import main as init_main

    # Check if --template or -t flag is present without a value
    # If so, just remove it and let init_main handle interactive mode
    if '--template' in sys.argv or '-t' in sys.argv:
        try:
            template_idx = sys.argv.index('--template') if '--template' in sys.argv else sys.argv.index('-t')
            template = sys.argv[template_idx + 1] if template_idx + 1 < len(sys.argv) else None
            # If template is not provided or is another flag, remove the flag and use interactive mode
            if not template or template.startswith('-'):
                if '--template' in sys.argv:
                    sys.argv.remove('--template')
                else:
                    sys.argv.remove('-t')
        except (ValueError, IndexError):
            # Flag vanished between checks or index math failed — leave argv as-is.
            pass

    # Remove 'init' from sys.argv so click doesn't see it as an unexpected argument
    sys.argv.remove('init')
    init_main()
    sys.exit(0)
# Handle --template flag directly (without 'init' subcommand)
# Delegate to init_main() which handles full template logic (directories, manifests, etc.)
if '--template' in sys.argv:
    from browser_use.init_cmd import main as init_main

    # Build clean argv for init_main: keep only init-relevant flags
    new_argv = [sys.argv[0]]  # program name
    i = 1
    while i < len(sys.argv):
        arg = sys.argv[i]
        # Keep --template/-t and its value
        if arg in ('--template', '-t'):
            new_argv.append(arg)
            # Consume the following token as the flag's value unless it is
            # itself another flag.
            if i + 1 < len(sys.argv) and not sys.argv[i + 1].startswith('-'):
                new_argv.append(sys.argv[i + 1])
                i += 1
        # Keep --output/-o and its value
        elif arg in ('--output', '-o'):
            new_argv.append(arg)
            if i + 1 < len(sys.argv) and not sys.argv[i + 1].startswith('-'):
                new_argv.append(sys.argv[i + 1])
                i += 1
        # Keep --force/-f and --list/-l flags
        elif arg in ('--force', '-f', '--list', '-l'):
            new_argv.append(arg)
        # Skip other flags (--session, --browser, --headed, etc.)
        i += 1
    sys.argv = new_argv
    init_main()
    sys.exit(0)
# =============================================================================
# Utility functions (inlined to avoid imports)
# =============================================================================
def get_socket_path(session: str) -> str:
    """Get socket path for session."""
    if sys.platform != 'win32':
        return str(Path(tempfile.gettempdir()) / f'browser-use-{session}.sock')
    # Windows has no Unix sockets: derive a stable localhost TCP port from the
    # session name. Use 127.0.0.1 explicitly (not localhost) to avoid IPv6
    # binding issues.
    digest = hashlib.md5(session.encode()).hexdigest()
    port = 49152 + (int(digest[:4], 16) % 16383)
    return f'tcp://127.0.0.1:{port}'
def get_pid_path(session: str) -> Path:
    """Get PID file path for session."""
    pid_filename = f'browser-use-{session}.pid'
    return Path(tempfile.gettempdir()) / pid_filename
def _pid_exists(pid: int) -> bool:
"""Check if a process with given PID exists.
On Windows, uses ctypes to call OpenProcess (os.kill doesn't work reliably).
On Unix, uses os.kill(pid, 0) which is the standard approach.
"""
if sys.platform == 'win32':
import ctypes
PROCESS_QUERY_LIMITED_INFORMATION = 0x1000
handle = ctypes.windll.kernel32.OpenProcess(PROCESS_QUERY_LIMITED_INFORMATION, False, pid)
if handle:
ctypes.windll.kernel32.CloseHandle(handle)
return True
return False
else:
try:
os.kill(pid, 0)
return True
except OSError:
return False
def is_server_running(session: str) -> bool:
    """Check if server is running for session."""
    pid_path = get_pid_path(session)
    try:
        # Missing file, unreadable file, or non-numeric contents all mean "not running".
        pid = int(pid_path.read_text().strip())
    except (OSError, ValueError):
        return False
    return _pid_exists(pid)
def connect_to_server(session: str, timeout: float = 60.0) -> socket.socket:
    """Connect to session server."""
    sock_path = get_socket_path(session)
    if not sock_path.startswith('tcp://'):
        # Unix socket
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.settimeout(timeout)
        sock.connect(sock_path)
        return sock
    # Windows: TCP connection
    _, hostport = sock_path.split('://', 1)
    host, port = hostport.split(':')
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(timeout)
    sock.connect((host, int(port)))
    return sock
def get_session_metadata_path(session: str) -> Path:
    """Get path to session metadata file (stores browser_mode, headed, profile)."""
    meta_filename = f'browser-use-{session}.meta'
    return Path(tempfile.gettempdir()) / meta_filename
def ensure_server(session: str, browser: str, headed: bool, profile: str | None, api_key: str | None) -> bool:
    """Start server if not running. Returns True if started.

    A healthy server holds both its PID file and the session lock and answers
    on its socket; when one exists with a compatible browser mode, nothing is
    started and False is returned. Exits the process (sys.exit) on a
    local/remote mode conflict or on startup failure.
    """
    from browser_use.skill_cli.utils import is_session_locked, kill_orphaned_server

    meta_path = get_session_metadata_path(session)

    # Check if server is already running AND holding its lock (healthy server)
    if is_server_running(session) and is_session_locked(session):
        try:
            sock = connect_to_server(session, timeout=0.5)  # Increased from 0.1s
            sock.close()
            # Check browser mode matches existing session
            if meta_path.exists():
                try:
                    meta = json.loads(meta_path.read_text())
                    existing_mode = meta.get('browser_mode', 'chromium')
                    if existing_mode != browser:
                        # Only error if user explicitly requested 'remote' but session is local
                        # This prevents losing cloud features (live_url, etc.)
                        # The reverse case (requesting local but having remote) is fine -
                        # user still gets a working browser, just with more features
                        if browser == 'remote' and existing_mode != 'remote':
                            print(
                                f"Error: Session '{session}' is running with --browser {existing_mode}, "
                                f'but --browser remote was requested.\n\n'
                                f'Cloud browser features (live_url) require a remote session.\n\n'
                                f'Options:\n'
                                f' 1. Close and restart: browser-use close && browser-use --browser remote open <url>\n'
                                f' 2. Use different session: browser-use --browser remote --session other <command>\n'
                                f' 3. Use existing local browser: browser-use --browser {existing_mode} <command>',
                                file=sys.stderr,
                            )
                            sys.exit(1)
                except (json.JSONDecodeError, OSError):
                    pass  # Metadata file corrupt, ignore
            return False  # Already running with correct mode
        except Exception:
            pass  # Server not responsive, continue to restart logic

    # Kill any orphaned server (has PID file but no lock)
    kill_orphaned_server(session)

    # Build server command
    cmd = [
        sys.executable,
        '-m',
        'browser_use.skill_cli.server',
        '--session',
        session,
        '--browser',
        browser,
    ]
    if headed:
        cmd.append('--headed')
    if profile:
        cmd.extend(['--profile', profile])

    # Set up environment
    env = os.environ.copy()
    if api_key:
        env['BROWSER_USE_API_KEY'] = api_key

    # Start server as background process
    if sys.platform == 'win32':
        # Windows: CREATE_NO_WINDOW prevents console window from appearing
        # CREATE_NEW_PROCESS_GROUP allows the process to survive parent exit
        subprocess.Popen(
            cmd,
            env=env,
            creationflags=subprocess.CREATE_NEW_PROCESS_GROUP | subprocess.CREATE_NO_WINDOW,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
    else:
        # Unix: use start_new_session
        subprocess.Popen(
            cmd,
            env=env,
            start_new_session=True,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )

    # Wait for server to be ready (must have PID, lock, and responsive socket)
    for _ in range(100):  # 5 seconds max
        if is_server_running(session) and is_session_locked(session):
            try:
                sock = connect_to_server(session, timeout=0.5)
                sock.close()
                # Write metadata file to track session config
                meta_path.write_text(
                    json.dumps(
                        {
                            'browser_mode': browser,
                            'headed': headed,
                            'profile': profile,
                        }
                    )
                )
                return True
            except Exception:
                pass
        time.sleep(0.05)

    print('Error: Failed to start session server', file=sys.stderr)
    sys.exit(1)
def send_command(session: str, action: str, params: dict) -> dict:
    """Send one newline-delimited JSON request to the session server and return its reply."""
    request_id = f'r{int(time.time() * 1000000) % 1000000}'
    request = {
        'id': request_id,
        'action': action,
        'session': session,
        'params': params,
    }
    sock = connect_to_server(session)
    try:
        sock.sendall((json.dumps(request) + '\n').encode())
        # Accumulate chunks until the newline-terminated response arrives
        # (or the peer closes the connection).
        data = b''
        while not data.endswith(b'\n'):
            chunk = sock.recv(4096)
            if not chunk:
                break
            data += chunk
        if not data:
            return {'id': request_id, 'success': False, 'error': 'No response from server'}
        return json.loads(data.decode())
    finally:
        sock.close()
# =============================================================================
# CLI Commands
# =============================================================================
def build_parser() -> argparse.ArgumentParser:
"""Build argument parser with all commands."""
# Import install config to get available modes and default
from browser_use.skill_cli.install_config import get_available_modes, get_default_mode
available_modes = get_available_modes()
default_mode = get_default_mode()
# Build epilog dynamically based on available modes
epilog_parts = []
if 'chromium' in available_modes or 'real' in available_modes:
epilog_parts.append("""Local Mode (default):
browser-use run "Fill the form" # Uses local browser + your API keys
browser-use run "task" --llm gpt-4o # Specify model (requires API key)
browser-use open https://example.com""")
if 'remote' in available_modes:
if 'chromium' in available_modes:
# Full install - show how to switch to remote
epilog_parts.append("""
Remote Mode (--browser remote):
browser-use -b remote run "task" # Cloud execution (US proxy default)
browser-use -b remote run "task" --llm gpt-4o # Specify cloud model
browser-use -b remote --profile <id> run "task" # Use cloud profile
browser-use -b remote run "task" --proxy-country gb # UK proxy
browser-use -b remote run "task" --session-id <id> # Reuse session
browser-use -b remote run "task" --wait # Wait for completion
Task Management:
browser-use task list # List recent cloud tasks
browser-use task status <task-id> # Check task status
browser-use task stop <task-id> # Stop running task""")
else:
# Remote-only install
epilog_parts.append("""
Examples:
browser-use run "task" # Cloud execution (US proxy default)
browser-use run "task" --llm gpt-4o # Specify model
browser-use --profile <id> run "task" # Use cloud profile
browser-use run "task" --proxy-country gb # UK proxy
browser-use run "task" --session-id <id> # Reuse existing session
browser-use run "task" --wait # Wait for completion
Task Management:
browser-use task list # List recent cloud tasks
browser-use task status <task-id> # Check task status
browser-use task stop <task-id> # Stop running task""")
epilog_parts.append("""
Setup:
browser-use install # Install Chromium browser
browser-use init # Generate template file""")
parser = argparse.ArgumentParser(
prog='browser-use',
description='Browser automation CLI for browser-use',
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog='\n'.join(epilog_parts),
)
# Global flags
parser.add_argument('--session', '-s', default='default', help='Session name (default: default)')
parser.add_argument(
'--browser',
'-b',
choices=available_modes,
default=default_mode,
help=f'Browser mode (available: {", ".join(available_modes)})',
)
parser.add_argument('--headed', action='store_true', help='Show browser window')
parser.add_argument('--profile', help='Browser profile (local name or cloud ID)')
parser.add_argument('--json', action='store_true', help='Output as JSON')
parser.add_argument('--api-key', help='Browser-Use API key')
parser.add_argument('--mcp', action='store_true', help='Run as MCP server (JSON-RPC via stdin/stdout)')
parser.add_argument('--template', help='Generate template file (use with --output for custom path)')
subparsers = parser.add_subparsers(dest='command', help='Command to execute')
# -------------------------------------------------------------------------
# Setup Commands (handled early, before argparse)
# -------------------------------------------------------------------------
# install
subparsers.add_parser('install', help='Install Chromium browser + system dependencies')
# init
p = subparsers.add_parser('init', help='Generate browser-use template file')
p.add_argument('--template', '-t', help='Template name (interactive if not specified)')
p.add_argument('--output', '-o', help='Output file path')
p.add_argument('--force', '-f', action='store_true', help='Overwrite existing files')
p.add_argument('--list', '-l', action='store_true', help='List available templates')
# setup
p = subparsers.add_parser('setup', help='Configure browser-use for first-time use')
p.add_argument('--mode', choices=['local', 'remote', 'full'], default='local', help='Setup mode (local/remote/full)')
p.add_argument('--api-key', help='Browser-Use API key')
p.add_argument('--yes', '-y', action='store_true', help='Skip interactive prompts')
# doctor
subparsers.add_parser('doctor', help='Check browser-use installation and dependencies')
# -------------------------------------------------------------------------
# Browser Control Commands
# -------------------------------------------------------------------------
# open <url>
p = subparsers.add_parser('open', help='Navigate to URL')
p.add_argument('url', help='URL to navigate to')
# click <index> OR click <x> <y>
p = subparsers.add_parser('click', help='Click element by index or coordinates (x y)')
p.add_argument('args', nargs='+', type=int, help='Element index OR x y coordinates')
# type <text>
p = subparsers.add_parser('type', help='Type text')
p.add_argument('text', help='Text to type')
# input <index> <text>
p = subparsers.add_parser('input', help='Type text into specific element')
p.add_argument('index', type=int, help='Element index')
p.add_argument('text', help='Text to type')
# scroll [up|down]
p = subparsers.add_parser('scroll', help='Scroll page')
p.add_argument('direction', nargs='?', default='down', choices=['up', 'down'], help='Scroll direction')
p.add_argument('--amount', type=int, default=500, help='Scroll amount in pixels')
# back
subparsers.add_parser('back', help='Go back in history')
# screenshot [path]
p = subparsers.add_parser('screenshot', help='Take screenshot')
p.add_argument('path', nargs='?', help='Save path (outputs base64 if not provided)')
p.add_argument('--full', action='store_true', help='Full page screenshot')
# state
subparsers.add_parser('state', help='Get browser state (URL, title, elements)')
# switch <tab>
p = subparsers.add_parser('switch', help='Switch to tab')
p.add_argument('tab', type=int, help='Tab index')
# close-tab [tab]
p = subparsers.add_parser('close-tab', help='Close tab')
p.add_argument('tab', type=int, nargs='?', help='Tab index (current if not specified)')
# keys <keys>
p = subparsers.add_parser('keys', help='Send keyboard keys')
p.add_argument('keys', help='Keys to send (e.g., "Enter", "Control+a")')
# select <index> <value>
p = subparsers.add_parser('select', help='Select dropdown option')
p.add_argument('index', type=int, help='Element index')
p.add_argument('value', help='Value to select')
# eval <js>
p = subparsers.add_parser('eval', help='Execute JavaScript')
p.add_argument('js', help='JavaScript code to execute')
# extract <query>
p = subparsers.add_parser('extract', help='Extract data using LLM')
p.add_argument('query', help='What to extract')
# hover <index>
p = subparsers.add_parser('hover', help='Hover over element')
p.add_argument('index', type=int, help='Element index')
# dblclick <index>
p = subparsers.add_parser('dblclick', help='Double-click element')
p.add_argument('index', type=int, help='Element index')
# rightclick <index>
p = subparsers.add_parser('rightclick', help='Right-click element')
p.add_argument('index', type=int, help='Element index')
# -------------------------------------------------------------------------
# Cookies Commands
# -------------------------------------------------------------------------
cookies_p = subparsers.add_parser('cookies', help='Cookie operations')
cookies_sub = cookies_p.add_subparsers(dest='cookies_command')
# cookies get [--url URL]
p = cookies_sub.add_parser('get', help='Get all cookies')
p.add_argument('--url', help='Filter by URL')
# cookies set <name> <value>
p = cookies_sub.add_parser('set', help='Set a cookie')
p.add_argument('name', help='Cookie name')
p.add_argument('value', help='Cookie value')
p.add_argument('--domain', help='Cookie domain')
p.add_argument('--path', default='/', help='Cookie path')
p.add_argument('--secure', action='store_true', help='Secure cookie')
p.add_argument('--http-only', action='store_true', help='HTTP-only cookie')
p.add_argument('--same-site', choices=['Strict', 'Lax', 'None'], help='SameSite attribute')
p.add_argument('--expires', type=float, help='Expiration timestamp')
# cookies clear [--url URL]
p = cookies_sub.add_parser('clear', help='Clear cookies')
p.add_argument('--url', help='Clear only for URL')
# cookies export <file>
p = cookies_sub.add_parser('export', help='Export cookies to JSON file')
p.add_argument('file', help='Output file path')
p.add_argument('--url', help='Filter by URL')
# cookies import <file>
p = cookies_sub.add_parser('import', help='Import cookies from JSON file')
p.add_argument('file', help='Input file path')
# -------------------------------------------------------------------------
# Wait Commands
# -------------------------------------------------------------------------
wait_p = subparsers.add_parser('wait', help='Wait for conditions')
wait_sub = wait_p.add_subparsers(dest='wait_command')
# wait selector <css>
p = wait_sub.add_parser('selector', help='Wait for CSS selector')
p.add_argument('selector', help='CSS selector')
p.add_argument('--timeout', type=int, default=30000, help='Timeout in ms')
p.add_argument('--state', choices=['attached', 'detached', 'visible', 'hidden'], default='visible', help='Element state')
# wait text <text>
p = wait_sub.add_parser('text', help='Wait for text')
p.add_argument('text', help='Text to wait for')
p.add_argument('--timeout', type=int, default=30000, help='Timeout in ms')
# -------------------------------------------------------------------------
# Get Commands (info retrieval)
# -------------------------------------------------------------------------
get_p = subparsers.add_parser('get', help='Get information')
get_sub = get_p.add_subparsers(dest='get_command')
# get title
get_sub.add_parser('title', help='Get page title')
# get html [--selector SELECTOR]
p = get_sub.add_parser('html', help='Get page HTML')
p.add_argument('--selector', help='CSS selector to scope HTML')
# get text <index>
p = get_sub.add_parser('text', help='Get element text')
p.add_argument('index', type=int, help='Element index')
# get value <index>
p = get_sub.add_parser('value', help='Get input element value')
p.add_argument('index', type=int, help='Element index')
# get attributes <index>
p = get_sub.add_parser('attributes', help='Get element attributes')
p.add_argument('index', type=int, help='Element index')
# get bbox <index>
p = get_sub.add_parser('bbox', help='Get element bounding box')
p.add_argument('index', type=int, help='Element index')
# -------------------------------------------------------------------------
# Python Execution
# -------------------------------------------------------------------------
p = subparsers.add_parser('python', help='Execute Python code')
p.add_argument('code', nargs='?', help='Python code to execute')
p.add_argument('--file', '-f', help='Execute Python file')
p.add_argument('--reset', action='store_true', help='Reset Python namespace')
p.add_argument('--vars', action='store_true', help='Show defined variables')
# -------------------------------------------------------------------------
# Agent Tasks
# -------------------------------------------------------------------------
from browser_use.skill_cli.install_config import is_mode_available
remote_available = is_mode_available('remote')
local_available = is_mode_available('chromium')
p = subparsers.add_parser('run', help='Run agent task (requires API key)')
p.add_argument('task', help='Task description')
p.add_argument('--max-steps', type=int, help='Maximum steps')
# Model selection (works both locally and remotely)
p.add_argument('--llm', help='LLM model (gpt-4o, claude-sonnet-4-20250514, gemini-2.0-flash)')
# Cloud-only flags - only show if remote mode is available
if remote_available:
# Add [remote] hint only if both modes are available (--full install)
remote_hint = '[remote] ' if local_available else ''
p.add_argument('--session-id', help=f'{remote_hint}Reuse existing cloud session ID')
p.add_argument('--proxy-country', help=f'{remote_hint}Proxy country code')
p.add_argument('--stream', action='store_true', help=f'{remote_hint}Stream output in real-time')
p.add_argument('--wait', action='store_true', help=f'{remote_hint}Wait for task to complete (default: async)')
p.add_argument('--flash', action='store_true', help=f'{remote_hint}Enable flash mode')
p.add_argument('--keep-alive', action='store_true', help=f'{remote_hint}Keep session alive after task')
p.add_argument('--thinking', action='store_true', help=f'{remote_hint}Enable extended reasoning')
p.add_argument('--vision', action='store_true', default=None, help=f'{remote_hint}Enable vision')
p.add_argument('--no-vision', action='store_true', help=f'{remote_hint}Disable vision')
# New SDK features
p.add_argument('--start-url', help=f'{remote_hint}URL to start the task from')
p.add_argument('--metadata', action='append', metavar='KEY=VALUE', help=f'{remote_hint}Task metadata (can repeat)')
p.add_argument('--secret', action='append', metavar='KEY=VALUE', help=f'{remote_hint}Task secrets (can repeat)')
p.add_argument(
'--allowed-domain',
action='append',
metavar='DOMAIN',
help=f'{remote_hint}Restrict navigation to domains (can repeat)',
)
p.add_argument('--skill-id', action='append', metavar='ID', help=f'{remote_hint}Enable skill IDs (can repeat)')
p.add_argument('--structured-output', metavar='SCHEMA', help=f'{remote_hint}JSON schema for structured output')
p.add_argument('--judge', action='store_true', help=f'{remote_hint}Enable judge mode')
p.add_argument('--judge-ground-truth', metavar='TEXT', help=f'{remote_hint}Expected answer for judge evaluation')
# -------------------------------------------------------------------------
# Task Management (Cloud) - only available if remote mode is installed
# -------------------------------------------------------------------------
if remote_available:
task_p = subparsers.add_parser('task', help='Manage cloud tasks')
task_sub = task_p.add_subparsers(dest='task_command')
# task list
p = task_sub.add_parser('list', help='List recent tasks')
p.add_argument('--limit', type=int, default=10, help='Maximum number of tasks to list')
p.add_argument('--status', choices=['running', 'finished', 'stopped', 'failed'], help='Filter by status')
p.add_argument('--session', help='Filter by session ID')
p.add_argument('--json', action='store_true', help='Output as JSON')
# task status <task_id>
p = task_sub.add_parser('status', help='Get task status')
p.add_argument('task_id', help='Task ID')
p.add_argument('--compact', '-c', action='store_true', help='Show all steps with reasoning')
p.add_argument('--verbose', '-v', action='store_true', help='Show all steps with full details (URLs, actions)')
p.add_argument('--last', '-n', type=int, metavar='N', help='Show only the last N steps')
p.add_argument('--reverse', '-r', action='store_true', help='Show steps newest first (100, 99, 98...)')
p.add_argument('--step', '-s', type=int, metavar='N', help='Show specific step number')
p.add_argument('--json', action='store_true', help='Output as JSON')
# task stop <task_id>
p = task_sub.add_parser('stop', help='Stop running task')
p.add_argument('task_id', help='Task ID')
p.add_argument('--json', action='store_true', help='Output as JSON')
# task logs <task_id>
p = task_sub.add_parser('logs', help='Get task logs')
p.add_argument('task_id', help='Task ID')
p.add_argument('--json', action='store_true', help='Output as JSON')
# -------------------------------------------------------------------------
# Cloud Session Management - only available if remote mode is installed
# -------------------------------------------------------------------------
if remote_available:
session_p = subparsers.add_parser('session', help='Manage cloud sessions')
session_sub = session_p.add_subparsers(dest='session_command')
# session list
p = session_sub.add_parser('list', help='List cloud sessions')
p.add_argument('--limit', type=int, default=10, help='Maximum number of sessions to list')
p.add_argument('--status', choices=['active', 'stopped'], help='Filter by status')
p.add_argument('--json', action='store_true', help='Output as JSON')
# session get <session_id>
p = session_sub.add_parser('get', help='Get session details')
p.add_argument('session_id', help='Session ID')
p.add_argument('--json', action='store_true', help='Output as JSON')
# session stop <session_id> or session stop --all
p = session_sub.add_parser('stop', help='Stop cloud session(s)')
p.add_argument('session_id', nargs='?', help='Session ID (or use --all)')
p.add_argument('--all', action='store_true', help='Stop all active sessions')
p.add_argument('--json', action='store_true', help='Output as JSON')
# session create - Create session without task
p = session_sub.add_parser('create', help='Create a new cloud session')
p.add_argument('--profile', help='Cloud profile ID')
p.add_argument('--proxy-country', help='Proxy country code')
p.add_argument('--start-url', help='Initial URL to navigate to')
p.add_argument('--screen-size', metavar='WxH', help='Screen size (e.g., 1920x1080)')
p.add_argument('--keep-alive', action='store_true', default=None, help='Keep session alive')
p.add_argument('--no-keep-alive', dest='keep_alive', action='store_false', help='Do not keep session alive')
p.add_argument('--persist-memory', action='store_true', default=None, help='Persist memory between tasks')
p.add_argument('--no-persist-memory', dest='persist_memory', action='store_false', help='Do not persist memory')
p.add_argument('--json', action='store_true', help='Output as JSON')
# session share <session_id> - Create or delete public share
p = session_sub.add_parser('share', help='Manage public share URL')
p.add_argument('session_id', help='Session ID')
p.add_argument('--delete', action='store_true', help='Delete the public share')
p.add_argument('--json', action='store_true', help='Output as JSON')
# -------------------------------------------------------------------------
# Tunnel Commands
# -------------------------------------------------------------------------
tunnel_p = subparsers.add_parser('tunnel', help='Expose localhost via Cloudflare tunnel')
tunnel_p.add_argument(
'port_or_subcommand',
nargs='?',
default=None,
help='Port number to tunnel, or subcommand (list, stop)',
)
tunnel_p.add_argument('port_arg', nargs='?', type=int, help='Port number (for stop subcommand)')
tunnel_p.add_argument('--all', action='store_true', help='Stop all tunnels (use with: tunnel stop --all)')
# -------------------------------------------------------------------------
# Session Management
# -------------------------------------------------------------------------
# sessions
subparsers.add_parser('sessions', help='List active sessions')
# close
p = subparsers.add_parser('close', help='Close session')
p.add_argument('--all', action='store_true', help='Close all sessions')
# -------------------------------------------------------------------------
# Server Control
# -------------------------------------------------------------------------
server_p = subparsers.add_parser('server', help='Server control')
server_sub = server_p.add_subparsers(dest='server_command')
server_sub.add_parser('status', help='Check server status')
server_sub.add_parser('stop', help='Stop server')
server_sub.add_parser('logs', help='View server logs')
# -------------------------------------------------------------------------
# Profile Management (mode-aware: use -b real or -b remote)
# -------------------------------------------------------------------------
profile_p = subparsers.add_parser('profile', help='Manage browser profiles (use -b real or -b remote)')
profile_sub = profile_p.add_subparsers(dest='profile_command')
# profile list - lists local or cloud profiles based on -b flag
p = profile_sub.add_parser('list', help='List profiles (local with -b real, cloud with -b remote)')
p.add_argument('--page', type=int, default=1, help='Page number (cloud only)')
p.add_argument('--page-size', type=int, default=20, help='Items per page (cloud only)')
# profile get <id>
p = profile_sub.add_parser('get', help='Get profile details')
p.add_argument('id', help='Profile ID or name')
# profile create (cloud only)
p = profile_sub.add_parser('create', help='Create profile (cloud only)')
p.add_argument('--name', help='Profile name')
# profile update <id> (cloud only)
p = profile_sub.add_parser('update', help='Update profile (cloud only)')
p.add_argument('id', help='Profile ID')
p.add_argument('--name', required=True, help='New profile name')
# profile delete <id> (cloud only)
p = profile_sub.add_parser('delete', help='Delete profile (cloud only)')
p.add_argument('id', help='Profile ID')
# profile cookies <id> - list cookies by domain (local only)
p = profile_sub.add_parser('cookies', help='List cookies by domain (local only, requires -b real)')
p.add_argument('id', help='Profile ID or name (e.g. "Default", "Profile 1")')
# profile sync - sync local profile to cloud
p = profile_sub.add_parser('sync', help='Sync local Chrome profile to cloud')
p.add_argument('--from', dest='from_profile', help='Local profile name (e.g. "Default", "Profile 1")')
p.add_argument('--name', help='Cloud profile name (default: auto-generated)')
p.add_argument('--domain', help='Only sync cookies for this domain (e.g. "youtube.com")')
return parser
def handle_server_command(args: argparse.Namespace) -> int:
    """Dispatch the `server` subcommands (status / stop / logs).

    Returns a process exit code: 0 on success, 1 when the server is not
    running (status) or when the stop request failed.
    """
    subcommand = args.server_command

    if subcommand == 'status':
        running = is_server_running(args.session)
        state = 'running' if running else 'not running'
        print(f'Server for session "{args.session}" is {state}')
        return 0 if running else 1

    if subcommand == 'stop':
        # Stopping a server that is not running is treated as success.
        if not is_server_running(args.session):
            print(f'Server for session "{args.session}" is not running')
            return 0
        reply = send_command(args.session, 'shutdown', {})
        if not reply.get('success'):
            print(f'Error: {reply.get("error")}', file=sys.stderr)
            return 1
        print(f'Server for session "{args.session}" stopped')
        return 0

    if subcommand == 'logs':
        log_file = Path(tempfile.gettempdir()) / f'browser-use-{args.session}.log'
        print(log_file.read_text() if log_file.exists() else 'No logs found')
        return 0

    return 0
def _parse_key_value_list(items: list[str] | None) -> dict[str, str | None] | None:
"""Parse a list of 'key=value' strings into a dict."""
if not items:
return None
result: dict[str, str | None] = {}
for item in items:
if '=' in item:
key, value = item.split('=', 1)
result[key] = value
return result if result else None
def _handle_remote_run_with_wait(args: argparse.Namespace) -> int:
    """Handle remote run with --wait directly (prints task info immediately, then waits)."""
    import asyncio

    from browser_use.skill_cli.commands import cloud_session, cloud_task

    if not args.task:
        print('Error: No task provided', file=sys.stderr)
        return 1

    try:
        # Resolve the tri-state vision flag: --vision wins, then --no-vision,
        # otherwise leave it unset so the server-side default applies.
        use_vision: bool | None = None
        if getattr(args, 'vision', False):
            use_vision = True
        elif getattr(args, 'no_vision', False):
            use_vision = False

        # Repeatable KEY=VALUE flags become dicts (or None when absent).
        task_metadata = _parse_key_value_list(getattr(args, 'metadata', None))
        task_secrets = _parse_key_value_list(getattr(args, 'secret', None))

        sid = getattr(args, 'session_id', None)
        profile = getattr(args, 'profile', None)
        country = getattr(args, 'proxy_country', None)

        # A profile or proxy requires a dedicated session unless one was given.
        if not sid and (profile or country):
            created_session = cloud_session.create_session(
                profile_id=profile,
                proxy_country=country,
                keep_alive=getattr(args, 'keep_alive', None),
            )
            sid = created_session.id

        # Kick off the task with every cloud-only option forwarded.
        created_task = cloud_task.create_task(
            task=args.task,
            llm=args.llm,
            session_id=sid,
            max_steps=args.max_steps,
            flash_mode=getattr(args, 'flash', None),
            thinking=getattr(args, 'thinking', None),
            vision=use_vision,
            start_url=getattr(args, 'start_url', None),
            metadata=task_metadata,
            secrets=task_secrets,
            allowed_domains=getattr(args, 'allowed_domain', None),
            skill_ids=getattr(args, 'skill_id', None),
            structured_output=getattr(args, 'structured_output', None),
            judge=getattr(args, 'judge', None),
            judge_ground_truth=getattr(args, 'judge_ground_truth', None),
        )

        # Surface identifiers right away so the user can follow up even if
        # they interrupt the wait below.
        print(f'mode: {args.browser}')
        print(f'task_id: {created_task.id}')
        print(f'session_id: {created_task.session_id}')
        print('waiting...', end='', flush=True)

        # Block until the remote task finishes; Ctrl-C leaves it running.
        try:
            final = asyncio.run(cloud_task.poll_until_complete(created_task.id))
        except KeyboardInterrupt:
            print(f'\nInterrupted. Task {created_task.id} continues remotely.')
            return 0

        print(' done.')
        print(f'status: {final.status}')
        print(f'output: {final.output}')
        if final.cost:
            print(f'cost: {final.cost}')
        return 0
    except Exception as e:
        print(f'Error: {e}', file=sys.stderr)
        return 1
def main() -> int:
    """Main entry point.

    Parses CLI arguments, handles the commands that do not need a browser
    server directly (server/profile/sessions/setup/doctor/task/session/tunnel),
    and forwards everything else to the per-session background server.

    Returns:
        Process exit code (0 on success, 1 on failure).
    """
    parser = build_parser()
    args = parser.parse_args()
    if not args.command:
        parser.print_help()
        return 0
    # Handle server subcommands without starting server
    if args.command == 'server':
        return handle_server_command(args)
    # Handle profile subcommands without starting server
    if args.command == 'profile':
        from browser_use.skill_cli.commands.profile import handle_profile_command

        return handle_profile_command(args)
    # Handle sessions list - find all running sessions
    if args.command == 'sessions':
        from browser_use.skill_cli.utils import find_all_sessions

        session_names = find_all_sessions()
        sessions = [{'name': name, 'status': 'running'} for name in session_names]
        if args.json:
            print(json.dumps(sessions))
        else:
            if sessions:
                for s in sessions:
                    print(f' {s["name"]}: {s["status"]}')
            else:
                print('No active sessions')
        return 0
    # Handle close --all by closing all running sessions
    if args.command == 'close' and getattr(args, 'all', False):
        from browser_use.skill_cli.utils import find_all_sessions

        session_names = find_all_sessions()
        closed = []
        for name in session_names:
            try:
                response = send_command(name, 'close', {})
                if response.get('success'):
                    closed.append(name)
                    # Clean up metadata file
                    meta_path = get_session_metadata_path(name)
                    if meta_path.exists():
                        meta_path.unlink()
            except Exception:
                pass  # Server may already be stopping
        if args.json:
            print(json.dumps({'closed': closed, 'count': len(closed)}))
        else:
            if closed:
                print(f'Closed {len(closed)} session(s): {", ".join(closed)}')
            else:
                print('No active sessions')
        return 0
    # Handle setup command
    if args.command == 'setup':
        from browser_use.skill_cli.commands import setup

        # asyncio.run() replaces the deprecated
        # get_event_loop()/run_until_complete pattern (deprecated since 3.10
        # when called with no running loop).
        result = asyncio.run(
            setup.handle(
                'setup',
                {
                    'mode': args.mode,
                    'api_key': args.api_key,
                    'yes': args.yes,
                    'json': args.json,
                },
            )
        )
        if args.json:
            print(json.dumps(result))
        elif 'error' in result:
            print(f'Error: {result["error"]}', file=sys.stderr)
            return 1
        else:
            if result.get('status') == 'success':
                print('\n✓ Setup complete!')
                print(f'\nMode: {result["mode"]}')
                print('Next: browser-use open https://example.com')
        return 0
    # Handle doctor command
    if args.command == 'doctor':
        from browser_use.skill_cli.commands import doctor

        result = asyncio.run(doctor.handle())
        if args.json:
            print(json.dumps(result))
        else:
            # Print check results
            checks = result.get('checks', {})
            print('\nDiagnostics:\n')
            for name, check in checks.items():
                status = check.get('status', 'unknown')
                message = check.get('message', '')
                note = check.get('note', '')
                fix = check.get('fix', '')
                if status == 'ok':
                    icon = '✓'
                elif status == 'warning':
                    icon = '⚠'
                elif status == 'missing':
                    icon = '○'
                else:
                    icon = '✗'
                print(f' {icon} {name}: {message}')
                if note:
                    print(f' {note}')
                if fix:
                    print(f' Fix: {fix}')
                print('')
            if result.get('status') == 'healthy':
                print('✓ All checks passed!')
            else:
                print(f'⚠ {result.get("summary", "Some checks need attention")}')
        return 0
    # Handle task command - cloud task management
    if args.command == 'task':
        from browser_use.skill_cli.commands.cloud_task import handle_task_command

        return handle_task_command(args)
    # Handle session command - cloud session management
    if args.command == 'session':
        from browser_use.skill_cli.commands.cloud_session import handle_session_command

        return handle_session_command(args)
    # Handle tunnel command - runs independently of browser session
    if args.command == 'tunnel':
        from browser_use.skill_cli import tunnel

        pos = getattr(args, 'port_or_subcommand', None)
        if pos == 'list':
            result = tunnel.list_tunnels()
        elif pos == 'stop':
            port_arg = getattr(args, 'port_arg', None)
            if getattr(args, 'all', False):
                # stop --all
                result = asyncio.run(tunnel.stop_all_tunnels())
            elif port_arg is not None:
                result = asyncio.run(tunnel.stop_tunnel(port_arg))
            else:
                print('Usage: browser-use tunnel stop <port> | --all', file=sys.stderr)
                return 1
        elif pos is not None:
            # Bare positional argument is the port to tunnel.
            try:
                port = int(pos)
            except ValueError:
                print(f'Unknown tunnel subcommand: {pos}', file=sys.stderr)
                return 1
            result = asyncio.run(tunnel.start_tunnel(port))
        else:
            # NOTE(review): prints usage but exits 0 (unlike `tunnel stop`
            # above, which exits 1) — preserved for backward compatibility.
            print('Usage: browser-use tunnel <port> | list | stop <port>', file=sys.stderr)
            return 0
        # Output result
        if args.json:
            print(json.dumps(result))
        else:
            if 'error' in result:
                print(f'Error: {result["error"]}', file=sys.stderr)
                return 1
            elif 'url' in result:
                existing = ' (existing)' if result.get('existing') else ''
                print(f'url: {result["url"]}{existing}')
            elif 'tunnels' in result:
                if result['tunnels']:
                    for t in result['tunnels']:
                        print(f' port {t["port"]}: {t["url"]}')
                else:
                    print('No active tunnels')
            elif 'stopped' in result:
                if isinstance(result['stopped'], list):
                    if result['stopped']:
                        print(f'Stopped {len(result["stopped"])} tunnel(s): {", ".join(map(str, result["stopped"]))}')
                    else:
                        print('No tunnels to stop')
                else:
                    print(f'Stopped tunnel on port {result["stopped"]}')
        return 0
    # Validate requested mode is available based on installation config
    from browser_use.skill_cli.install_config import get_mode_unavailable_error, is_mode_available

    if not is_mode_available(args.browser):
        print(get_mode_unavailable_error(args.browser), file=sys.stderr)
        return 1
    # Set API key in environment if provided
    if args.api_key:
        os.environ['BROWSER_USE_API_KEY'] = args.api_key
    # Validate API key for remote browser mode upfront
    if args.browser == 'remote':
        from browser_use.skill_cli.api_key import APIKeyRequired, require_api_key

        try:
            api_key = require_api_key('Remote browser')
            # Ensure it's in environment for the cloud client
            os.environ['BROWSER_USE_API_KEY'] = api_key
        except APIKeyRequired as e:
            print(f'Error: {e}', file=sys.stderr)
            return 1
    # Validate --profile flag usage
    if args.profile and args.browser == 'chromium':
        print(
            'Error: --profile is not supported in chromium mode.\n'
            'Use -b real for local Chrome profiles or -b remote for cloud profiles.',
            file=sys.stderr,
        )
        return 1
    # Handle remote run with --wait directly (prints task_id immediately, then waits)
    if args.browser == 'remote' and args.command == 'run' and getattr(args, 'wait', False):
        return _handle_remote_run_with_wait(args)
    # Ensure server is running
    ensure_server(args.session, args.browser, args.headed, args.profile, args.api_key)
    # Build params from args: forward every user-set flag except the
    # CLI-infrastructure ones.
    params = {}
    skip_keys = {'command', 'session', 'browser', 'headed', 'json', 'api_key', 'server_command'}
    for key, value in vars(args).items():
        if key not in skip_keys and value is not None:
            params[key] = value
    # Add profile to params for commands that need it (agent tasks, etc.)
    # Note: profile is passed to ensure_server for local browser profile,
    # but also needs to be in params for cloud profile ID in remote mode
    if args.profile:
        params['profile'] = args.profile
    # Send command to server
    response = send_command(args.session, args.command, params)
    # Clean up metadata file on successful close
    if args.command == 'close' and response.get('success'):
        meta_path = get_session_metadata_path(args.session)
        if meta_path.exists():
            meta_path.unlink()
    # Output response
    if args.json:
        # Add mode to JSON output for browser-related commands
        if args.command in ('open', 'run', 'state', 'click', 'type', 'input', 'scroll', 'screenshot'):
            response['mode'] = args.browser
        print(json.dumps(response))
    else:
        if response.get('success'):
            data = response.get('data')
            # Show mode for browser-related commands (first line of output)
            if args.command in ('open', 'run'):
                print(f'mode: {args.browser}')
            if data is not None:
                if isinstance(data, dict):
                    # Special case: raw text output (e.g., state command)
                    if '_raw_text' in data:
                        print(data['_raw_text'])
                    else:
                        for key, value in data.items():
                            # Skip internal fields
                            if key.startswith('_'):
                                continue
                            # Don't dump huge screenshot payloads to the terminal.
                            if key == 'screenshot' and len(str(value)) > 100:
                                print(f'{key}: <{len(value)} bytes>')
                            else:
                                print(f'{key}: {value}')
                elif isinstance(data, str):
                    print(data)
                else:
                    print(data)
        else:
            print(f'Error: {response.get("error")}', file=sys.stderr)
            return 1
    return 0
if __name__ == '__main__':
    # Script entry point: propagate main()'s return value as the process exit code.
    sys.exit(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/skill_cli/main.py",
"license": "MIT License",
"lines": 1034,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/skill_cli/protocol.py | """Wire protocol for CLI↔Server communication.
Uses JSON over Unix sockets (or TCP on Windows) with newline-delimited messages.
"""
import json
from dataclasses import asdict, dataclass, field
from typing import Any
@dataclass
class Request:
    """Command request from CLI to server."""

    id: str
    action: str
    session: str
    params: dict[str, Any] = field(default_factory=dict)

    def to_json(self) -> str:
        """Serialize this request as a single-line JSON wire string."""
        return json.dumps(asdict(self))

    @classmethod
    def from_json(cls, data: str) -> 'Request':
        """Deserialize a JSON wire string; a missing `params` key defaults to {}."""
        payload = json.loads(data)
        return cls(
            id=payload['id'],
            action=payload['action'],
            session=payload['session'],
            params=payload.get('params', {}),
        )
@dataclass
class Response:
"""Response from server to CLI."""
id: str
success: bool
data: Any = None
error: str | None = None
def to_json(self) -> str:
return json.dumps(asdict(self))
@classmethod
def from_json(cls, data: str) -> 'Response':
d = json.loads(data)
return cls(
id=d['id'],
success=d['success'],
data=d.get('data'),
error=d.get('error'),
)
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/skill_cli/protocol.py",
"license": "MIT License",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:browser_use/skill_cli/python_session.py | """Jupyter-like persistent Python execution for browser-use CLI."""
import asyncio
import io
import traceback
from contextlib import redirect_stderr, redirect_stdout
from dataclasses import dataclass, field
from pathlib import Path
from typing import TYPE_CHECKING, Any, Literal
if TYPE_CHECKING:
from browser_use.browser.session import BrowserSession
@dataclass
class ExecutionResult:
    """Result of Python code execution."""

    # True when the code ran to completion without raising.
    success: bool
    # Captured stdout (with captured stderr appended, when any).
    output: str = ''
    # Formatted traceback of the failure; None on success.
    error: str | None = None
@dataclass
class PythonSession:
    """Jupyter-like persistent Python execution.

    Maintains a namespace across multiple code executions, allowing variables
    to persist between commands. Provides a `browser` object for browser control.
    """

    # User-visible globals shared by every execution in this session.
    namespace: dict[str, Any] = field(default_factory=dict)
    # Number of execute() calls since creation/reset (Jupyter-style counter).
    execution_count: int = 0
    # (code, result) pairs in execution order, for later inspection.
    history: list[tuple[str, ExecutionResult]] = field(default_factory=list)

    def __post_init__(self) -> None:
        """Initialize namespace with useful imports."""
        self.namespace.update(
            {
                '__name__': '__main__',
                '__doc__': None,
                'json': __import__('json'),
                're': __import__('re'),
                'os': __import__('os'),
                'Path': Path,
                'asyncio': asyncio,
            }
        )

    def execute(
        self, code: str, browser_session: 'BrowserSession', loop: asyncio.AbstractEventLoop | None = None
    ) -> ExecutionResult:
        """Execute code in persistent namespace.

        The `browser` variable is injected into the namespace before each execution,
        providing a convenient wrapper around the BrowserSession.

        NOTE: this deliberately eval()/exec()s caller-supplied code — it is the
        whole point of the feature; only run code the user typed themselves.

        Args:
            code: Python code to execute
            browser_session: The browser session for browser operations
            loop: The event loop for async operations (required for browser access)

        Returns:
            ExecutionResult with captured stdout/stderr and, on failure, the
            formatted traceback.
        """
        # Inject browser wrapper with the event loop for async operations
        if loop is not None:
            self.namespace['browser'] = BrowserWrapper(browser_session, loop)
        self.execution_count += 1
        stdout = io.StringIO()
        stderr = io.StringIO()
        try:
            with redirect_stdout(stdout), redirect_stderr(stderr):
                try:
                    # First try to compile as expression (for REPL-like behavior):
                    # a bare expression's value is echoed back via repr().
                    compiled = compile(code, '<input>', 'eval')
                    value = eval(compiled, self.namespace)
                    if value is not None:
                        print(repr(value))
                except SyntaxError:
                    # Not a single expression - compile and run as statements.
                    compiled = compile(code, '<input>', 'exec')
                    exec(compiled, self.namespace)
            output = stdout.getvalue()
            if stderr.getvalue():
                output += stderr.getvalue()
            result = ExecutionResult(success=True, output=output)
        except Exception:
            # Preserve anything the code printed before raising. Fix: append
            # captured stderr too (previously dropped on the failure path,
            # inconsistent with the success path above).
            output = stdout.getvalue()
            if stderr.getvalue():
                output += stderr.getvalue()
            result = ExecutionResult(success=False, output=output, error=traceback.format_exc())
        self.history.append((code, result))
        return result

    def reset(self) -> None:
        """Clear namespace and history, then restore the default imports."""
        self.namespace.clear()
        self.history.clear()
        self.execution_count = 0
        self.__post_init__()

    def get_variables(self) -> dict[str, str]:
        """Get user-defined variables and their types.

        Filters out underscore-prefixed names, the pre-seeded imports, and the
        injected `browser` helper.
        """
        skip = {'__name__', '__doc__', 'json', 're', 'os', 'Path', 'asyncio', 'browser'}
        return {k: type(v).__name__ for k, v in self.namespace.items() if not k.startswith('_') and k not in skip}
class BrowserWrapper:
    """Convenient browser access for Python code.

    Provides synchronous methods that wrap async BrowserSession operations.
    Runs coroutines on the server's event loop using run_coroutine_threadsafe,
    which makes the wrapper callable from a thread other than the loop's own.
    """

    def __init__(self, session: 'BrowserSession', loop: asyncio.AbstractEventLoop) -> None:
        # Live browser session all operations are dispatched to.
        self._session = session
        # The server's event loop; coroutines are scheduled onto it.
        self._loop = loop

    def _run(self, coro: Any) -> Any:
        """Run coroutine on the server's event loop.

        Blocks the calling thread until the coroutine completes; the 60s cap
        keeps user code from hanging forever on a stuck browser operation.
        """
        future = asyncio.run_coroutine_threadsafe(coro, self._loop)
        return future.result(timeout=60)

    @property
    def url(self) -> str:
        """Get current page URL."""
        return self._run(self._get_url())

    async def _get_url(self) -> str:
        # Screenshot omitted: only URL metadata is needed here.
        state = await self._session.get_browser_state_summary(include_screenshot=False)
        return state.url if state else ''

    @property
    def title(self) -> str:
        """Get current page title."""
        return self._run(self._get_title())

    async def _get_title(self) -> str:
        state = await self._session.get_browser_state_summary(include_screenshot=False)
        return state.title if state else ''

    def goto(self, url: str) -> None:
        """Navigate to URL."""
        self._run(self._goto_async(url))

    async def _goto_async(self, url: str) -> None:
        from browser_use.browser.events import NavigateToUrlEvent

        await self._session.event_bus.dispatch(NavigateToUrlEvent(url=url))

    def click(self, index: int) -> None:
        """Click element by index.

        Raises:
            ValueError: if no element exists at `index`.
        """
        self._run(self._click_async(index))

    async def _click_async(self, index: int) -> None:
        from browser_use.browser.events import ClickElementEvent

        node = await self._session.get_element_by_index(index)
        if node is None:
            raise ValueError(f'Element index {index} not found')
        await self._session.event_bus.dispatch(ClickElementEvent(node=node))

    def type(self, text: str) -> None:
        """Type text into focused element.

        NOTE: intentionally shadows the `type` builtin — it is this wrapper's
        public API name, so it must stay.
        """
        self._run(self._type_async(text))

    async def _type_async(self, text: str) -> None:
        # Inserts text via CDP without changing focus (focus=False).
        cdp_session = await self._session.get_or_create_cdp_session(target_id=None, focus=False)
        if not cdp_session:
            raise RuntimeError('No active browser session')
        await cdp_session.cdp_client.send.Input.insertText(
            params={'text': text},
            session_id=cdp_session.session_id,
        )

    def input(self, index: int, text: str) -> None:
        """Click element and type text."""
        self._run(self._input_async(index, text))

    async def _input_async(self, index: int, text: str) -> None:
        from browser_use.browser.events import ClickElementEvent, TypeTextEvent

        node = await self._session.get_element_by_index(index)
        if node is None:
            raise ValueError(f'Element index {index} not found')
        # Click first to focus the element, then dispatch the text.
        await self._session.event_bus.dispatch(ClickElementEvent(node=node))
        await self._session.event_bus.dispatch(TypeTextEvent(node=node, text=text))

    def scroll(self, direction: Literal['up', 'down', 'left', 'right'] = 'down', amount: int = 500) -> None:
        """Scroll the page.

        Args:
            direction: Scroll direction (default 'down').
            amount: Scroll distance; presumably pixels — TODO confirm against ScrollEvent.
        """
        self._run(self._scroll_async(direction, amount))

    async def _scroll_async(self, direction: Literal['up', 'down', 'left', 'right'], amount: int) -> None:
        from browser_use.browser.events import ScrollEvent

        await self._session.event_bus.dispatch(ScrollEvent(direction=direction, amount=amount))

    def screenshot(self, path: str | None = None) -> bytes:
        """Take screenshot, optionally save to file.

        NOTE(review): assumes take_screenshot() returns raw bytes (write_bytes
        would fail on a str) — confirm against BrowserSession.take_screenshot.
        """
        data = self._run(self._session.take_screenshot())
        if path:
            Path(path).write_bytes(data)
        return data

    @property
    def html(self) -> str:
        """Get page HTML."""
        return self._run(self._get_html())

    async def _get_html(self) -> str:
        cdp_session = await self._session.get_or_create_cdp_session(target_id=None, focus=False)
        if not cdp_session:
            return ''
        # Get the document root
        doc = await cdp_session.cdp_client.send.DOM.getDocument(
            params={},
            session_id=cdp_session.session_id,
        )
        if not doc or 'root' not in doc:
            return ''
        # Get outer HTML of the root node
        result = await cdp_session.cdp_client.send.DOM.getOuterHTML(
            params={'nodeId': doc['root']['nodeId']},
            session_id=cdp_session.session_id,
        )
        return result.get('outerHTML', '') if result else ''

    def keys(self, keys: str) -> None:
        """Send keyboard keys."""
        self._run(self._keys_async(keys))

    async def _keys_async(self, keys: str) -> None:
        from browser_use.browser.events import SendKeysEvent

        await self._session.event_bus.dispatch(SendKeysEvent(keys=keys))

    def back(self) -> None:
        """Go back in history."""
        self._run(self._back_async())

    async def _back_async(self) -> None:
        from browser_use.browser.events import GoBackEvent

        await self._session.event_bus.dispatch(GoBackEvent())

    def wait(self, seconds: float) -> None:
        """Wait for specified seconds.

        Blocks the calling thread (time.sleep), not the server's event loop.
        """
        import time

        time.sleep(seconds)

    def extract(self, query: str) -> Any:
        """Extract data using LLM (requires API key).

        Raises:
            NotImplementedError: always — LLM extraction is not wired up here.
        """
        # This would need LLM integration
        raise NotImplementedError('extract() requires LLM integration - use agent.run() instead')
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/skill_cli/python_session.py",
"license": "MIT License",
"lines": 202,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/skill_cli/server.py | """Session server - keeps BrowserSession instances alive.
This server runs as a background process, managing browser sessions and
handling commands from the CLI. It uses Unix sockets (or TCP on Windows)
for IPC communication.
"""
import argparse
import asyncio
import json
import logging
import os
import signal
import sys
from pathlib import Path
from typing import IO
import portalocker
# Configure logging before imports
# NOTE: this module is a standalone server-process entry point, so configuring
# the root logger here (rather than in a library module) is intentional.
# StreamHandler() with no argument writes to stderr.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s [%(levelname)s] %(name)s: %(message)s',
    handlers=[logging.StreamHandler()],
)
logger = logging.getLogger('browser_use.skill_cli.server')
class SessionServer:
    """Server that manages browser sessions and handles CLI commands.

    Runs as a background process. Accepts newline-delimited JSON requests
    over a Unix socket (TCP on Windows), dispatches them to command
    handlers, and holds an exclusive file lock so only one server exists
    per session name.
    """

    def __init__(
        self,
        session_name: str,
        browser_mode: str,
        headed: bool,
        profile: str | None,
    ) -> None:
        self.session_name = session_name
        self.browser_mode = browser_mode
        self.headed = headed
        self.profile = profile
        self.running = True
        self._server: asyncio.Server | None = None
        self._shutdown_event: asyncio.Event | None = None
        self._lock_file: IO | None = None
        # Strong references to fire-and-forget tasks. The event loop keeps
        # only weak references to tasks, so an otherwise-unreferenced task
        # can be garbage collected before it finishes running.
        self._background_tasks: set[asyncio.Task] = set()
        # Lazy import to avoid loading everything at startup
        from browser_use.skill_cli.sessions import SessionRegistry

        self.registry = SessionRegistry()

    def _spawn_background(self, coro) -> None:
        """Schedule a fire-and-forget coroutine, retaining a strong reference.

        The reference is dropped automatically once the task completes.
        """
        task = asyncio.create_task(coro)
        self._background_tasks.add(task)
        task.add_done_callback(self._background_tasks.discard)

    async def handle_connection(
        self,
        reader: asyncio.StreamReader,
        writer: asyncio.StreamWriter,
    ) -> None:
        """Handle a client connection.

        Reads newline-delimited JSON requests until the peer disconnects,
        the server stops, or the connection idles out.
        """
        addr = writer.get_extra_info('peername')
        logger.debug(f'Connection from {addr}')
        try:
            while self.running:
                try:
                    line = await asyncio.wait_for(reader.readline(), timeout=300)  # 5 min timeout
                except TimeoutError:
                    logger.debug(f'Connection timeout from {addr}')
                    break
                if not line:
                    break
                request = {}
                try:
                    request = json.loads(line.decode())
                    response = await self.dispatch(request)
                except json.JSONDecodeError as e:
                    response = {'id': '', 'success': False, 'error': f'Invalid JSON: {e}'}
                except Exception as e:
                    logger.exception(f'Error handling request: {e}')
                    response = {'id': '', 'success': False, 'error': str(e)}
                writer.write((json.dumps(response) + '\n').encode())
                await writer.drain()
                # Check for shutdown command (the response above is sent first)
                if request.get('action') == 'shutdown':
                    await self.shutdown()
                    break
        except Exception as e:
            logger.exception(f'Connection error: {e}')
        finally:
            writer.close()
            try:
                await writer.wait_closed()
            except Exception:
                pass

    async def dispatch(self, request: dict) -> dict:
        """Dispatch command to appropriate handler.

        Returns a response dict with 'id', 'success', and 'data' or 'error'.
        """
        action = request.get('action', '')
        params = request.get('params', {})
        req_id = request.get('id', '')
        logger.info(f'Dispatch: {action} (id={req_id})')
        try:
            # Import command handlers
            from browser_use.skill_cli.commands import agent, browser, python_exec, session

            # Handle shutdown
            if action == 'shutdown':
                return {'id': req_id, 'success': True, 'data': {'shutdown': True}}
            # Session commands don't need a browser session
            if action in session.COMMANDS:
                result = await session.handle(action, self.session_name, self.registry, params)
                # Check if command wants to shutdown server
                if result.get('_shutdown'):
                    # Retain a reference so the shutdown task isn't GC'd mid-run
                    self._spawn_background(self.shutdown())
                return {'id': req_id, 'success': True, 'data': result}
            # Get or create session for browser commands
            session_info = await self.registry.get_or_create(
                self.session_name,
                self.browser_mode,
                self.headed,
                self.profile,
            )
            # Dispatch to handler
            if action in browser.COMMANDS:
                result = await browser.handle(action, session_info, params)
            elif action == 'python':
                result = await python_exec.handle(session_info, params)
            elif action == 'run':
                result = await agent.handle(session_info, params)
            else:
                return {'id': req_id, 'success': False, 'error': f'Unknown action: {action}'}
            return {'id': req_id, 'success': True, 'data': result}
        except Exception as e:
            logger.exception(f'Error dispatching {action}: {e}')
            return {'id': req_id, 'success': False, 'error': str(e)}

    async def shutdown(self) -> None:
        """Graceful shutdown: close sessions, stop listening, remove state files."""
        logger.info('Shutting down server...')
        self.running = False
        # Signal the shutdown event so run() can exit its wait
        if self._shutdown_event:
            self._shutdown_event.set()
        # Close all sessions
        await self.registry.close_all()
        # Stop the server
        if self._server:
            self._server.close()
            await self._server.wait_closed()
        # Clean up files
        from browser_use.skill_cli.utils import cleanup_session_files

        cleanup_session_files(self.session_name)

    async def run(self) -> None:
        """Run the server until shutdown is requested."""
        from browser_use.skill_cli.utils import get_lock_path, get_pid_path, get_socket_path

        # Acquire exclusive lock BEFORE writing PID - this prevents race conditions
        lock_path = get_lock_path(self.session_name)
        lock_path.parent.mkdir(parents=True, exist_ok=True)
        lock_path.touch(exist_ok=True)
        self._lock_file = open(lock_path, 'r+')  # noqa: ASYNC230 - blocking ok at startup
        try:
            portalocker.lock(self._lock_file, portalocker.LOCK_EX | portalocker.LOCK_NB)
        except portalocker.LockException:
            logger.error(f'Another server is already running for session: {self.session_name}')
            self._lock_file.close()
            self._lock_file = None
            sys.exit(1)
        logger.info(f'Acquired exclusive lock for session: {self.session_name}')
        # NOW safe to write PID file
        pid_path = get_pid_path(self.session_name)
        pid_path.write_text(str(os.getpid()))
        logger.info(f'PID file: {pid_path}')
        # Setup signal handlers
        loop = asyncio.get_running_loop()

        def signal_handler():
            # Retain a reference so the shutdown task isn't GC'd mid-run
            self._spawn_background(self.shutdown())

        for sig in (signal.SIGINT, signal.SIGTERM):
            try:
                loop.add_signal_handler(sig, signal_handler)
            except NotImplementedError:
                # Windows doesn't support add_signal_handler
                pass
        # Also handle SIGHUP on Unix
        if hasattr(signal, 'SIGHUP'):
            try:
                loop.add_signal_handler(signal.SIGHUP, signal_handler)
            except NotImplementedError:
                pass
        # Get socket path
        sock_path = get_socket_path(self.session_name)
        logger.info(f'Socket: {sock_path}')
        # Start server
        if sock_path.startswith('tcp://'):
            # Windows: TCP server
            _, hostport = sock_path.split('://', 1)
            host, port = hostport.split(':')
            self._server = await asyncio.start_server(
                self.handle_connection,
                host,
                int(port),
                reuse_address=True,  # Allow rebinding ports in TIME_WAIT state
            )
            logger.info(f'Listening on TCP {host}:{port}')
        else:
            # Unix: socket server
            # Remove stale socket file
            sock_file = Path(sock_path)
            if sock_file.exists():
                sock_file.unlink()
            self._server = await asyncio.start_unix_server(
                self.handle_connection,
                sock_path,
            )
            logger.info(f'Listening on Unix socket {sock_path}')
        # Run until shutdown
        self._shutdown_event = asyncio.Event()
        try:
            async with self._server:
                await self._shutdown_event.wait()
        except asyncio.CancelledError:
            pass
        finally:
            # Release lock on shutdown
            if self._lock_file:
                try:
                    portalocker.unlock(self._lock_file)
                    self._lock_file.close()
                except Exception:
                    pass
                self._lock_file = None
            logger.info('Server stopped')
def main() -> None:
    """Main entry point for server process: parse CLI flags and run the server."""
    parser = argparse.ArgumentParser(description='Browser-use session server')
    parser.add_argument('--session', required=True, help='Session name')
    parser.add_argument('--browser', default='chromium', choices=['chromium', 'real', 'remote'])
    parser.add_argument('--headed', action='store_true', help='Show browser window')
    parser.add_argument('--profile', help='Chrome profile (real browser mode)')
    opts = parser.parse_args()
    logger.info(f'Starting server for session: {opts.session}')
    logger.info(f'Browser mode: {opts.browser}, headed: {opts.headed}')
    server = SessionServer(
        session_name=opts.session,
        browser_mode=opts.browser,
        headed=opts.headed,
        profile=opts.profile,
    )
    try:
        asyncio.run(server.run())
    except KeyboardInterrupt:
        logger.info('Interrupted')
    except Exception as e:
        logger.exception(f'Server error: {e}')
        sys.exit(1)
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/skill_cli/server.py",
"license": "MIT License",
"lines": 241,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/skill_cli/sessions.py | """Session registry - manages BrowserSession instances."""
import logging
from dataclasses import dataclass, field
from typing import Any
from browser_use.browser.session import BrowserSession
from browser_use.skill_cli.python_session import PythonSession
logger = logging.getLogger(__name__)
@dataclass
class SessionInfo:
    """Information about a browser session."""

    # Session name (key in the registry)
    name: str
    # One of 'chromium', 'real', 'remote' — see create_browser_session()
    browser_mode: str
    # Whether the browser window is visible
    headed: bool
    # Chrome profile directory name (or cloud profile id), None for default
    profile: str | None
    # The live browser session owned by this entry
    browser_session: BrowserSession
    # Per-session Python namespace used by the `python` command
    python_session: PythonSession = field(default_factory=PythonSession)
class SessionRegistry:
    """Registry of active browser sessions.

    Sessions are created on-demand when first accessed. Each named session
    is isolated with its own BrowserSession and Python namespace.
    """

    def __init__(self) -> None:
        self._sessions: dict[str, SessionInfo] = {}

    async def get_or_create(
        self,
        name: str,
        browser_mode: str,
        headed: bool,
        profile: str | None,
    ) -> SessionInfo:
        """Return the session registered under *name*, starting one if needed."""
        existing = self._sessions.get(name)
        if existing is not None:
            return existing
        logger.info(f'Creating new session: {name} (mode={browser_mode}, headed={headed})')
        new_browser = await create_browser_session(browser_mode, headed, profile)
        await new_browser.start()
        info = SessionInfo(
            name=name,
            browser_mode=browser_mode,
            headed=headed,
            profile=profile,
            browser_session=new_browser,
        )
        self._sessions[name] = info
        return info

    def get(self, name: str) -> SessionInfo | None:
        """Look up a session by name without creating one."""
        return self._sessions.get(name)

    def list_sessions(self) -> list[dict[str, Any]]:
        """Describe every active session as a plain dict."""
        return [
            {
                'name': info.name,
                'browser_mode': info.browser_mode,
                'headed': info.headed,
                'profile': info.profile,
            }
            for info in self._sessions.values()
        ]

    async def close_session(self, name: str) -> bool:
        """Close and remove a session; returns False if it was not registered."""
        info = self._sessions.pop(name, None)
        if info is None:
            return False
        logger.info(f'Closing session: {name}')
        # Note: Tunnels are managed independently via tunnel.py
        # They persist across session close/open cycles
        try:
            await info.browser_session.kill()
        except Exception as e:
            logger.warning(f'Error closing session {name}: {e}')
        return True

    async def close_all(self) -> None:
        """Close every registered session."""
        for name in list(self._sessions):
            await self.close_session(name)
async def create_browser_session(
    mode: str,
    headed: bool,
    profile: str | None,
) -> BrowserSession:
    """Create BrowserSession based on mode.

    Modes:
    - chromium: Playwright-managed Chromium (default)
    - real: User's Chrome with profile
    - remote: Browser-Use Cloud (requires API key)

    Raises:
        RuntimeError: If the requested mode is not available based on installation config
    """
    from browser_use.skill_cli.install_config import get_mode_unavailable_error, is_mode_available

    # Validate mode is available based on installation config
    if not is_mode_available(mode):
        raise RuntimeError(get_mode_unavailable_error(mode))

    if mode == 'chromium':
        return BrowserSession(
            headless=not headed,
        )

    if mode == 'real':
        from browser_use.skill_cli.utils import find_chrome_executable, get_chrome_profile_path

        executable = find_chrome_executable()
        if not executable:
            raise RuntimeError('Could not find Chrome executable. Please install Chrome or specify --browser chromium')
        # Always get the Chrome user data directory (not the profile subdirectory)
        data_dir = get_chrome_profile_path(None)
        # Profile directory defaults to 'Default', or use the specified profile name
        return BrowserSession(
            executable_path=executable,
            user_data_dir=data_dir,
            profile_directory=profile or 'Default',
            headless=not headed,  # Headless by default, --headed for visible
        )

    if mode == 'remote':
        from browser_use.skill_cli.api_key import require_api_key

        require_api_key('Remote browser')
        # Profile is used as cloud_profile_id for remote mode
        return BrowserSession(
            use_cloud=True,
            cloud_profile_id=profile,
        )

    raise ValueError(f'Unknown browser mode: {mode}')
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/skill_cli/sessions.py",
"license": "MIT License",
"lines": 122,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/skill_cli/tunnel.py | """Cloudflared tunnel binary management.
This module manages the cloudflared binary for tunnel support.
Cloudflared must be installed via install.sh or manually by the user.
Tunnels are managed independently of browser sessions - they are purely
a network utility for exposing local ports via Cloudflare quick tunnels.
Tunnels survive CLI process exit by:
1. Spawning cloudflared as a daemon (start_new_session=True)
2. Tracking tunnel info via PID files in ~/.browser-use/tunnels/
"""
import asyncio
import json
import logging
import os
import re
import shutil
import signal
from pathlib import Path
from typing import Any
logger = logging.getLogger(__name__)
# Pattern to extract tunnel URL from cloudflared output
# (quick-tunnel URLs are issued under *.trycloudflare.com)
_URL_PATTERN = re.compile(r'(https://\S+\.trycloudflare\.com)')
# Directory for tunnel PID files: each active tunnel gets a `{port}.json`
# info file (and a `{port}.log` file for cloudflared stderr) here.
_TUNNELS_DIR = Path.home() / '.browser-use' / 'tunnels'
class TunnelManager:
    """Manages cloudflared binary location."""

    def __init__(self) -> None:
        self._binary_path: str | None = None

    def get_binary_path(self) -> str:
        """Get cloudflared binary path.

        Returns:
            Absolute path to cloudflared binary

        Raises:
            RuntimeError: If cloudflared is not installed
        """
        # Cached result from previous call
        if self._binary_path:
            return self._binary_path
        # Check system installation
        found = shutil.which('cloudflared')
        if found is not None:
            logger.info('Using cloudflared: %s', found)
            self._binary_path = found
            return found
        # Not found
        raise RuntimeError(
            'cloudflared not installed.\n\n'
            'Install cloudflared:\n'
            ' macOS: brew install cloudflared\n'
            ' Linux: curl -L https://github.com/cloudflare/cloudflared/releases/latest/download/cloudflared-linux-amd64 -o ~/.local/bin/cloudflared && chmod +x ~/.local/bin/cloudflared\n'
            ' Windows: winget install Cloudflare.cloudflared\n\n'
            'Or re-run install.sh which installs cloudflared automatically.\n\n'
            'Then retry: browser-use tunnel <port>'
        )

    def is_available(self) -> bool:
        """Check if cloudflared is available."""
        return bool(self._binary_path) or shutil.which('cloudflared') is not None

    def get_status(self) -> dict[str, Any]:
        """Get tunnel capability status for doctor command."""
        found = shutil.which('cloudflared')
        if found is None:
            return {
                'available': False,
                'source': None,
                'path': None,
                'note': 'cloudflared not installed - run install.sh or install manually',
            }
        return {
            'available': True,
            'source': 'system',
            'path': found,
            'note': 'cloudflared installed',
        }
# Global singleton instance
_tunnel_manager: TunnelManager | None = None


def get_tunnel_manager() -> TunnelManager:
    """Get the global TunnelManager instance (singleton pattern).

    Lazily constructed on first use; subsequent calls return the same
    object so the resolved binary path stays cached process-wide.
    """
    global _tunnel_manager
    if _tunnel_manager is None:
        _tunnel_manager = TunnelManager()
    return _tunnel_manager
# =============================================================================
# PID File Management
# =============================================================================
def _get_tunnel_file(port: int) -> Path:
    """Get the path to a tunnel's info file."""
    # One JSON file per port, e.g. ~/.browser-use/tunnels/8080.json
    return _TUNNELS_DIR / f'{port}.json'
def _save_tunnel_info(port: int, pid: int, url: str) -> None:
    """Save tunnel info to disk.

    Persisting {port, pid, url} lets later CLI invocations discover
    tunnels started by earlier processes.
    """
    _TUNNELS_DIR.mkdir(parents=True, exist_ok=True)
    _get_tunnel_file(port).write_text(json.dumps({'port': port, 'pid': pid, 'url': url}))
def _load_tunnel_info(port: int) -> dict[str, Any] | None:
    """Load tunnel info from disk, returning None if not found or process dead."""
    info_path = _get_tunnel_file(port)
    if not info_path.exists():
        return None
    try:
        data = json.loads(info_path.read_text())
    except (json.JSONDecodeError, OSError):
        # Corrupt or unreadable entry — drop it.
        info_path.unlink(missing_ok=True)
        return None
    pid = data.get('pid')
    if pid and _is_process_alive(pid):
        return data
    # Process dead, clean up stale file
    info_path.unlink(missing_ok=True)
    return None
def _delete_tunnel_info(port: int) -> None:
    """Delete tunnel info file."""
    # missing_ok: deleting an already-removed entry is not an error.
    _get_tunnel_file(port).unlink(missing_ok=True)
def _is_process_alive(pid: int) -> bool:
"""Check if a process is still running."""
try:
os.kill(pid, 0)
return True
except (OSError, ProcessLookupError):
return False
def _kill_process(pid: int) -> bool:
    """Kill a process by PID. Returns True if killed, False if already dead.

    Sends SIGTERM first and polls for up to ~1 second so the process can
    exit gracefully, then escalates to SIGKILL.
    """
    # Hoisted out of the polling loop: the original re-ran `import time`
    # on every iteration. Kept function-local to match the module's
    # lazy-import convention.
    import time

    try:
        os.kill(pid, signal.SIGTERM)
        # Give it a moment to terminate gracefully
        for _ in range(10):
            if not _is_process_alive(pid):
                return True
            time.sleep(0.1)
        # Force kill if still alive
        os.kill(pid, signal.SIGKILL)
        return True
    except (OSError, ProcessLookupError):
        return False
# =============================================================================
# Standalone Tunnel Functions (no browser session required)
# =============================================================================
async def start_tunnel(port: int) -> dict[str, Any]:
    """Start a cloudflare quick tunnel for a local port.

    The tunnel runs as a daemon process that survives CLI exit.

    Args:
        port: Local port to tunnel

    Returns:
        Dict with 'url' and 'port' on success, or 'error' on failure
    """
    # Check if tunnel already exists for this port
    existing = _load_tunnel_info(port)
    if existing:
        return {'url': existing['url'], 'port': port, 'existing': True}
    # Get cloudflared binary
    try:
        tunnel_manager = get_tunnel_manager()
        cloudflared_binary = tunnel_manager.get_binary_path()
    except RuntimeError as e:
        return {'error': str(e)}
    # Create log file for cloudflared stderr (avoids SIGPIPE when parent exits)
    _TUNNELS_DIR.mkdir(parents=True, exist_ok=True)
    log_file_path = _TUNNELS_DIR / f'{port}.log'
    log_file = open(log_file_path, 'w')  # noqa: ASYNC230
    # Spawn cloudflared as a daemon
    # - start_new_session=True: survives parent exit
    # - stderr to file: avoids SIGPIPE when parent's pipe closes
    process = await asyncio.create_subprocess_exec(
        cloudflared_binary,
        'tunnel',
        '--url',
        f'http://localhost:{port}',
        stdout=asyncio.subprocess.DEVNULL,
        stderr=log_file,
        start_new_session=True,
    )
    # Poll the log file until we find the tunnel URL
    # (cloudflared's stderr is redirected there, and that is where the
    # assigned trycloudflare.com URL appears)
    url: str | None = None
    try:
        import time

        # 15 second budget for cloudflared to report its URL
        deadline = time.time() + 15
        while time.time() < deadline:
            # Check if process died
            if process.returncode is not None:
                log_file.close()
                content = log_file_path.read_text() if log_file_path.exists() else ''
                return {'error': f'cloudflared exited unexpectedly: {content[:500]}'}
            # Read log file content
            try:
                content = log_file_path.read_text()
                match = _URL_PATTERN.search(content)
                if match:
                    url = match.group(1)
                    break
            except OSError:
                # Log file may be momentarily unreadable; retry next tick.
                pass
            await asyncio.sleep(0.2)
    except Exception as e:
        process.terminate()
        log_file.close()
        return {'error': f'Failed to start tunnel: {e}'}
    if url is None:
        process.terminate()
        log_file.close()
        return {'error': 'Timed out waiting for cloudflare tunnel URL (15s)'}
    # Close log file handle to avoid leaking file descriptors
    log_file.close()
    # Save tunnel info to disk so it persists across CLI invocations
    _save_tunnel_info(port, process.pid, url)
    logger.info(f'Tunnel started: localhost:{port} -> {url} (pid={process.pid})')
    return {'url': url, 'port': port}
def list_tunnels() -> dict[str, Any]:
    """List active tunnels.

    Returns:
        Dict with 'tunnels' list and 'count'
    """
    active: list[dict[str, Any]] = []
    if _TUNNELS_DIR.exists():
        for info_file in _TUNNELS_DIR.glob('*.json'):
            # Filenames are `{port}.json`; skip anything else.
            try:
                port_num = int(info_file.stem)
            except ValueError:
                continue
            info = _load_tunnel_info(port_num)
            if info:
                active.append({'port': info['port'], 'url': info['url']})
    return {'tunnels': active, 'count': len(active)}
async def stop_tunnel(port: int) -> dict[str, Any]:
    """Stop a tunnel for a specific port.

    Args:
        port: Port number to stop tunnel for

    Returns:
        Dict with 'stopped' port and 'url' on success, or 'error'
    """
    info = _load_tunnel_info(port)
    if info is None:
        return {'error': f'No tunnel running on port {port}'}
    _kill_process(info['pid'])
    _delete_tunnel_info(port)
    # Clean up log file
    (_TUNNELS_DIR / f'{port}.log').unlink(missing_ok=True)
    logger.info(f'Tunnel stopped: localhost:{port}')
    return {'stopped': port, 'url': info['url']}
async def stop_all_tunnels() -> dict[str, Any]:
    """Stop all active tunnels.

    Returns:
        Dict with 'stopped' list of ports
    """
    stopped: list[int] = []
    if _TUNNELS_DIR.exists():
        for info_file in _TUNNELS_DIR.glob('*.json'):
            # Filenames are `{port}.json`; skip anything else.
            try:
                port_num = int(info_file.stem)
            except ValueError:
                continue
            outcome = await stop_tunnel(port_num)
            if 'stopped' in outcome:
                stopped.append(port_num)
    return {'stopped': stopped, 'count': len(stopped)}
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/skill_cli/tunnel.py",
"license": "MIT License",
"lines": 256,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/skill_cli/utils.py | """Platform utilities for CLI and server."""
import hashlib
import os
import platform
import signal
import subprocess
import sys
import tempfile
from pathlib import Path
from typing import IO
import portalocker
def get_socket_path(session: str) -> str:
    """Get socket path for session.

    On Windows, returns a TCP address (tcp://127.0.0.1:PORT).
    On Unix, returns a Unix socket path.
    """
    if sys.platform != 'win32':
        return str(Path(tempfile.gettempdir()) / f'browser-use-{session}.sock')
    # Windows: use TCP on deterministic port (49152-65535)
    # Use 127.0.0.1 explicitly (not localhost) to avoid IPv6 binding issues
    digest = hashlib.md5(session.encode()).hexdigest()
    port = 49152 + (int(digest[:4], 16) % 16383)
    return f'tcp://127.0.0.1:{port}'
def get_pid_path(session: str) -> Path:
    """Get PID file path for session."""
    # Lives next to the socket/lock files in the system temp directory.
    return Path(tempfile.gettempdir(), f'browser-use-{session}.pid')
def get_log_path(session: str) -> Path:
    """Get log file path for session."""
    # Lives next to the socket/lock files in the system temp directory.
    return Path(tempfile.gettempdir(), f'browser-use-{session}.log')
def get_lock_path(session: str) -> Path:
    """Get lock file path for session."""
    # Lives next to the socket/PID files in the system temp directory.
    return Path(tempfile.gettempdir(), f'browser-use-{session}.lock')
def _pid_exists(pid: int) -> bool:
"""Check if a process with given PID exists.
On Windows, uses ctypes to call OpenProcess (os.kill doesn't work reliably).
On Unix, uses os.kill(pid, 0) which is the standard approach.
"""
if sys.platform == 'win32':
import ctypes
PROCESS_QUERY_LIMITED_INFORMATION = 0x1000
handle = ctypes.windll.kernel32.OpenProcess(PROCESS_QUERY_LIMITED_INFORMATION, False, pid)
if handle:
ctypes.windll.kernel32.CloseHandle(handle)
return True
return False
else:
try:
os.kill(pid, 0)
return True
except OSError:
return False
def is_server_running(session: str) -> bool:
    """Check if server is running for session."""
    try:
        pid = int(get_pid_path(session).read_text().strip())
        return _pid_exists(pid)
    except (OSError, ValueError):
        # Missing/unreadable PID file, or garbage content
        return False
def try_acquire_server_lock(session: str) -> IO | None:
    """Try to acquire the server lock non-blocking.

    Returns:
        Lock file handle if acquired (caller must keep in scope to maintain lock),
        None if lock is already held by another process.
    """
    lock_path = get_lock_path(session)
    lock_path.parent.mkdir(parents=True, exist_ok=True)
    lock_path.touch(exist_ok=True)
    handle = open(lock_path, 'r+')
    try:
        portalocker.lock(handle, portalocker.LOCK_EX | portalocker.LOCK_NB)
    except portalocker.LockException:
        handle.close()
        return None
    return handle
def is_session_locked(session: str) -> bool:
    """Check if session has an active lock (server is holding it)."""
    lock_path = get_lock_path(session)
    if not lock_path.exists():
        return False
    try:
        with open(lock_path, 'r+') as handle:
            portalocker.lock(handle, portalocker.LOCK_EX | portalocker.LOCK_NB)
            portalocker.unlock(handle)
    except portalocker.LockException:
        return True  # Lock failed = someone holding it
    except OSError:
        return False  # File access error
    return False  # Lock acquired = no one holding it
def kill_orphaned_server(session: str) -> bool:
    """Kill an orphaned server (has PID file but no lock).

    An orphaned server is one where the process is running but it doesn't
    hold the session lock (e.g., because a newer server took over the lock
    file but didn't kill the old process).

    Returns:
        True if an orphan was found and killed.
    """
    pid_path = get_pid_path(session)
    if not pid_path.exists():
        return False
    # Check if session is locked (server alive and holding lock)
    if is_session_locked(session):
        return False  # Not an orphan - server is healthy
    # PID exists but no lock - orphan situation
    try:
        pid = int(pid_path.read_text().strip())
        if _pid_exists(pid):
            # Kill the orphaned process
            if sys.platform == 'win32':
                # os.kill is unreliable on Windows; use the Win32 API directly.
                import ctypes
                PROCESS_TERMINATE = 1
                handle = ctypes.windll.kernel32.OpenProcess(PROCESS_TERMINATE, False, pid)
                if handle:
                    ctypes.windll.kernel32.TerminateProcess(handle, 1)
                    ctypes.windll.kernel32.CloseHandle(handle)
            else:
                # SIGKILL: the orphan may be wedged, so don't rely on SIGTERM handlers.
                os.kill(pid, signal.SIGKILL)
            # NOTE(review): session files are NOT cleaned up on this path —
            # presumably left for the next startup to reuse/clean; confirm intentional.
            return True
    except (OSError, ValueError):
        pass
    # Clean up stale files even if we couldn't kill (process may be gone)
    cleanup_session_files(session)
    return False
def find_all_sessions() -> list[str]:
    """Find all running browser-use sessions by scanning PID files."""
    tmpdir = Path(tempfile.gettempdir())
    # PID files are named: browser-use-{session}.pid
    candidates = (pid_file.stem.replace('browser-use-', '', 1) for pid_file in tmpdir.glob('browser-use-*.pid'))
    return [name for name in candidates if is_server_running(name)]
def cleanup_session_files(session: str) -> None:
    """Remove session socket, PID, lock, and metadata files."""
    sock_path = get_socket_path(session)
    # Remove socket file (Unix only; Windows sessions use TCP, so no file)
    if not sock_path.startswith('tcp://'):
        try:
            os.unlink(sock_path)
        except OSError:
            pass
    # Remove PID, lock, and metadata files; each may already be gone.
    meta_path = Path(tempfile.gettempdir()) / f'browser-use-{session}.meta'
    for stale in (get_pid_path(session), get_lock_path(session), meta_path):
        try:
            stale.unlink()
        except OSError:
            pass
def find_chrome_executable() -> str | None:
"""Find Chrome/Chromium executable on the system."""
system = platform.system()
if system == 'Darwin':
# macOS
paths = [
'/Applications/Google Chrome.app/Contents/MacOS/Google Chrome',
'/Applications/Chromium.app/Contents/MacOS/Chromium',
'/Applications/Google Chrome Canary.app/Contents/MacOS/Google Chrome Canary',
]
for path in paths:
if os.path.exists(path):
return path
elif system == 'Linux':
# Linux: try common commands
for cmd in ['google-chrome', 'google-chrome-stable', 'chromium', 'chromium-browser']:
try:
result = subprocess.run(['which', cmd], capture_output=True, text=True)
if result.returncode == 0:
return result.stdout.strip()
except Exception:
pass
elif system == 'Windows':
# Windows: check common paths
paths = [
os.path.expandvars(r'%ProgramFiles%\Google\Chrome\Application\chrome.exe'),
os.path.expandvars(r'%ProgramFiles(x86)%\Google\Chrome\Application\chrome.exe'),
os.path.expandvars(r'%LocalAppData%\Google\Chrome\Application\chrome.exe'),
]
for path in paths:
if os.path.exists(path):
return path
return None
def get_chrome_profile_path(profile: str | None) -> str | None:
"""Get Chrome user data directory for a profile.
If profile is None, returns the default Chrome user data directory.
"""
if profile is None:
# Use default Chrome profile location
system = platform.system()
if system == 'Darwin':
return str(Path.home() / 'Library' / 'Application Support' / 'Google' / 'Chrome')
elif system == 'Linux':
return str(Path.home() / '.config' / 'google-chrome')
elif system == 'Windows':
return os.path.expandvars(r'%LocalAppData%\Google\Chrome\User Data')
else:
# Return the profile name - Chrome will use it as a subdirectory
# The actual path will be user_data_dir/profile
return profile
return None
def list_chrome_profiles() -> list[dict[str, str]]:
    """List available Chrome profiles with their names.

    Returns:
        List of dicts with 'directory' and 'name' keys, ex:
        [{'directory': 'Default', 'name': 'Person 1'}, {'directory': 'Profile 1', 'name': 'Work'}]
    """
    import json

    user_data_dir = get_chrome_profile_path(None)
    if user_data_dir is None:
        return []
    state_file = Path(user_data_dir) / 'Local State'
    if not state_file.exists():
        return []
    try:
        # Chrome's "Local State" JSON carries profile metadata under
        # profile.info_cache, keyed by the profile directory name.
        local_state = json.loads(state_file.read_text())
        info_cache = local_state.get('profile', {}).get('info_cache', {})
        found = [
            {'directory': directory, 'name': meta.get('name', directory)}
            for directory, meta in info_cache.items()
        ]
        return sorted(found, key=lambda entry: entry['directory'])
    except (json.JSONDecodeError, KeyError, OSError):
        return []
def get_config_dir() -> Path:
    """Get browser-use config directory."""
    if sys.platform == 'win32':
        # %APPDATA% when set, otherwise the home directory
        base = Path(os.environ['APPDATA']) if 'APPDATA' in os.environ else Path.home()
    else:
        # XDG_CONFIG_HOME when set, otherwise ~/.config
        base = Path(os.environ['XDG_CONFIG_HOME']) if 'XDG_CONFIG_HOME' in os.environ else Path.home() / '.config'
    return base / 'browser-use'
def get_config_path() -> Path:
    """Get browser-use config file path."""
    # Single config.json inside the per-user config directory.
    return get_config_dir() / 'config.json'
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/skill_cli/utils.py",
"license": "MIT License",
"lines": 251,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/tools/extraction/schema_utils.py | """Converts a JSON Schema dict to a runtime Pydantic model for structured extraction."""
import logging
from typing import Any
from pydantic import BaseModel, ConfigDict, Field, create_model
logger = logging.getLogger(__name__)
# Keywords that indicate composition/reference patterns we don't support;
# _check_unsupported raises ValueError when any of these appear in a node.
_UNSUPPORTED_KEYWORDS = frozenset(
    {
        '$ref',
        'allOf',
        'anyOf',
        'oneOf',
        'not',
        '$defs',
        'definitions',
        'if',
        'then',
        'else',
        'dependentSchemas',
        'dependentRequired',
    }
)
# Primitive JSON Schema type → Python type
_PRIMITIVE_MAP: dict[str, type] = {
    'string': str,
    'number': float,
    'integer': int,
    'boolean': bool,
    'null': type(None),
}
class _StrictBase(BaseModel):
    """Base for generated models: extra='forbid' rejects unknown fields;
    validation accepts both field names and aliases."""

    model_config = ConfigDict(extra='forbid', validate_by_name=True, validate_by_alias=True)
def _check_unsupported(schema: dict) -> None:
    """Raise ValueError if the schema uses unsupported composition keywords."""
    for keyword in _UNSUPPORTED_KEYWORDS:
        if keyword in schema:
            raise ValueError(f'Unsupported JSON Schema keyword: {keyword}')
def _resolve_type(schema: dict, name: str) -> Any:
    """Recursively resolve a JSON Schema node to a Python type.

    Returns a Python type suitable for use as a field type in pydantic.create_model.
    """
    _check_unsupported(schema)
    # Enums — constrain to str (Literal would be stricter but LLMs are flaky)
    if 'enum' in schema:
        return str
    node_type = schema.get('type', 'string')
    # Object with declared properties → nested pydantic model; free-form otherwise
    if node_type == 'object':
        return _build_model(schema, name) if schema.get('properties') else dict
    # Array: typed when 'items' is declared, untyped list otherwise
    if node_type == 'array':
        item_schema = schema.get('items')
        if not item_schema:
            return list
        return list[_resolve_type(item_schema, f'{name}_item')]
    # Primitive, defaulting to str for unknown type names
    resolved = _PRIMITIVE_MAP.get(node_type, str)
    # OpenAPI-style 'nullable' widens the primitive to Optional
    return resolved | None if schema.get('nullable', False) else resolved
# Zero values used by _build_model for optional, non-nullable primitive
# fields that carry no explicit `default` in the schema.
_PRIMITIVE_DEFAULTS: dict[str, Any] = {
    'string': '',
    'number': 0.0,
    'integer': 0,
    'boolean': False,
}
def _build_model(schema: dict, name: str) -> type[BaseModel]:
    """Build a pydantic model from an object-type JSON Schema node.

    Required properties become required fields (`...`); optional ones get a
    type-appropriate default so validation succeeds when they are absent.
    """
    _check_unsupported(schema)
    properties = schema.get('properties', {})
    required_fields = set(schema.get('required', []))
    fields: dict[str, Any] = {}
    for prop_name, prop_schema in properties.items():
        prop_type = _resolve_type(prop_schema, f'{name}_{prop_name}')
        if prop_name in required_fields:
            default = ...
        elif 'default' in prop_schema:
            default = prop_schema['default']
        elif prop_schema.get('nullable', False):
            # _resolve_type already made the type include None
            default = None
        else:
            # Non-required, non-nullable, no explicit default.
            # Use a type-appropriate zero value for primitives/arrays;
            # fall back to None (with | None) for enums and nested objects
            # where no in-set or constructible default exists.
            json_type = prop_schema.get('type', 'string')
            if 'enum' in prop_schema:
                # Can't pick an arbitrary enum member as default — use None
                # so absent fields serialize as null, not an out-of-set value.
                prop_type = prop_type | None
                default = None
            elif json_type in _PRIMITIVE_DEFAULTS:
                default = _PRIMITIVE_DEFAULTS[json_type]
            elif json_type == 'array':
                default = []
            else:
                # Nested object or unknown — must allow None as sentinel
                prop_type = prop_type | None
                default = None
        field_kwargs: dict[str, Any] = {}
        if 'description' in prop_schema:
            field_kwargs['description'] = prop_schema['description']
        # Empty-list defaults go through default_factory so instances don't
        # share a single mutable list.
        if isinstance(default, list) and not default:
            fields[prop_name] = (prop_type, Field(default_factory=list, **field_kwargs))
        else:
            fields[prop_name] = (prop_type, Field(default, **field_kwargs))
    return create_model(name, __base__=_StrictBase, **fields)
def schema_dict_to_pydantic_model(schema: dict) -> type[BaseModel]:
    """Convert a JSON Schema dict to a runtime Pydantic model.

    The schema must be ``{"type": "object", "properties": {...}, ...}``.
    Unsupported keywords ($ref, allOf, anyOf, oneOf, etc.) raise ValueError.

    Returns:
        A dynamically-created Pydantic BaseModel subclass.

    Raises:
        ValueError: If the schema is invalid or uses unsupported features.
    """
    _check_unsupported(schema)

    # Only top-level objects can be turned into models.
    declared = schema.get('type')
    if declared != 'object':
        raise ValueError(f'Top-level schema must have type "object", got {declared!r}')
    if not schema.get('properties'):
        raise ValueError('Top-level schema must have at least one property')

    return _build_model(schema, schema.get('title', 'DynamicExtractionModel'))
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/tools/extraction/schema_utils.py",
"license": "MIT License",
"lines": 131,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/tools/extraction/views.py | """Pydantic models for the extraction subsystem."""
from typing import Any
from pydantic import BaseModel, ConfigDict, Field
class ExtractionResult(BaseModel):
    """Metadata about a structured extraction, stored in ActionResult.metadata."""

    # extra='forbid' makes unknown keys a validation error rather than
    # silently accepted data.
    model_config = ConfigDict(extra='forbid')

    # The payload that passed schema validation.
    data: dict[str, Any] = Field(description='The validated extraction payload')
    # Kept alongside the data so consumers can see what was enforced.
    schema_used: dict[str, Any] = Field(description='The JSON Schema that was enforced')
    is_partial: bool = Field(default=False, description='True if content was truncated before extraction')
    source_url: str | None = Field(default=None, description='URL the content was extracted from')
    content_stats: dict[str, Any] = Field(default_factory=dict, description='Content processing statistics')
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/tools/extraction/views.py",
"license": "MIT License",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/browser/custom_headers.py | """
Custom HTTP Headers via a custom Watchdog.
Creates a custom watchdog that listens to TabCreatedEvent and injects
custom HTTP headers into every new tab using Network.setExtraHTTPHeaders.
Note: The CDP EventRegistry only supports one handler per event method,
so registering directly on Target.attachedToTarget would replace the
internal SessionManager handler. Using the browser-use event system
(TabCreatedEvent) avoids this and fires after the target is fully set up.
Note: Network.setExtraHTTPHeaders is a full replacement (not additive).
Verified by navigating to https://httpbin.org/headers in a new tab.
"""
import asyncio
import os
import sys
from typing import ClassVar
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from bubus import BaseEvent
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent, Browser, ChatBrowserUse
from browser_use.browser.events import AgentFocusChangedEvent, TabCreatedEvent
from browser_use.browser.watchdog_base import BaseWatchdog
# Headers injected into every tab via Network.setExtraHTTPHeaders.
# NOTE: setExtraHTTPHeaders is a full replacement, not additive — each call
# overwrites all previously set extra headers on that target.
CUSTOM_HEADERS = {
    'X-Custom-Auth': 'Bearer my-secret-token',
    'X-Request-Source': 'browser-use-agent',
    'X-Trace-Id': 'example-trace-12345',
}
class CustomHeadersWatchdog(BaseWatchdog):
    """Injects custom HTTP headers on every new tab and focus change.

    Subscribes to both TabCreatedEvent (new tabs) and AgentFocusChangedEvent
    (tab switches): headers are bound to a CDP session, and sessions can be
    recreated on cross-origin navigations or tab switches, so they must be
    re-applied on both occasions.
    """

    LISTENS_TO: ClassVar[list[type[BaseEvent]]] = [TabCreatedEvent, AgentFocusChangedEvent]
    EMITS: ClassVar[list[type[BaseEvent]]] = []

    async def _apply(self, target_id: str) -> None:
        # Best-effort: header injection failures must never crash the watchdog.
        try:
            await self.browser_session.set_extra_headers(CUSTOM_HEADERS, target_id=target_id)
        except Exception as e:
            self.logger.debug(f'Could not set headers on {target_id[:8]}: {e}')

    async def on_TabCreatedEvent(self, event: TabCreatedEvent) -> None:
        """Set extra headers when a new tab is created."""
        await self._apply(event.target_id)

    async def on_AgentFocusChangedEvent(self, event: AgentFocusChangedEvent) -> None:
        """Re-apply headers when the agent switches to a different tab."""
        await self._apply(event.target_id)
async def main():
    """Launch a browser, attach the header-injecting watchdog, and run the demo."""
    browser = Browser(headless=False)
    # Watchdogs are only initialized once the browser has started.
    await browser.start()

    # Wire our custom watchdog into the running session.
    CustomHeadersWatchdog.model_rebuild()
    watchdog = CustomHeadersWatchdog(event_bus=browser.event_bus, browser_session=browser)
    watchdog.attach_to_session()

    # The watchdog only fires for tabs created AFTER registration. For an
    # already-existing tab, call set_extra_headers() directly:
    #
    #   await browser.set_extra_headers(CUSTOM_HEADERS)
    #   await browser.set_extra_headers(CUSTOM_HEADERS, target_id=some_target_id)
    #
    # Remember setExtraHTTPHeaders fully replaces previously set extra headers.

    # Open httpbin.org/headers in new tabs so the watchdog fires and the
    # injected headers are visible in the echoed response.
    agent = Agent(
        task=(
            'Open https://httpbin.org/headers in two different tabs and extract the full JSON response. '
            'Look for the custom headers X-Custom-Auth, X-Request-Source, and X-Trace-Id in the output and compare the results.'
        ),
        llm=ChatBrowserUse(model='bu-2-0'),
        browser=browser,
    )
    outcome = await agent.run()
    print(outcome.final_result())

    await browser.kill()


if __name__ == '__main__':
    asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/browser/custom_headers.py",
"license": "MIT License",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/features/csv_file_generation.py | """
Generate CSV files with automatic normalization.
The agent's file system automatically normalizes CSV output using Python's csv module,
so fields containing commas, quotes, or empty values are properly handled per RFC 4180.
This means the agent doesn't need to worry about manual quoting — it's fixed at the
infrastructure level.
Common LLM mistakes that are auto-corrected:
- Unquoted fields containing commas (e.g. "San Francisco, CA" without quotes)
- Unescaped double quotes inside fields
- Inconsistent empty field handling
- Stray blank lines
"""
import asyncio
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent, ChatBrowserUse
async def main():
    """Run the city-extraction task, then print the CSV the agent produced (if any)."""
    agent = Agent(
        task=(
            'Go to https://en.wikipedia.org/wiki/List_of_largest_cities and extract the top 10 cities. '
            'Create a CSV file called "top_cities.csv" with columns: rank, city name, country, population. '
            'Make sure to include all cities even if some data is missing — leave those cells empty.'
        ),
        llm=ChatBrowserUse(model='bu-2-0'),
    )
    history = await agent.run()

    # Guard-clause style: bail out early when there is nothing to show.
    fs = agent.file_system
    if not fs:
        return
    csv_file = fs.get_file('top_cities.csv')
    if not csv_file:
        return
    print('\nGenerated CSV content:')
    print(csv_file.content)
    print(f'\nFile saved to: {fs.get_dir() / csv_file.full_name}')


if __name__ == '__main__':
    asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/features/csv_file_generation.py",
"license": "MIT License",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/features/save_as_pdf.py | """
Save any webpage as a PDF using the save_as_pdf action.
The agent can save the current page as a PDF at any point during a task.
Supports custom filenames, paper sizes (Letter, A4, Legal, A3, Tabloid),
landscape orientation, and background printing.
Setup:
1. Get your API key from https://cloud.browser-use.com/new-api-key
2. Set environment variable: export BROWSER_USE_API_KEY="your-key"
"""
import asyncio
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent, ChatBrowserUse
async def main():
    """Run the save-as-PDF demo task and list any files the agent attached."""
    agent = Agent(
        task=(
            'Go to https://news.ycombinator.com and save the front page as a PDF named "hackernews". '
            'Then go to https://en.wikipedia.org/wiki/Web_browser and save just that article as a PDF in A4 format.'
        ),
        llm=ChatBrowserUse(model='bu-2-0'),
    )
    history = await agent.run()

    # Walk every action result and print the attachment paths, if any.
    print('\nSaved files:')
    for result in history.action_results():
        for path in result.attachments or []:
            print(f' {path}')


if __name__ == '__main__':
    asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/features/save_as_pdf.py",
"license": "MIT License",
"lines": 33,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:tests/ci/browser/test_navigation_slow_pages.py | """
Test navigation on heavy/slow-loading pages (e.g. e-commerce PDPs).
Reproduces the issue where navigating to heavy pages like stevemadden.com PDPs
fails due to NavigateToUrlEvent timing out.
Usage:
uv run pytest tests/ci/browser/test_navigation_slow_pages.py -v -s
"""
import asyncio
import time
import pytest
from pytest_httpserver import HTTPServer
from werkzeug import Response
from browser_use.agent.service import Agent
from browser_use.browser import BrowserSession
from browser_use.browser.events import NavigateToUrlEvent
from browser_use.browser.profile import BrowserProfile
from tests.ci.conftest import create_mock_llm
# Minimal stand-in for a heavy e-commerce product-detail page (PDP); the
# fixtures below serve this markup behind artificial server-side delays.
HEAVY_PDP_HTML = """
<!DOCTYPE html>
<html>
<head><title>Frosting Black Velvet - Steve Madden</title></head>
<body>
<h1>FROSTING</h1>
<p class="price">$129.95</p>
<button id="add-to-cart">ADD TO BAG</button>
</body>
</html>
"""
@pytest.fixture(scope='session')
def heavy_page_server():
    """Session-scoped HTTP server with slow and redirecting endpoints for navigation tests."""
    server = HTTPServer()
    server.start()

    def slow_initial_response(request):
        # 6s delay simulates a slow origin server before the first byte.
        time.sleep(6)
        return Response(HEAVY_PDP_HTML, content_type='text/html')

    server.expect_request('/slow-server-pdp').respond_with_handler(slow_initial_response)

    # Two-step 302 chain ending on a slow final page.
    def redirect_step1(request):
        return Response('', status=302, headers={'Location': f'http://{server.host}:{server.port}/redirect-step2'})

    def redirect_step2(request):
        return Response('', status=302, headers={'Location': f'http://{server.host}:{server.port}/redirect-final'})

    def redirect_final(request):
        time.sleep(3)
        return Response(HEAVY_PDP_HTML, content_type='text/html')

    server.expect_request('/redirect-step1').respond_with_handler(redirect_step1)
    server.expect_request('/redirect-step2').respond_with_handler(redirect_step2)
    server.expect_request('/redirect-final').respond_with_handler(redirect_final)
    # Instant responses: used for DOMContentLoaded and recovery tests.
    server.expect_request('/fast-dom-slow-load').respond_with_data(HEAVY_PDP_HTML, content_type='text/html')
    server.expect_request('/quick-page').respond_with_data(
        '<html><body><h1>Quick Page</h1></body></html>', content_type='text/html'
    )
    yield server
    server.stop()
@pytest.fixture(scope='session')
def heavy_base_url(heavy_page_server):
    """Return the ``http://host:port`` base URL of the heavy-page test server."""
    return f'http://{heavy_page_server.host}:{heavy_page_server.port}'
@pytest.fixture(scope='function')
async def browser_session():
    """Fresh headless browser session per test; killed on teardown."""
    session = BrowserSession(browser_profile=BrowserProfile(headless=True, user_data_dir=None, keep_alive=True))
    await session.start()
    yield session
    await session.kill()
def _nav_actions(url: str, msg: str = 'Done') -> list[str]:
"""Helper to build a navigate-then-done action sequence."""
return [
f"""
{{
"thinking": "Navigate to the page",
"evaluation_previous_goal": "Starting task",
"memory": "Navigating",
"next_goal": "Navigate",
"action": [{{"navigate": {{"url": "{url}"}}}}]
}}
""",
f"""
{{
"thinking": "Page loaded",
"evaluation_previous_goal": "Navigation completed",
"memory": "Page loaded",
"next_goal": "Done",
"action": [{{"done": {{"text": "{msg}", "success": true}}}}]
}}
""",
]
class TestHeavyPageNavigation:
    """End-to-end checks that navigation survives slow servers and redirect chains."""

    async def test_slow_server_response_completes(self, browser_session, heavy_base_url):
        """Navigation succeeds even when server takes 6s to respond."""
        url = f'{heavy_base_url}/slow-server-pdp'
        agent = Agent(
            task=f'Navigate to {url}',
            llm=create_mock_llm(actions=_nav_actions(url)),
            browser_session=browser_session,
        )
        start = time.time()
        history = await asyncio.wait_for(agent.run(max_steps=3), timeout=60)
        assert len(history) > 0
        assert history.final_result() is not None
        # Sanity check: the run genuinely waited out the slow response.
        assert time.time() - start >= 5, 'Should have waited for slow server'

    async def test_redirect_chain_completes(self, browser_session, heavy_base_url):
        """Navigation handles multi-step redirects + slow final response."""
        url = f'{heavy_base_url}/redirect-step1'
        agent = Agent(
            task=f'Navigate to {url}',
            llm=create_mock_llm(actions=_nav_actions(url)),
            browser_session=browser_session,
        )
        history = await asyncio.wait_for(agent.run(max_steps=3), timeout=60)
        assert len(history) > 0
        assert history.final_result() is not None

    async def test_navigate_event_accepts_domcontentloaded(self, browser_session, heavy_base_url):
        """NavigateToUrlEvent with fast page should complete quickly via DOMContentLoaded/load."""
        url = f'{heavy_base_url}/fast-dom-slow-load'
        event = browser_session.event_bus.dispatch(NavigateToUrlEvent(url=url))
        await asyncio.wait_for(event, timeout=15)
        await event.event_result(raise_if_any=True, raise_if_none=False)

    async def test_recovery_after_slow_navigation(self, browser_session, heavy_base_url):
        """Agent recovers and navigates to a fast page after a slow one."""
        slow_url = f'{heavy_base_url}/slow-server-pdp'
        quick_url = f'{heavy_base_url}/quick-page'
        # Three-step mock script: slow page -> quick page -> done.
        actions = [
            f"""
{{
"thinking": "Navigate to slow page",
"evaluation_previous_goal": "Starting",
"memory": "Going to slow page",
"next_goal": "Navigate",
"action": [{{"navigate": {{"url": "{slow_url}"}}}}]
}}
""",
            f"""
{{
"thinking": "Now navigate to quick page",
"evaluation_previous_goal": "Slow page loaded",
"memory": "Trying quick page",
"next_goal": "Navigate",
"action": [{{"navigate": {{"url": "{quick_url}"}}}}]
}}
""",
            """
{
"thinking": "Both done",
"evaluation_previous_goal": "Quick page loaded",
"memory": "Recovery successful",
"next_goal": "Done",
"action": [{"done": {"text": "Recovery succeeded", "success": true}}]
}
""",
        ]
        agent = Agent(
            task='Navigate to slow then quick page',
            llm=create_mock_llm(actions=actions),
            browser_session=browser_session,
        )
        history = await asyncio.wait_for(agent.run(max_steps=4), timeout=90)
        assert len(history) >= 2
        assert history.final_result() is not None

    async def test_event_timeout_sufficient_for_heavy_pages(self, browser_session):
        """event_timeout should be >= 30s to handle slow servers + redirect chains."""
        event = NavigateToUrlEvent(url='http://example.com')
        assert event.event_timeout is not None
        assert event.event_timeout >= 30.0, f'event_timeout={event.event_timeout}s is too low for heavy pages (need >= 30s)'
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/browser/test_navigation_slow_pages.py",
"license": "MIT License",
"lines": 160,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/interactions/test_autocomplete_interaction.py | """Test autocomplete/combobox field detection, value readback, and input clearing.
Tests cover:
- Value mismatch detection when JS rewrites input value
- Combobox field detection (role=combobox + aria-autocomplete)
- Datalist field detection (input with list attribute)
- No false positives on plain inputs
- Sensitive data skips value verification
- Pre-filled input clearing (clear=True default)
- Pre-filled input appending (clear=False)
- Concatenation auto-retry when clear fails
- Autocomplete delay before next action
"""
import asyncio
import pytest
from pytest_httpserver import HTTPServer
from browser_use.agent.views import ActionResult
from browser_use.browser import BrowserSession
from browser_use.browser.profile import BrowserProfile
from browser_use.tools.service import Tools
@pytest.fixture(scope='session')
def http_server():
    """Create and provide a test HTTP server with autocomplete test pages."""
    server = HTTPServer()
    server.start()

    # Page 1: Input with JS that rewrites value on change (simulates autocomplete replacing text)
    server.expect_request('/autocomplete-rewrite').respond_with_data(
        """
<!DOCTYPE html>
<html>
<head><title>Autocomplete Rewrite Test</title></head>
<body>
<input id="search" type="text" />
<script>
const input = document.getElementById('search');
input.addEventListener('change', function() {
// Simulate autocomplete rewriting the value
this.value = 'REWRITTEN_' + this.value;
});
</script>
</body>
</html>
""",
        content_type='text/html',
    )

    # Page 2: Input with role=combobox + aria-autocomplete=list + aria-controls + listbox
    server.expect_request('/combobox-field').respond_with_data(
        """
<!DOCTYPE html>
<html>
<head><title>Combobox Field Test</title></head>
<body>
<div>
<input id="combo" type="text" role="combobox"
aria-autocomplete="list" aria-controls="suggestions-list"
aria-expanded="false" />
<ul id="suggestions-list" role="listbox" style="display:none;">
<li role="option">Option A</li>
<li role="option">Option B</li>
</ul>
</div>
</body>
</html>
""",
        content_type='text/html',
    )

    # Page 3: Input with list attribute pointing to a datalist
    server.expect_request('/datalist-field').respond_with_data(
        """
<!DOCTYPE html>
<html>
<head><title>Datalist Field Test</title></head>
<body>
<input id="city" type="text" list="suggestions" />
<datalist id="suggestions">
<option value="New York">
<option value="Los Angeles">
<option value="Chicago">
</datalist>
</body>
</html>
""",
        content_type='text/html',
    )

    # Page 4: Plain input with no autocomplete attributes
    server.expect_request('/normal-input').respond_with_data(
        """
<!DOCTYPE html>
<html>
<head><title>Normal Input Test</title></head>
<body>
<input id="plain" type="text" placeholder="Just a normal input" />
</body>
</html>
""",
        content_type='text/html',
    )

    # Page 5: Pre-filled input to test clear=True behavior
    server.expect_request('/prefilled-input').respond_with_data(
        """
<!DOCTYPE html>
<html>
<head><title>Pre-filled Input Test</title></head>
<body>
<input id="prefilled" type="text" value="old value" />
</body>
</html>
""",
        content_type='text/html',
    )

    # Page 6: Input where clear fails — input event listener restores old text
    # Simulates a framework-controlled input where clearing triggers re-render with old state
    server.expect_request('/sticky-input').respond_with_data(
        """
<!DOCTYPE html>
<html>
<head><title>Sticky Input Test</title></head>
<body>
<input id="sticky" type="text" value="prefix_" />
<script>
var el = document.getElementById('sticky');
var clearAttempts = 0;
// Intercept value clears: restore old value on first two clears
// (simulates framework re-rendering with stale state)
el.addEventListener('input', function() {
if (el.value === '' && clearAttempts < 2) {
clearAttempts++;
el.value = 'prefix_';
}
});
</script>
</body>
</html>
""",
        content_type='text/html',
    )
    yield server
    server.stop()
@pytest.fixture(scope='session')
def base_url(http_server):
    """Return the base URL for the test HTTP server."""
    # host/port are chosen by pytest_httpserver at start time.
    return f'http://{http_server.host}:{http_server.port}'
@pytest.fixture(scope='module')
async def browser_session():
    """Create and provide a Browser instance for testing."""
    # Module scope: one shared headless browser for all tests in this file.
    browser_session = BrowserSession(
        browser_profile=BrowserProfile(
            headless=True,
            user_data_dir=None,
            keep_alive=True,
            chromium_sandbox=False,
        )
    )
    await browser_session.start()
    yield browser_session
    await browser_session.kill()
@pytest.fixture(scope='function')
def tools():
    """Create and provide a Tools instance."""
    # Function scope: each test gets a fresh instance.
    return Tools()
class TestAutocompleteInteraction:
    """Test autocomplete/combobox detection and value readback."""

    async def test_value_mismatch_detected(self, tools: Tools, browser_session: BrowserSession, base_url: str):
        """Type into a field whose JS rewrites the value on change. Assert the ActionResult notes the mismatch."""
        await tools.navigate(url=f'{base_url}/autocomplete-rewrite', new_tab=False, browser_session=browser_session)
        # Brief settle delay after navigation before reading DOM state.
        await asyncio.sleep(0.3)
        await browser_session.get_browser_state_summary()
        input_index = await browser_session.get_index_by_id('search')
        assert input_index is not None, 'Could not find search input'
        result = await tools.input(index=input_index, text='hello', browser_session=browser_session)
        assert isinstance(result, ActionResult)
        assert result.extracted_content is not None
        assert 'differs from typed text' in result.extracted_content, (
            f'Expected mismatch note in extracted_content, got: {result.extracted_content}'
        )

    async def test_combobox_field_detected(self, tools: Tools, browser_session: BrowserSession, base_url: str):
        """Type into a combobox field. Assert the ActionResult includes autocomplete guidance."""
        await tools.navigate(url=f'{base_url}/combobox-field', new_tab=False, browser_session=browser_session)
        await asyncio.sleep(0.3)
        await browser_session.get_browser_state_summary()
        combo_index = await browser_session.get_index_by_id('combo')
        assert combo_index is not None, 'Could not find combobox input'
        result = await tools.input(index=combo_index, text='test', browser_session=browser_session)
        assert isinstance(result, ActionResult)
        assert result.extracted_content is not None
        assert 'autocomplete field' in result.extracted_content, (
            f'Expected autocomplete guidance in extracted_content, got: {result.extracted_content}'
        )

    async def test_datalist_field_detected(self, tools: Tools, browser_session: BrowserSession, base_url: str):
        """Type into a datalist-backed field. Assert the ActionResult includes autocomplete guidance."""
        await tools.navigate(url=f'{base_url}/datalist-field', new_tab=False, browser_session=browser_session)
        await asyncio.sleep(0.3)
        await browser_session.get_browser_state_summary()
        city_index = await browser_session.get_index_by_id('city')
        assert city_index is not None, 'Could not find datalist input'
        result = await tools.input(index=city_index, text='New', browser_session=browser_session)
        assert isinstance(result, ActionResult)
        assert result.extracted_content is not None
        assert 'autocomplete field' in result.extracted_content, (
            f'Expected autocomplete guidance in extracted_content, got: {result.extracted_content}'
        )

    async def test_normal_input_no_false_positive(self, tools: Tools, browser_session: BrowserSession, base_url: str):
        """Type into a plain input. Assert the ActionResult does NOT contain autocomplete guidance."""
        await tools.navigate(url=f'{base_url}/normal-input', new_tab=False, browser_session=browser_session)
        await asyncio.sleep(0.3)
        await browser_session.get_browser_state_summary()
        plain_index = await browser_session.get_index_by_id('plain')
        assert plain_index is not None, 'Could not find plain input'
        result = await tools.input(index=plain_index, text='hello', browser_session=browser_session)
        assert isinstance(result, ActionResult)
        assert result.extracted_content is not None
        assert 'autocomplete field' not in result.extracted_content, (
            f'Got false positive autocomplete guidance on plain input: {result.extracted_content}'
        )

    async def test_sensitive_data_skips_value_verification(self, tools: Tools, browser_session: BrowserSession, base_url: str):
        """Type sensitive data into the rewrite field. Assert no 'differs from typed text' note appears."""
        await tools.navigate(url=f'{base_url}/autocomplete-rewrite', new_tab=False, browser_session=browser_session)
        await asyncio.sleep(0.3)
        await browser_session.get_browser_state_summary()
        input_index = await browser_session.get_index_by_id('search')
        assert input_index is not None, 'Could not find search input'
        # Use tools.act() with sensitive_data to trigger the sensitive code path
        result = await tools.input(
            index=input_index,
            text='secret123',
            browser_session=browser_session,
            sensitive_data={'password': 'secret123'},
        )
        assert isinstance(result, ActionResult)
        assert result.extracted_content is not None
        assert 'differs from typed text' not in result.extracted_content, (
            f'Sensitive data should not show value mismatch: {result.extracted_content}'
        )

    async def test_prefilled_input_cleared_by_default(self, tools: Tools, browser_session: BrowserSession, base_url: str):
        """Type into a pre-filled input with clear=True (default). Field should contain only the new text."""
        await tools.navigate(url=f'{base_url}/prefilled-input', new_tab=False, browser_session=browser_session)
        await asyncio.sleep(0.3)
        await browser_session.get_browser_state_summary()
        idx = await browser_session.get_index_by_id('prefilled')
        assert idx is not None, 'Could not find prefilled input'
        result = await tools.input(index=idx, text='new value', browser_session=browser_session)
        assert isinstance(result, ActionResult)
        assert result.error is None, f'Input action failed: {result.error}'
        # Read back the actual DOM value via CDP
        cdp_session = await browser_session.get_or_create_cdp_session()
        readback = await cdp_session.cdp_client.send.Runtime.evaluate(
            params={'expression': "document.getElementById('prefilled').value"},
            session_id=cdp_session.session_id,
        )
        actual = readback.get('result', {}).get('value', '')
        assert actual == 'new value', f'Expected "new value", got "{actual}" — clear=True did not remove old text'

    async def test_prefilled_input_append_with_clear_false(self, tools: Tools, browser_session: BrowserSession, base_url: str):
        """Type into a pre-filled input with clear=False. Field should contain old + new text."""
        await tools.navigate(url=f'{base_url}/prefilled-input', new_tab=False, browser_session=browser_session)
        await asyncio.sleep(0.3)
        await browser_session.get_browser_state_summary()
        idx = await browser_session.get_index_by_id('prefilled')
        assert idx is not None, 'Could not find prefilled input'
        result = await tools.input(index=idx, text=' appended', clear=False, browser_session=browser_session)
        assert isinstance(result, ActionResult)
        assert result.error is None, f'Input action failed: {result.error}'
        # Read back the actual DOM value via CDP
        cdp_session = await browser_session.get_or_create_cdp_session()
        readback = await cdp_session.cdp_client.send.Runtime.evaluate(
            params={'expression': "document.getElementById('prefilled').value"},
            session_id=cdp_session.session_id,
        )
        actual = readback.get('result', {}).get('value', '')
        assert 'old value' in actual and 'appended' in actual, f'Expected old text + appended text, got "{actual}"'

    async def test_concatenation_retry_on_sticky_field(self, tools: Tools, browser_session: BrowserSession, base_url: str):
        """Type into a field where clearing is resisted by JS. The retry should fix the value."""
        await tools.navigate(url=f'{base_url}/sticky-input', new_tab=False, browser_session=browser_session)
        await asyncio.sleep(0.3)
        await browser_session.get_browser_state_summary()
        idx = await browser_session.get_index_by_id('sticky')
        assert idx is not None, 'Could not find sticky input'
        result = await tools.input(index=idx, text='typed_text', browser_session=browser_session)
        assert isinstance(result, ActionResult)
        assert result.error is None, f'Input action failed: {result.error}'
        # The retry mechanism uses a native setter to bypass the event listener.
        # Read back the final DOM value.
        cdp_session = await browser_session.get_or_create_cdp_session()
        readback = await cdp_session.cdp_client.send.Runtime.evaluate(
            params={'expression': "document.getElementById('sticky').value"},
            session_id=cdp_session.session_id,
        )
        actual = readback.get('result', {}).get('value', '')
        # The retry should have set the value to just "typed_text" via the native setter.
        # Even if the event listener fires on the retry's dispatched events, the native setter
        # bypasses instance-level interception. The value may or may not be perfect depending
        # on how the JS listener interacts, but it should not be "prefix_typed_text" (raw concatenation).
        assert actual != 'prefix_typed_text', f'Got raw concatenation "{actual}" — retry should have prevented this'

    async def test_combobox_field_adds_delay(self, tools: Tools, browser_session: BrowserSession, base_url: str):
        """Typing into a combobox (role=combobox) field should take >= 400ms due to the mechanical delay."""
        import time

        await tools.navigate(url=f'{base_url}/combobox-field', new_tab=False, browser_session=browser_session)
        await asyncio.sleep(0.3)
        await browser_session.get_browser_state_summary()
        combo_idx = await browser_session.get_index_by_id('combo')
        assert combo_idx is not None
        t0 = time.monotonic()
        await tools.input(index=combo_idx, text='hi', browser_session=browser_session)
        duration = time.monotonic() - t0
        # The 400ms sleep is a hard floor — total duration must exceed it
        assert duration >= 0.4, f'Combobox delay not present: input took only {duration:.3f}s (expected >= 0.4s)'

    async def test_datalist_field_no_delay(self, tools: Tools, browser_session: BrowserSession, base_url: str):
        """Native datalist fields should NOT get the 400ms delay — browser handles them instantly."""
        import time

        await tools.navigate(url=f'{base_url}/datalist-field', new_tab=False, browser_session=browser_session)
        await asyncio.sleep(0.3)
        await browser_session.get_browser_state_summary()
        city_idx = await browser_session.get_index_by_id('city')
        assert city_idx is not None
        t0 = time.monotonic()
        await tools.input(index=city_idx, text='Chi', browser_session=browser_session)
        duration = time.monotonic() - t0
        # Datalist fields should complete without the 400ms tax.
        # Normal typing for 3 chars takes well under 400ms.
        assert duration < 0.4, f'Datalist field got unexpected delay: {duration:.3f}s (should be < 0.4s)'
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/interactions/test_autocomplete_interaction.py",
"license": "MIT License",
"lines": 318,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/test_action_loop_detection.py | """Tests for action loop detection — behavioral cycle breaking (PR #4)."""
from browser_use.agent.service import Agent
from browser_use.agent.views import (
ActionLoopDetector,
PageFingerprint,
compute_action_hash,
)
from browser_use.llm.messages import UserMessage
from tests.ci.conftest import create_mock_llm
def _get_context_messages(agent: Agent) -> list[str]:
    """Collect the plain-string contents of the agent's context UserMessages."""
    texts: list[str] = []
    for message in agent._message_manager.state.history.context_messages:
        if isinstance(message, UserMessage) and isinstance(message.content, str):
            texts.append(message.content)
    return texts
# ─── Action hash normalization tests ─────────────────────────────────────────
def test_search_normalization_ignores_keyword_order():
    """Reordering the keywords of a search query must not change its hash."""
    assert compute_action_hash('search', {'query': 'site:example.com answers votes'}) == compute_action_hash(
        'search', {'query': 'votes answers site:example.com'}
    )


def test_search_normalization_ignores_case():
    """Search hashing is case-insensitive."""
    assert compute_action_hash('search', {'query': 'Python Tutorial'}) == compute_action_hash(
        'search', {'query': 'python tutorial'}
    )


def test_search_normalization_ignores_punctuation():
    """Punctuation is stripped before search queries are hashed."""
    assert compute_action_hash('search', {'query': 'site:hinative.com "answers" votes'}) == compute_action_hash(
        'search', {'query': 'site:hinative.com answers, votes'}
    )


def test_search_normalization_deduplicates_tokens():
    """Repeated query tokens collapse to a single occurrence before hashing."""
    assert compute_action_hash('search', {'query': 'python python tutorial'}) == compute_action_hash(
        'search', {'query': 'python tutorial'}
    )


def test_search_different_queries_produce_different_hashes():
    """Fundamentally different search queries must NOT hash to the same value."""
    assert compute_action_hash('search', {'query': 'python web scraping'}) != compute_action_hash(
        'search', {'query': 'javascript testing framework'}
    )
def test_click_same_index_same_hash():
"""Clicking the same element index produces the same hash."""
h1 = compute_action_hash('click', {'index': 5})
h2 = compute_action_hash('click', {'index': 5})
assert h1 == h2
def test_click_different_index_different_hash():
"""Clicking different element indices produces different hashes."""
h1 = compute_action_hash('click', {'index': 5})
h2 = compute_action_hash('click', {'index': 12})
assert h1 != h2
def test_input_same_element_same_text():
"""Same element + same text = same hash."""
h1 = compute_action_hash('input', {'index': 3, 'text': 'hello world', 'clear': True})
h2 = compute_action_hash('input', {'index': 3, 'text': 'hello world', 'clear': False})
assert h1 == h2 # clear flag doesn't affect hash
def test_input_different_text_different_hash():
"""Same element but different text = different hash."""
h1 = compute_action_hash('input', {'index': 3, 'text': 'hello'})
h2 = compute_action_hash('input', {'index': 3, 'text': 'goodbye'})
assert h1 != h2
def test_navigate_same_url_same_hash():
"""Navigating to the exact same URL produces the same hash."""
h1 = compute_action_hash('navigate', {'url': 'https://example.com/page1'})
h2 = compute_action_hash('navigate', {'url': 'https://example.com/page1'})
assert h1 == h2
def test_navigate_different_paths_different_hash():
"""Navigating to different paths on the same domain produces different hashes — this is genuine exploration."""
h1 = compute_action_hash('navigate', {'url': 'https://example.com/page1'})
h2 = compute_action_hash('navigate', {'url': 'https://example.com/page2'})
assert h1 != h2
def test_navigate_different_domain_different_hash():
"""Navigate to different domains produces different hashes."""
h1 = compute_action_hash('navigate', {'url': 'https://example.com/page1'})
h2 = compute_action_hash('navigate', {'url': 'https://other.com/page1'})
assert h1 != h2
def test_scroll_direction_matters():
"""Scroll up and scroll down are different actions."""
h1 = compute_action_hash('scroll', {'down': True, 'index': None})
h2 = compute_action_hash('scroll', {'down': False, 'index': None})
assert h1 != h2
def test_scroll_different_elements_different_hash():
"""Scrolling different elements produces different hashes."""
h1 = compute_action_hash('scroll', {'down': True, 'index': 5})
h2 = compute_action_hash('scroll', {'down': True, 'index': 10})
assert h1 != h2
def test_scroll_same_element_same_hash():
"""Scrolling the same element in the same direction produces the same hash."""
h1 = compute_action_hash('scroll', {'down': True, 'index': 5})
h2 = compute_action_hash('scroll', {'down': True, 'index': 5})
assert h1 == h2
def test_different_action_types_different_hashes():
"""Different action types always produce different hashes."""
h1 = compute_action_hash('click', {'index': 5})
h2 = compute_action_hash('scroll', {'down': True, 'index': None})
h3 = compute_action_hash('search', {'query': 'test'})
assert len({h1, h2, h3}) == 3
# ─── ActionLoopDetector unit tests ───────────────────────────────────────────


HINATIVE_QUERY = 'site:hinative.com answers votes'


def _detector_after_searches(count: int, window_size: int = 20) -> ActionLoopDetector:
    """Build a detector that has recorded `count` identical search actions."""
    detector = ActionLoopDetector(window_size=window_size)
    for _ in range(count):
        detector.record_action('search', {'query': HINATIVE_QUERY})
    return detector


def test_detector_no_nudge_for_diverse_actions():
    """No nudge when actions are all different."""
    detector = ActionLoopDetector(window_size=20)
    diverse_actions = [
        ('click', {'index': 1}),
        ('scroll', {'down': True, 'index': None}),
        ('click', {'index': 2}),
        ('search', {'query': 'something'}),
        ('navigate', {'url': 'https://example.com'}),
    ]
    for action_type, params in diverse_actions:
        detector.record_action(action_type, params)
    assert detector.get_nudge_message() is None


def test_detector_nudge_at_5_repeats():
    """Nudge triggers at 5 repetitions of the same action."""
    nudge = _detector_after_searches(5).get_nudge_message()
    assert nudge is not None
    assert 'repeated a similar action' in nudge
    assert '5 times' in nudge


def test_detector_no_nudge_at_4_repeats():
    """No nudge at only 4 repetitions (below threshold)."""
    assert _detector_after_searches(4).get_nudge_message() is None


def test_detector_nudge_escalates_at_8_repeats():
    """Stronger nudge at 8 repetitions."""
    nudge = _detector_after_searches(8).get_nudge_message()
    assert nudge is not None
    assert 'still making progress' in nudge
    assert '8 times' in nudge


def test_detector_nudge_escalates_at_12_repeats():
    """Most urgent nudge at 12 repetitions."""
    nudge = _detector_after_searches(12).get_nudge_message()
    assert nudge is not None
    assert 'making progress with each repetition' in nudge
    assert '12 times' in nudge


def test_detector_critical_message_no_done_directive():
    """Critical nudge should NOT tell the agent to call done — just a gentle heads up."""
    nudge = _detector_after_searches(12).get_nudge_message()
    assert nudge is not None
    assert 'done action' not in nudge
    assert 'different approach' in nudge


def test_detector_first_nudge_no_cannot_complete():
    """First nudge should NOT say task 'cannot be completed' — just raise awareness."""
    nudge = _detector_after_searches(5).get_nudge_message()
    assert nudge is not None
    assert 'cannot be completed' not in nudge
    assert 'reconsidering your approach' in nudge


def test_detector_window_slides():
    """Old actions fall out of the window."""
    detector = ActionLoopDetector(window_size=10)
    # Fill the window with one repeated click.
    for _ in range(5):
        detector.record_action('click', {'index': 7})
    assert detector.max_repetition_count == 5
    # Ten distinct clicks evict the five repeated ones from the window.
    for offset in range(10):
        detector.record_action('click', {'index': 100 + offset})
    assert detector.max_repetition_count < 5
    assert detector.get_nudge_message() is None


def test_detector_search_variations_detected_as_same():
    """Minor variations of the same search (the hinative pattern) are detected as repetition."""
    detector = ActionLoopDetector(window_size=20)
    # Keyword reorderings the agent typically produces while looping.
    keyword_orderings = [
        'site:hinative.com answers votes questions',
        'site:hinative.com questions answers votes',
        'site:hinative.com votes answers questions',
        'site:hinative.com questions votes answers',
        'site:hinative.com answers questions votes',
    ]
    for query in keyword_orderings:
        detector.record_action('search', {'query': query})
    assert detector.max_repetition_count == 5
    assert detector.get_nudge_message() is not None
# ─── Page stagnation detection tests ─────────────────────────────────────────


def test_page_stagnation_no_nudge_when_pages_change():
    """No stagnation nudge when page content changes each step."""
    detector = ActionLoopDetector(window_size=20)
    for content, element_count in (('page content 1', 50), ('page content 2', 55), ('page content 3', 60)):
        detector.record_page_state('https://example.com', content, element_count)
    assert detector.consecutive_stagnant_pages == 0
    assert detector.get_nudge_message() is None


def test_page_stagnation_nudge_at_5_identical_pages():
    """Stagnation nudge fires after 5 consecutive identical page states."""
    detector = ActionLoopDetector(window_size=20)
    # 6 recordings = 1 baseline + 5 stagnant repeats.
    for _ in range(6):
        detector.record_page_state('https://example.com', 'same content', 50)
    assert detector.consecutive_stagnant_pages >= 5
    nudge = detector.get_nudge_message()
    assert nudge is not None
    assert 'page content has not changed' in nudge


def test_page_stagnation_no_nudge_at_4_identical_pages():
    """No stagnation nudge at only 4 consecutive identical pages (below threshold)."""
    detector = ActionLoopDetector(window_size=20)
    # 5 recordings = 1 baseline + 4 stagnant repeats.
    for _ in range(5):
        detector.record_page_state('https://example.com', 'same content', 50)
    assert detector.consecutive_stagnant_pages == 4
    assert detector.get_nudge_message() is None


def test_page_stagnation_resets_on_change():
    """Stagnation counter resets when page content changes."""
    detector = ActionLoopDetector(window_size=20)
    for _ in range(3):
        detector.record_page_state('https://example.com', 'same content', 50)
    assert detector.consecutive_stagnant_pages == 2
    # A content change must zero the counter.
    detector.record_page_state('https://example.com', 'different content', 55)
    assert detector.consecutive_stagnant_pages == 0


def test_combined_loop_and_stagnation():
    """Both action loop and page stagnation messages appear together."""
    detector = ActionLoopDetector(window_size=20)
    for _ in range(8):  # 8 repeats → the stronger loop-warning tier
        detector.record_action('click', {'index': 7})
    for _ in range(6):  # 1 baseline + 5 stagnant page states
        detector.record_page_state('https://example.com', 'same', 50)
    nudge = detector.get_nudge_message()
    assert nudge is not None
    assert 'still making progress' in nudge
    assert 'page content has not changed' in nudge
# ─── PageFingerprint tests ───────────────────────────────────────────────────


def _fingerprint(url='https://example.com', content='hello world', element_count=50):
    """Build a PageFingerprint, varying only the field under test."""
    return PageFingerprint.from_browser_state(url, content, element_count)


def test_page_fingerprint_same_content_equal():
    """Same content produces equal fingerprints."""
    assert _fingerprint() == _fingerprint()


def test_page_fingerprint_different_content_not_equal():
    """Different content produces different fingerprints."""
    assert _fingerprint() != _fingerprint(content='goodbye world')


def test_page_fingerprint_different_url_not_equal():
    """Different URL produces different fingerprint even with same content."""
    assert _fingerprint() != _fingerprint(url='https://other.com')


def test_page_fingerprint_different_element_count_not_equal():
    """Different element count produces different fingerprint."""
    assert _fingerprint() != _fingerprint(element_count=51)
# ─── Agent integration tests ─────────────────────────────────────────────────


async def test_loop_nudge_injected_into_context():
    """Loop detection nudge is injected as a context message in the agent."""
    llm = create_mock_llm()
    agent = Agent(task='Test task', llm=llm)
    # Simulate 5 repeated actions (new threshold)
    for _ in range(5):
        agent.state.loop_detector.record_action('search', {'query': 'site:example.com answers'})
    agent._inject_loop_detection_nudge()
    messages = _get_context_messages(agent)
    # Exactly one nudge message should have been appended to the context.
    assert len(messages) == 1
    assert 'repeated a similar action' in messages[0]


async def test_no_loop_nudge_when_disabled():
    """No loop nudge when loop_detection_enabled is False."""
    llm = create_mock_llm()
    agent = Agent(
        task='Test task',
        llm=llm,
        loop_detection_enabled=False,
    )
    # Simulate 8 repeated actions
    for _ in range(8):
        agent.state.loop_detector.record_action('search', {'query': 'site:example.com answers'})
    agent._inject_loop_detection_nudge()
    messages = _get_context_messages(agent)
    # Even well past the threshold, the disabled flag suppresses injection.
    assert len(messages) == 0


async def test_no_loop_nudge_for_diverse_actions():
    """No loop nudge when actions are diverse."""
    llm = create_mock_llm()
    agent = Agent(task='Test task', llm=llm)
    agent.state.loop_detector.record_action('click', {'index': 1})
    agent.state.loop_detector.record_action('scroll', {'down': True, 'index': None})
    agent.state.loop_detector.record_action('click', {'index': 2})
    agent._inject_loop_detection_nudge()
    messages = _get_context_messages(agent)
    assert len(messages) == 0


async def test_loop_detector_initialized_from_settings():
    """Loop detector window size is set from agent settings."""
    llm = create_mock_llm()
    agent = Agent(task='Test task', llm=llm, loop_detection_window=30)
    assert agent.state.loop_detector.window_size == 30


async def test_loop_detector_default_window_size():
    """Loop detection default window size is 20."""
    llm = create_mock_llm()
    agent = Agent(task='Test task', llm=llm)
    # Defaults: detection enabled, window of 20 recent actions.
    assert agent.settings.loop_detection_enabled is True
    assert agent.state.loop_detector.window_size == 20
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/test_action_loop_detection.py",
"license": "MIT License",
"lines": 303,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/test_action_save_as_pdf.py | import asyncio
import tempfile
import anyio
import pytest
from pytest_httpserver import HTTPServer
from browser_use.agent.views import ActionResult
from browser_use.browser import BrowserSession
from browser_use.browser.profile import BrowserProfile
from browser_use.filesystem.file_system import FileSystem
from browser_use.tools.service import Tools
@pytest.fixture(scope='session')
def http_server():
    """Session-scoped local HTTP server serving two static pages:

    - /pdf-test: plain content used by most save_as_pdf tests.
    - /pdf-styled: CSS backgrounds for print_background assertions.
    """
    server = HTTPServer()
    server.start()
    server.expect_request('/pdf-test').respond_with_data(
        """
        <!DOCTYPE html>
        <html>
        <head><title>PDF Test Page</title></head>
        <body>
            <h1>Hello PDF</h1>
            <p>This page should be saved as a PDF document.</p>
            <ul>
                <li>Item 1</li>
                <li>Item 2</li>
                <li>Item 3</li>
            </ul>
        </body>
        </html>
        """,
        content_type='text/html',
    )
    server.expect_request('/pdf-styled').respond_with_data(
        """
        <!DOCTYPE html>
        <html>
        <head>
            <title>Styled PDF Page</title>
            <style>
                body { background-color: #f0f0f0; font-family: sans-serif; }
                h1 { color: navy; }
                .highlight { background-color: yellow; padding: 10px; }
            </style>
        </head>
        <body>
            <h1>Styled Content</h1>
            <div class="highlight">This has a background color that should appear when print_background=True.</div>
        </body>
        </html>
        """,
        content_type='text/html',
    )
    yield server
    # Teardown: stop the shared server once the whole session is done.
    server.stop()
@pytest.fixture(scope='session')
def base_url(http_server):
    """Root URL of the session-scoped test HTTP server."""
    host, port = http_server.host, http_server.port
    return f'http://{host}:{port}'
@pytest.fixture(scope='module')
async def browser_session():
    """Shared headless browser session for the whole test module.

    Teardown is wrapped in try/finally so the browser process is killed even
    if the fixture generator is closed by an exception (e.g. interrupt during
    the test run) — previously that path leaked the headless browser.
    """
    browser_session = BrowserSession(
        browser_profile=BrowserProfile(
            headless=True,
            user_data_dir=None,
            keep_alive=True,
        )
    )
    await browser_session.start()
    try:
        yield browser_session
    finally:
        await browser_session.kill()
@pytest.fixture(scope='function')
def tools():
    """Fresh Tools registry for every test function so no action state leaks."""
    return Tools()
def _get_attachments(result: ActionResult) -> list[str]:
    """Return result.attachments, asserting it is populated (pyright-safe narrowing)."""
    attachments = result.attachments
    assert attachments is not None
    return attachments
class TestSaveAsPdf:
    """Tests for the save_as_pdf action.

    Each test navigates the shared browser session to a locally served page,
    saves it as a PDF into a temporary FileSystem, and inspects the returned
    ActionResult plus the file written to disk.
    """

    async def test_save_as_pdf_registered(self, tools):
        """save_as_pdf action is in the default action registry."""
        assert 'save_as_pdf' in tools.registry.registry.actions
        action = tools.registry.registry.actions['save_as_pdf']
        assert action.function is not None
        assert 'PDF' in action.description

    async def test_save_as_pdf_default_filename(self, tools, browser_session, base_url):
        """save_as_pdf with no filename uses the page title."""
        await tools.navigate(url=f'{base_url}/pdf-test', new_tab=False, browser_session=browser_session)
        await asyncio.sleep(0.5)  # let the page finish rendering before printing
        with tempfile.TemporaryDirectory() as temp_dir:
            file_system = FileSystem(temp_dir)
            result = await tools.save_as_pdf(browser_session=browser_session, file_system=file_system)
            assert isinstance(result, ActionResult)
            assert result.extracted_content is not None
            assert 'Saved page as PDF' in result.extracted_content
            attachments = _get_attachments(result)
            assert len(attachments) == 1
            pdf_path = attachments[0]
            assert pdf_path.endswith('.pdf')
            assert await anyio.Path(pdf_path).exists()
            # Verify it's actually a PDF (starts with %PDF magic bytes)
            header = await anyio.Path(pdf_path).read_bytes()
            assert header[:5] == b'%PDF-', f'File does not start with PDF magic bytes: {header[:5]!r}'

    async def test_save_as_pdf_custom_filename(self, tools, browser_session, base_url):
        """save_as_pdf with a custom filename uses that name."""
        await tools.navigate(url=f'{base_url}/pdf-test', new_tab=False, browser_session=browser_session)
        await asyncio.sleep(0.5)
        with tempfile.TemporaryDirectory() as temp_dir:
            file_system = FileSystem(temp_dir)
            result = await tools.save_as_pdf(
                file_name='my-report',
                browser_session=browser_session,
                file_system=file_system,
            )
            assert isinstance(result, ActionResult)
            attachments = _get_attachments(result)
            assert len(attachments) == 1
            pdf_path = attachments[0]
            assert 'my-report.pdf' in pdf_path
            assert await anyio.Path(pdf_path).exists()

    async def test_save_as_pdf_custom_filename_with_extension(self, tools, browser_session, base_url):
        """save_as_pdf doesn't double the .pdf extension."""
        await tools.navigate(url=f'{base_url}/pdf-test', new_tab=False, browser_session=browser_session)
        await asyncio.sleep(0.5)
        with tempfile.TemporaryDirectory() as temp_dir:
            file_system = FileSystem(temp_dir)
            result = await tools.save_as_pdf(
                file_name='already.pdf',
                browser_session=browser_session,
                file_system=file_system,
            )
            assert isinstance(result, ActionResult)
            attachments = _get_attachments(result)
            pdf_path = attachments[0]
            # Should not be "already.pdf.pdf"
            assert pdf_path.endswith('already.pdf')
            assert not pdf_path.endswith('.pdf.pdf')

    async def test_save_as_pdf_duplicate_filename(self, tools, browser_session, base_url):
        """save_as_pdf increments filename when a duplicate exists."""
        await tools.navigate(url=f'{base_url}/pdf-test', new_tab=False, browser_session=browser_session)
        await asyncio.sleep(0.5)
        with tempfile.TemporaryDirectory() as temp_dir:
            file_system = FileSystem(temp_dir)
            # Save first PDF
            result1 = await tools.save_as_pdf(
                file_name='duplicate',
                browser_session=browser_session,
                file_system=file_system,
            )
            attachments1 = _get_attachments(result1)
            assert await anyio.Path(attachments1[0]).exists()
            assert attachments1[0].endswith('duplicate.pdf')
            # Save second PDF with same name
            result2 = await tools.save_as_pdf(
                file_name='duplicate',
                browser_session=browser_session,
                file_system=file_system,
            )
            attachments2 = _get_attachments(result2)
            assert await anyio.Path(attachments2[0]).exists()
            # The second save must not overwrite the first — " (1)" suffix expected.
            assert 'duplicate (1).pdf' in attachments2[0]

    async def test_save_as_pdf_landscape(self, tools, browser_session, base_url):
        """save_as_pdf with landscape=True produces a valid PDF."""
        await tools.navigate(url=f'{base_url}/pdf-test', new_tab=False, browser_session=browser_session)
        await asyncio.sleep(0.5)
        with tempfile.TemporaryDirectory() as temp_dir:
            file_system = FileSystem(temp_dir)
            result = await tools.save_as_pdf(
                file_name='landscape-test',
                landscape=True,
                browser_session=browser_session,
                file_system=file_system,
            )
            assert isinstance(result, ActionResult)
            attachments = _get_attachments(result)
            assert await anyio.Path(attachments[0]).exists()
            header = await anyio.Path(attachments[0]).read_bytes()
            assert header[:5] == b'%PDF-'

    async def test_save_as_pdf_a4_format(self, tools, browser_session, base_url):
        """save_as_pdf with paper_format='A4' produces a valid PDF."""
        await tools.navigate(url=f'{base_url}/pdf-test', new_tab=False, browser_session=browser_session)
        await asyncio.sleep(0.5)
        with tempfile.TemporaryDirectory() as temp_dir:
            file_system = FileSystem(temp_dir)
            result = await tools.save_as_pdf(
                file_name='a4-test',
                paper_format='A4',
                browser_session=browser_session,
                file_system=file_system,
            )
            assert isinstance(result, ActionResult)
            attachments = _get_attachments(result)
            assert await anyio.Path(attachments[0]).exists()

    async def test_save_as_pdf_with_background(self, tools, browser_session, base_url):
        """save_as_pdf with print_background=True on a styled page produces a valid PDF."""
        await tools.navigate(url=f'{base_url}/pdf-styled', new_tab=False, browser_session=browser_session)
        await asyncio.sleep(0.5)
        with tempfile.TemporaryDirectory() as temp_dir:
            file_system = FileSystem(temp_dir)
            result = await tools.save_as_pdf(
                file_name='styled-with-bg',
                print_background=True,
                browser_session=browser_session,
                file_system=file_system,
            )
            assert isinstance(result, ActionResult)
            attachments = _get_attachments(result)
            pdf_path = attachments[0]
            assert await anyio.Path(pdf_path).exists()
            # Verify file size is non-trivial (has actual rendered content)
            stat = await anyio.Path(pdf_path).stat()
            assert stat.st_size > 1000, f'PDF seems too small ({stat.st_size} bytes), may be empty'

    async def test_save_as_pdf_param_model_schema(self):
        """SaveAsPdfAction schema exposes the right fields with defaults."""
        from browser_use.tools.views import SaveAsPdfAction

        schema = SaveAsPdfAction.model_json_schema()
        props = schema['properties']
        assert 'file_name' in props
        assert 'print_background' in props
        assert 'landscape' in props
        assert 'scale' in props
        assert 'paper_format' in props
        # Check defaults
        assert props['print_background']['default'] is True
        assert props['landscape']['default'] is False
        assert props['scale']['default'] == 1.0
        assert props['paper_format']['default'] == 'Letter'
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/test_action_save_as_pdf.py",
"license": "MIT License",
"lines": 226,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/test_agent_planning.py | """Tests for inline task planning feature.
Covers: plan generation, step advancement, replanning, rendering,
disabled planning, replan nudge, flash mode schema, and edge cases.
"""
import json
from browser_use.agent.views import (
AgentOutput,
PlanItem,
)
from browser_use.tools.service import Tools
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------


def _make_agent_output(**overrides) -> AgentOutput:
    """Build a minimal AgentOutput, optionally carrying plan fields from *overrides*."""
    action_model = Tools().registry.create_action_model()
    output_type = AgentOutput.type_with_custom_actions(action_model)

    payload: dict = {
        'evaluation_previous_goal': 'Success',
        'memory': 'mem',
        'next_goal': 'goal',
        'action': [{'done': {'text': 'ok', 'success': True}}],
    }
    # Only the plan-related overrides are honoured; anything else is dropped.
    for key in ('current_plan_item', 'plan_update'):
        if key in overrides:
            payload[key] = overrides[key]

    return output_type.model_validate_json(json.dumps(payload))


def _make_agent(browser_session, mock_llm, **kwargs):
    """Create an Agent with defaults suitable for unit tests."""
    from browser_use import Agent

    return Agent(task='Test task', llm=mock_llm, browser_session=browser_session, **kwargs)
# ---------------------------------------------------------------------------
# 1. Plan generation from plan_update on step 1
# ---------------------------------------------------------------------------


async def test_plan_generation_from_plan_update(browser_session, mock_llm):
    """plan_update in model output seeds a new plan with the first item current."""
    agent = _make_agent(browser_session, mock_llm)
    output = _make_agent_output(plan_update=['Navigate to page', 'Search for item', 'Extract price'])

    agent._update_plan_from_model_output(output)

    plan = agent.state.plan
    assert plan is not None
    assert len(plan) == 3
    assert [item.status for item in plan] == ['current', 'pending', 'pending']
    assert agent.state.current_plan_item_index == 0
    assert agent.state.plan_generation_step == agent.state.n_steps


# ---------------------------------------------------------------------------
# 2. Plan step advancement via current_plan_item
# ---------------------------------------------------------------------------


async def test_plan_step_advancement(browser_session, mock_llm):
    """current_plan_item marks earlier items done and the target item current."""
    agent = _make_agent(browser_session, mock_llm)
    agent.state.plan = [
        PlanItem(text='Step A', status='current'),
        PlanItem(text='Step B'),
        PlanItem(text='Step C'),
    ]
    agent.state.current_plan_item_index = 0

    agent._update_plan_from_model_output(_make_agent_output(current_plan_item=2))

    assert [item.status for item in agent.state.plan] == ['done', 'done', 'current']
    assert agent.state.current_plan_item_index == 2
# ---------------------------------------------------------------------------
# 3. Replanning replaces old plan
# ---------------------------------------------------------------------------


async def test_replanning_replaces_old_plan(browser_session, mock_llm):
    """A later plan_update discards the previous plan entirely."""
    agent = _make_agent(browser_session, mock_llm)
    agent.state.plan = [
        PlanItem(text='Old step 1', status='done'),
        PlanItem(text='Old step 2', status='current'),
    ]
    agent.state.current_plan_item_index = 1
    agent.state.plan_generation_step = 1

    agent._update_plan_from_model_output(_make_agent_output(plan_update=['New step A', 'New step B', 'New step C']))

    assert len(agent.state.plan) == 3
    first_item = agent.state.plan[0]
    assert first_item.text == 'New step A'
    assert first_item.status == 'current'
    assert agent.state.current_plan_item_index == 0


# ---------------------------------------------------------------------------
# 4. _render_plan_description output format
# ---------------------------------------------------------------------------


async def test_render_plan_description(browser_session, mock_llm):
    """Each status renders with its own checkbox marker and index."""
    agent = _make_agent(browser_session, mock_llm)
    agent.state.plan = [
        PlanItem(text='Navigate to search page', status='done'),
        PlanItem(text='Search for "laptop"', status='current'),
        PlanItem(text='Extract price from results', status='pending'),
        PlanItem(text='Skipped step', status='skipped'),
    ]

    rendered = agent._render_plan_description()

    assert rendered is not None
    assert rendered.split('\n') == [
        '[x] 0: Navigate to search page',
        '[>] 1: Search for "laptop"',
        '[ ] 2: Extract price from results',
        '[-] 3: Skipped step',
    ]


# ---------------------------------------------------------------------------
# 5. Planning disabled returns None
# ---------------------------------------------------------------------------


async def test_planning_disabled_returns_none(browser_session, mock_llm):
    """With enable_planning=False, rendering yields None and updates are no-ops."""
    agent = _make_agent(browser_session, mock_llm, enable_planning=False)
    agent.state.plan = [PlanItem(text='Should not render')]
    assert agent._render_plan_description() is None

    # Updates must also be ignored while planning is disabled.
    agent._update_plan_from_model_output(_make_agent_output(plan_update=['New plan']))
    assert agent.state.plan[0].text == 'Should not render'
# ---------------------------------------------------------------------------
# 6. Replan nudge injection at threshold
# ---------------------------------------------------------------------------


async def test_replan_nudge_injected_at_threshold(browser_session, mock_llm):
    """Hitting the stall threshold appends exactly one REPLAN SUGGESTED message."""
    agent = _make_agent(browser_session, mock_llm, planning_replan_on_stall=3)
    agent.state.plan = [PlanItem(text='Step 1', status='current')]
    agent.state.consecutive_failures = 3

    before = len(agent._message_manager.state.history.context_messages)
    agent._inject_replan_nudge()

    context = agent._message_manager.state.history.context_messages
    assert len(context) == before + 1
    newest = context[-1]
    assert isinstance(newest.content, str) and 'REPLAN SUGGESTED' in newest.content


# ---------------------------------------------------------------------------
# 7. No nudge below threshold
# ---------------------------------------------------------------------------


async def test_no_replan_nudge_below_threshold(browser_session, mock_llm):
    """Fewer consecutive failures than the threshold injects nothing."""
    agent = _make_agent(browser_session, mock_llm, planning_replan_on_stall=3)
    agent.state.plan = [PlanItem(text='Step 1', status='current')]
    agent.state.consecutive_failures = 2

    before = len(agent._message_manager.state.history.context_messages)
    agent._inject_replan_nudge()
    assert len(agent._message_manager.state.history.context_messages) == before


# ---------------------------------------------------------------------------
# 8. Flash mode schema excludes plan fields
# ---------------------------------------------------------------------------


async def test_flash_mode_schema_excludes_plan_fields():
    """Flash-mode output schema drops plan and thinking fields entirely."""
    action_model = Tools().registry.create_action_model()
    schema = AgentOutput.type_with_custom_actions_flash_mode(action_model).model_json_schema()
    for field in ('current_plan_item', 'plan_update', 'thinking'):
        assert field not in schema['properties']


# ---------------------------------------------------------------------------
# 9. Full mode schema includes plan fields as optional
# ---------------------------------------------------------------------------


async def test_full_mode_schema_includes_plan_fields_optional():
    """Full-mode schema exposes plan fields but does not require them."""
    action_model = Tools().registry.create_action_model()
    schema = AgentOutput.type_with_custom_actions(action_model).model_json_schema()
    required = schema.get('required', [])
    for field in ('current_plan_item', 'plan_update'):
        assert field in schema['properties']
        assert field not in required
# ---------------------------------------------------------------------------
# 10. Out-of-bounds current_plan_item handled gracefully
# ---------------------------------------------------------------------------


async def test_out_of_bounds_plan_step_clamped(browser_session, mock_llm):
    """current_plan_item outside the plan is clamped to a valid index."""
    agent = _make_agent(browser_session, mock_llm)

    # Far too high: clamp to the last valid index.
    agent.state.plan = [
        PlanItem(text='Step A', status='current'),
        PlanItem(text='Step B'),
    ]
    agent.state.current_plan_item_index = 0
    agent._update_plan_from_model_output(_make_agent_output(current_plan_item=999))
    assert agent.state.current_plan_item_index == 1
    assert agent.state.plan[0].status == 'done'
    assert agent.state.plan[1].status == 'current'

    # Negative: clamp to 0.
    agent.state.plan = [
        PlanItem(text='Step X', status='current'),
        PlanItem(text='Step Y'),
    ]
    agent.state.current_plan_item_index = 1
    agent._update_plan_from_model_output(_make_agent_output(current_plan_item=-5))
    assert agent.state.current_plan_item_index == 0
    assert agent.state.plan[0].status == 'current'


# ---------------------------------------------------------------------------
# 11. No plan means render returns None
# ---------------------------------------------------------------------------


async def test_no_plan_render_returns_none(browser_session, mock_llm):
    """Without a plan there is nothing to render."""
    agent = _make_agent(browser_session, mock_llm)
    assert agent.state.plan is None
    assert agent._render_plan_description() is None
# ---------------------------------------------------------------------------
# 12. Replan nudge disabled when planning_replan_on_stall=0
# ---------------------------------------------------------------------------
async def test_replan_nudge_disabled_when_zero(browser_session, mock_llm):
	"""planning_replan_on_stall=0 disables the replan nudge regardless of failures."""
	agent = _make_agent(browser_session, mock_llm, planning_replan_on_stall=0)
	agent.state.plan = [PlanItem(text='Step 1', status='current')]
	agent.state.consecutive_failures = 100  # irrelevant once the feature is off
	before = len(agent._message_manager.state.history.context_messages)
	agent._inject_replan_nudge()
	after = len(agent._message_manager.state.history.context_messages)
	assert after == before
# ---------------------------------------------------------------------------
# 13. No nudge when no plan exists
# ---------------------------------------------------------------------------
async def test_no_replan_nudge_without_plan(browser_session, mock_llm):
	"""Even above the stall threshold, no replan nudge is injected without a plan."""
	agent = _make_agent(browser_session, mock_llm, planning_replan_on_stall=1)
	agent.state.consecutive_failures = 5  # above threshold, but there is no plan
	before = len(agent._message_manager.state.history.context_messages)
	agent._inject_replan_nudge()
	after = len(agent._message_manager.state.history.context_messages)
	assert after == before
# ---------------------------------------------------------------------------
# 14. Exploration nudge fires when no plan exists after N steps
# ---------------------------------------------------------------------------
async def test_exploration_nudge_fires_after_limit(browser_session, mock_llm):
	"""Once n_steps reaches the exploration limit with no plan, a nudge is injected."""
	agent = _make_agent(browser_session, mock_llm, planning_exploration_limit=3)
	agent.state.plan = None
	agent.state.n_steps = 3  # exactly at the limit
	before = len(agent._message_manager.state.history.context_messages)
	agent._inject_exploration_nudge()
	context = agent._message_manager.state.history.context_messages
	assert len(context) == before + 1
	newest = context[-1]
	# The injected message is plain text flagged as a planning nudge.
	assert isinstance(newest.content, str) and 'PLANNING NUDGE' in newest.content
# ---------------------------------------------------------------------------
# 15. No exploration nudge when plan already exists
# ---------------------------------------------------------------------------
async def test_no_exploration_nudge_when_plan_exists(browser_session, mock_llm):
	"""An existing plan suppresses the exploration nudge even well past the limit."""
	agent = _make_agent(browser_session, mock_llm, planning_exploration_limit=3)
	agent.state.plan = [PlanItem(text='Step 1', status='current')]
	agent.state.n_steps = 10  # far beyond the limit
	before = len(agent._message_manager.state.history.context_messages)
	agent._inject_exploration_nudge()
	after = len(agent._message_manager.state.history.context_messages)
	assert after == before
# ---------------------------------------------------------------------------
# 16. No exploration nudge below the limit
# ---------------------------------------------------------------------------
async def test_no_exploration_nudge_below_limit(browser_session, mock_llm):
	"""No exploration nudge before n_steps reaches the configured limit."""
	agent = _make_agent(browser_session, mock_llm, planning_exploration_limit=5)
	agent.state.plan = None
	agent.state.n_steps = 4  # one step short of the limit
	before = len(agent._message_manager.state.history.context_messages)
	agent._inject_exploration_nudge()
	after = len(agent._message_manager.state.history.context_messages)
	assert after == before
# ---------------------------------------------------------------------------
# 17. Exploration nudge disabled when planning_exploration_limit=0
# ---------------------------------------------------------------------------
async def test_exploration_nudge_disabled_when_zero(browser_session, mock_llm):
	"""planning_exploration_limit=0 disables the exploration nudge completely."""
	agent = _make_agent(browser_session, mock_llm, planning_exploration_limit=0)
	agent.state.plan = None
	agent.state.n_steps = 100  # irrelevant once the feature is off
	before = len(agent._message_manager.state.history.context_messages)
	agent._inject_exploration_nudge()
	after = len(agent._message_manager.state.history.context_messages)
	assert after == before
# ---------------------------------------------------------------------------
# 18. Exploration nudge disabled when enable_planning=False
# ---------------------------------------------------------------------------
async def test_exploration_nudge_disabled_when_planning_off(browser_session, mock_llm):
	"""enable_planning=False suppresses the exploration nudge regardless of limits."""
	agent = _make_agent(browser_session, mock_llm, enable_planning=False, planning_exploration_limit=3)
	agent.state.plan = None
	agent.state.n_steps = 10  # above the limit, but planning is disabled
	before = len(agent._message_manager.state.history.context_messages)
	agent._inject_exploration_nudge()
	after = len(agent._message_manager.state.history.context_messages)
	assert after == before
# ---------------------------------------------------------------------------
# 19. Flash mode forces enable_planning=False
# ---------------------------------------------------------------------------
async def test_flash_mode_disables_planning(browser_session, mock_llm):
	"""Enabling flash mode forces enable_planning off in the agent settings."""
	agent = _make_agent(browser_session, mock_llm, flash_mode=True)
	assert agent.settings.enable_planning is False
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/test_agent_planning.py",
"license": "MIT License",
"lines": 277,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/test_budget_warning.py | """Tests for step budget warning injection (IMP-7a)."""
from browser_use.agent.service import Agent
from browser_use.agent.views import AgentStepInfo
from browser_use.llm.messages import UserMessage
from tests.ci.conftest import create_mock_llm
def _get_context_messages(agent: Agent) -> list[str]:
	"""Return the string contents of all UserMessage entries in the agent's context."""
	return [
		message.content
		for message in agent._message_manager.state.history.context_messages
		if isinstance(message, UserMessage) and isinstance(message.content, str)
	]
async def test_budget_warning_injected_at_75_percent():
	"""A budget warning is injected once the step count hits 75% of max_steps."""
	agent = Agent(task='Test task', llm=create_mock_llm())
	# step_number=74 is step 75 when 1-indexed → exactly 75/100.
	await agent._inject_budget_warning(AgentStepInfo(step_number=74, max_steps=100))
	texts = _get_context_messages(agent)
	assert len(texts) == 1
	warning = texts[0]
	assert 'BUDGET WARNING' in warning
	assert '75/100' in warning
	assert '25 steps remaining' in warning
async def test_budget_warning_injected_at_90_percent():
	"""The budget warning still fires at 90% of max_steps."""
	agent = Agent(task='Test task', llm=create_mock_llm())
	# step_number=89 is step 90 when 1-indexed → 90/100.
	await agent._inject_budget_warning(AgentStepInfo(step_number=89, max_steps=100))
	texts = _get_context_messages(agent)
	assert len(texts) == 1
	warning = texts[0]
	assert 'BUDGET WARNING' in warning
	assert '90/100' in warning
	assert '10 steps remaining' in warning
async def test_no_budget_warning_below_75_percent():
	"""Below 75% of max_steps no warning is injected."""
	agent = Agent(task='Test task', llm=create_mock_llm())
	# step_number=73 is step 74 when 1-indexed → 74/100, under threshold.
	await agent._inject_budget_warning(AgentStepInfo(step_number=73, max_steps=100))
	assert _get_context_messages(agent) == []
async def test_no_budget_warning_on_last_step():
	"""The last step gets no budget warning — _force_done_after_last_step covers it."""
	agent = Agent(task='Test task', llm=create_mock_llm())
	final_step = AgentStepInfo(step_number=99, max_steps=100)
	assert final_step.is_last_step()
	await agent._inject_budget_warning(final_step)
	assert _get_context_messages(agent) == []
async def test_no_budget_warning_when_step_info_is_none():
	"""Passing step_info=None injects nothing."""
	agent = Agent(task='Test task', llm=create_mock_llm())
	await agent._inject_budget_warning(None)
	assert _get_context_messages(agent) == []
async def test_budget_warning_exact_threshold():
	"""Exactly 75% (step 15 of 20) triggers the warning."""
	agent = Agent(task='Test task', llm=create_mock_llm())
	# step_number=14 is step 15 when 1-indexed → 15/20 = 75%.
	await agent._inject_budget_warning(AgentStepInfo(step_number=14, max_steps=20))
	texts = _get_context_messages(agent)
	assert len(texts) == 1
	assert '15/20' in texts[0]
	assert '5 steps remaining' in texts[0]
async def test_budget_warning_just_below_threshold():
	"""70% (step 14 of 20) is below the 75% threshold — no warning."""
	agent = Agent(task='Test task', llm=create_mock_llm())
	# step_number=13 is step 14 when 1-indexed → 14/20 = 70%.
	await agent._inject_budget_warning(AgentStepInfo(step_number=13, max_steps=20))
	assert _get_context_messages(agent) == []
async def test_budget_warning_small_max_steps():
	"""The warning logic behaves correctly for small max_steps values."""
	agent = Agent(task='Test task', llm=create_mock_llm())
	# step_number=3 is step 4 when 1-indexed → 4/4 = 100%, but it is the last step.
	final_step = AgentStepInfo(step_number=3, max_steps=4)
	assert final_step.is_last_step()
	await agent._inject_budget_warning(final_step)
	assert _get_context_messages(agent) == []  # last step is exempt
	# step_number=2 is step 3 when 1-indexed → 3/4 = 75%, which should fire.
	await agent._inject_budget_warning(AgentStepInfo(step_number=2, max_steps=4))
	texts = _get_context_messages(agent)
	assert len(texts) == 1
	assert '3/4' in texts[0]
async def test_budget_warning_percentage_display():
	"""The warning shows an integer percentage with no decimal point."""
	agent = Agent(task='Test task', llm=create_mock_llm())
	# step 76 of 100 → 76%.
	await agent._inject_budget_warning(AgentStepInfo(step_number=75, max_steps=100))
	warning = _get_context_messages(agent)[0]
	assert '76%' in warning
	assert '76.0%' not in warning  # no decimals
async def test_budget_warning_contains_actionable_guidance():
	"""The warning text tells the agent what to actually do about the budget."""
	agent = Agent(task='Test task', llm=create_mock_llm())
	await agent._inject_budget_warning(AgentStepInfo(step_number=74, max_steps=100))
	lowered = _get_context_messages(agent)[0].lower()
	for phrase in ('consolidate your results', 'done', 'partial results'):
		assert phrase in lowered
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/test_budget_warning.py",
"license": "MIT License",
"lines": 113,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/test_cli_coordinate_click.py | """Tests for CLI coordinate clicking support.
Verifies that the CLI correctly parses both index-based and coordinate-based
click commands, that the browser command handler dispatches the right events,
and that the direct CLI selector map cache works correctly.
"""
from __future__ import annotations
from typing import TYPE_CHECKING
import pytest
if TYPE_CHECKING:
from browser_use.dom.views import DOMRect, EnhancedDOMTreeNode
from browser_use.skill_cli.main import build_parser
class TestClickArgParsing:
	"""Argparse behavior for the click command: index vs. coordinate forms."""

	def test_click_single_index(self):
		"""One integer argument parses as a single element index."""
		parsed = build_parser().parse_args(['click', '5'])
		assert parsed.command == 'click'
		assert parsed.args == [5]

	def test_click_coordinates(self):
		"""Two integer arguments parse as x/y coordinates."""
		parsed = build_parser().parse_args(['click', '200', '800'])
		assert parsed.command == 'click'
		assert parsed.args == [200, 800]

	def test_click_no_args_fails(self):
		"""click with no arguments is a parse error (SystemExit)."""
		with pytest.raises(SystemExit):
			build_parser().parse_args(['click'])

	def test_click_three_args_parsed(self):
		"""Three arguments still parse; rejecting them is the handler's job."""
		parsed = build_parser().parse_args(['click', '1', '2', '3'])
		assert parsed.args == [1, 2, 3]

	def test_click_non_int_fails(self):
		"""Non-integer arguments are rejected by argparse (type=int)."""
		with pytest.raises(SystemExit):
			build_parser().parse_args(['click', 'abc'])
class TestClickCommandHandler:
	"""Dispatch behavior of the browser command handler for click."""

	async def test_coordinate_click_handler(self, httpserver):
		"""Two-arg click dispatches a ClickCoordinateEvent and reports the point."""
		from browser_use.browser.session import BrowserSession
		from browser_use.skill_cli.commands.browser import handle
		from browser_use.skill_cli.sessions import SessionInfo

		httpserver.expect_request('/').respond_with_data(
			'<html><body><button>Click me</button></body></html>',
			content_type='text/html',
		)
		session = BrowserSession(headless=True)
		await session.start()
		try:
			from browser_use.browser.events import NavigateToUrlEvent

			await session.event_bus.dispatch(NavigateToUrlEvent(url=httpserver.url_for('/')))
			info = SessionInfo(
				name='test',
				browser_mode='chromium',
				headed=False,
				profile=None,
				browser_session=session,
			)
			outcome = await handle('click', info, {'args': [100, 200]})
			assert 'clicked_coordinate' in outcome
			assert outcome['clicked_coordinate'] == {'x': 100, 'y': 200}
		finally:
			await session.kill()

	async def test_index_click_handler(self, httpserver):
		"""One-arg click goes down the ClickElementEvent path."""
		from browser_use.browser.session import BrowserSession
		from browser_use.skill_cli.commands.browser import handle
		from browser_use.skill_cli.sessions import SessionInfo

		httpserver.expect_request('/').respond_with_data(
			'<html><body><button id="btn">Click me</button></body></html>',
			content_type='text/html',
		)
		session = BrowserSession(headless=True)
		await session.start()
		try:
			from browser_use.browser.events import NavigateToUrlEvent

			await session.event_bus.dispatch(NavigateToUrlEvent(url=httpserver.url_for('/')))
			info = SessionInfo(
				name='test',
				browser_mode='chromium',
				headed=False,
				profile=None,
				browser_session=session,
			)
			# Index 999 does not exist in the page, so the error path is exercised.
			outcome = await handle('click', info, {'args': [999]})
			assert 'error' in outcome
		finally:
			await session.kill()

	async def test_invalid_args_count(self):
		"""Three args yield a usage error before the browser is ever touched."""
		from browser_use.browser.session import BrowserSession
		from browser_use.skill_cli.commands.browser import handle
		from browser_use.skill_cli.sessions import SessionInfo

		# The session is never started: the handler rejects the 3-arg form
		# before doing anything with it.
		info = SessionInfo(
			name='test',
			browser_mode='chromium',
			headed=False,
			profile=None,
			browser_session=BrowserSession(headless=True),
		)
		outcome = await handle('click', info, {'args': [1, 2, 3]})
		assert 'error' in outcome
		assert 'Usage' in outcome['error']
def _make_dom_node(
	*,
	node_name: str,
	absolute_position: DOMRect | None = None,
	ax_name: str | None = None,
	node_value: str = '',
) -> EnhancedDOMTreeNode:
	"""Construct a minimal but real EnhancedDOMTreeNode for cache tests."""
	from browser_use.dom.views import (
		EnhancedAXNode,
		EnhancedDOMTreeNode,
		NodeType,
	)

	if ax_name is None:
		ax_node = None
	else:
		# Attach a simple accessibility node so the cache can pick up its name.
		ax_node = EnhancedAXNode(
			ax_node_id='ax-0',
			ignored=False,
			role='button',
			name=ax_name,
			description=None,
			properties=None,
			child_ids=None,
		)
	return EnhancedDOMTreeNode(
		node_id=1,
		backend_node_id=1,
		node_type=NodeType.ELEMENT_NODE,
		node_name=node_name,
		node_value=node_value,
		attributes={},
		is_scrollable=None,
		is_visible=True,
		absolute_position=absolute_position,
		target_id='target-0',
		frame_id=None,
		session_id=None,
		content_document=None,
		shadow_root_type=None,
		shadow_roots=None,
		parent_node=None,
		children_nodes=None,
		ax_node=ax_node,
		snapshot_node=None,
	)
class TestSelectorCache:
	"""Selector-map cache round-trips and document→viewport coordinate math."""

	@pytest.fixture(autouse=True)
	def _use_tmp_state_file(self, monkeypatch, tmp_path):
		"""Point STATE_FILE at a temp dir so tests never touch real state."""
		import browser_use.skill_cli.direct as direct_mod

		self.state_file = tmp_path / 'browser-use-direct.json'
		monkeypatch.setattr(direct_mod, 'STATE_FILE', self.state_file)

	def test_save_and_load_cache_round_trip(self):
		"""Data survives _save_selector_cache → _load_selector_cache intact."""
		from browser_use.dom.views import DOMRect
		from browser_use.skill_cli.direct import (
			_load_selector_cache,
			_save_selector_cache,
			_save_state,
		)

		_save_state({'cdp_url': 'ws://localhost:9222'})
		button = _make_dom_node(
			node_name='BUTTON',
			absolute_position=DOMRect(x=100.0, y=200.0, width=80.0, height=32.0),
			ax_name='Submit',
		)
		link = _make_dom_node(
			node_name='A',
			absolute_position=DOMRect(x=50.0, y=800.5, width=200.0, height=40.0),
			node_value='Click here',
		)
		_save_selector_cache({5: button, 12: link})
		cache = _load_selector_cache()
		assert 5 in cache and 12 in cache
		entry = cache[5]
		assert (entry['x'], entry['y'], entry['w'], entry['h']) == (100.0, 200.0, 80.0, 32.0)
		assert entry['tag'] == 'button'
		assert entry['text'] == 'Submit'
		entry = cache[12]
		assert (entry['x'], entry['y']) == (50.0, 800.5)
		assert entry['tag'] == 'a'
		assert entry['text'] == 'Click here'

	def test_load_empty_cache(self):
		"""Loading with no saved cache gives an empty dict."""
		from browser_use.skill_cli.direct import _load_selector_cache, _save_state

		_save_state({'cdp_url': 'ws://localhost:9222'})
		assert _load_selector_cache() == {}

	def test_cache_skips_nodes_without_position(self):
		"""Nodes lacking absolute_position are excluded from the cache."""
		from browser_use.skill_cli.direct import (
			_load_selector_cache,
			_save_selector_cache,
			_save_state,
		)

		_save_state({'cdp_url': 'ws://localhost:9222'})
		positionless = _make_dom_node(node_name='DIV', absolute_position=None)
		_save_selector_cache({1: positionless})
		assert _load_selector_cache() == {}

	def test_viewport_coordinate_conversion(self):
		"""Element center in document coords minus scroll gives viewport coords."""
		elem = {'x': 150.0, 'y': 900.0, 'w': 80.0, 'h': 32.0}
		scroll_x, scroll_y = 0.0, 500.0
		vx = int(elem['x'] + elem['w'] / 2 - scroll_x)
		vy = int(elem['y'] + elem['h'] / 2 - scroll_y)
		assert (vx, vy) == (190, 416)

	def test_viewport_conversion_with_horizontal_scroll(self):
		"""Horizontal scroll offset is subtracted just like vertical."""
		elem = {'x': 1200.0, 'y': 300.0, 'w': 100.0, 'h': 50.0}
		scroll_x, scroll_y = 800.0, 100.0
		vx = int(elem['x'] + elem['w'] / 2 - scroll_x)
		vy = int(elem['y'] + elem['h'] / 2 - scroll_y)
		assert (vx, vy) == (450, 225)

	def test_cache_invalidated_on_navigate(self):
		"""Dropping selector_map from state leaves the rest of the state intact."""
		from browser_use.skill_cli.direct import _load_state, _save_state

		_save_state(
			{
				'cdp_url': 'ws://localhost:9222',
				'target_id': 'abc',
				'selector_map': {'1': {'x': 10, 'y': 20, 'w': 30, 'h': 40, 'tag': 'a', 'text': 'Link'}},
			}
		)
		state = _load_state()
		state.pop('selector_map', None)
		_save_state(state)
		reloaded = _load_state()
		assert 'selector_map' not in reloaded
		assert reloaded['cdp_url'] == 'ws://localhost:9222'
		assert reloaded['target_id'] == 'abc'

	def test_state_overwritten_on_fresh_cache(self):
		"""Saving a fresh cache fully replaces any stale cached entries."""
		from browser_use.dom.views import DOMRect
		from browser_use.skill_cli.direct import (
			_load_selector_cache,
			_save_selector_cache,
			_save_state,
		)

		_save_state(
			{
				'cdp_url': 'ws://localhost:9222',
				'selector_map': {'99': {'x': 0, 'y': 0, 'w': 0, 'h': 0, 'tag': 'old', 'text': 'old'}},
			}
		)
		fresh = _make_dom_node(
			node_name='SPAN',
			absolute_position=DOMRect(x=5.0, y=10.0, width=20.0, height=15.0),
			ax_name='New',
		)
		_save_selector_cache({7: fresh})
		cache = _load_selector_cache()
		assert 99 not in cache
		assert 7 in cache
		assert cache[7]['tag'] == 'span'
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/test_cli_coordinate_click.py",
"license": "MIT License",
"lines": 272,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/test_cli_headed_flag.py | """Tests for CLI argument parsing, specifically the --headed flag behavior."""
from browser_use.skill_cli.main import build_parser
def test_headed_flag_before_open_subcommand():
	"""--headed placed before the 'open' subcommand must survive parsing.

	Regression test for issue #3931: a duplicate --headed on the open
	subparser shadowed the global flag, so 'browser-use --headed open <url>'
	silently reset headed back to False.
	"""
	parser = build_parser()
	parsed = parser.parse_args(['--headed', 'open', 'http://example.com'])
	assert parsed.headed is True, 'Global --headed flag should be True when specified before subcommand'
	assert parsed.url == 'http://example.com'
	assert parsed.command == 'open'
def test_headed_flag_with_session():
	"""--headed composes with other global flags such as -s/--session."""
	parsed = build_parser().parse_args(['--headed', '-s', 'mysession', 'open', 'http://example.com'])
	assert parsed.headed is True
	assert parsed.session == 'mysession'
	assert parsed.url == 'http://example.com'
def test_headed_flag_default_is_false():
	"""Without --headed on the command line the flag defaults to False."""
	parsed = build_parser().parse_args(['open', 'http://example.com'])
	assert parsed.headed is False, '--headed should default to False'
def test_headed_flag_with_browser_mode():
	"""--headed composes with the --browser mode selector."""
	parsed = build_parser().parse_args(['--headed', '--browser', 'chromium', 'open', 'http://example.com'])
	assert parsed.headed is True
	assert parsed.browser == 'chromium'
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/test_cli_headed_flag.py",
"license": "MIT License",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/test_cli_install_init.py | """
Tests for browser-use CLI install and init commands.
These commands are handled early in the CLI before argparse, to avoid loading
heavy dependencies for simple setup tasks.
"""
import subprocess
import sys
def test_install_command_help():
	"""The top-level help mentions the install command."""
	completed = subprocess.run(
		[sys.executable, '-m', 'browser_use.skill_cli.main', '--help'],
		capture_output=True,
		text=True,
	)
	assert completed.returncode == 0
	assert 'install' in completed.stdout
	assert 'Install Chromium browser' in completed.stdout
def test_init_command_help():
	"""The top-level help mentions the init command."""
	completed = subprocess.run(
		[sys.executable, '-m', 'browser_use.skill_cli.main', '--help'],
		capture_output=True,
		text=True,
	)
	assert completed.returncode == 0
	assert 'init' in completed.stdout
	assert 'Generate browser-use template file' in completed.stdout
def test_init_subcommand_help():
	"""The init subcommand exposes its own help with template/list options."""
	completed = subprocess.run(
		[sys.executable, '-m', 'browser_use.skill_cli.main', 'init', '--help'],
		capture_output=True,
		text=True,
	)
	assert completed.returncode == 0
	assert '--template' in completed.stdout or '-t' in completed.stdout
	assert '--list' in completed.stdout or '-l' in completed.stdout
def test_init_list_templates():
	"""init --list prints the available template names."""
	completed = subprocess.run(
		[sys.executable, '-m', 'browser_use.skill_cli.main', 'init', '--list'],
		capture_output=True,
		text=True,
	)
	assert completed.returncode == 0
	assert 'default' in completed.stdout
	assert 'advanced' in completed.stdout
def test_mcp_flag_help():
	"""The top-level help documents the --mcp flag."""
	completed = subprocess.run(
		[sys.executable, '-m', 'browser_use.skill_cli.main', '--help'],
		capture_output=True,
		text=True,
	)
	assert completed.returncode == 0
	assert '--mcp' in completed.stdout
	assert 'MCP server' in completed.stdout
def test_template_flag_help():
	"""The top-level help documents the --template flag."""
	completed = subprocess.run(
		[sys.executable, '-m', 'browser_use.skill_cli.main', '--help'],
		capture_output=True,
		text=True,
	)
	assert completed.returncode == 0
	assert '--template' in completed.stdout
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/test_cli_install_init.py",
"license": "MIT License",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/test_doctor_command.py | """Tests for doctor command."""
import pytest
from browser_use.skill_cli.commands import doctor
@pytest.mark.asyncio
async def test_doctor_handle_returns_valid_structure():
	"""doctor.handle() produces the full status/checks/summary structure."""
	report = await doctor.handle()
	# Top-level shape.
	assert 'status' in report
	assert report['status'] in ('healthy', 'issues_found')
	assert 'checks' in report
	assert 'summary' in report
	# Every expected check is present, each with status and message.
	for name in ('package', 'browser', 'api_key', 'cloudflared', 'network'):
		entry = report['checks'][name]
		assert 'status' in entry
		assert 'message' in entry
def test_check_package_installed():
	"""_check_package reports ok — browser-use is always present under test."""
	outcome = doctor._check_package()
	assert outcome['status'] == 'ok'
	assert 'browser-use' in outcome['message']
def test_check_browser_returns_valid_structure():
	"""_check_browser yields a status/message pair with a known status value."""
	outcome = doctor._check_browser()
	assert 'status' in outcome
	assert outcome['status'] in ('ok', 'warning')
	assert 'message' in outcome
def test_check_api_key_with_env_var(monkeypatch):
	"""An API key supplied via environment variable is reported as configured."""
	monkeypatch.setenv('BROWSER_USE_API_KEY', 'test_key_12345')
	outcome = doctor._check_api_key_config()
	assert outcome['status'] == 'ok'
	assert 'configured' in outcome['message'].lower()
def test_check_api_key_missing(monkeypatch):
	"""With no env var and no config file, the API key check reports missing."""
	monkeypatch.delenv('BROWSER_USE_API_KEY', raising=False)
	# Redirect every config lookup path into an empty temp dir so no
	# on-disk config file can supply a key.
	import tempfile

	with tempfile.TemporaryDirectory() as tmpdir:
		monkeypatch.setenv('XDG_CONFIG_HOME', tmpdir)
		# macOS resolves config under ~/Library/Application Support, so HOME too.
		monkeypatch.setenv('HOME', tmpdir)
		# Drop any key cached from earlier tests.
		from browser_use.skill_cli import api_key

		if hasattr(api_key, '_cached_key'):
			monkeypatch.setattr(api_key, '_cached_key', None)
		outcome = doctor._check_api_key_config()
		assert outcome['status'] == 'missing'
		assert 'no api key' in outcome['message'].lower()
def test_check_cloudflared_returns_valid_structure():
	"""_check_cloudflared yields a valid status/message pair."""
	outcome = doctor._check_cloudflared()
	assert 'status' in outcome
	assert outcome['status'] in ('ok', 'missing')
	assert 'message' in outcome
	if outcome['status'] == 'ok':
		# When available, the message names the tool or its availability.
		lowered = outcome['message'].lower()
		assert 'available' in lowered or 'cloudflared' in lowered
@pytest.mark.asyncio
async def test_check_network_returns_valid_structure():
	"""_check_network yields a valid status/message pair."""
	outcome = await doctor._check_network()
	assert 'status' in outcome
	assert outcome['status'] in ('ok', 'warning')
	assert 'message' in outcome
def test_summarize_checks_all_ok():
	"""An all-ok check set summarizes as N/N passing."""
	all_ok = {name: {'status': 'ok'} for name in ('check1', 'check2', 'check3')}
	assert '3/3' in doctor._summarize_checks(all_ok)
def test_summarize_checks_mixed():
	"""Mixed ok/warning/missing checks are each counted in the summary."""
	mixed = {
		'check1': {'status': 'ok'},
		'check2': {'status': 'warning'},
		'check3': {'status': 'missing'},
	}
	text = doctor._summarize_checks(mixed)
	assert '1/3' in text
	assert '1 warning' in text
	assert '1 missing' in text
def test_summarize_checks_with_errors():
	"""Error statuses are counted separately in the summary."""
	with_error = {
		'check1': {'status': 'ok'},
		'check2': {'status': 'error'},
	}
	text = doctor._summarize_checks(with_error)
	assert '1/2' in text
	assert '1 error' in text
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/test_doctor_command.py",
"license": "MIT License",
"lines": 99,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/test_install_config.py | """Tests for install configuration module."""
import json
from pathlib import Path
from unittest.mock import patch
import pytest
class TestInstallConfig:
"""Tests for browser_use.skill_cli.install_config module."""
@pytest.fixture
def temp_config_dir(self, tmp_path: Path):
	"""Yield a temp install-config path with CONFIG_PATH patched onto it."""
	target = tmp_path / 'install-config.json'
	with patch('browser_use.skill_cli.install_config.CONFIG_PATH', target):
		yield target
def test_get_config_default_all_modes(self, temp_config_dir: Path):
	"""Missing config file means all modes (pip-install default)."""
	from browser_use.skill_cli.install_config import get_config

	assert not temp_config_dir.exists()  # nothing on disk yet
	config = get_config()
	assert config['installed_modes'] == ['chromium', 'real', 'remote']
	assert config['default_mode'] == 'chromium'
def test_get_config_reads_existing_file(self, temp_config_dir: Path):
	"""An existing config file is read back verbatim."""
	from browser_use.skill_cli.install_config import get_config

	# Write a remote-only config to disk first.
	temp_config_dir.parent.mkdir(parents=True, exist_ok=True)
	temp_config_dir.write_text(json.dumps({'installed_modes': ['remote'], 'default_mode': 'remote'}))
	config = get_config()
	assert config['installed_modes'] == ['remote']
	assert config['default_mode'] == 'remote'
def test_get_config_handles_corrupt_file(self, temp_config_dir: Path):
	"""Unparseable config content falls back to the default config."""
	from browser_use.skill_cli.install_config import get_config

	temp_config_dir.parent.mkdir(parents=True, exist_ok=True)
	temp_config_dir.write_text('not valid json {{{')
	config = get_config()
	# Corruption is not fatal — defaults apply.
	assert config['installed_modes'] == ['chromium', 'real', 'remote']
	assert config['default_mode'] == 'chromium'
def test_save_config_creates_file(self, temp_config_dir: Path):
	"""save_config writes the config file with the given modes."""
	from browser_use.skill_cli.install_config import save_config

	assert not temp_config_dir.exists()
	save_config(['remote'], 'remote')
	assert temp_config_dir.exists()
	on_disk = json.loads(temp_config_dir.read_text())
	assert on_disk['installed_modes'] == ['remote']
	assert on_disk['default_mode'] == 'remote'
def test_save_config_creates_parent_directories(self, tmp_path: Path):
	"""save_config creates any missing parent directories."""
	from browser_use.skill_cli.install_config import save_config

	deep_path = tmp_path / 'deep' / 'nested' / 'install-config.json'
	with patch('browser_use.skill_cli.install_config.CONFIG_PATH', deep_path):
		save_config(['chromium', 'real'], 'chromium')
	assert deep_path.exists()
def test_is_mode_available_remote_only(self, temp_config_dir: Path):
	"""A remote-only install blocks both local modes."""
	from browser_use.skill_cli.install_config import is_mode_available, save_config

	save_config(['remote'], 'remote')
	assert is_mode_available('remote') is True
	assert is_mode_available('chromium') is False
	assert is_mode_available('real') is False
def test_is_mode_available_local_only(self, temp_config_dir: Path):
	"""A local-only install blocks the remote mode."""
	from browser_use.skill_cli.install_config import is_mode_available, save_config

	save_config(['chromium', 'real'], 'chromium')
	assert is_mode_available('chromium') is True
	assert is_mode_available('real') is True
	assert is_mode_available('remote') is False
def test_is_mode_available_full_install(self, temp_config_dir: Path):
    """With every mode recorded as installed, nothing is blocked."""
    from browser_use.skill_cli.install_config import is_mode_available, save_config

    save_config(['chromium', 'real', 'remote'], 'chromium')

    for mode in ('chromium', 'real', 'remote'):
        assert is_mode_available(mode) is True
def test_is_mode_available_local_modes_linked(self, temp_config_dir: Path):
    """Installing either local mode makes BOTH local modes available."""
    from browser_use.skill_cli.install_config import is_mode_available, save_config

    # chromium alone also unlocks real ...
    save_config(['chromium'], 'chromium')
    assert is_mode_available('chromium') is True
    assert is_mode_available('real') is True

    # ... and real alone also unlocks chromium.
    save_config(['real'], 'real')
    assert is_mode_available('chromium') is True
    assert is_mode_available('real') is True
def test_get_default_mode(self, temp_config_dir: Path):
    """The default --browser value tracks whatever install was recorded."""
    from browser_use.skill_cli.install_config import get_default_mode, save_config

    # Remote-only install defaults to remote.
    save_config(['remote'], 'remote')
    assert get_default_mode() == 'remote'

    # Local-only install defaults to chromium.
    save_config(['chromium', 'real'], 'chromium')
    assert get_default_mode() == 'chromium'
def test_get_available_modes(self, temp_config_dir: Path):
    """get_available_modes mirrors the installed_modes list from the config."""
    from browser_use.skill_cli.install_config import get_available_modes, save_config

    save_config(['remote'], 'remote')
    assert get_available_modes() == ['remote']

    save_config(['chromium', 'real', 'remote'], 'chromium')
    assert get_available_modes() == ['chromium', 'real', 'remote']
def test_get_mode_unavailable_error_message(self, temp_config_dir: Path):
    """Requesting an uninstalled mode yields an actionable error message."""
    from browser_use.skill_cli.install_config import get_mode_unavailable_error, save_config

    save_config(['remote'], 'remote')
    message = get_mode_unavailable_error('chromium')

    assert 'chromium' in message  # names the rejected mode
    assert 'not installed' in message.lower()
    assert 'remote' in message  # lists what IS available
    assert '--full' in message  # tells the user how to reinstall
def test_no_config_file_means_all_modes_available(self, temp_config_dir: Path):
    """Without a config file (plain pip install) every mode is usable."""
    from browser_use.skill_cli.install_config import (
        get_available_modes,
        get_default_mode,
        is_mode_available,
    )

    # No config was ever written for this test.
    assert not temp_config_dir.exists()

    # Every mode is usable ...
    for mode in ('chromium', 'real', 'remote'):
        assert is_mode_available(mode) is True
    # ... the default is chromium ...
    assert get_default_mode() == 'chromium'
    # ... and the full mode list is reported.
    assert get_available_modes() == ['chromium', 'real', 'remote']
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/test_install_config.py",
"license": "MIT License",
"lines": 130,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/test_markdown_chunking.py | """Tests for structure-aware markdown chunking."""
from markdownify import markdownify as md
from pytest_httpserver import HTTPServer
from browser_use.dom.markdown_extractor import chunk_markdown_by_structure
# ---------------------------------------------------------------------------
# Unit tests — synchronous, no browser needed
# ---------------------------------------------------------------------------
class TestChunkMarkdownBasic:
    """Basic chunking behaviour."""

    def test_short_content_single_chunk(self):
        """Content well under the limit comes back as exactly one chunk."""
        text = '# Hello\n\nSome short content.'
        result = chunk_markdown_by_structure(text, max_chunk_chars=100_000)
        assert len(result) == 1
        only = result[0]
        assert only.content == text
        assert only.chunk_index == 0
        assert only.total_chunks == 1
        assert only.has_more is False

    def test_empty_content(self):
        """Empty input still yields a single (empty) chunk."""
        result = chunk_markdown_by_structure('', max_chunk_chars=100)
        assert len(result) == 1
        assert result[0].content == ''
        assert result[0].has_more is False

    def test_chunk_offsets_cover_full_content(self):
        """Chunk offsets should cover the entire original content without gaps."""
        text = '# Header\n\nParagraph one.\n\n# Header 2\n\nParagraph two.'
        result = chunk_markdown_by_structure(text, max_chunk_chars=20)
        # Consecutive chunks must tile the text with no gap between them.
        for i in range(1, len(result)):
            previous, current = result[i - 1], result[i]
            assert current.char_offset_start == previous.char_offset_end, (
                f'Gap between chunk {i - 1} end ({previous.char_offset_end}) '
                f'and chunk {i} start ({current.char_offset_start})'
            )
        # The tiling starts at 0 and ends exactly at the content length.
        assert result[0].char_offset_start == 0
        assert result[-1].char_offset_end == len(text)
class TestChunkMarkdownHeaders:
    """Header boundary splitting."""

    def test_splits_at_header_boundary(self):
        """Chunks should prefer splitting at header boundaries."""
        part_a = '# Section A\n\n' + 'x' * 50
        part_b = '\n\n# Section B\n\n' + 'y' * 50
        # Limit chosen so part_a fits alone but part_a + part_b does not.
        result = chunk_markdown_by_structure(part_a + part_b, max_chunk_chars=len(part_a) + 5)
        assert len(result) >= 2
        # Section A stays in the first chunk; Section B lands in the second.
        assert '# Section A' in result[0].content
        assert '# Section B' in result[1].content
class TestChunkMarkdownHeaderPreferred:
    """Header-preferred splitting ensures chunks start at semantic boundaries."""

    def test_header_preferred_split_moves_header_to_next_chunk(self):
        """When a header sits in the middle of an overflowing chunk, split before it."""
        # Big paragraph, then a header, then a small paragraph.
        big_para = 'A' * 600
        small_para = 'B' * 100
        body = f'{big_para}\n\n# Section B\n\n{small_para}'
        # The 700-char limit forces a split with the header near the boundary.
        result = chunk_markdown_by_structure(body, max_chunk_chars=700)
        assert len(result) >= 2
        # The header must OPEN the second chunk rather than close the first.
        assert result[1].content.lstrip().startswith('# Section B')
        assert '# Section B' not in result[0].content

    def test_header_preferred_split_doesnt_create_tiny_chunks(self):
        """Don't split at a header that would make the prefix chunk < 50% of limit."""
        body = '\n\n'.join(['# Section A', 'A' * 30, '# Section B', 'B' * 600])
        # '# Section B' sits very early; splitting there would leave a tiny
        # prefix chunk, so the algorithm should keep both headers together.
        result = chunk_markdown_by_structure(body, max_chunk_chars=700)
        assert '# Section A' in result[0].content
        assert '# Section B' in result[0].content
class TestChunkMarkdownCodeFence:
    """Code fence blocks never split."""

    def test_code_fence_not_split(self):
        """A fenced block larger than the limit stays intact (soft limit)."""
        fenced = '```python\n' + 'x = 1\n' * 100 + '```'
        text = '# Title\n\n' + fenced + '\n\n# Footer\n\nDone.'
        result = chunk_markdown_by_structure(text, max_chunk_chars=50)
        # Look for a chunk that holds both the opening fence and its close.
        intact = [
            c
            for c in result
            if '```python' in c.content and '```' in c.content.split('```python')[1]
        ]
        assert len(intact) >= 1, 'Code fence should appear intact in at least one chunk'

    def test_unclosed_code_fence(self):
        """Unclosed code fence should still be kept as one block."""
        result = chunk_markdown_by_structure('# Title\n\n```python\nx = 1\ny = 2', max_chunk_chars=100_000)
        assert len(result) == 1
        assert '```python' in result[0].content
        assert 'y = 2' in result[0].content
class TestChunkMarkdownTable:
    """Table rows never split mid-row."""

    def test_table_not_split_mid_row(self):
        """Every table line in every chunk must be a complete `| ... |` row."""
        header = '| Name | Value |'
        separator = '| --- | --- |'
        rows = [f'| item{i} | val{i} |' for i in range(50)]
        table = '\n'.join([header, separator] + rows)
        content = '# Data\n\n' + table
        # Use a limit that would fall in the middle of the table
        chunks = chunk_markdown_by_structure(content, max_chunk_chars=200)
        for chunk in chunks:
            lines = chunk.content.split('\n')
            for line in lines:
                stripped = line.strip()
                if stripped.startswith('|') and stripped.endswith('|'):
                    # Each table row line should be complete (start and end with |)
                    # A 2-column row has 3 pipes; fewer means it was cut mid-row.
                    assert stripped.count('|') >= 3, f'Incomplete table row: {stripped}'

    def test_table_header_in_overlap_for_continuation(self):
        """When a table spans multiple chunks, the header should be in the overlap prefix."""
        header = '| Col1 | Col2 |'
        separator = '| --- | --- |'
        rows = [f'| r{i} | d{i} |' for i in range(100)]
        table = '\n'.join([header, separator] + rows)
        content = table
        # Force split within the table
        chunks = chunk_markdown_by_structure(content, max_chunk_chars=300)
        if len(chunks) > 1:
            # Second chunk should have table header in overlap
            assert '| Col1 | Col2 |' in chunks[1].overlap_prefix
            assert '| --- | --- |' in chunks[1].overlap_prefix

    def test_table_header_carried_across_three_plus_chunks(self):
        """Table header must persist in overlap for ALL continuation chunks, not just the second."""
        header = '| Col1 | Col2 |'
        separator = '| --- | --- |'
        rows = [f'| row{i} | data{i} |' for i in range(200)]
        table = '\n'.join([header, separator] + rows)
        content = table
        # Force many small chunks
        chunks = chunk_markdown_by_structure(content, max_chunk_chars=200)
        assert len(chunks) >= 3, f'Expected >=3 chunks, got {len(chunks)}'
        # Every chunk after the first should carry the table header in its overlap
        for i in range(1, len(chunks)):
            assert '| Col1 | Col2 |' in chunks[i].overlap_prefix, f'Chunk {i} missing table header in overlap'
            assert '| --- | --- |' in chunks[i].overlap_prefix, f'Chunk {i} missing table separator in overlap'
class TestChunkMarkdownListItems:
    """List item continuations stay together."""

    def test_list_items_not_split(self):
        """No chunk may contain a truncated bullet line."""
        bullets = '\n'.join([f'- Item {i} with some description text' for i in range(50)])
        result = chunk_markdown_by_structure('# List\n\n' + bullets, max_chunk_chars=200)
        for piece in result:
            for raw_line in piece.content.split('\n'):
                text = raw_line.strip()
                if text.startswith('- '):
                    # Every bullet carries its full 'Item N ...' payload.
                    assert 'Item' in text
class TestChunkMarkdownStartFromChar:
    """start_from_char parameter returns correct chunk."""

    def test_start_from_char_returns_correct_chunk(self):
        """An offset inside chunk N makes chunk N the first returned chunk."""
        part_a = '# A\n\nContent A here.'
        part_b = '\n\n# B\n\nContent B here.'
        text = part_a + part_b
        limit = len(part_a) + 5
        full = chunk_markdown_by_structure(text, max_chunk_chars=limit)
        if len(full) > 1:
            # Ask for chunks starting just inside the second chunk's span.
            offset = full[1].char_offset_start + 1
            subset = chunk_markdown_by_structure(text, max_chunk_chars=limit, start_from_char=offset)
            assert len(subset) >= 1
            assert subset[0].chunk_index == full[1].chunk_index

    def test_start_from_char_past_end_returns_empty(self):
        """An offset beyond the content yields no chunks at all."""
        result = chunk_markdown_by_structure('# Hello\n\nWorld.', max_chunk_chars=100_000, start_from_char=99999)
        assert result == []

    def test_start_from_char_zero_returns_all(self):
        """Offset 0 is equivalent to not filtering."""
        result = chunk_markdown_by_structure('# Hello\n\nWorld.', max_chunk_chars=100_000, start_from_char=0)
        assert len(result) == 1
class TestChunkMarkdownOverlap:
    """Overlap lines carry context."""

    def test_overlap_lines_carry_context(self):
        """Continuation chunks carry trailing lines of the previous chunk."""
        text = '\n'.join([f'Line {i}' for i in range(100)])
        result = chunk_markdown_by_structure(text, max_chunk_chars=200, overlap_lines=3)
        if len(result) > 1:
            follow_up = result[1]
            assert follow_up.overlap_prefix != ''
            carried = follow_up.overlap_prefix.split('\n')
            # Overlap stays close to the requested 3 lines.
            assert len(carried) <= 3 + 2  # some flexibility for table headers etc.

    def test_no_overlap_on_first_chunk(self):
        """The first chunk has nothing before it, so no overlap prefix."""
        result = chunk_markdown_by_structure('# A\n\nSome content.\n\n# B\n\nMore content.', max_chunk_chars=25)
        assert result[0].overlap_prefix == ''
class TestChunkMarkdownMixed:
    """Mixed content scenarios."""

    def test_paragraph_splitting(self):
        """Paragraphs separated by blank lines are separate blocks."""
        first = 'First paragraph with text.'
        second = 'Second paragraph with more text.'
        result = chunk_markdown_by_structure(f'{first}\n\n{second}', max_chunk_chars=30)
        # Each paragraph overflows the limit on its own, so we get >= 2 chunks.
        assert len(result) >= 2

    def test_single_oversized_block_allowed(self):
        """A single block bigger than max_chunk_chars is allowed (soft limit)."""
        oversized = 'x' * 200
        result = chunk_markdown_by_structure(oversized, max_chunk_chars=50)
        assert len(result) == 1
        assert result[0].content == oversized
# ---------------------------------------------------------------------------
# HTML → markdown → chunk pipeline tests
# ---------------------------------------------------------------------------
class TestHTMLToMarkdownChunking:
    """End-to-end: HTML table → markdown → chunks."""

    def test_large_table_produces_valid_chunks(self):
        """200-row HTML table → markdown → chunks should produce valid table rows in every chunk."""
        rows = ''.join(f'<tr><td>Row {i}</td><td>Val {i}</td></tr>' for i in range(200))
        html = f'<table><thead><tr><th>Name</th><th>Value</th></tr></thead><tbody>{rows}</tbody></table>'
        # Convert through the same markdownify path the extractor uses.
        markdown = md(html, heading_style='ATX')
        chunks = chunk_markdown_by_structure(markdown, max_chunk_chars=500)
        assert len(chunks) > 1, 'Should produce multiple chunks for 200 rows'
        for chunk in chunks:
            lines = chunk.content.strip().split('\n')
            for line in lines:
                s = line.strip()
                if s.startswith('|') and s.endswith('|'):
                    # Every table line should have consistent column count
                    # (2 columns => at least 3 pipes per complete row).
                    assert s.count('|') >= 3

    def test_table_without_thead_normalization(self):
        """Table with <th> in first <tr> but no <thead> should still produce proper markdown."""
        html = '<table><tr><th>A</th><th>B</th></tr><tr><td>1</td><td>2</td></tr><tr><td>3</td><td>4</td></tr></table>'
        markdown = md(html, heading_style='ATX')
        # Verify markdownify produced a proper table (with separator row)
        assert '---' in markdown or '| A |' in markdown
# ---------------------------------------------------------------------------
# Integration tests — require browser + httpserver
# ---------------------------------------------------------------------------
class TestTableNormalizationIntegration:
    """Integration tests using browser session and httpserver."""

    async def test_table_without_thead_normalized_via_serializer(self, browser_session, httpserver: HTTPServer):
        """Tables without <thead> should get normalized by HTMLSerializer during extraction."""
        html = """
        <html><body>
            <table>
                <tr><th>Header1</th><th>Header2</th></tr>
                <tr><td>data1</td><td>data2</td></tr>
                <tr><td>data3</td><td>data4</td></tr>
            </table>
        </body></html>
        """
        # Serve the page locally and point the shared browser session at it.
        httpserver.expect_request('/table-test').respond_with_data(html, content_type='text/html')
        url = httpserver.url_for('/table-test')
        await browser_session.navigate_to(url)
        from browser_use.dom.markdown_extractor import extract_clean_markdown
        content, _ = await extract_clean_markdown(browser_session=browser_session)
        # Should have proper markdown table with separator
        assert '|' in content
        # The header should be present
        assert 'Header1' in content
        assert 'Header2' in content

    async def test_large_table_extraction_preserves_structure(self, browser_session, httpserver: HTTPServer):
        """Large table extraction should produce structure-aware chunks."""
        rows = ''.join(f'<tr><td>Name{i}</td><td>Value{i}</td></tr>' for i in range(300))
        html = f"""
        <html><body>
            <table>
                <tr><th>Name</th><th>Value</th></tr>
                {rows}
            </table>
        </body></html>
        """
        httpserver.expect_request('/big-table').respond_with_data(html, content_type='text/html')
        url = httpserver.url_for('/big-table')
        await browser_session.navigate_to(url)
        from browser_use.dom.markdown_extractor import extract_clean_markdown
        content, _ = await extract_clean_markdown(browser_session=browser_session)
        # Chunk with a small limit to force multiple chunks
        chunks = chunk_markdown_by_structure(content, max_chunk_chars=2000)
        # Should produce multiple chunks
        assert len(chunks) > 1
        # Each chunk should have complete table rows
        for chunk in chunks:
            for line in chunk.content.split('\n'):
                s = line.strip()
                if s.startswith('|') and s.endswith('|'):
                    # 2-column table => at least 3 pipes per complete row.
                    assert s.count('|') >= 3, f'Incomplete table row: {s}'
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/test_markdown_chunking.py",
"license": "MIT License",
"lines": 285,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/test_multi_act_guards.py | """
Tests for multi_act() page-change guards.
Verifies:
1. Metadata: terminates_sequence flags are set correctly on built-in actions
2. Static guard: actions tagged terminates_sequence abort remaining queued actions
3. Runtime guard: URL/focus changes detected after click-on-link abort remaining actions
4. Safe chain: multiple inputs execute without interruption
Usage:
uv run pytest tests/ci/test_multi_act_guards.py -v -s
"""
import asyncio
import pytest
from pytest_httpserver import HTTPServer
from browser_use.agent.service import Agent
from browser_use.browser import BrowserSession
from browser_use.browser.profile import BrowserProfile
from browser_use.tools.service import Tools
from tests.ci.conftest import create_mock_llm
# ---------------------------------------------------------------------------
# Fixtures
# ---------------------------------------------------------------------------
@pytest.fixture(scope='session')
def http_server():
    """Test HTTP server with pages for guard tests.

    Serves four fixed pages:
      /form   - three text inputs plus a submit button (safe multi-input chains)
      /page_a - a single link to /page_b (runtime navigation-guard tests)
      /page_b - the navigation target
      /static - a page where nothing navigates away (safe-chain tests)
    Session-scoped so the server is started once and stopped after all tests.
    """
    server = HTTPServer()
    server.start()
    server.expect_request('/form').respond_with_data(
        """<html><head><title>Form Page</title></head><body>
        <h1>Form</h1>
        <input id="field1" type="text" placeholder="Field 1" />
        <input id="field2" type="text" placeholder="Field 2" />
        <input id="field3" type="text" placeholder="Field 3" />
        <button id="submit" type="submit">Submit</button>
        </body></html>""",
        content_type='text/html',
    )
    server.expect_request('/page_a').respond_with_data(
        """<html><head><title>Page A</title></head><body>
        <h1>Page A</h1>
        <a id="link_b" href="/page_b">Go to Page B</a>
        </body></html>""",
        content_type='text/html',
    )
    server.expect_request('/page_b').respond_with_data(
        """<html><head><title>Page B</title></head><body>
        <h1>Page B</h1>
        <p>You arrived at Page B</p>
        </body></html>""",
        content_type='text/html',
    )
    server.expect_request('/static').respond_with_data(
        """<html><head><title>Static Page</title></head><body>
        <h1>Static</h1>
        <p>Nothing changes here</p>
        <input id="safe_input" type="text" />
        </body></html>""",
        content_type='text/html',
    )
    yield server
    server.stop()
@pytest.fixture(scope='session')
def base_url(http_server):
    """Root URL (http://host:port) of the session-scoped test HTTP server."""
    return f'http://{http_server.host}:{http_server.port}'
@pytest.fixture(scope='module')
async def browser_session():
    """Shared headless browser session for the whole test module.

    keep_alive=True prevents individual tests from tearing the browser down;
    the fixture kills the session and stops its event bus once the module
    finishes. user_data_dir=None keeps the profile ephemeral.
    """
    session = BrowserSession(
        browser_profile=BrowserProfile(
            headless=True,
            user_data_dir=None,
            keep_alive=True,
        )
    )
    await session.start()
    yield session
    # Teardown: kill the browser first, then drain/stop the event bus.
    await session.kill()
    await session.event_bus.stop(clear=True, timeout=5)
@pytest.fixture(scope='function')
def tools():
    """Fresh Tools registry per test so registry state cannot leak across tests."""
    fresh_tools = Tools()
    return fresh_tools
# ---------------------------------------------------------------------------
# 1. Metadata tests — verify terminates_sequence flags
# ---------------------------------------------------------------------------
class TestTerminatesSequenceMetadata:
    """Verify that built-in actions have correct terminates_sequence flags."""

    def _flag(self, tools, action_name):
        """Return the terminates_sequence flag of a registered built-in action."""
        registered = tools.registry.registry.actions.get(action_name)
        assert registered is not None
        return registered.terminates_sequence

    def test_navigate_terminates(self, tools):
        assert self._flag(tools, 'navigate') is True

    def test_search_terminates(self, tools):
        assert self._flag(tools, 'search') is True

    def test_go_back_terminates(self, tools):
        assert self._flag(tools, 'go_back') is True

    def test_switch_terminates(self, tools):
        assert self._flag(tools, 'switch') is True

    def test_click_does_not_terminate(self, tools):
        assert self._flag(tools, 'click') is False

    def test_input_does_not_terminate(self, tools):
        assert self._flag(tools, 'input') is False

    def test_scroll_does_not_terminate(self, tools):
        assert self._flag(tools, 'scroll') is False

    def test_extract_does_not_terminate(self, tools):
        assert self._flag(tools, 'extract') is False

    def test_evaluate_terminates(self, tools):
        """evaluate() can mutate the DOM in unpredictable ways (e.g. dismiss cookie overlays),
        so any actions queued after it should be skipped to avoid stale element references."""
        assert self._flag(tools, 'evaluate') is True
# ---------------------------------------------------------------------------
# 2. Static guard — navigate as non-last action skips remaining
# ---------------------------------------------------------------------------
class TestStaticGuard:
    """Verify that terminates_sequence actions abort the remaining queue."""

    async def test_navigate_aborts_remaining_actions(self, browser_session, base_url, tools):
        """When navigate is action 2/3, action 3 should never execute."""
        # Start on a known page
        await tools.navigate(url=f'{base_url}/static', new_tab=False, browser_session=browser_session)
        await asyncio.sleep(0.5)  # give the page time to settle before queuing actions
        # Build action models: [scroll_down, navigate_to_page_a, scroll_down]
        ActionModel = tools.registry.create_action_model()
        actions = [
            ActionModel.model_validate({'scroll': {'down': True, 'pages': 1}}),
            ActionModel.model_validate({'navigate': {'url': f'{base_url}/page_a'}}),
            ActionModel.model_validate({'scroll': {'down': True, 'pages': 1}}),
        ]
        mock_llm = create_mock_llm()
        agent = Agent(task='test', llm=mock_llm, browser_session=browser_session, tools=tools)
        results = await agent.multi_act(actions)
        # Should have executed exactly 2 actions (scroll + navigate), third skipped
        # because navigate carries terminates_sequence=True (static guard).
        assert len(results) == 2, f'Expected 2 results but got {len(results)}: {results}'
        # Verify we actually navigated
        url = await browser_session.get_current_page_url()
        assert '/page_a' in url

    async def test_go_back_aborts_remaining_actions(self, browser_session, base_url, tools):
        """go_back should abort remaining queued actions."""
        # Navigate to page_a then page_b so go_back has somewhere to go
        await tools.navigate(url=f'{base_url}/page_a', new_tab=False, browser_session=browser_session)
        await asyncio.sleep(0.3)
        await tools.navigate(url=f'{base_url}/page_b', new_tab=False, browser_session=browser_session)
        await asyncio.sleep(0.3)
        ActionModel = tools.registry.create_action_model()
        actions = [
            ActionModel.model_validate({'go_back': {}}),
            ActionModel.model_validate({'scroll': {'down': True, 'pages': 1}}),
        ]
        mock_llm = create_mock_llm()
        agent = Agent(task='test', llm=mock_llm, browser_session=browser_session, tools=tools)
        results = await agent.multi_act(actions)
        # go_back should terminate the sequence — only 1 result
        assert len(results) == 1, f'Expected 1 result but got {len(results)}: {results}'
# ---------------------------------------------------------------------------
# 3. Runtime guard — click on link changes URL, remaining actions skipped
# ---------------------------------------------------------------------------
class TestRuntimeGuard:
    """Verify that URL/focus changes detected at runtime abort remaining actions."""

    async def test_click_link_aborts_remaining(self, browser_session, base_url, tools):
        """Click a link that navigates to another page — remaining actions skipped."""
        await tools.navigate(url=f'{base_url}/page_a', new_tab=False, browser_session=browser_session)
        await asyncio.sleep(0.5)  # let the DOM settle so the selector map is populated
        # Get the selector map to find the link index
        state = await browser_session.get_browser_state_summary()
        assert state.dom_state is not None
        selector_map = state.dom_state.selector_map
        # Find the link element (a#link_b) — the only <a> on /page_a.
        link_index = None
        for idx, element in selector_map.items():
            if hasattr(element, 'tag_name') and element.tag_name == 'a':
                link_index = idx
                break
        assert link_index is not None, 'Could not find link element in selector map'
        ActionModel = tools.registry.create_action_model()
        actions = [
            ActionModel.model_validate({'click': {'index': link_index}}),
            ActionModel.model_validate({'scroll': {'down': True, 'pages': 1}}),
            ActionModel.model_validate({'scroll': {'down': True, 'pages': 1}}),
        ]
        mock_llm = create_mock_llm()
        agent = Agent(task='test', llm=mock_llm, browser_session=browser_session, tools=tools)
        results = await agent.multi_act(actions)
        # Click navigated to page_b — runtime guard should stop at 1
        # (click itself is NOT terminates_sequence; the URL change triggers the guard).
        assert len(results) == 1, f'Expected 1 result but got {len(results)}: {results}'
        # Verify we're on page_b
        url = await browser_session.get_current_page_url()
        assert '/page_b' in url
# ---------------------------------------------------------------------------
# 4. Safe chain — multiple non-page-changing actions all execute
# ---------------------------------------------------------------------------
class TestSafeChain:
    """Verify that non-page-changing actions execute without interruption."""

    async def test_multiple_scrolls_all_execute(self, browser_session, base_url, tools):
        """Multiple scroll actions should all execute."""
        await tools.navigate(url=f'{base_url}/static', new_tab=False, browser_session=browser_session)
        await asyncio.sleep(0.5)
        ActionModel = tools.registry.create_action_model()
        scroll_specs = [
            {'scroll': {'down': True, 'pages': 0.5}},
            {'scroll': {'down': True, 'pages': 0.5}},
            {'scroll': {'down': False, 'pages': 0.5}},
        ]
        queued = [ActionModel.model_validate(spec) for spec in scroll_specs]
        agent = Agent(task='test', llm=create_mock_llm(), browser_session=browser_session, tools=tools)
        results = await agent.multi_act(queued)
        # No guard should fire: every scroll runs and none reports an error.
        assert len(results) == 3, f'Expected 3 results but got {len(results)}: {results}'
        for outcome in results:
            assert outcome.error is None, f'Unexpected error: {outcome.error}'
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/test_multi_act_guards.py",
"license": "MIT License",
"lines": 221,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/test_search_find.py | """Tests for search_page and find_elements actions."""
import asyncio
import pytest
from pytest_httpserver import HTTPServer
from browser_use.agent.views import ActionResult
from browser_use.browser import BrowserProfile, BrowserSession
from browser_use.tools.service import Tools
# --- Fixtures ---
@pytest.fixture(scope='session')
def http_server():
    """Test HTTP server serving pages for search/find tests.

    Pages served:
      /products  - product table with prices/ratings, pagination links, footer
                   (literal/regex search and css_scope tests)
      /articles  - three article cards with headings and read-more links
      /empty     - a page with an empty content div
      /case-test - three case variants of the same phrase (case-sensitivity tests)
    """
    server = HTTPServer()
    server.start()
    server.expect_request('/products').respond_with_data(
        """
        <!DOCTYPE html>
        <html>
        <head><title>Products</title></head>
        <body>
            <h1>Product Catalog</h1>
            <div id="main">
                <table class="products">
                    <thead>
                        <tr><th>Name</th><th>Price</th><th>Rating</th></tr>
                    </thead>
                    <tbody>
                        <tr class="product-row"><td>Widget A</td><td>$29.99</td><td>4.5 stars</td></tr>
                        <tr class="product-row"><td>Widget B</td><td>$49.99</td><td>4.2 stars</td></tr>
                        <tr class="product-row"><td>Gadget C</td><td>$19.50</td><td>3.8 stars</td></tr>
                        <tr class="product-row"><td>Gadget D</td><td>$99.00</td><td>4.9 stars</td></tr>
                    </tbody>
                </table>
                <div class="pagination">
                    <a href="/products?page=1" class="page-link active">1</a>
                    <a href="/products?page=2" class="page-link">2</a>
                    <a href="/products?page=3" class="page-link">3</a>
                </div>
            </div>
            <footer id="footer">
                <p>Best price guarantee on all items.</p>
                <p>Contact us at support@example.com</p>
            </footer>
        </body>
        </html>
        """,
        content_type='text/html',
    )
    server.expect_request('/articles').respond_with_data(
        """
        <!DOCTYPE html>
        <html>
        <head><title>Articles</title></head>
        <body>
            <article id="post-1">
                <h2>Introduction to Python</h2>
                <p>Python is a versatile programming language used in web development, data science, and automation.</p>
                <a href="/articles/python" class="read-more">Read more</a>
            </article>
            <article id="post-2">
                <h2>JavaScript for Beginners</h2>
                <p>JavaScript powers the interactive web. Learn about DOM manipulation and event handling.</p>
                <a href="/articles/javascript" class="read-more">Read more</a>
            </article>
            <article id="post-3">
                <h2>Advanced CSS Techniques</h2>
                <p>Master CSS Grid, Flexbox, and custom properties for modern web layouts.</p>
                <a href="/articles/css" class="read-more">Read more</a>
            </article>
        </body>
        </html>
        """,
        content_type='text/html',
    )
    server.expect_request('/empty').respond_with_data(
        """
        <!DOCTYPE html>
        <html>
        <head><title>Empty</title></head>
        <body>
            <div id="content"></div>
        </body>
        </html>
        """,
        content_type='text/html',
    )
    server.expect_request('/case-test').respond_with_data(
        """
        <!DOCTYPE html>
        <html>
        <head><title>Case Test</title></head>
        <body>
            <p>The Quick Brown Fox jumps over the lazy dog.</p>
            <p>QUICK BROWN FOX is an uppercase variant.</p>
            <p>quick brown fox is a lowercase variant.</p>
        </body>
        </html>
        """,
        content_type='text/html',
    )
    yield server
    server.stop()
@pytest.fixture(scope='session')
def base_url(http_server):
    """Root URL (http://host:port) of the session-scoped test HTTP server."""
    return f'http://{http_server.host}:{http_server.port}'
@pytest.fixture(scope='module')
async def browser_session():
    """Shared headless browser session for the whole test module.

    keep_alive=True prevents individual tests from tearing the browser down;
    the fixture kills it after the module. user_data_dir=None keeps the
    profile ephemeral.
    """
    session = BrowserSession(
        browser_profile=BrowserProfile(
            headless=True,
            user_data_dir=None,
            keep_alive=True,
        )
    )
    await session.start()
    yield session
    await session.kill()
@pytest.fixture(scope='function')
def tools():
    """Fresh Tools registry for each test function."""
    registry_tools = Tools()
    return registry_tools
# --- Helper ---
async def _navigate_and_wait(tools, browser_session, url):
    """Navigate to URL and wait for page load.

    The fixed 0.5s sleep gives the page time to settle before a test
    inspects it — presumably enough for these small local pages; confirm
    if tests become flaky.
    """
    await tools.navigate(url=url, new_tab=False, browser_session=browser_session)
    await asyncio.sleep(0.5)
# --- search_page tests ---
class TestSearchPage:
"""Tests for the search_page action."""
async def test_literal_text_search(self, tools, browser_session, base_url):
    """Literal text search finds matches with context."""
    await _navigate_and_wait(tools, browser_session, f'{base_url}/products')
    found = await tools.search_page(pattern='Widget A', browser_session=browser_session)
    assert isinstance(found, ActionResult)
    assert found.error is None
    body = found.extracted_content
    assert body is not None
    # The match itself plus a match count are reported.
    assert 'Widget A' in body
    assert '1 match' in body
async def test_regex_search_prices(self, tools, browser_session, base_url):
    """Regex search finds all price patterns on the page."""
    await _navigate_and_wait(tools, browser_session, f'{base_url}/products')
    # Dollar amounts with two decimals, e.g. $29.99.
    result = await tools.search_page(pattern=r'\$\d+\.\d{2}', regex=True, browser_session=browser_session)
    assert isinstance(result, ActionResult)
    assert result.error is None
    assert result.extracted_content is not None
    # Should find $29.99, $49.99, $19.50, $99.00
    assert '4 matches' in result.extracted_content
    assert '$29.99' in result.extracted_content
    assert '$49.99' in result.extracted_content
async def test_css_scope_limits_search(self, tools, browser_session, base_url):
"""css_scope limits search to elements within the selector."""
await _navigate_and_wait(tools, browser_session, f'{base_url}/products')
# Search only in footer
result = await tools.search_page(pattern='price', css_scope='#footer', browser_session=browser_session)
assert isinstance(result, ActionResult)
assert result.error is None
assert result.extracted_content is not None
# "Best price guarantee" is in footer
assert '1 match' in result.extracted_content
assert 'guarantee' in result.extracted_content
async def test_case_insensitive_default(self, tools, browser_session, base_url):
"""Search is case-insensitive by default."""
await _navigate_and_wait(tools, browser_session, f'{base_url}/case-test')
result = await tools.search_page(pattern='quick brown fox', browser_session=browser_session)
assert isinstance(result, ActionResult)
assert result.error is None
assert result.extracted_content is not None
# Should match all three variants (Quick, QUICK, quick)
assert '3 matches' in result.extracted_content
async def test_case_sensitive(self, tools, browser_session, base_url):
"""case_sensitive=True restricts to exact case."""
await _navigate_and_wait(tools, browser_session, f'{base_url}/case-test')
result = await tools.search_page(pattern='QUICK BROWN FOX', case_sensitive=True, browser_session=browser_session)
assert isinstance(result, ActionResult)
assert result.error is None
assert result.extracted_content is not None
assert '1 match' in result.extracted_content
async def test_max_results(self, tools, browser_session, base_url):
"""max_results limits the number of returned matches."""
await _navigate_and_wait(tools, browser_session, f'{base_url}/products')
result = await tools.search_page(pattern=r'\$\d+\.\d{2}', regex=True, max_results=2, browser_session=browser_session)
assert isinstance(result, ActionResult)
assert result.error is None
assert result.extracted_content is not None
# Total should still show 4, but only 2 are displayed
assert '4 matches' in result.extracted_content
assert 'Increase max_results' in result.extracted_content
async def test_no_matches(self, tools, browser_session, base_url):
"""No matches returns a clean message, not an error."""
await _navigate_and_wait(tools, browser_session, f'{base_url}/products')
result = await tools.search_page(pattern='xyznonexistent', browser_session=browser_session)
assert isinstance(result, ActionResult)
assert result.error is None
assert result.extracted_content is not None
assert 'No matches found' in result.extracted_content
async def test_element_path_in_results(self, tools, browser_session, base_url):
"""Matches include the element path for context."""
await _navigate_and_wait(tools, browser_session, f'{base_url}/products')
result = await tools.search_page(pattern='guarantee', browser_session=browser_session)
assert isinstance(result, ActionResult)
assert result.error is None
assert result.extracted_content is not None
# Should show element path containing footer
assert '(in' in result.extracted_content
async def test_invalid_css_scope(self, tools, browser_session, base_url):
"""Invalid css_scope returns a clear error."""
await _navigate_and_wait(tools, browser_session, f'{base_url}/products')
result = await tools.search_page(pattern='test', css_scope='#nonexistent-scope', browser_session=browser_session)
assert isinstance(result, ActionResult)
assert result.error is not None
assert 'scope' in result.error.lower() or 'not found' in result.error.lower()
async def test_memory_set(self, tools, browser_session, base_url):
"""long_term_memory is set with match count summary."""
await _navigate_and_wait(tools, browser_session, f'{base_url}/products')
result = await tools.search_page(pattern='Widget', browser_session=browser_session)
assert isinstance(result, ActionResult)
assert result.long_term_memory is not None
assert 'Widget' in result.long_term_memory
assert 'match' in result.long_term_memory
# --- find_elements tests ---
class TestFindElements:
	"""Exercise the find_elements action against the local fixture pages."""

	async def test_basic_selector(self, tools, browser_session, base_url):
		"""A simple class selector returns all matching rows."""
		await _navigate_and_wait(tools, browser_session, f'{base_url}/products')
		res = await tools.find_elements(selector='tr.product-row', browser_session=browser_session)
		assert isinstance(res, ActionResult)
		assert res.error is None
		assert res.extracted_content is not None
		assert '4 elements' in res.extracted_content
		assert 'Widget A' in res.extracted_content
		assert 'Gadget D' in res.extracted_content

	async def test_attribute_extraction(self, tools, browser_session, base_url):
		"""Requested attributes are pulled out of each matched element."""
		await _navigate_and_wait(tools, browser_session, f'{base_url}/products')
		res = await tools.find_elements(
			selector='a.page-link',
			attributes=['href', 'class'],
			browser_session=browser_session,
		)
		assert isinstance(res, ActionResult)
		assert res.error is None
		assert res.extracted_content is not None
		assert '3 elements' in res.extracted_content
		assert 'href=' in res.extracted_content
		assert '/products?page=' in res.extracted_content

	async def test_max_results_limiting(self, tools, browser_session, base_url):
		"""max_results truncates the listing while the total stays accurate."""
		await _navigate_and_wait(tools, browser_session, f'{base_url}/products')
		res = await tools.find_elements(selector='tr.product-row', max_results=2, browser_session=browser_session)
		assert isinstance(res, ActionResult)
		assert res.error is None
		assert res.extracted_content is not None
		assert '4 elements' in res.extracted_content
		assert 'Showing 2 of 4' in res.extracted_content

	async def test_no_matching_elements(self, tools, browser_session, base_url):
		"""An unmatched selector produces a friendly message, not an error."""
		await _navigate_and_wait(tools, browser_session, f'{base_url}/products')
		res = await tools.find_elements(selector='div.nonexistent', browser_session=browser_session)
		assert isinstance(res, ActionResult)
		assert res.error is None
		assert res.extracted_content is not None
		assert 'No elements found' in res.extracted_content

	async def test_invalid_selector(self, tools, browser_session, base_url):
		"""A malformed selector is reported as an error instead of crashing."""
		await _navigate_and_wait(tools, browser_session, f'{base_url}/products')
		res = await tools.find_elements(selector='[[[invalid', browser_session=browser_session)
		assert isinstance(res, ActionResult)
		assert res.error is not None
		assert 'selector' in res.error.lower() or 'invalid' in res.error.lower()

	async def test_include_text_false(self, tools, browser_session, base_url):
		"""With include_text=False the body text is dropped from the output."""
		await _navigate_and_wait(tools, browser_session, f'{base_url}/articles')
		res = await tools.find_elements(selector='article', include_text=False, browser_session=browser_session)
		assert isinstance(res, ActionResult)
		assert res.error is None
		assert res.extracted_content is not None
		assert '3 elements' in res.extracted_content
		# The tag itself must still be listed even without any text content.
		assert '<article>' in res.extracted_content

	async def test_nested_selectors(self, tools, browser_session, base_url):
		"""Descendant selectors resolve against the full DOM tree."""
		await _navigate_and_wait(tools, browser_session, f'{base_url}/articles')
		res = await tools.find_elements(
			selector='article a.read-more',
			attributes=['href'],
			browser_session=browser_session,
		)
		assert isinstance(res, ActionResult)
		assert res.error is None
		assert res.extracted_content is not None
		assert '3 elements' in res.extracted_content
		assert '/articles/python' in res.extracted_content
		assert '/articles/javascript' in res.extracted_content
		assert '/articles/css' in res.extracted_content

	async def test_children_count(self, tools, browser_session, base_url):
		"""Each listed element reports how many children it has."""
		await _navigate_and_wait(tools, browser_session, f'{base_url}/products')
		res = await tools.find_elements(selector='table.products thead tr', browser_session=browser_session)
		assert isinstance(res, ActionResult)
		assert res.error is None
		assert res.extracted_content is not None
		assert '1 element' in res.extracted_content
		# The single header row holds three <th> cells.
		assert '3 children' in res.extracted_content

	async def test_memory_set(self, tools, browser_session, base_url):
		"""The action writes an element-count summary to long_term_memory."""
		await _navigate_and_wait(tools, browser_session, f'{base_url}/products')
		res = await tools.find_elements(selector='tr.product-row', browser_session=browser_session)
		assert isinstance(res, ActionResult)
		assert res.long_term_memory is not None
		assert '4 element' in res.long_term_memory

	async def test_empty_page(self, tools, browser_session, base_url):
		"""A near-empty document is handled without raising."""
		await _navigate_and_wait(tools, browser_session, f'{base_url}/empty')
		res = await tools.find_elements(selector='p', browser_session=browser_session)
		assert isinstance(res, ActionResult)
		assert res.error is None
		assert res.extracted_content is not None
		assert 'No elements found' in res.extracted_content
# --- Registration tests ---
class TestRegistration:
	"""Verify the new actions appear in (and can be removed from) the registry."""

	async def test_search_page_registered(self, tools):
		"""search_page ships in the default registry."""
		assert 'search_page' in tools.registry.registry.actions

	async def test_find_elements_registered(self, tools):
		"""find_elements ships in the default registry."""
		assert 'find_elements' in tools.registry.registry.actions

	async def test_excluded_actions(self):
		"""exclude_actions removes only the named actions, nothing else."""
		trimmed = Tools(exclude_actions=['search_page', 'find_elements'])
		assert 'search_page' not in trimmed.registry.registry.actions
		assert 'find_elements' not in trimmed.registry.registry.actions
		# Unrelated actions must survive the exclusion.
		assert 'navigate' in trimmed.registry.registry.actions
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/test_search_find.py",
"license": "MIT License",
"lines": 335,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/test_server_locking.py | """Tests for server locking to prevent race conditions."""
import os
import subprocess
import sys
import tempfile
import time
from pathlib import Path
import portalocker
import pytest
from browser_use.skill_cli.utils import (
cleanup_session_files,
get_lock_path,
get_pid_path,
is_server_running,
is_session_locked,
kill_orphaned_server,
try_acquire_server_lock,
)
@pytest.fixture
def test_session():
	"""Yield a collision-free session name and remove its files afterwards."""
	# PID + nanosecond timestamp keeps parallel runs from clashing.
	name = f'test-lock-{os.getpid()}-{time.time_ns()}'
	yield name
	cleanup_session_files(name)
def test_lock_path_generation(test_session):
	"""Lock files live in the temp dir and embed the session name."""
	lock_file = get_lock_path(test_session)
	assert lock_file.parent == Path(tempfile.gettempdir())
	assert lock_file.name == f'browser-use-{test_session}.lock'
def test_try_acquire_server_lock_success(test_session):
	"""The lock is exclusive: a second acquire fails until the first is released."""
	first = try_acquire_server_lock(test_session)
	assert first is not None
	# While held, a second attempt must be refused.
	second = try_acquire_server_lock(test_session)
	assert second is None
	# Release the original holder.
	portalocker.unlock(first)
	first.close()
	# With the lock free, acquisition succeeds again.
	third = try_acquire_server_lock(test_session)
	assert third is not None
	portalocker.unlock(third)
	third.close()
def test_is_session_locked(test_session):
	"""is_session_locked tracks the lock through its full lifecycle."""
	# Fresh session: nothing holds the lock yet.
	assert is_session_locked(test_session) is False
	# Take the lock ourselves.
	holder = try_acquire_server_lock(test_session)
	assert holder is not None
	# The probe must now report it as held.
	assert is_session_locked(test_session) is True
	# Give it back.
	portalocker.unlock(holder)
	holder.close()
	# And the probe returns to False.
	assert is_session_locked(test_session) is False
def test_kill_orphaned_server_no_pid_file(test_session):
	"""With no PID file there is nothing to kill, so the call reports False."""
	assert kill_orphaned_server(test_session) is False
def test_kill_orphaned_server_with_lock(test_session):
	"""A server that still holds its lock is healthy and must not be killed."""
	# Point the PID file at ourselves to simulate a live server.
	pid_file = get_pid_path(test_session)
	pid_file.write_text(str(os.getpid()))
	# Holding the lock marks the "server" as healthy.
	holder = try_acquire_server_lock(test_session)
	assert holder is not None
	# Healthy server (lock held) -> no kill.
	assert kill_orphaned_server(test_session) is False
	portalocker.unlock(holder)
	holder.close()
def test_cleanup_includes_lock_file(test_session):
	"""cleanup_session_files removes both the lock and the PID file."""
	lock_file = get_lock_path(test_session)
	pid_file = get_pid_path(test_session)
	# Materialise both files first.
	lock_file.touch()
	pid_file.write_text('12345')
	assert lock_file.exists()
	assert pid_file.exists()
	cleanup_session_files(test_session)
	assert not lock_file.exists()
	assert not pid_file.exists()
def test_concurrent_lock_acquisition(test_session):
	"""Test that only one process can hold the lock."""
	# Ensure the lock file exists before either process tries to lock it.
	lock_path = get_lock_path(test_session)
	lock_path.parent.mkdir(parents=True, exist_ok=True)
	lock_path.touch()
	# Acquire lock in current process
	lock = try_acquire_server_lock(test_session)
	assert lock is not None
	# Try to acquire in subprocess - should fail
	# NOTE: the child uses portalocker directly on the same lock file so the
	# contention is exercised at the OS flock level, not via our helper.
	result = subprocess.run(
		[
			sys.executable,
			'-c',
			f'''
import portalocker
from pathlib import Path
lock_path = Path("{lock_path}")
f = open(lock_path, 'r+')
try:
    portalocker.lock(f, portalocker.LOCK_EX | portalocker.LOCK_NB)
    print("ACQUIRED")
except portalocker.LockException:
    print("BLOCKED")
f.close()
''',
		],
		capture_output=True,
		text=True,
		timeout=5,
	)
	# While we hold the lock, the child must be refused (non-blocking attempt).
	assert 'BLOCKED' in result.stdout
	# Release lock
	portalocker.unlock(lock)
	lock.close()
	# Now subprocess should succeed
	result = subprocess.run(
		[
			sys.executable,
			'-c',
			f'''
import portalocker
from pathlib import Path
lock_path = Path("{lock_path}")
f = open(lock_path, 'r+')
try:
    portalocker.lock(f, portalocker.LOCK_EX | portalocker.LOCK_NB)
    print("ACQUIRED")
    portalocker.unlock(f)
except portalocker.LockException:
    print("BLOCKED")
f.close()
''',
		],
		capture_output=True,
		text=True,
		timeout=5,
	)
	# With the lock free, the child acquires (and releases) it successfully.
	assert 'ACQUIRED' in result.stdout
def test_lock_released_on_process_death(test_session):
	"""Test that the lock is automatically released when its holder dies.

	A child process takes the lock and sleeps; after we kill it, the OS
	must release the lock so the parent can acquire it.
	"""
	lock_path = get_lock_path(test_session)
	lock_path.parent.mkdir(parents=True, exist_ok=True)
	lock_path.touch()
	# Start a subprocess that grabs the lock and then just holds it.
	proc = subprocess.Popen(
		[
			sys.executable,
			'-c',
			f'''
import portalocker
import time
from pathlib import Path
lock_path = Path("{lock_path}")
f = open(lock_path, 'r+')
portalocker.lock(f, portalocker.LOCK_EX | portalocker.LOCK_NB)
print("LOCKED", flush=True)
time.sleep(60)  # Hold lock
''',
		],
		stdout=subprocess.PIPE,
		text=True,
	)
	try:
		# Wait until the child confirms it holds the lock.
		assert proc.stdout is not None
		line = proc.stdout.readline()
		assert 'LOCKED' in line
		# While the child is alive, we must be refused.
		lock = try_acquire_server_lock(test_session)
		assert lock is None
	finally:
		# Always reap the child, even when an assertion above fails;
		# otherwise an orphan keeps the lock for up to 60 seconds and
		# the Popen handle leaks.
		proc.terminate()
		proc.wait(timeout=5)
	# Small delay for OS to release lock after process death.
	time.sleep(0.1)
	# Now acquisition should succeed again.
	lock = try_acquire_server_lock(test_session)
	assert lock is not None
	portalocker.unlock(lock)
	lock.close()
def test_is_server_running_without_pid(test_session):
	"""Without a PID file the server is reported as not running."""
	assert is_server_running(test_session) is False
def test_is_server_running_with_current_pid(test_session):
	"""A PID file naming a live process (ourselves) means the server is running."""
	pid_file = get_pid_path(test_session)
	pid_file.write_text(str(os.getpid()))
	assert is_server_running(test_session) is True
def test_is_server_running_with_dead_pid(test_session):
	"""A PID file naming a dead process means the server is not running."""
	pid_file = get_pid_path(test_session)
	# A PID this large is effectively guaranteed not to exist.
	pid_file.write_text('999999999')
	assert is_server_running(test_session) is False
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/test_server_locking.py",
"license": "MIT License",
"lines": 198,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/test_session_browser_mode.py | """Tests for session browser mode validation.
When a session is started with a specific browser mode (chromium, remote, real),
subsequent commands with a different mode should error with helpful guidance.
"""
import json
import tempfile
from pathlib import Path
from browser_use.skill_cli.main import get_session_metadata_path
def test_get_session_metadata_path():
	"""Metadata files live in the temp dir under a session-derived name."""
	meta_file = get_session_metadata_path('default')
	assert meta_file.parent == Path(tempfile.gettempdir())
	assert meta_file.name == 'browser-use-default.meta'
def test_get_session_metadata_path_custom_session():
	"""A custom session name is embedded in the metadata filename."""
	meta_file = get_session_metadata_path('my-session')
	assert meta_file.name == 'browser-use-my-session.meta'
def test_metadata_file_format():
	"""Test metadata file format matches expected structure.

	Writes a metadata record the way the production code does, then reads
	it back and verifies every field survives the JSON round trip.
	"""
	meta_path = get_session_metadata_path('test-format')
	try:
		# Write metadata as the code does
		meta_path.write_text(
			json.dumps(
				{
					'browser_mode': 'chromium',
					'headed': False,
					'profile': None,
				}
			)
		)
		# Read and verify
		meta = json.loads(meta_path.read_text())
		assert meta['browser_mode'] == 'chromium'
		assert meta['headed'] is False
		assert meta['profile'] is None
	finally:
		# unlink(missing_ok=True) replaces the racy exists()/unlink() pair.
		meta_path.unlink(missing_ok=True)
def test_metadata_file_remote_mode():
	"""Test metadata file with remote browser mode.

	Same round-trip check as the chromium case but with the remote-mode
	field values (headed browser, named cloud profile).
	"""
	meta_path = get_session_metadata_path('test-remote')
	try:
		meta_path.write_text(
			json.dumps(
				{
					'browser_mode': 'remote',
					'headed': True,
					'profile': 'cloud-profile-123',
				}
			)
		)
		meta = json.loads(meta_path.read_text())
		assert meta['browser_mode'] == 'remote'
		assert meta['headed'] is True
		assert meta['profile'] == 'cloud-profile-123'
	finally:
		# unlink(missing_ok=True) replaces the racy exists()/unlink() pair.
		meta_path.unlink(missing_ok=True)
def test_metadata_cleanup():
	"""Test that metadata file can be cleaned up.

	Unlike its siblings, the original had no try/finally, so a failed
	assertion leaked the temp file into the shared temp dir; the finally
	block below guarantees removal either way.
	"""
	meta_path = get_session_metadata_path('test-cleanup')
	try:
		meta_path.write_text(json.dumps({'browser_mode': 'chromium'}))
		assert meta_path.exists()
		# Cleanup
		meta_path.unlink()
		assert not meta_path.exists()
	finally:
		# Safety net: remove the file even if an assertion above failed.
		meta_path.unlink(missing_ok=True)
def test_mode_mismatch_remote_on_local_should_error():
	"""Test that requesting remote on local session triggers error condition.

	This is the problematic case: user wants cloud features (live_url) but
	session is running locally. They would silently lose those features.
	"""
	meta_path = get_session_metadata_path('test-mismatch-error')
	try:
		# Simulate existing session with chromium (local) mode
		meta_path.write_text(json.dumps({'browser_mode': 'chromium'}))
		meta = json.loads(meta_path.read_text())
		existing_mode = meta.get('browser_mode', 'chromium')
		requested_mode = 'remote'
		# This combination should trigger an error
		should_error = requested_mode == 'remote' and existing_mode != 'remote'
		assert should_error is True
	finally:
		# unlink(missing_ok=True) replaces the racy exists()/unlink() pair.
		meta_path.unlink(missing_ok=True)
def test_mode_mismatch_local_on_remote_should_allow():
	"""Test that requesting local on remote session is allowed.

	This case is fine: user gets a remote browser (more features than requested).
	The remote session works just like a local one, just with extra features.
	"""
	meta_path = get_session_metadata_path('test-mismatch-allow')
	try:
		# Simulate existing session with remote mode
		meta_path.write_text(json.dumps({'browser_mode': 'remote'}))
		meta = json.loads(meta_path.read_text())
		existing_mode = meta.get('browser_mode')
		assert existing_mode == 'remote'
		requested_mode = 'chromium'  # Default mode when user doesn't specify --browser
		# This combination should NOT trigger an error
		# (user requested chromium, but session is remote - that's fine)
		should_error = requested_mode == 'remote' and existing_mode != 'remote'
		assert should_error is False
	finally:
		# unlink(missing_ok=True) replaces the racy exists()/unlink() pair.
		meta_path.unlink(missing_ok=True)
def test_mode_match_detection_logic():
	"""Test that matching modes pass validation."""
	meta_path = get_session_metadata_path('test-match')
	try:
		# Simulate existing session with chromium mode
		meta_path.write_text(json.dumps({'browser_mode': 'chromium'}))
		# Check match passes
		meta = json.loads(meta_path.read_text())
		existing_mode = meta.get('browser_mode', 'chromium')
		requested_mode = 'chromium'
		assert existing_mode == requested_mode
	finally:
		# unlink(missing_ok=True) replaces the racy exists()/unlink() pair.
		meta_path.unlink(missing_ok=True)
def test_different_sessions_independent():
	"""Test that different session names are independent.

	Two metadata files written under different session names must not
	interfere with each other.
	"""
	session1_meta = get_session_metadata_path('session-a')
	session2_meta = get_session_metadata_path('session-b')
	try:
		# Session A with chromium
		session1_meta.write_text(json.dumps({'browser_mode': 'chromium'}))
		# Session B with remote
		session2_meta.write_text(json.dumps({'browser_mode': 'remote'}))
		# Verify they are independent
		meta1 = json.loads(session1_meta.read_text())
		meta2 = json.loads(session2_meta.read_text())
		assert meta1['browser_mode'] == 'chromium'
		assert meta2['browser_mode'] == 'remote'
	finally:
		# unlink(missing_ok=True) replaces the racy exists()/unlink() pairs.
		session1_meta.unlink(missing_ok=True)
		session2_meta.unlink(missing_ok=True)
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/test_session_browser_mode.py",
"license": "MIT License",
"lines": 138,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/test_setup_command.py | """Tests for setup command.
These tests call real functions without mocking. They verify the
structure and logic of the setup command against actual system state.
"""
from browser_use.skill_cli.commands import setup
async def test_setup_local_mode():
	"""Setup in local mode completes and returns a structured response."""
	resp = await setup.handle(
		'setup',
		{
			'mode': 'local',
			'api_key': None,
			'yes': True,
			'json': True,
		},
	)
	# The handler always answers with a dict carrying a status or an error.
	assert isinstance(resp, dict)
	assert 'status' in resp or 'error' in resp
	if 'status' in resp:
		assert resp['status'] == 'success'
		assert resp['mode'] == 'local'
		assert 'checks' in resp
		assert 'validation' in resp
async def test_setup_remote_mode():
	"""Setup in remote mode completes and returns a structured response."""
	resp = await setup.handle(
		'setup',
		{
			'mode': 'remote',
			'api_key': None,
			'yes': True,
			'json': True,
		},
	)
	# The handler always answers with a dict carrying a status or an error.
	assert isinstance(resp, dict)
	assert 'status' in resp or 'error' in resp
	if 'status' in resp:
		assert resp['status'] == 'success'
		assert resp['mode'] == 'remote'
		assert 'checks' in resp
		assert 'validation' in resp
async def test_setup_full_mode():
	"""Setup in full mode completes and returns a structured response."""
	resp = await setup.handle(
		'setup',
		{
			'mode': 'full',
			'api_key': None,
			'yes': True,
			'json': True,
		},
	)
	assert isinstance(resp, dict)
	assert 'status' in resp or 'error' in resp
	if 'status' in resp:
		assert resp['status'] == 'success'
		assert resp['mode'] == 'full'
async def test_setup_invalid_mode():
	"""An unknown mode is rejected with an explanatory error."""
	resp = await setup.handle(
		'setup',
		{
			'mode': 'invalid',
			'api_key': None,
			'yes': False,
			'json': False,
		},
	)
	assert 'error' in resp
	assert 'Invalid mode' in resp['error']
async def test_run_checks_local():
	"""Local mode checks cover the package and browser, nothing cloud-side."""
	report = await setup.run_checks('local')
	assert isinstance(report, dict)
	assert 'browser_use_package' in report
	assert report['browser_use_package']['status'] in ('ok', 'error')
	# Local mode inspects the browser install...
	assert 'browser' in report
	assert report['browser']['status'] in ('ok', 'error')
	# ...but never the API key or cloudflared.
	assert 'api_key' not in report
	assert 'cloudflared' not in report
async def test_run_checks_remote():
	"""Remote mode checks cover cloud prerequisites, not the local browser."""
	report = await setup.run_checks('remote')
	assert isinstance(report, dict)
	assert 'browser_use_package' in report
	# Remote mode inspects the API key and cloudflared...
	assert 'api_key' in report
	assert report['api_key']['status'] in ('ok', 'missing')
	assert 'cloudflared' in report
	assert report['cloudflared']['status'] in ('ok', 'missing')
	# ...but never the local browser.
	assert 'browser' not in report
async def test_run_checks_full():
	"""Full mode runs the union of local and remote checks."""
	report = await setup.run_checks('full')
	assert isinstance(report, dict)
	# Everything is checked in full mode.
	assert 'browser_use_package' in report
	assert 'browser' in report
	assert 'api_key' in report
	assert 'cloudflared' in report
def test_plan_actions_no_actions_needed():
	"""All-ok checks produce an empty action plan."""
	report = {
		'browser_use_package': {'status': 'ok'},
		'browser': {'status': 'ok'},
		'api_key': {'status': 'ok'},
		'cloudflared': {'status': 'ok'},
	}
	planned = setup.plan_actions(report, 'local', yes=False, api_key=None)
	assert planned == []
def test_plan_actions_install_browser():
	"""A failing browser check yields an install_browser action."""
	report = {
		'browser_use_package': {'status': 'ok'},
		'browser': {'status': 'error'},
	}
	planned = setup.plan_actions(report, 'local', yes=False, api_key=None)
	assert any(step['type'] == 'install_browser' for step in planned)
def test_plan_actions_configure_api_key():
	"""A supplied API key is planned for configuration."""
	report = {
		'api_key': {'status': 'missing'},
	}
	planned = setup.plan_actions(report, 'remote', yes=True, api_key='test_key')
	assert any(step['type'] == 'configure_api_key' for step in planned)
def test_plan_actions_prompt_api_key():
	"""Missing key without --yes means the user gets prompted."""
	report = {
		'api_key': {'status': 'missing'},
	}
	planned = setup.plan_actions(report, 'remote', yes=False, api_key=None)
	assert any(step['type'] == 'prompt_api_key' for step in planned)
def test_plan_actions_install_cloudflared():
	"""Missing cloudflared yields an install_cloudflared action."""
	report = {
		'cloudflared': {'status': 'missing'},
	}
	planned = setup.plan_actions(report, 'remote', yes=True, api_key=None)
	assert any(step['type'] == 'install_cloudflared' for step in planned)
async def test_check_browser():
	"""_check_browser returns a dict with a status and a human message."""
	check = await setup._check_browser()
	assert isinstance(check, dict)
	assert 'status' in check
	assert check['status'] in ('ok', 'error')
	assert 'message' in check
async def test_validate_setup_local():
	"""Local validation probes import + browser and skips remote concerns."""
	report = await setup.validate_setup('local')
	assert isinstance(report, dict)
	assert 'browser_use_import' in report
	assert 'browser_available' in report
	# Remote-only probes must be absent in local mode.
	assert 'api_key_available' not in report
async def test_validate_setup_remote():
	"""Remote validation probes cloud prerequisites and skips the browser."""
	report = await setup.validate_setup('remote')
	assert isinstance(report, dict)
	assert 'browser_use_import' in report
	assert 'api_key_available' in report
	assert 'cloudflared_available' in report
	# Local-only probes must be absent in remote mode.
	assert 'browser_available' not in report
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/test_setup_command.py",
"license": "MIT License",
"lines": 169,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/test_structured_extraction.py | """Tests for schema-enforced structured extraction."""
import asyncio
import json
import tempfile
from unittest.mock import AsyncMock
import pytest
from pydantic import ValidationError
from pytest_httpserver import HTTPServer
from browser_use.agent.views import ActionResult
from browser_use.browser import BrowserProfile, BrowserSession
from browser_use.filesystem.file_system import FileSystem
from browser_use.llm.base import BaseChatModel
from browser_use.llm.views import ChatInvokeCompletion
from browser_use.tools.extraction.schema_utils import schema_dict_to_pydantic_model
from browser_use.tools.extraction.views import ExtractionResult
from browser_use.tools.service import Tools
# ---------------------------------------------------------------------------
# Unit tests: schema_dict_to_pydantic_model
# ---------------------------------------------------------------------------
class TestSchemaDictToPydanticModel:
"""Unit tests for the JSON-Schema → Pydantic model converter."""
def test_flat_object(self):
	"""A flat object schema yields a model with plain scalar fields."""
	spec = {
		'type': 'object',
		'properties': {
			'name': {'type': 'string'},
			'age': {'type': 'integer'},
		},
		'required': ['name', 'age'],
	}
	Built = schema_dict_to_pydantic_model(spec)
	obj = Built(name='Alice', age=30)
	assert obj.name == 'Alice'  # type: ignore[attr-defined]
	assert obj.age == 30  # type: ignore[attr-defined]
def test_nested_object(self):
	"""Nested object properties become nested sub-models."""
	spec = {
		'type': 'object',
		'properties': {
			'person': {
				'type': 'object',
				'properties': {
					'first': {'type': 'string'},
					'last': {'type': 'string'},
				},
				'required': ['first'],
			},
		},
		'required': ['person'],
	}
	Built = schema_dict_to_pydantic_model(spec)
	obj = Built(person={'first': 'Bob', 'last': 'Smith'})
	# The nested dict is coerced into the generated sub-model.
	assert obj.person.first == 'Bob'  # type: ignore[attr-defined]
def test_array_of_objects(self):
	"""Array-of-objects properties become lists of sub-models."""
	spec = {
		'type': 'object',
		'properties': {
			'items': {
				'type': 'array',
				'items': {
					'type': 'object',
					'properties': {
						'id': {'type': 'integer'},
						'label': {'type': 'string'},
					},
					'required': ['id', 'label'],
				},
			},
		},
		'required': ['items'],
	}
	Built = schema_dict_to_pydantic_model(spec)
	obj = Built(items=[{'id': 1, 'label': 'a'}, {'id': 2, 'label': 'b'}])
	assert len(obj.items) == 2  # type: ignore[attr-defined]
	assert obj.items[0].id == 1  # type: ignore[attr-defined]
def test_array_of_primitives(self):
	"""Arrays of primitives map to plain typed lists."""
	spec = {
		'type': 'object',
		'properties': {
			'tags': {'type': 'array', 'items': {'type': 'string'}},
		},
		'required': ['tags'],
	}
	Built = schema_dict_to_pydantic_model(spec)
	obj = Built(tags=['a', 'b', 'c'])
	assert obj.tags == ['a', 'b', 'c']  # type: ignore[attr-defined]
def test_enum_field(self):
	"""String enum constraints accept in-set values."""
	spec = {
		'type': 'object',
		'properties': {
			'status': {'type': 'string', 'enum': ['active', 'inactive']},
		},
		'required': ['status'],
	}
	Built = schema_dict_to_pydantic_model(spec)
	obj = Built(status='active')
	assert obj.status == 'active'  # type: ignore[attr-defined]
def test_optional_enum_defaults_to_none(self):
	"""Non-required enum fields default to None, not an out-of-set empty string."""
	spec = {
		'type': 'object',
		'properties': {
			'name': {'type': 'string'},
			'priority': {'type': 'string', 'enum': ['low', 'medium', 'high']},
		},
		'required': ['name'],
	}
	Built = schema_dict_to_pydantic_model(spec)
	obj = Built(name='task1')
	assert obj.priority is None  # type: ignore[attr-defined]
	# Serialized output must not contain an out-of-set value either.
	dumped = obj.model_dump(mode='json')
	assert dumped['priority'] is None
	# Supplying a value from the set still works.
	obj2 = Built(name='task2', priority='high')
	assert obj2.priority == 'high'  # type: ignore[attr-defined]
def test_optional_fields_get_type_appropriate_defaults(self):
	"""Each optional primitive gets the zero value of its declared type."""
	spec = {
		'type': 'object',
		'properties': {
			'name': {'type': 'string'},
			'nickname': {'type': 'string'},
			'score': {'type': 'number'},
			'rank': {'type': 'integer'},
			'active': {'type': 'boolean'},
			'tags': {'type': 'array', 'items': {'type': 'string'}},
		},
		'required': ['name'],
	}
	Built = schema_dict_to_pydantic_model(spec)
	obj = Built(name='Alice')
	assert obj.name == 'Alice'  # type: ignore[attr-defined]
	assert obj.nickname == ''  # type: ignore[attr-defined]
	assert obj.score == 0.0  # type: ignore[attr-defined]
	assert obj.rank == 0  # type: ignore[attr-defined]
	assert obj.active is False  # type: ignore[attr-defined]
	assert obj.tags == []  # type: ignore[attr-defined]
def test_optional_non_nullable_rejects_null(self):
"""Non-required fields that aren't nullable must reject explicit null."""
schema = {
'type': 'object',
'properties': {
'name': {'type': 'string'},
'nickname': {'type': 'string'},
},
'required': ['name'],
}
Model = schema_dict_to_pydantic_model(schema)
with pytest.raises(ValidationError):
Model(name='Alice', nickname=None)
def test_optional_with_explicit_default(self):
schema = {
'type': 'object',
'properties': {
'name': {'type': 'string'},
'color': {'type': 'string', 'default': 'blue'},
},
'required': ['name'],
}
Model = schema_dict_to_pydantic_model(schema)
instance = Model(name='Alice')
assert instance.color == 'blue' # type: ignore[attr-defined]
def test_optional_nested_object_defaults_to_none(self):
"""Non-required nested objects fall back to None since constructing a default is not feasible."""
schema = {
'type': 'object',
'properties': {
'name': {'type': 'string'},
'address': {
'type': 'object',
'properties': {'city': {'type': 'string'}},
'required': ['city'],
},
},
'required': ['name'],
}
Model = schema_dict_to_pydantic_model(schema)
instance = Model(name='Alice')
assert instance.address is None # type: ignore[attr-defined]
def test_model_name_from_title(self):
schema = {
'title': 'ProductInfo',
'type': 'object',
'properties': {'sku': {'type': 'string'}},
'required': ['sku'],
}
Model = schema_dict_to_pydantic_model(schema)
assert Model.__name__ == 'ProductInfo'
def test_model_validate_json_roundtrip(self):
schema = {
'type': 'object',
'properties': {
'x': {'type': 'number'},
'y': {'type': 'boolean'},
},
'required': ['x', 'y'],
}
Model = schema_dict_to_pydantic_model(schema)
instance = Model(x=3.14, y=True)
raw = instance.model_dump_json()
restored = Model.model_validate_json(raw)
assert restored.x == instance.x # type: ignore[attr-defined]
assert restored.y == instance.y # type: ignore[attr-defined]
def test_rejects_ref(self):
schema = {
'type': 'object',
'properties': {'item': {'$ref': '#/$defs/Item'}},
'$defs': {'Item': {'type': 'object', 'properties': {'name': {'type': 'string'}}}},
}
with pytest.raises(ValueError, match='Unsupported JSON Schema keyword'):
schema_dict_to_pydantic_model(schema)
def test_rejects_allOf(self):
schema = {
'type': 'object',
'properties': {'x': {'allOf': [{'type': 'string'}]}},
}
with pytest.raises(ValueError, match='Unsupported JSON Schema keyword'):
schema_dict_to_pydantic_model(schema)
def test_rejects_non_object_toplevel(self):
with pytest.raises(ValueError, match='type "object"'):
schema_dict_to_pydantic_model({'type': 'array', 'items': {'type': 'string'}})
def test_rejects_empty_properties(self):
with pytest.raises(ValueError, match='at least one property'):
schema_dict_to_pydantic_model({'type': 'object', 'properties': {}})
def test_extra_fields_forbidden(self):
schema = {
'type': 'object',
'properties': {'name': {'type': 'string'}},
'required': ['name'],
}
Model = schema_dict_to_pydantic_model(schema)
with pytest.raises(ValidationError):
Model(name='ok', bogus='nope')
def test_nullable_field(self):
schema = {
'type': 'object',
'properties': {
'value': {'type': 'string', 'nullable': True},
},
'required': ['value'],
}
Model = schema_dict_to_pydantic_model(schema)
instance = Model(value=None)
assert instance.value is None # type: ignore[attr-defined]
def test_field_descriptions_preserved(self):
schema = {
'type': 'object',
'properties': {
'price': {'type': 'number', 'description': 'The price in USD'},
},
'required': ['price'],
}
Model = schema_dict_to_pydantic_model(schema)
field_info = Model.model_fields['price']
assert field_info.description == 'The price in USD'
# ---------------------------------------------------------------------------
# Unit tests: ExtractionResult
# ---------------------------------------------------------------------------
class TestExtractionResult:
	"""Unit tests for the ExtractionResult container model."""

	def test_construction(self):
		"""Minimal construction fills sensible defaults for the optional fields."""
		result = ExtractionResult(
			data={'name': 'Alice'},
			schema_used={'type': 'object', 'properties': {'name': {'type': 'string'}}},
		)
		assert result.data == {'name': 'Alice'}
		assert result.is_partial is False
		assert result.source_url is None

	def test_serialization_roundtrip(self):
		"""A fully-populated result survives a JSON dump/load round trip."""
		original = ExtractionResult(
			data={'items': [1, 2]},
			schema_used={'type': 'object', 'properties': {'items': {'type': 'array'}}},
			is_partial=True,
			source_url='http://example.com',
			content_stats={'original_html_chars': 5000},
		)
		rehydrated = ExtractionResult.model_validate_json(original.model_dump_json())
		assert rehydrated == original
# ---------------------------------------------------------------------------
# Integration tests: extract action via Tools
# ---------------------------------------------------------------------------
def _make_extraction_llm(structured_response: dict | None = None, freetext_response: str = 'free text result') -> BaseChatModel:
	"""Build a mock LLM whose ainvoke serves both structured and free-text extraction calls."""
	mock_llm = AsyncMock(spec=BaseChatModel)
	# Identification attributes that callers may read off the model.
	mock_llm.provider = 'mock'
	mock_llm._verified_api_keys = True
	for attr in ('model', 'name', 'model_name'):
		setattr(mock_llm, attr, 'mock-extraction-llm')

	async def _respond(messages, output_format=None, **kwargs):
		if output_format is not None and structured_response is not None:
			# Structured path: validate the canned dict through the requested model.
			validated = output_format.model_validate(structured_response)
			return ChatInvokeCompletion(completion=validated, usage=None)
		# Otherwise fall back to the canned free-text answer.
		return ChatInvokeCompletion(completion=freetext_response, usage=None)

	mock_llm.ainvoke.side_effect = _respond
	return mock_llm
@pytest.fixture(scope='module')
async def browser_session():
	"""Module-scoped headless browser session shared by every test in this file."""
	# keep_alive=True keeps the browser running between tests; user_data_dir=None avoids touching a real profile.
	session = BrowserSession(browser_profile=BrowserProfile(headless=True, user_data_dir=None, keep_alive=True))
	await session.start()
	yield session
	# Teardown: kill the browser first, then flush and stop the session's event bus.
	await session.kill()
	await session.event_bus.stop(clear=True, timeout=5)
@pytest.fixture(scope='session')
def http_server():
	"""Session-scoped local HTTP server that serves a static /products page.

	The server is stopped in a ``finally`` block so it is not leaked if route
	registration raises after ``start()`` or the fixture is torn down abnormally.
	"""
	server = HTTPServer()
	server.start()
	try:
		server.expect_request('/products').respond_with_data(
			"""<html><body>
		<h1>Products</h1>
		<ul>
			<li>Widget A - $9.99</li>
			<li>Widget B - $19.99</li>
		</ul>
		</body></html>""",
			content_type='text/html',
		)
		yield server
	finally:
		server.stop()
@pytest.fixture(scope='session')
def base_url(http_server):
	"""Root URL of the local test HTTP server."""
	host, port = http_server.host, http_server.port
	return f'http://{host}:{port}'
class TestExtractStructured:
	"""Integration tests for the extract action's structured extraction path.

	The page-extraction LLM is mocked throughout (see ``_make_extraction_llm``),
	so these tests exercise the plumbing around it: schema handling, result-tag
	formatting, and metadata.
	"""

	async def test_structured_extraction_returns_json(self, browser_session, base_url):
		"""When output_schema is provided, extract returns structured JSON in <structured_result> tags."""
		tools = Tools()
		await tools.navigate(url=f'{base_url}/products', new_tab=False, browser_session=browser_session)
		await asyncio.sleep(0.5)  # let the page settle before extraction
		output_schema = {
			'type': 'object',
			'properties': {
				'products': {
					'type': 'array',
					'items': {
						'type': 'object',
						'properties': {
							'name': {'type': 'string'},
							'price': {'type': 'number'},
						},
						'required': ['name', 'price'],
					},
				},
			},
			'required': ['products'],
		}
		# The mocked LLM returns this dict whenever it is called with an output_format.
		mock_data = {'products': [{'name': 'Widget A', 'price': 9.99}, {'name': 'Widget B', 'price': 19.99}]}
		extraction_llm = _make_extraction_llm(structured_response=mock_data)
		with tempfile.TemporaryDirectory() as tmp:
			fs = FileSystem(tmp)
			result = await tools.extract(
				query='List all products with prices',
				output_schema=output_schema,
				browser_session=browser_session,
				page_extraction_llm=extraction_llm,
				file_system=fs,
			)
			assert isinstance(result, ActionResult)
			assert result.extracted_content is not None
			assert '<structured_result>' in result.extracted_content
			assert '</structured_result>' in result.extracted_content
			# Parse the JSON out of the tags
			start = result.extracted_content.index('<structured_result>') + len('<structured_result>')
			end = result.extracted_content.index('</structured_result>')
			parsed = json.loads(result.extracted_content[start:end].strip())
			assert parsed == mock_data
			# Metadata: the structured path records the extraction result and schema used.
			assert result.metadata is not None
			assert result.metadata['structured_extraction'] is True
			meta = result.metadata['extraction_result']
			assert meta['data'] == mock_data
			assert meta['schema_used'] == output_schema

	async def test_freetext_extraction_unchanged(self, browser_session, base_url):
		"""When output_schema is None, extract returns free-text in <result> tags (backward compat)."""
		tools = Tools()
		await tools.navigate(url=f'{base_url}/products', new_tab=False, browser_session=browser_session)
		await asyncio.sleep(0.5)
		extraction_llm = _make_extraction_llm(freetext_response='Widget A costs $9.99, Widget B costs $19.99')
		with tempfile.TemporaryDirectory() as tmp:
			fs = FileSystem(tmp)
			result = await tools.extract(
				query='What products are listed?',
				browser_session=browser_session,
				page_extraction_llm=extraction_llm,
				file_system=fs,
			)
			assert isinstance(result, ActionResult)
			assert result.extracted_content is not None
			# Free-text path: plain <result> tags, no structured markers, no metadata.
			assert '<result>' in result.extracted_content
			assert '</result>' in result.extracted_content
			assert '<structured_result>' not in result.extracted_content
			assert result.metadata is None

	async def test_invalid_schema_falls_back_to_freetext(self, browser_session, base_url):
		"""When output_schema contains unsupported keywords, fall back to free-text gracefully."""
		tools = Tools()
		await tools.navigate(url=f'{base_url}/products', new_tab=False, browser_session=browser_session)
		await asyncio.sleep(0.5)
		# $ref is rejected by schema_dict_to_pydantic_model, so extract cannot build a model.
		bad_schema = {
			'type': 'object',
			'properties': {'item': {'$ref': '#/$defs/Item'}},
			'$defs': {'Item': {'type': 'object', 'properties': {'name': {'type': 'string'}}}},
		}
		extraction_llm = _make_extraction_llm(freetext_response='fallback text')
		with tempfile.TemporaryDirectory() as tmp:
			fs = FileSystem(tmp)
			result = await tools.extract(
				query='Get products',
				output_schema=bad_schema,
				browser_session=browser_session,
				page_extraction_llm=extraction_llm,
				file_system=fs,
			)
			assert isinstance(result, ActionResult)
			assert result.extracted_content is not None
			# Should have used the free-text path
			assert '<result>' in result.extracted_content
			assert '<structured_result>' not in result.extracted_content
			assert result.metadata is None
# ---------------------------------------------------------------------------
# Integration tests: extraction_schema injection via special parameter
# ---------------------------------------------------------------------------
# Shared fixture schema: an object holding a list of {name, price} product records.
PRODUCT_SCHEMA = {
	'type': 'object',
	'properties': {
		'products': {
			'type': 'array',
			'items': {
				'type': 'object',
				'properties': {
					'name': {'type': 'string'},
					'price': {'type': 'number'},
				},
				'required': ['name', 'price'],
			},
		},
	},
	'required': ['products'],
}
# Canned structured response matching PRODUCT_SCHEMA, returned by the mocked extraction LLM.
MOCK_PRODUCTS = {'products': [{'name': 'Widget A', 'price': 9.99}, {'name': 'Widget B', 'price': 19.99}]}
class TestExtractionSchemaInjection:
	"""Tests that extraction_schema injected as a special parameter triggers structured extraction.

	``extraction_schema`` can reach ``extract`` two ways: as a direct keyword
	argument or injected through ``Tools.act``'s special-parameter mechanism.
	These tests cover both, plus precedence against an explicit ``output_schema``.
	"""

	async def test_injected_extraction_schema_triggers_structured_path(self, browser_session, base_url):
		"""extraction_schema passed via act() triggers structured extraction even without output_schema in params."""
		tools = Tools()
		await tools.navigate(url=f'{base_url}/products', new_tab=False, browser_session=browser_session)
		await asyncio.sleep(0.5)  # let the page settle before extraction
		extraction_llm = _make_extraction_llm(structured_response=MOCK_PRODUCTS)
		with tempfile.TemporaryDirectory() as tmp:
			fs = FileSystem(tmp)
			# Note: no output_schema argument — only the injected extraction_schema.
			result = await tools.extract(
				query='List all products with prices',
				browser_session=browser_session,
				page_extraction_llm=extraction_llm,
				file_system=fs,
				extraction_schema=PRODUCT_SCHEMA,
			)
			assert isinstance(result, ActionResult)
			assert result.extracted_content is not None
			assert '<structured_result>' in result.extracted_content
			# Parse and verify JSON
			start = result.extracted_content.index('<structured_result>') + len('<structured_result>')
			end = result.extracted_content.index('</structured_result>')
			parsed = json.loads(result.extracted_content[start:end].strip())
			assert parsed == MOCK_PRODUCTS
			assert result.metadata is not None
			assert result.metadata['structured_extraction'] is True

	async def test_output_schema_takes_precedence_over_extraction_schema(self, browser_session, base_url):
		"""When the LLM provides output_schema in params, it should take precedence over injected extraction_schema."""
		tools = Tools()
		await tools.navigate(url=f'{base_url}/products', new_tab=False, browser_session=browser_session)
		await asyncio.sleep(0.5)
		# Different schema than the injected one — just a name list
		param_schema = {
			'type': 'object',
			'properties': {
				'names': {'type': 'array', 'items': {'type': 'string'}},
			},
			'required': ['names'],
		}
		param_response = {'names': ['Widget A', 'Widget B']}
		extraction_llm = _make_extraction_llm(structured_response=param_response)
		with tempfile.TemporaryDirectory() as tmp:
			fs = FileSystem(tmp)
			result = await tools.extract(
				query='List product names',
				output_schema=param_schema,
				browser_session=browser_session,
				page_extraction_llm=extraction_llm,
				file_system=fs,
				extraction_schema=PRODUCT_SCHEMA,  # should be ignored
			)
			assert isinstance(result, ActionResult)
			assert result.extracted_content is not None
			assert '<structured_result>' in result.extracted_content
			start = result.extracted_content.index('<structured_result>') + len('<structured_result>')
			end = result.extracted_content.index('</structured_result>')
			parsed = json.loads(result.extracted_content[start:end].strip())
			# Should match param_schema response, NOT PRODUCT_SCHEMA
			assert parsed == param_response
			assert result.metadata is not None
			assert result.metadata['extraction_result']['schema_used'] == param_schema

	async def test_no_schema_uses_freetext_path(self, browser_session, base_url):
		"""When neither output_schema nor extraction_schema is provided, free-text path is used (backward compat)."""
		tools = Tools()
		await tools.navigate(url=f'{base_url}/products', new_tab=False, browser_session=browser_session)
		await asyncio.sleep(0.5)
		extraction_llm = _make_extraction_llm(freetext_response='Widget A costs $9.99')
		with tempfile.TemporaryDirectory() as tmp:
			fs = FileSystem(tmp)
			result = await tools.extract(
				query='What products are listed?',
				browser_session=browser_session,
				page_extraction_llm=extraction_llm,
				file_system=fs,
				# No extraction_schema, no output_schema
			)
			assert isinstance(result, ActionResult)
			assert result.extracted_content is not None
			assert '<result>' in result.extracted_content
			assert '<structured_result>' not in result.extracted_content
			assert result.metadata is None

	async def test_extraction_schema_threads_through_act(self, browser_session, base_url):
		"""extraction_schema passed to act() reaches extract() via the registry's special parameter injection."""
		tools = Tools()
		await tools.navigate(url=f'{base_url}/products', new_tab=False, browser_session=browser_session)
		await asyncio.sleep(0.5)
		extraction_llm = _make_extraction_llm(structured_response=MOCK_PRODUCTS)
		with tempfile.TemporaryDirectory() as tmp:
			fs = FileSystem(tmp)
			# Build an ActionModel for the extract action
			action_model = tools.registry.create_action_model()
			action = action_model.model_validate({'extract': {'query': 'List products'}})
			result = await tools.act(
				action=action,
				browser_session=browser_session,
				page_extraction_llm=extraction_llm,
				file_system=fs,
				extraction_schema=PRODUCT_SCHEMA,
			)
			assert isinstance(result, ActionResult)
			assert result.extracted_content is not None
			assert '<structured_result>' in result.extracted_content
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/test_structured_extraction.py",
"license": "MIT License",
"lines": 543,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:tests/ci/test_tunnel.py | """Tests for tunnel module - cloudflared binary management."""
from unittest.mock import patch
import pytest
from browser_use.skill_cli.tunnel import TunnelManager, get_tunnel_manager
@pytest.fixture
def tunnel_manager() -> TunnelManager:
	"""Create a fresh TunnelManager instance for testing (no shared cached state between tests)."""
	return TunnelManager()
def test_tunnel_manager_system_cloudflared(tunnel_manager):
	"""A cloudflared binary found on PATH is returned as-is."""
	with patch('shutil.which', return_value='/usr/local/bin/cloudflared'):
		assert tunnel_manager.get_binary_path() == '/usr/local/bin/cloudflared'
def test_tunnel_manager_caches_result(tunnel_manager):
	"""The resolved binary path is cached: the PATH lookup happens only once."""
	with patch('shutil.which', return_value='/usr/local/bin/cloudflared'):
		first = tunnel_manager.get_binary_path()
		# A second call must not consult shutil.which again — make it explode if it does.
		with patch('shutil.which', side_effect=Exception('Should be cached')):
			second = tunnel_manager.get_binary_path()
	assert first == second
def test_tunnel_manager_not_installed(tunnel_manager):
	"""A missing cloudflared binary surfaces as a RuntimeError with a helpful message."""
	with patch('shutil.which', return_value=None), pytest.raises(RuntimeError) as exc_info:
		tunnel_manager.get_binary_path()
	assert 'cloudflared not installed' in str(exc_info.value)
def test_tunnel_manager_is_available_cached(tunnel_manager):
	"""is_available trusts a previously cached binary path without re-checking PATH."""
	tunnel_manager._binary_path = '/usr/local/bin/cloudflared'  # simulate an earlier successful lookup
	assert tunnel_manager.is_available() is True
def test_tunnel_manager_is_available_system(tunnel_manager):
	"""is_available finds a system-wide cloudflared via a PATH lookup."""
	with patch('shutil.which', return_value='/usr/local/bin/cloudflared'):
		availability = tunnel_manager.is_available()
	assert availability is True
def test_tunnel_manager_is_available_not_found(tunnel_manager):
	"""is_available reports False when no cloudflared can be located."""
	with patch('shutil.which', return_value=None):
		availability = tunnel_manager.is_available()
	assert availability is False
def test_tunnel_manager_status_installed(tunnel_manager):
	"""get_status reports availability, source, and path for an installed binary."""
	with patch('shutil.which', return_value='/usr/local/bin/cloudflared'):
		status = tunnel_manager.get_status()
	assert status['available'] is True
	assert status['source'] == 'system'
	assert status['path'] == '/usr/local/bin/cloudflared'
def test_tunnel_manager_status_not_installed(tunnel_manager):
	"""get_status explains the situation when cloudflared is absent."""
	with patch('shutil.which', return_value=None):
		status = tunnel_manager.get_status()
	assert status['available'] is False
	assert status['source'] is None
	assert 'not installed' in status['note']
def test_get_tunnel_manager_singleton():
	"""get_tunnel_manager lazily creates one shared instance and then reuses it."""
	import browser_use.skill_cli.tunnel as tunnel_module

	# Reset the module-level singleton so this test controls its creation.
	tunnel_module._tunnel_manager = None
	assert get_tunnel_manager() is get_tunnel_manager()
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/test_tunnel.py",
"license": "MIT License",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:browser_use/llm/mistral/chat.py | from __future__ import annotations
import json
import logging
import os
from collections.abc import Mapping
from dataclasses import dataclass
from typing import Any, TypeVar, cast, overload
import httpx
from pydantic import BaseModel
from browser_use.llm.base import BaseChatModel
from browser_use.llm.exceptions import ModelProviderError, ModelRateLimitError
from browser_use.llm.messages import BaseMessage
from browser_use.llm.mistral.schema import MistralSchemaOptimizer
from browser_use.llm.openai.serializer import OpenAIMessageSerializer
from browser_use.llm.views import ChatInvokeCompletion, ChatInvokeUsage
logger = logging.getLogger(__name__)
T = TypeVar('T', bound=BaseModel)
@dataclass
class ChatMistral(BaseChatModel):
	"""Mistral /chat/completions wrapper with schema sanitization.

	Talks to Mistral's REST endpoint directly via httpx (no SDK dependency).
	Messages are serialized with the OpenAI-compatible serializer, and structured
	output is requested through ``response_format: json_schema`` using a schema
	stripped of keywords Mistral rejects (see ``MistralSchemaOptimizer``).
	"""

	model: str = 'mistral-medium-latest'

	# Generation params
	temperature: float | None = 0.2
	top_p: float | None = None
	max_tokens: int | None = 4096  # Mistral expects max_tokens (not max_completion_tokens)
	seed: int | None = None
	safe_prompt: bool = False

	# Client params
	api_key: str | None = None  # Falls back to MISTRAL_API_KEY
	base_url: str | httpx.URL = 'https://api.mistral.ai/v1'
	timeout: float | httpx.Timeout | None = None
	max_retries: int = 5
	default_headers: Mapping[str, str] | None = None
	default_query: Mapping[str, object] | None = None
	http_client: httpx.AsyncClient | None = None  # caller-supplied client; otherwise one is created lazily

	@property
	def provider(self) -> str:
		"""Provider identifier used for logging and telemetry."""
		return 'mistral'

	@property
	def name(self) -> str:
		"""Model name reported in errors and usage tracking."""
		return str(self.model)

	def _get_api_key(self) -> str:
		"""Return the configured API key, falling back to MISTRAL_API_KEY; raise a 401-style error if absent."""
		key = self.api_key or os.getenv('MISTRAL_API_KEY')
		if not key:
			raise ModelProviderError('Missing Mistral API key', status_code=401, model=self.name)
		return key

	def _get_base_url(self) -> str:
		"""Return the API base URL (env MISTRAL_BASE_URL overrides the field) without a trailing slash."""
		return str(os.getenv('MISTRAL_BASE_URL', self.base_url)).rstrip('/')

	def _auth_headers(self) -> dict[str, str]:
		"""Build request headers; user-supplied default_headers may override the computed ones."""
		headers = {
			'Authorization': f'Bearer {self._get_api_key()}',
			'Content-Type': 'application/json',
		}
		if self.default_headers:
			headers.update(self.default_headers)
		return headers

	def _client(self) -> httpx.AsyncClient:
		"""Return the caller-supplied client, or lazily create and cache one with retrying transport."""
		if self.http_client:
			return self.http_client
		# Cache on the instance so repeated calls reuse one connection pool.
		if not hasattr(self, '_cached_client'):
			transport = httpx.AsyncHTTPTransport(retries=self.max_retries)
			client_args: dict[str, Any] = {'transport': transport}
			if self.timeout is not None:
				client_args['timeout'] = self.timeout
			self._cached_client = httpx.AsyncClient(**client_args)
		return self._cached_client

	def _serialize_messages(self, messages: list[BaseMessage]) -> list[dict[str, Any]]:
		"""Serialize messages to plain dicts suitable for the JSON request body."""
		raw_messages: list[dict[str, Any]] = []
		for msg in OpenAIMessageSerializer.serialize_messages(messages):
			# The serializer may yield pydantic models or plain dicts; handle both.
			dumper = getattr(msg, 'model_dump', None)
			if callable(dumper):
				raw_messages.append(cast(dict[str, Any], dumper(exclude_none=True)))
			else:
				raw_messages.append(cast(dict[str, Any], msg))  # type: ignore[arg-type]
		return raw_messages

	def _query_params(self) -> dict[str, str] | None:
		"""Stringify default_query values for httpx, dropping Nones; None when unset."""
		if self.default_query is None:
			return None
		return {k: str(v) for k, v in self.default_query.items() if v is not None}

	def _build_usage(self, usage: dict[str, Any] | None) -> ChatInvokeUsage | None:
		"""Map Mistral's usage dict onto ChatInvokeUsage; cache/image fields are left None (not reported)."""
		if not usage:
			return None
		return ChatInvokeUsage(
			prompt_tokens=usage.get('prompt_tokens', 0),
			prompt_cached_tokens=None,
			prompt_cache_creation_tokens=None,
			prompt_image_tokens=None,
			completion_tokens=usage.get('completion_tokens', 0),
			total_tokens=usage.get('total_tokens', 0),
		)

	def _extract_content_text(self, choice: dict[str, Any]) -> str:
		"""Flatten a completion choice's message content into a single string.

		Handles three content shapes: a list of content parts (text parts are
		concatenated), a bare dict (serialized as JSON), or a plain string
		(None becomes '').
		"""
		message = choice.get('message', {})
		content = message.get('content')
		if isinstance(content, list):
			text_parts = []
			for part in content:
				if isinstance(part, dict):
					if part.get('type') == 'text' and 'text' in part:
						text_parts.append(part.get('text', ''))
					elif 'content' in part:
						text_parts.append(str(part['content']))
			return ''.join(text_parts)
		if isinstance(content, dict):
			return json.dumps(content)
		return content or ''

	def _parse_error(self, response: httpx.Response) -> str:
		"""Best-effort extraction of a human-readable message from an error response body."""
		try:
			body = response.json()
			if isinstance(body, dict):
				# Error payload shapes vary; probe the common keys in order.
				for key in ('message', 'error', 'detail'):
					val = body.get(key)
					if isinstance(val, dict):
						val = val.get('message') or val.get('detail')
					if val:
						return str(val)
		except Exception:
			pass  # non-JSON body; fall through to the raw text
		return response.text

	async def _post(self, payload: dict[str, Any]) -> dict[str, Any]:
		"""POST to /chat/completions and return the parsed JSON body.

		Raises:
			ModelRateLimitError: on HTTP 429.
			ModelProviderError: on any other 4xx/5xx, or on an unparseable success body.
		"""
		url = f'{self._get_base_url()}/chat/completions'
		client = self._client()
		response = await client.post(url, headers=self._auth_headers(), json=payload, params=self._query_params())
		if response.status_code >= 400:
			message = self._parse_error(response)
			if response.status_code == 429:
				raise ModelRateLimitError(message=message, status_code=response.status_code, model=self.name)
			raise ModelProviderError(message=message, status_code=response.status_code, model=self.name)
		try:
			return response.json()
		except Exception as e:
			raise ModelProviderError(message=f'Failed to parse Mistral response: {e}', model=self.name) from e

	@overload
	async def ainvoke(
		self, messages: list[BaseMessage], output_format: None = None, **kwargs: Any
	) -> ChatInvokeCompletion[str]: ...

	@overload
	async def ainvoke(self, messages: list[BaseMessage], output_format: type[T], **kwargs: Any) -> ChatInvokeCompletion[T]: ...

	async def ainvoke(
		self, messages: list[BaseMessage], output_format: type[T] | None = None, **kwargs: Any
	) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
		"""Invoke the chat completions endpoint.

		Args:
			messages: Conversation history to send.
			output_format: Optional pydantic model; when given, a Mistral-compatible
				JSON schema is sent via response_format and the reply is validated
				against it.

		Returns:
			ChatInvokeCompletion wrapping either the raw text or the parsed model.

		Raises:
			ModelRateLimitError: on HTTP 429.
			ModelProviderError: on any other provider failure.
		"""
		payload: dict[str, Any] = {
			'model': self.model,
			'messages': self._serialize_messages(messages),
		}
		# Generation params: only send what is explicitly configured.
		if self.temperature is not None:
			payload['temperature'] = self.temperature
		if self.top_p is not None:
			payload['top_p'] = self.top_p
		if self.max_tokens is not None:
			payload['max_tokens'] = self.max_tokens
		if self.seed is not None:
			payload['seed'] = self.seed
		if self.safe_prompt:
			payload['safe_prompt'] = self.safe_prompt
		# Structured output path: strict JSON-schema response format.
		if output_format is not None:
			payload['response_format'] = {
				'type': 'json_schema',
				'json_schema': {
					'name': 'agent_output',
					'strict': True,
					'schema': MistralSchemaOptimizer.create_mistral_compatible_schema(output_format),
				},
			}
		try:
			data = await self._post(payload)
			choices = data.get('choices', [])
			if not choices:
				raise ModelProviderError('Mistral returned no choices', model=self.name)
			content_text = self._extract_content_text(choices[0])
			usage = self._build_usage(data.get('usage'))
			if output_format is None:
				return ChatInvokeCompletion(completion=content_text, usage=usage)
			parsed = output_format.model_validate_json(content_text)
			return ChatInvokeCompletion(completion=parsed, usage=usage)
		except ModelRateLimitError:
			raise  # preserve rate-limit errors for caller-side backoff
		except ModelProviderError:
			raise
		except Exception as e:
			logger.error(f'Mistral invocation failed: {e}')
			raise ModelProviderError(message=str(e), model=self.name) from e
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/mistral/chat.py",
"license": "MIT License",
"lines": 183,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/llm/mistral/schema.py | """Schema optimizer for Mistral-compatible JSON schemas."""
from __future__ import annotations
from typing import Any
from pydantic import BaseModel
from browser_use.llm.schema import SchemaOptimizer
class MistralSchemaOptimizer:
	"""Create JSON schemas that avoid Mistral's unsupported keywords."""

	# Validation keywords Mistral's json_schema response format rejects.
	UNSUPPORTED_KEYWORDS = {'minLength', 'maxLength', 'pattern', 'format'}

	@classmethod
	def create_mistral_compatible_schema(cls, model: type[BaseModel]) -> dict[str, Any]:
		"""Optimize the model's JSON schema, then drop keywords Mistral rejects."""
		optimized = SchemaOptimizer.create_optimized_json_schema(model)
		return cls._strip_unsupported_keywords(optimized)

	@classmethod
	def _strip_unsupported_keywords(cls, obj: Any) -> Any:
		"""Recursively remove the banned validation keywords from nested dicts/lists."""
		banned = cls.UNSUPPORTED_KEYWORDS
		if isinstance(obj, dict):
			cleaned: dict[str, Any] = {}
			for key, value in obj.items():
				if key not in banned:
					cleaned[key] = cls._strip_unsupported_keywords(value)
			return cleaned
		if isinstance(obj, list):
			return [cls._strip_unsupported_keywords(entry) for entry in obj]
		return obj
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/mistral/schema.py",
"license": "MIT License",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:browser_use/llm/openai/responses_serializer.py | """Serializer for converting messages to OpenAI Responses API input format."""
from typing import overload
from openai.types.responses.easy_input_message_param import EasyInputMessageParam
from openai.types.responses.response_input_image_param import ResponseInputImageParam
from openai.types.responses.response_input_message_content_list_param import (
ResponseInputMessageContentListParam,
)
from openai.types.responses.response_input_text_param import ResponseInputTextParam
from browser_use.llm.messages import (
AssistantMessage,
BaseMessage,
ContentPartImageParam,
ContentPartRefusalParam,
ContentPartTextParam,
SystemMessage,
UserMessage,
)
class ResponsesAPIMessageSerializer:
	"""Serializer for converting between custom message types and OpenAI Responses API input format.

	Role-specific helpers constrain which content parts are legal: user messages
	may carry text and images, system messages carry text only, and assistant
	messages carry text (refusals and tool calls are down-converted to text
	markers, since the Responses API input has no native slot for them).
	"""

	@staticmethod
	def _serialize_content_part_text(part: ContentPartTextParam) -> ResponseInputTextParam:
		# Map our text part onto the Responses API's 'input_text' shape.
		return ResponseInputTextParam(text=part.text, type='input_text')

	@staticmethod
	def _serialize_content_part_image(part: ContentPartImageParam) -> ResponseInputImageParam:
		# Carry over both the image URL and the requested detail level.
		return ResponseInputImageParam(
			image_url=part.image_url.url,
			detail=part.image_url.detail,
			type='input_image',
		)

	@staticmethod
	def _serialize_user_content(
		content: str | list[ContentPartTextParam | ContentPartImageParam],
	) -> str | ResponseInputMessageContentListParam:
		"""Serialize content for user messages (text and images allowed)."""
		if isinstance(content, str):
			return content
		serialized_parts: ResponseInputMessageContentListParam = []
		for part in content:
			if part.type == 'text':
				serialized_parts.append(ResponsesAPIMessageSerializer._serialize_content_part_text(part))
			elif part.type == 'image_url':
				serialized_parts.append(ResponsesAPIMessageSerializer._serialize_content_part_image(part))
		return serialized_parts

	@staticmethod
	def _serialize_system_content(
		content: str | list[ContentPartTextParam],
	) -> str | ResponseInputMessageContentListParam:
		"""Serialize content for system messages (text only)."""
		if isinstance(content, str):
			return content
		serialized_parts: ResponseInputMessageContentListParam = []
		for part in content:
			# Anything other than a text part is silently dropped for system messages.
			if part.type == 'text':
				serialized_parts.append(ResponsesAPIMessageSerializer._serialize_content_part_text(part))
		return serialized_parts

	@staticmethod
	def _serialize_assistant_content(
		content: str | list[ContentPartTextParam | ContentPartRefusalParam] | None,
	) -> str | ResponseInputMessageContentListParam | None:
		"""Serialize content for assistant messages (text only for Responses API)."""
		if content is None:
			return None
		if isinstance(content, str):
			return content
		serialized_parts: ResponseInputMessageContentListParam = []
		for part in content:
			if part.type == 'text':
				serialized_parts.append(ResponsesAPIMessageSerializer._serialize_content_part_text(part))
			# Refusals are converted to text for the Responses API
			elif part.type == 'refusal':
				serialized_parts.append(ResponseInputTextParam(text=f'[Refusal: {part.refusal}]', type='input_text'))
		return serialized_parts

	@overload
	@staticmethod
	def serialize(message: UserMessage) -> EasyInputMessageParam: ...

	@overload
	@staticmethod
	def serialize(message: SystemMessage) -> EasyInputMessageParam: ...

	@overload
	@staticmethod
	def serialize(message: AssistantMessage) -> EasyInputMessageParam: ...

	@staticmethod
	def serialize(message: BaseMessage) -> EasyInputMessageParam:
		"""Serialize a custom message to an OpenAI Responses API input message param.

		Raises:
			ValueError: if the message is not a User/System/Assistant message.
		"""
		if isinstance(message, UserMessage):
			return EasyInputMessageParam(
				role='user',
				content=ResponsesAPIMessageSerializer._serialize_user_content(message.content),
			)
		elif isinstance(message, SystemMessage):
			# Note: Responses API uses 'developer' role for system messages in some contexts,
			# but 'system' is also supported via EasyInputMessageParam
			return EasyInputMessageParam(
				role='system',
				content=ResponsesAPIMessageSerializer._serialize_system_content(message.content),
			)
		elif isinstance(message, AssistantMessage):
			content = ResponsesAPIMessageSerializer._serialize_assistant_content(message.content)
			# For assistant messages, we need to provide content
			# If content is None but there are tool calls, we represent them as text
			if content is None:
				if message.tool_calls:
					# Convert tool calls to a text representation for context
					tool_call_text = '\n'.join(
						f'[Tool call: {tc.function.name}({tc.function.arguments})]' for tc in message.tool_calls
					)
					content = tool_call_text
				else:
					content = ''
			return EasyInputMessageParam(
				role='assistant',
				content=content,
			)
		else:
			raise ValueError(f'Unknown message type: {type(message)}')

	@staticmethod
	def serialize_messages(messages: list[BaseMessage]) -> list[EasyInputMessageParam]:
		"""Serialize a list of messages to Responses API input format."""
		return [ResponsesAPIMessageSerializer.serialize(m) for m in messages]
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/openai/responses_serializer.py",
"license": "MIT License",
"lines": 119,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/llm/tests/test_mistral_schema.py | from pydantic import BaseModel, Field
from browser_use.llm.mistral.schema import MistralSchemaOptimizer
class NestedExample(BaseModel):
	"""Nested model whose `code` field carries constraints Mistral rejects."""

	# Constraints below emit minLength/maxLength/pattern in the JSON schema.
	code: str = Field(..., pattern='[A-Z]+', min_length=2, max_length=4)
	description: str
class RootExample(BaseModel):
	"""Root model combining a nested model and a `format`-annotated field."""

	item: NestedExample
	# json_schema_extra injects a 'format' keyword into the generated schema.
	email: str = Field(..., json_schema_extra={'format': 'email'})
def test_mistral_schema_strips_unsupported_keywords():
	"""The optimized schema must not contain keywords Mistral does not support."""
	banned = {'minLength', 'maxLength', 'pattern', 'format'}

	def walk(node):
		# Recursively visit every dict key and list element in the schema tree.
		if isinstance(node, dict):
			for key, child in node.items():
				assert key not in banned
				walk(child)
		elif isinstance(node, list):
			for child in node:
				walk(child)

	walk(MistralSchemaOptimizer.create_mistral_compatible_schema(RootExample))
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/llm/tests/test_mistral_schema.py",
"license": "MIT License",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
browser-use/browser-use:browser_use/skills/service.py | """Skills service for fetching and executing skills from the Browser Use API"""
import logging
import os
from typing import Any, Literal
from browser_use_sdk import AsyncBrowserUse
from browser_use_sdk.types.execute_skill_response import ExecuteSkillResponse
from browser_use_sdk.types.skill_list_response import SkillListResponse
from cdp_use.cdp.network import Cookie
from pydantic import BaseModel, ValidationError
from browser_use.skills.views import (
MissingCookieException,
Skill,
)
logger = logging.getLogger(__name__)
class SkillService:
	"""Service for managing and executing skills from the Browser Use API.

	Skills are fetched once (lazily or via `async_init`) and cached by id;
	`execute_skill` validates parameters against the skill's schema before
	calling the remote API.
	"""

	def __init__(self, skill_ids: list[str | Literal['*']], api_key: str | None = None):
		"""Initialize the skills service.

		Args:
			skill_ids: List of skill IDs to fetch and cache, or ['*'] to fetch all available skills.
			api_key: Browser Use API key (optional, falls back to the BROWSER_USE_API_KEY env var).
		"""
		self.skill_ids = skill_ids
		self.api_key = api_key or os.getenv('BROWSER_USE_API_KEY') or ''
		if not self.api_key:
			raise ValueError('BROWSER_USE_API_KEY environment variable is not set')
		# Cache of successfully loaded skills, keyed by skill id.
		self._skills: dict[str, Skill] = {}
		self._client: AsyncBrowserUse | None = None
		self._initialized = False

	async def async_init(self) -> None:
		"""Fetch and cache all requested skills in one pass.

		Should be called after __init__ (the getters also call it lazily).
		Fetches available skills from the API and filters them by skill_ids.
		"""
		if self._initialized:
			logger.debug('SkillService already initialized')
			return

		# Create the SDK client used for all subsequent API calls.
		self._client = AsyncBrowserUse(api_key=self.api_key)
		try:
			logger.info('Fetching skills from Browser Use API...')
			use_wildcard = '*' in self.skill_ids
			page_size = 100
			requested_ids: set[str] = set() if use_wildcard else {s for s in self.skill_ids if s != '*'}

			if use_wildcard:
				# Wildcard: fetch only the first page (max 100 skills) so the
				# LLM is not overloaded with tools.
				first_page: SkillListResponse = await self._client.skills.list_skills(
					page_size=page_size,
					page_number=1,
					is_enabled=True,
				)
				fetched = list(first_page.items)
				if len(fetched) >= page_size:
					logger.warning(
						f'Wildcard "*" limited to first {page_size} skills. '
						f'Specify explicit skill IDs if you need specific skills beyond this limit.'
					)
				logger.debug(f'Fetched {len(fetched)} skills (wildcard mode, single page)')
			else:
				# Explicit IDs: paginate until every requested id is seen, with a hard cap.
				fetched = []
				page = 1
				max_pages = 5  # Safety limit
				while page <= max_pages:
					response = await self._client.skills.list_skills(
						page_size=page_size,
						page_number=page,
						is_enabled=True,
					)
					fetched.extend(response.items)
					# Stop early once all requested skills have been located.
					if {s.id for s in fetched if s.id in requested_ids} == requested_ids:
						break
					# A short page means the API has nothing more to return.
					if len(response.items) < page_size:
						break
					page += 1
				if page > max_pages:
					logger.warning(f'Reached pagination limit ({max_pages} pages) before finding all requested skills')
				logger.debug(f'Fetched {len(fetched)} skills across {page} page(s)')

			# Only finished skills are usable (is_enabled is already filtered by the API).
			all_available_skills = [skill for skill in fetched if skill.status == 'finished']
			logger.info(f'Found {len(all_available_skills)} available skills from API')

			# Narrow down to the skills the caller actually asked for.
			if use_wildcard:
				logger.info('Wildcard "*" detected, loading first 100 skills')
				skills_to_load = all_available_skills
			else:
				skills_to_load = [skill for skill in all_available_skills if skill.id in requested_ids]
				missing_ids = requested_ids - {skill.id for skill in skills_to_load}
				if missing_ids:
					logger.warning(f'Requested skills not found or not available: {missing_ids}')

			# Convert SDK SkillResponse objects into our Skill model and cache them.
			for skill_response in skills_to_load:
				try:
					skill = Skill.from_skill_response(skill_response)
					self._skills[skill.id] = skill
					logger.debug(f'Cached skill: {skill.title} ({skill.id})')
				except Exception as e:
					logger.error(f'Failed to convert skill {skill_response.id}: {type(e).__name__}: {e}')

			logger.info(f'Successfully loaded {len(self._skills)} skills')
			self._initialized = True
		except Exception as e:
			logger.error(f'Error during skill initialization: {type(e).__name__}: {e}')
			self._initialized = True  # Mark as initialized even on failure to avoid retry loops
			raise

	async def get_skill(self, skill_id: str) -> Skill | None:
		"""Return a cached skill by ID, auto-initializing on first use.

		Args:
			skill_id: The UUID of the skill.

		Returns:
			The Skill model, or None if it is not in the cache.
		"""
		if not self._initialized:
			await self.async_init()
		return self._skills.get(skill_id)

	async def get_all_skills(self) -> list[Skill]:
		"""Return all cached skills, auto-initializing on first use."""
		if not self._initialized:
			await self.async_init()
		return list(self._skills.values())

	async def execute_skill(
		self, skill_id: str, parameters: dict[str, Any] | BaseModel, cookies: list[Cookie]
	) -> ExecuteSkillResponse:
		"""Execute a skill, validating parameters against its schema first.

		Args:
			skill_id: The UUID of the skill to execute.
			parameters: Dict or BaseModel instance matching the skill's parameter schema.
			cookies: Browser cookies used to satisfy the skill's cookie parameters.

		Returns:
			ExecuteSkillResponse with execution results.

		Raises:
			ValueError: If the skill is not cached or parameter validation fails.
			MissingCookieException: If a required cookie parameter is absent.
		"""
		if not self._initialized:
			await self.async_init()
		assert self._client is not None, 'Client not initialized'

		skill = await self.get_skill(skill_id)
		if skill is None:
			raise ValueError(f'Skill {skill_id} not found in cache. Available skills: {list(self._skills.keys())}')

		# Cookie-typed parameters are filled from the live browser cookies.
		cookie_params = [p for p in skill.parameters if p.type == 'cookie']
		available_cookies: dict[str, str] = {cookie['name']: cookie['value'] for cookie in cookies}

		if cookie_params:
			for cookie_param in cookie_params:
				required = cookie_param.required if cookie_param.required is not None else True
				if required and cookie_param.name not in available_cookies:
					# Surface how to obtain the missing cookie in the exception.
					raise MissingCookieException(
						cookie_name=cookie_param.name, cookie_description=cookie_param.description or 'No description provided'
					)

			# Inject cookie values into a plain-dict copy of the parameters.
			params_dict = parameters.model_dump() if isinstance(parameters, BaseModel) else dict(parameters)
			for cookie_param in cookie_params:
				if cookie_param.name in available_cookies:
					params_dict[cookie_param.name] = available_cookies[cookie_param.name]
			parameters = params_dict

		# Validate against the skill's generated pydantic model (cookies included).
		ParameterModel = skill.parameters_pydantic(exclude_cookies=False)

		validated_params_dict: dict[str, Any]
		try:
			raw = parameters.model_dump() if isinstance(parameters, BaseModel) else parameters
			validated_params_dict = ParameterModel(**raw).model_dump()
		except ValidationError as e:
			# Collect every field error into one readable message.
			error_msg = f'Parameter validation failed for skill {skill.title}:\n'
			for error in e.errors():
				field = '.'.join(str(x) for x in error['loc'])
				error_msg += f' - {field}: {error["msg"]}\n'
			raise ValueError(error_msg) from e
		except Exception as e:
			raise ValueError(f'Failed to validate parameters for skill {skill.title}: {type(e).__name__}: {e}') from e

		# Execute the skill via the API; API failures become an error response.
		try:
			logger.info(f'Executing skill: {skill.title} ({skill_id})')
			result: ExecuteSkillResponse = await self._client.skills.execute_skill(
				skill_id=skill_id, parameters=validated_params_dict
			)
			if result.success:
				logger.info(f'Skill {skill.title} executed successfully (latency: {result.latency_ms}ms)')
			else:
				logger.error(f'Skill {skill.title} execution failed: {result.error}')
			return result
		except Exception as e:
			logger.error(f'Error executing skill {skill_id}: {type(e).__name__}: {e}')
			return ExecuteSkillResponse(
				success=False,
				error=f'Failed to execute skill: {type(e).__name__}: {str(e)}',
			)

	async def close(self) -> None:
		"""Close the SDK client and cleanup resources."""
		if self._client is not None:
			# The SDK currently exposes no close(); dropping the reference is the cleanup.
			self._client = None
		self._initialized = False
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/skills/service.py",
"license": "MIT License",
"lines": 225,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/skills/utils.py | """Utilities for skill schema conversion"""
from typing import Any
from pydantic import BaseModel, Field, create_model
from browser_use.skills.views import ParameterSchema
def convert_parameters_to_pydantic(parameters: list[ParameterSchema], model_name: str = 'SkillParameters') -> type[BaseModel]:
	"""Convert a list of ParameterSchema to a pydantic model for structured output.

	Args:
		parameters: List of parameter schemas from the skill API.
		model_name: Name for the generated pydantic model.

	Returns:
		A pydantic BaseModel class with fields matching the parameter schemas.
	"""
	if not parameters:
		# No parameters: an empty model is still a valid structured-output target.
		return create_model(model_name, __base__=BaseModel)

	# API type names mapped onto Python annotations; cookies travel as strings,
	# and unknown type names fall back to str.
	type_map: dict[str, Any] = {
		'string': str,
		'number': float,
		'boolean': bool,
		'object': dict[str, Any],
		'array': list[Any],
		'cookie': str,
	}

	fields: dict[str, Any] = {}
	for param in parameters:
		annotation: Any = type_map.get(param.type, str)
		# A missing `required` flag means required.
		required = True if param.required is None else param.required
		if not required:
			annotation = annotation | None  # type: ignore
		extra = {'description': param.description} if param.description else {}
		# Optional fields default to None so callers may omit them.
		fields[param.name] = (annotation, Field(**extra) if required else Field(default=None, **extra))

	return create_model(model_name, __base__=BaseModel, **fields)
def convert_json_schema_to_pydantic(schema: dict[str, Any], model_name: str = 'SkillOutput') -> type[BaseModel]:
	"""Convert a JSON schema to a pydantic model.

	Args:
		schema: JSON schema dictionary (OpenAPI/JSON Schema format).
		model_name: Name for the generated pydantic model.

	Returns:
		A pydantic BaseModel class matching the schema.

	Note:
		This is a simplified converter that handles basic types.
		For complex nested schemas, consider using datamodel-code-generator.
	"""
	if not schema or 'properties' not in schema:
		# Nothing to model: return an empty class rather than failing.
		return create_model(model_name, __base__=BaseModel)

	# Scalar/object JSON-schema types mapped onto Python annotations.
	scalar_map: dict[str, Any] = {
		'string': str,
		'number': float,
		'integer': int,
		'boolean': bool,
		'object': dict[str, Any],
	}
	# Element types for 'array' fields, keyed by the items' declared type.
	array_map: dict[str, Any] = {
		'string': list[str],
		'number': list[float],
		'integer': list[int],
		'boolean': list[bool],
		'object': list[dict[str, Any]],
	}

	fields: dict[str, Any] = {}
	required_names = set(schema.get('required', []))

	for field_name, field_schema in schema.get('properties', {}).items():
		declared = field_schema.get('type', 'string')
		if declared == 'array':
			# Arrays refine their element type from the 'items' sub-schema.
			items_type = field_schema.get('items', {}).get('type', 'string')
			annotation: Any = array_map.get(items_type, list[Any])
		else:
			annotation = scalar_map.get(declared, str)

		is_required = field_name in required_names
		if not is_required:
			annotation = annotation | None  # type: ignore

		description = field_schema.get('description')
		extra = {'description': description} if description else {}
		fields[field_name] = (annotation, Field(**extra) if is_required else Field(default=None, **extra))

	return create_model(model_name, __base__=BaseModel, **fields)
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/skills/utils.py",
"license": "MIT License",
"lines": 110,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:browser_use/skills/views.py | """Skills views - wraps SDK types with helper methods"""
from typing import Any
from browser_use_sdk.types.parameter_schema import ParameterSchema
from browser_use_sdk.types.skill_response import SkillResponse
from pydantic import BaseModel, ConfigDict, Field
class MissingCookieException(Exception):
	"""Raised when a required cookie is missing for skill execution.

	Attributes:
		cookie_name: The name of the missing cookie parameter.
		cookie_description: Description of how to obtain the cookie.
	"""

	def __init__(self, cookie_name: str, cookie_description: str):
		self.cookie_name = cookie_name
		self.cookie_description = cookie_description
		# The exception message names the cookie and how to obtain it.
		message = f"Missing required cookie '{cookie_name}': {cookie_description}"
		super().__init__(message)
class Skill(BaseModel):
	"""Skill model with helper methods for LLM integration.

	Wraps the SDK SkillResponse and adds helpers that turn the skill's
	parameter and output schemas into Pydantic models.
	"""

	model_config = ConfigDict(extra='forbid', validate_assignment=True)

	id: str
	title: str
	description: str
	parameters: list[ParameterSchema]
	output_schema: dict[str, Any] = Field(default_factory=dict)

	@staticmethod
	def from_skill_response(response: SkillResponse) -> 'Skill':
		"""Create a Skill from SDK SkillResponse."""
		return Skill(
			id=response.id,
			title=response.title,
			description=response.description,
			parameters=response.parameters,
			output_schema=response.output_schema,
		)

	def parameters_pydantic(self, exclude_cookies: bool = False) -> type[BaseModel]:
		"""Convert parameter schemas to a pydantic model for structured output.

		exclude_cookies is very useful when dealing with LLMs that are not aware of cookies.
		"""
		# Imported lazily to avoid a circular import with the utils module.
		from browser_use.skills.utils import convert_parameters_to_pydantic

		params = [p for p in self.parameters if not (exclude_cookies and p.type == 'cookie')]
		return convert_parameters_to_pydantic(params, model_name=f'{self.title}Parameters')

	@property
	def output_type_pydantic(self) -> type[BaseModel] | None:
		"""Convert output schema to a pydantic model for structured output."""
		if not self.output_schema:
			return None
		from browser_use.skills.utils import convert_json_schema_to_pydantic

		return convert_json_schema_to_pydantic(self.output_schema, model_name=f'{self.title}Output')
| {
"repo_id": "browser-use/browser-use",
"file_path": "browser_use/skills/views.py",
"license": "MIT License",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
browser-use/browser-use:examples/features/fallback_model.py | """
Example: Using a fallback LLM model.
When the primary LLM fails with rate limits (429), authentication errors (401),
payment/credit errors (402), or server errors (500, 502, 503, 504), the agent
automatically switches to the fallback model and continues execution.
Note: The primary LLM will first exhaust its own retry logic (typically 5 attempts
with exponential backoff) before the fallback is triggered. This means transient errors
are handled by the provider's built-in retries, and the fallback only kicks in when
the provider truly can't recover.
This is useful for:
- High availability: Keep your agent running even when one provider has issues
- Cost optimization: Use a cheaper model as fallback when the primary is rate limited
- Multi-provider resilience: Switch between OpenAI, Anthropic, Google, etc.
@dev You need to add OPENAI_API_KEY and ANTHROPIC_API_KEY to your environment variables.
"""
import asyncio
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
load_dotenv()
from browser_use import Agent
from browser_use.llm import ChatAnthropic, ChatOpenAI
# Primary model is Anthropic; OpenAI serves as the fallback the agent switches
# to when the primary keeps failing after its own retries.
llm = ChatAnthropic(model='claude-sonnet-4-0')
fallback_llm = ChatOpenAI(model='gpt-4o')

agent = Agent(
	task='Go to github.com and find the browser-use repository',
	llm=llm,
	fallback_llm=fallback_llm,
)
async def main():
	"""Run the agent and report whether the fallback LLM was engaged."""
	history = await agent.run()
	print(history)
	# The agent records whether it had to switch models during the run.
	if agent.is_using_fallback_llm:
		print('Note: Agent switched to fallback LLM during execution')
		print(f'Current model: {agent.current_llm_model}')


if __name__ == '__main__':
	asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/features/fallback_model.py",
"license": "MIT License",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/models/bu_oss.py | """
Setup:
1. Get your API key from https://cloud.browser-use.com/new-api-key
2. Set environment variable: export BROWSER_USE_API_KEY="your-key"
"""
from dotenv import load_dotenv
from browser_use import Agent, ChatBrowserUse
load_dotenv()
# Optional tracing: Laminar is only enabled when the lmnr package is installed.
try:
	from lmnr import Laminar

	Laminar.initialize()
except ImportError:
	pass

# Point to local llm-use server for testing
llm = ChatBrowserUse(
	model='browser-use/bu-30b-a3b-preview',  # BU Open Source Model!!
)

agent = Agent(
	task='Find the number of stars of browser-use and stagehand. Tell me which one has more stars :)',
	llm=llm,
	flash_mode=True,
)

# Blocking entry point: runs the agent to completion.
agent.run_sync()
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/models/bu_oss.py",
"license": "MIT License",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/models/mistral.py | """
Simple agent run with Mistral.
You need to set MISTRAL_API_KEY in your environment (and optionally MISTRAL_BASE_URL).
"""
import asyncio
from dotenv import load_dotenv
from browser_use import Agent
from browser_use.llm.mistral import ChatMistral
load_dotenv()
# A mildly creative temperature keeps the answer varied but on-topic.
llm = ChatMistral(model='mistral-small-2506', temperature=0.6)

agent = Agent(
	llm=llm,
	task='List two fun weekend activities in Barcelona.',
)


async def main():
	"""Run the agent for a bounded number of steps, then wait for the user."""
	await agent.run(max_steps=10)
	input('Press Enter to continue...')


asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/models/mistral.py",
"license": "MIT License",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/models/skills.py | import asyncio
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dotenv import load_dotenv
from browser_use import Agent
load_dotenv()
async def run_search():
	"""Run a flash-mode agent equipped with a remote GitHub-stars skill."""
	agent = Agent(
		# No llm passed: the agent falls back to its default model.
		task='How many stars does the browser-use repo have?',
		flash_mode=True,
		skills=['502af156-2a75-4b4e-816d-b2dc138b6647'],  # skill for fetching the number of stars of any Github repository
	)
	await agent.run()


if __name__ == '__main__':
	asyncio.run(run_search())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/models/skills.py",
"license": "MIT License",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:examples/sandbox/structured_output.py | """Example of using structured output with sandbox execution
To run:
export BROWSER_USE_API_KEY=your_key
python examples/sandbox/structured_output.py
"""
import asyncio
import os
from pydantic import BaseModel, Field
from browser_use import Agent, Browser, ChatBrowserUse, sandbox
from browser_use.agent.views import AgentHistoryList
class IPLocation(BaseModel):
	"""Structured output for IP location data."""

	ip_address: str = Field(description='The public IP address')
	country: str = Field(description='Country name')
	# City and region may be absent from the lookup, hence optional.
	city: str | None = Field(default=None, description='City name if available')
	region: str | None = Field(default=None, description='Region/state if available')
@sandbox(log_level='INFO')
async def get_ip_location(browser: Browser) -> AgentHistoryList:
	"""Run a sandboxed agent that extracts the caller's public IP and location."""
	ip_agent = Agent(
		task='Go to ipinfo.io and extract my IP address and location details (country, city, region)',
		browser=browser,
		llm=ChatBrowserUse(model='bu-2-0'),
		output_model_schema=IPLocation,
	)
	return await ip_agent.run(max_steps=10)
async def main():
	"""Entry point: require an API key, run the sandboxed task, print results."""
	# Guard clause: the sandbox cannot run without an API key.
	if not os.getenv('BROWSER_USE_API_KEY'):
		print('❌ Please set BROWSER_USE_API_KEY environment variable')
		print(' Get a key at: https://cloud.browser-use.com/new-api-key')
		return

	result = await get_ip_location()
	location = result.get_structured_output(IPLocation)
	if location is None:
		print(f'No structured output. Final result: {result.final_result()}')
		return
	print(f'IP: {location.ip_address}')
	print(f'Country: {location.country}')
	print(f'City: {location.city or "N/A"}')
	print(f'Region: {location.region or "N/A"}')


if __name__ == '__main__':
	asyncio.run(main())
| {
"repo_id": "browser-use/browser-use",
"file_path": "examples/sandbox/structured_output.py",
"license": "MIT License",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
browser-use/browser-use:tests/ci/browser/test_cdp_headers.py | """
Test that headers are properly passed to CDPClient for authenticated remote browser connections.
This tests the fix for: When using browser-use with remote browser services that require
authentication headers, these headers need to be included in the WebSocket handshake.
"""
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from browser_use.browser.profile import BrowserProfile
from browser_use.browser.session import BrowserSession
def test_browser_profile_headers_attribute():
	"""BrowserProfile stores the headers it is constructed with, or None."""
	headers = {'Authorization': 'Bearer token123', 'X-API-Key': 'key456'}
	assert BrowserProfile(headers=headers).headers == headers
	# A profile built without headers must report None, not an empty dict.
	assert BrowserProfile().headers is None
def test_browser_profile_headers_inherited():
	"""Headers handed to BrowserSession are reachable through its profile."""
	headers = {'Authorization': 'Bearer test-token'}
	session = BrowserSession(cdp_url='wss://example.com/cdp', headers=headers)
	assert session.browser_profile.headers == headers
@pytest.mark.asyncio
async def test_cdp_client_headers_passed_on_connect():
	"""Headers from BrowserProfile must reach the CDPClient constructor on connect()."""
	test_headers = {
		'Authorization': 'AWS4-HMAC-SHA256 Credential=test...',
		'X-Amz-Date': '20250914T163733Z',
		'X-Amz-Security-Token': 'test-token',
		'Host': 'remote-browser.example.com',
	}
	session = BrowserSession(cdp_url='wss://remote-browser.example.com/cdp', headers=test_headers)

	with patch('browser_use.browser.session.CDPClient') as cdp_client_cls:
		# Stub the CDP client instance and every CDP command connect() issues.
		client = AsyncMock()
		cdp_client_cls.return_value = client
		client.start = AsyncMock()
		client.stop = AsyncMock()
		client.send = MagicMock()
		client.send.Target = MagicMock()
		client.send.Target.setAutoAttach = AsyncMock()
		client.send.Target.getTargets = AsyncMock(return_value={'targetInfos': []})
		client.send.Target.createTarget = AsyncMock(return_value={'targetId': 'test-target-id'})

		# SessionManager is imported inside connect() from browser_use.browser.session_manager.
		with patch('browser_use.browser.session_manager.SessionManager') as session_manager_cls:
			manager = MagicMock()
			session_manager_cls.return_value = manager
			manager.start_monitoring = AsyncMock()
			manager.get_all_page_targets = MagicMock(return_value=[])

			try:
				await session.connect()
			except Exception:
				# Mocking is deliberately incomplete; the construction assert below still holds.
				pass

		# CDPClient must have been built exactly once, with URL and auth headers.
		cdp_client_cls.assert_called_once()
		call = cdp_client_cls.call_args
		assert call[0][0] == 'wss://remote-browser.example.com/cdp', 'CDP URL should be first arg'
		assert call[1].get('additional_headers') == test_headers, 'Headers should be passed as additional_headers'
		assert call[1].get('max_ws_frame_size') == 200 * 1024 * 1024, 'max_ws_frame_size should be set'
@pytest.mark.asyncio
async def test_cdp_client_no_headers_when_none():
	"""CDPClient receives additional_headers=None when the profile has no headers."""
	session = BrowserSession(cdp_url='wss://example.com/cdp')
	assert session.browser_profile.headers is None

	with patch('browser_use.browser.session.CDPClient') as cdp_client_cls:
		client = AsyncMock()
		cdp_client_cls.return_value = client
		client.start = AsyncMock()
		client.stop = AsyncMock()
		client.send = MagicMock()
		client.send.Target = MagicMock()
		client.send.Target.setAutoAttach = AsyncMock()
		client.send.Target.getTargets = AsyncMock(return_value={'targetInfos': []})
		client.send.Target.createTarget = AsyncMock(return_value={'targetId': 'test-target-id'})

		with patch('browser_use.browser.session_manager.SessionManager') as session_manager_cls:
			manager = MagicMock()
			session_manager_cls.return_value = manager
			manager.start_monitoring = AsyncMock()
			manager.get_all_page_targets = MagicMock(return_value=[])

			try:
				await session.connect()
			except Exception:
				# connect() may abort later; CDPClient construction already happened.
				pass

		# No profile headers -> additional_headers must be explicitly None.
		assert cdp_client_cls.call_args[1].get('additional_headers') is None
@pytest.mark.asyncio
async def test_headers_used_for_json_version_endpoint():
	"""Auth headers must also accompany the /json/version WebSocket-URL lookup."""
	test_headers = {'Authorization': 'Bearer test-token'}
	# An http:// (not ws://) CDP URL forces connect() to resolve /json/version first.
	session = BrowserSession(cdp_url='http://remote-browser.example.com:9222', headers=test_headers)

	with patch('browser_use.browser.session.httpx.AsyncClient') as http_client_cls:
		http_client = AsyncMock()
		http_client_cls.return_value.__aenter__ = AsyncMock(return_value=http_client)
		http_client_cls.return_value.__aexit__ = AsyncMock()

		# Canned /json/version payload pointing at the debugger websocket.
		version_response = MagicMock()
		version_response.json.return_value = {'webSocketDebuggerUrl': 'ws://remote-browser.example.com:9222/devtools/browser/abc'}
		http_client.get = AsyncMock(return_value=version_response)

		with patch('browser_use.browser.session.CDPClient') as cdp_client_cls:
			client = AsyncMock()
			cdp_client_cls.return_value = client
			client.start = AsyncMock()
			client.send = MagicMock()
			client.send.Target = MagicMock()
			client.send.Target.setAutoAttach = AsyncMock()

			with patch('browser_use.browser.session_manager.SessionManager') as session_manager_cls:
				manager = MagicMock()
				session_manager_cls.return_value = manager
				manager.start_monitoring = AsyncMock()
				manager.get_all_page_targets = MagicMock(return_value=[])

				try:
					await session.connect()
				except Exception:
					pass

		# The headers must have been forwarded on the single /json/version GET.
		http_client.get.assert_called_once()
		assert http_client.get.call_args[1].get('headers') == test_headers
| {
"repo_id": "browser-use/browser-use",
"file_path": "tests/ci/browser/test_cdp_headers.py",
"license": "MIT License",
"lines": 124,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.