repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
cwjokaka/bilibili_member_crawler | https://github.com/cwjokaka/bilibili_member_crawler/blob/a4b9cb64b073ec452f1b12b5225539e53996689b/exception/sql_insert_exception.py | exception/sql_insert_exception.py | from exception.bilibili_exception import BilibiliException
class SqlInsertException(BilibiliException):
    """Raised when inserting a row into the database fails."""

    def __init__(self, msg) -> None:
        super().__init__(msg)
| python | MIT | a4b9cb64b073ec452f1b12b5225539e53996689b | 2026-01-05T07:13:29.310919Z | false |
cwjokaka/bilibili_member_crawler | https://github.com/cwjokaka/bilibili_member_crawler/blob/a4b9cb64b073ec452f1b12b5225539e53996689b/exception/sql_already_exists_exception.py | exception/sql_already_exists_exception.py | from exception.bilibili_exception import BilibiliException
class SqlAlreadyExistsException(BilibiliException):
    """Raised when an insert conflicts with an already-existing row."""

    def __init__(self, msg) -> None:
        super().__init__(msg)
| python | MIT | a4b9cb64b073ec452f1b12b5225539e53996689b | 2026-01-05T07:13:29.310919Z | false |
cwjokaka/bilibili_member_crawler | https://github.com/cwjokaka/bilibili_member_crawler/blob/a4b9cb64b073ec452f1b12b5225539e53996689b/exception/__init__.py | exception/__init__.py | python | MIT | a4b9cb64b073ec452f1b12b5225539e53996689b | 2026-01-05T07:13:29.310919Z | false | |
cwjokaka/bilibili_member_crawler | https://github.com/cwjokaka/bilibili_member_crawler/blob/a4b9cb64b073ec452f1b12b5225539e53996689b/exception/user_not_found_exception.py | exception/user_not_found_exception.py | from exception.bilibili_exception import BilibiliException
class UserNotFoundException(BilibiliException):
    """Raised when a requested user cannot be found."""

    def __init__(self, msg) -> None:
        super().__init__(msg)
| python | MIT | a4b9cb64b073ec452f1b12b5225539e53996689b | 2026-01-05T07:13:29.310919Z | false |
aaugustin/datedelta | https://github.com/aaugustin/datedelta/blob/82df8cc33b270fe5b06870b2ca2591faf1d3241c/test_datedelta.py | test_datedelta.py | # For convenience and readability in tests, use short aliases.
import pickle
from datetime import date as d
from datetime import timedelta as td
import pytest
from datedelta import datedelta as dd
from datedelta import DAY, MONTH, WEEK, YEAR
@pytest.mark.parametrize(
    ("constant", "value"),
    [
        (DAY, dd(days=1)),
        (WEEK, dd(days=7)),
        (MONTH, dd(months=1)),
        (YEAR, dd(years=1)),
    ],
)
def test_constants(constant, value):
    # Each public convenience constant must equal the equivalent datedelta.
    assert constant == value


# The constructor rejects non-integer components with a descriptive message.
def test_years_must_be_integer():
    with pytest.raises(ValueError) as exc:
        dd(years=4.5)
    assert "years must be an integer value" in str(exc.value)


def test_months_must_be_integer():
    with pytest.raises(ValueError) as exc:
        dd(months=6.5)
    assert "months must be an integer value" in str(exc.value)


def test_days_must_be_integer():
    with pytest.raises(ValueError) as exc:
        dd(days=8.5)
    assert "days must be an integer value" in str(exc.value)


# Components are exposed as read-only attributes.
def test_can_get_years_attribute():
    assert dd(years=2, months=3, days=6).years == 2


def test_can_get_months_attribute():
    assert dd(years=2, months=3, days=6).months == 3


def test_can_get_days_attribute():
    assert dd(years=2, months=3, days=6).days == 6
def test_cannot_set_years_attribute():
    # datedelta is immutable: its properties have no setters.
    delta = dd(years=2, months=3, days=6)
    with pytest.raises(AttributeError):
        delta.years = 2
def test_cannot_set_months_attribute():
    # BUGFIX: this test previously assigned to ``delta.years`` (copy-paste
    # from the test above), so the read-only ``months`` attribute was never
    # actually exercised.
    delta = dd(years=2, months=3, days=6)
    with pytest.raises(AttributeError):
        delta.months = 3
def test_cannot_set_days_attribute():
    # BUGFIX: this test previously assigned to ``delta.years`` (copy-paste
    # bug), so the read-only ``days`` attribute was never actually exercised.
    delta = dd(years=2, months=3, days=6)
    with pytest.raises(AttributeError):
        delta.days = 6
def test_cannot_delete_years_attribute():
    # Properties without deleters raise AttributeError on ``del``.
    delta = dd(years=2, months=3, days=6)
    with pytest.raises(AttributeError):
        del delta.years
def test_cannot_delete_months_attribute():
    # BUGFIX: this test previously deleted ``delta.years`` (copy-paste bug),
    # leaving the ``months`` property's deleter behavior untested.
    delta = dd(years=2, months=3, days=6)
    with pytest.raises(AttributeError):
        del delta.months
def test_cannot_delete_days_attribute():
    # BUGFIX: this test previously deleted ``delta.years`` (copy-paste bug),
    # leaving the ``days`` property's deleter behavior untested.
    delta = dd(years=2, months=3, days=6)
    with pytest.raises(AttributeError):
        del delta.days
# repr() must be evaluable and str() human-readable, with correct
# pluralization for every sign/magnitude combination.
@pytest.mark.parametrize(
    ("delta_repr", "delta_str"),
    [
        # No values.
        ("dd()", "0 days"),
        # Positive singular values.
        ("dd(years=1)", "1 year"),
        ("dd(months=1)", "1 month"),
        ("dd(days=1)", "1 day"),
        ("dd(years=1, months=1)", "1 year, 1 month"),
        ("dd(years=1, days=1)", "1 year, 1 day"),
        ("dd(months=1, days=1)", "1 month, 1 day"),
        ("dd(years=1, months=1, days=1)", "1 year, 1 month, 1 day"),
        # Negative singular values.
        ("dd(years=-1)", "-1 year"),
        ("dd(months=-1)", "-1 month"),
        ("dd(days=-1)", "-1 day"),
        ("dd(years=-1, months=-1)", "-1 year, -1 month"),
        ("dd(years=-1, days=-1)", "-1 year, -1 day"),
        ("dd(months=-1, days=-1)", "-1 month, -1 day"),
        ("dd(years=-1, months=-1, days=-1)", "-1 year, -1 month, -1 day"),
        # Mixed singular values.
        ("dd(years=1, months=-1)", "1 year, -1 month"),
        ("dd(years=-1, months=1)", "-1 year, 1 month"),
        ("dd(years=1, days=-1)", "1 year, -1 day"),
        ("dd(years=-1, days=1)", "-1 year, 1 day"),
        ("dd(months=1, days=-1)", "1 month, -1 day"),
        ("dd(months=-1, days=1)", "-1 month, 1 day"),
        ("dd(years=1, months=1, days=-1)", "1 year, 1 month, -1 day"),
        ("dd(years=1, months=-1, days=1)", "1 year, -1 month, 1 day"),
        ("dd(years=-1, months=1, days=1)", "-1 year, 1 month, 1 day"),
        ("dd(years=1, months=-1, days=-1)", "1 year, -1 month, -1 day"),
        ("dd(years=-1, months=1, days=-1)", "-1 year, 1 month, -1 day"),
        ("dd(years=-1, months=-1, days=1)", "-1 year, -1 month, 1 day"),
        # Positive plural values.
        ("dd(years=2)", "2 years"),
        ("dd(months=3)", "3 months"),
        ("dd(days=6)", "6 days"),
        ("dd(years=2, months=3)", "2 years, 3 months"),
        ("dd(years=2, days=6)", "2 years, 6 days"),
        ("dd(months=3, days=6)", "3 months, 6 days"),
        ("dd(years=2, months=3, days=6)", "2 years, 3 months, 6 days"),
        # Negative plural values.
        ("dd(years=-2)", "-2 years"),
        ("dd(months=-3)", "-3 months"),
        ("dd(days=-6)", "-6 days"),
        ("dd(years=-2, months=-3)", "-2 years, -3 months"),
        ("dd(years=-2, days=-6)", "-2 years, -6 days"),
        ("dd(months=-3, days=-6)", "-3 months, -6 days"),
        ("dd(years=-2, months=-3, days=-6)", "-2 years, -3 months, -6 days"),
        # Mixed plural values.
        ("dd(years=2, months=-3)", "2 years, -3 months"),
        ("dd(years=-2, months=3)", "-2 years, 3 months"),
        ("dd(years=2, days=-6)", "2 years, -6 days"),
        ("dd(years=-2, days=6)", "-2 years, 6 days"),
        ("dd(months=3, days=-6)", "3 months, -6 days"),
        ("dd(months=-3, days=6)", "-3 months, 6 days"),
        ("dd(years=2, months=3, days=-6)", "2 years, 3 months, -6 days"),
        ("dd(years=2, months=-3, days=6)", "2 years, -3 months, 6 days"),
        ("dd(years=-2, months=3, days=6)", "-2 years, 3 months, 6 days"),
        ("dd(years=2, months=-3, days=-6)", "2 years, -3 months, -6 days"),
        ("dd(years=-2, months=3, days=-6)", "-2 years, 3 months, -6 days"),
        ("dd(years=-2, months=-3, days=6)", "-2 years, -3 months, 6 days"),
        # Mixed singular and plural values (not all combinations are included).
        ("dd(years=-1, months=1, days=10)", "-1 year, 1 month, 10 days"),
        ("dd(months=2, days=-1)", "2 months, -1 day"),
        ("dd(months=-1, days=10)", "-1 month, 10 days"),
    ],
)
def test_repr_and_str(delta_repr, delta_str):
    delta = eval(delta_repr)  # repr must be evaluable
    delta_repr = delta_repr.replace("dd", "datedelta.datedelta")
    assert repr(delta) == delta_repr  # repr must round-trip (on test cases)
    assert str(delta) == delta_str
# Equality is component-wise; instances of other types never compare equal,
# and hashes must agree with equality for datedelta instances.
@pytest.mark.parametrize(
    ("delta_1", "delta_2", "is_equal"),
    [
        # Same type.
        (dd(), dd(), True),
        (dd(), dd(years=0, months=0, days=0), True),
        (dd(years=2), dd(years=2), True),
        (dd(years=2), dd(years=2, months=0, days=0), True),
        (dd(months=3, days=6), dd(months=3, days=6), True),
        (dd(months=3, days=6), dd(years=0, months=3, days=6), True),
        (dd(years=2, months=3, days=6), dd(years=2, months=3, days=6), True),
        (dd(), dd(years=2), False),
        (dd(years=1), dd(years=2), False),
        (dd(), dd(months=3, days=6), False),
        (dd(months=3, days=6), dd(months=3, days=3), False),
        (dd(years=2), dd(months=3, days=6), False),
        (dd(years=2), dd(years=2, months=3, days=6), False),
        # Other types.
        (dd(), 0, False),
        (dd(), None, False),
        (dd(), True, False),
        (dd(), False, False),
        (dd(years=2, months=3, days=6), d(year=2, month=3, day=6), False),
        (dd(days=6), td(days=6), False),
    ],
)
def test_equal_not_equal_and_hash(delta_1, delta_2, is_equal):
    assert (delta_1 == delta_2) == is_equal
    assert (delta_2 == delta_1) == is_equal
    assert (delta_1 != delta_2) != is_equal
    assert (delta_2 != delta_1) != is_equal
    if type(delta_1) is type(delta_2) is dd:
        # Technically, hashes could be equal even if values are different, but
        # that case doesn't happen in the current implementation.
        assert (hash(delta_1) == hash(delta_2)) == is_equal


@pytest.mark.parametrize(
    ("delta"),
    [
        # Same type.
        (dd()),
        (dd(years=2)),
        (dd(months=3)),
        (dd(days=6)),
        (dd(years=2, months=3)),
        (dd(months=3, days=6)),
        (dd(years=2, months=3, days=6)),
    ],
)
def test_pickle_unpickle(delta):
    # The __getstate__/__setstate__ optimization must keep pickles small
    # and round-trip to an equal value.
    pickled = pickle.dumps(delta)
    assert len(pickled) <= 40
    unpickled = pickle.loads(pickled)
    assert unpickled == delta
# Addition is component-wise and only defined when no pair of corresponding
# components has opposite signs.
@pytest.mark.parametrize(
    ("delta_1", "delta_2", "delta"),
    [
        (dd(), dd(), dd()),
        # Positive deltas.
        (dd(years=2), dd(), dd(years=2)),
        (dd(), dd(months=3, days=6), dd(months=3, days=6)),
        (dd(years=1), dd(years=1), dd(years=2)),
        (dd(years=2), dd(months=3, days=6), dd(years=2, months=3, days=6)),
        (dd(years=2, months=1), dd(months=2, days=6), dd(years=2, months=3, days=6)),
        (
            dd(years=2, months=1, days=2),
            dd(months=2, days=4),
            dd(years=2, months=3, days=6),
        ),
        # Negative deltas.
        (dd(years=-2), dd(), dd(years=-2)),
        (dd(), dd(months=-3, days=-6), dd(months=-3, days=-6)),
        (dd(years=-1), dd(years=-1), dd(years=-2)),
        (dd(years=-2), dd(months=-3, days=-6), dd(years=-2, months=-3, days=-6)),
        (
            dd(years=-2, months=-1),
            dd(months=-2, days=-6),
            dd(years=-2, months=-3, days=-6),
        ),
        (
            dd(years=-2, months=-1, days=-2),
            dd(months=-2, days=-4),
            dd(years=-2, months=-3, days=-6),
        ),
        # Supported mixed deltas.
        (dd(years=2), dd(months=-3, days=6), dd(years=2, months=-3, days=6)),
        (
            dd(years=-2, months=1),
            dd(months=2, days=-6),
            dd(years=-2, months=3, days=-6),
        ),
        (
            dd(years=2, months=1, days=-2),
            dd(months=2, days=-4),
            dd(years=2, months=3, days=-6),
        ),
    ],
)
def test_add_datedelta(delta_1, delta_2, delta):
    assert delta_1 + delta_2 == delta


@pytest.mark.parametrize(
    ("delta_1", "delta_2"),
    [
        # Unsupported mixed deltas.
        (dd(years=3), dd(years=-1)),
        (dd(years=2, months=5), dd(months=-2, days=-6)),
        (dd(years=2, months=1, days=-10), dd(months=2, days=4)),
    ],
)
def test_add_unsupported_datedelta(delta_1, delta_2):
    # Components with opposite signs cannot be combined; a ValueError with
    # a specific message is raised.
    with pytest.raises(ValueError) as exc:
        delta_1 + delta_2
    assert "cannot add datedeltas with opposite signs" in str(exc.value)


@pytest.mark.parametrize(
    ("delta_1", "other"),
    [
        # Other types.
        (dd(), None),
        (dd(), 0),
        (dd(), "a"),
        (dd(), []),
        (dd(), d.today()),
    ],
)
def test_add_unsupported_type(delta_1, other):
    # Adding unrelated types falls through to Python's default TypeError.
    with pytest.raises(TypeError) as exc:
        delta_1 + other
    assert "unsupported operand type(s) for +" in str(exc.value)
# Subtraction mirrors addition: it is only defined when no pair of
# corresponding components shares the same sign.
@pytest.mark.parametrize(
    ("delta_1", "delta_2", "delta"),
    [
        (dd(), dd(), dd()),
        # Positive deltas.
        (dd(years=2), dd(), dd(years=2)),
        (dd(), dd(months=-3, days=-6), dd(months=3, days=6)),
        (dd(years=1), dd(years=-1), dd(years=2)),
        (dd(years=2), dd(months=-3, days=-6), dd(years=2, months=3, days=6)),
        (dd(years=2, months=1), dd(months=-2, days=-6), dd(years=2, months=3, days=6)),
        (
            dd(years=2, months=1, days=2),
            dd(months=-2, days=-4),
            dd(years=2, months=3, days=6),
        ),
        # Negative deltas.
        (dd(years=-2), dd(), dd(years=-2)),
        (dd(), dd(months=3, days=6), dd(months=-3, days=-6)),
        (dd(years=-1), dd(years=1), dd(years=-2)),
        (dd(years=-2), dd(months=3, days=6), dd(years=-2, months=-3, days=-6)),
        (
            dd(years=-2, months=-1),
            dd(months=2, days=6),
            dd(years=-2, months=-3, days=-6),
        ),
        (
            dd(years=-2, months=-1, days=-2),
            dd(months=2, days=4),
            dd(years=-2, months=-3, days=-6),
        ),
        # Supported mixed deltas.
        (dd(years=2), dd(months=3, days=-6), dd(years=2, months=-3, days=6)),
        (
            dd(years=-2, months=1),
            dd(months=-2, days=6),
            dd(years=-2, months=3, days=-6),
        ),
        (
            dd(years=2, months=1, days=-2),
            dd(months=-2, days=4),
            dd(years=2, months=3, days=-6),
        ),
    ],
)
def test_subtract_datedelta(delta_1, delta_2, delta):
    assert delta_1 - delta_2 == delta


@pytest.mark.parametrize(
    ("delta_1", "delta_2"),
    [
        # Unsupported mixed deltas.
        (dd(years=3), dd(years=1)),
        (dd(years=2, months=5), dd(months=2, days=6)),
        (dd(years=2, months=1, days=-10), dd(months=-2, days=-4)),
    ],
)
def test_subtract_unsupported_datedelta(delta_1, delta_2):
    with pytest.raises(ValueError) as exc:
        delta_1 - delta_2
    assert "cannot subtract datedeltas with same signs" in str(exc.value)


# Multiplication by an integer scales every component and commutes.
@pytest.mark.parametrize(
    ("delta_1", "count", "delta"),
    [
        (dd(), 0, dd()),
        (dd(), 1, dd()),
        (dd(), 2, dd()),
        (dd(), -1, dd()),
        (dd(years=1), 0, dd()),
        (dd(years=1), 1, dd(years=1)),
        (dd(years=1), 2, dd(years=2)),
        (dd(years=1), -1, dd(years=-1)),
        (dd(years=2, months=3, days=6), 0, dd()),
        (dd(years=2, months=3, days=6), 1, dd(years=2, months=3, days=6)),
        (dd(years=2, months=3, days=6), 2, dd(years=4, months=6, days=12)),
        (dd(years=2, months=3, days=6), -1, dd(years=-2, months=-3, days=-6)),
    ],
)
def test_multiply_integer(delta_1, count, delta):
    assert delta_1 * count == delta
    assert count * delta_1 == delta
# Calendar arithmetic on dates: years and months are applied first (a
# nonexistent result such as Feb 29 in a non-leap year rolls forward to
# March 1), then days are added with plain timedelta arithmetic.
@pytest.mark.parametrize(
    ("date_1", "delta", "date_2"),
    [
        (d(2020, 1, 1), dd(), d(2020, 1, 1)),
        (d(2020, 2, 29), dd(), d(2020, 2, 29)),
        (d(2020, 3, 1), dd(), d(2020, 3, 1)),
        (d(2020, 12, 31), dd(), d(2020, 12, 31)),
        (d(2020, 1, 1), dd(years=1), d(2021, 1, 1)),
        (d(2020, 2, 29), dd(years=1), d(2021, 3, 1)),
        (d(2020, 3, 1), dd(years=1), d(2021, 3, 1)),
        (d(2020, 12, 31), dd(years=1), d(2021, 12, 31)),
        (d(2020, 1, 1), dd(months=12), d(2021, 1, 1)),
        (d(2020, 2, 29), dd(months=12), d(2021, 3, 1)),
        (d(2020, 3, 1), dd(months=12), d(2021, 3, 1)),
        (d(2020, 12, 31), dd(months=12), d(2021, 12, 31)),
        (d(2020, 1, 1), dd(days=365), d(2020, 12, 31)),
        (d(2020, 2, 29), dd(days=365), d(2021, 2, 28)),
        (d(2020, 3, 1), dd(days=365), d(2021, 3, 1)),
        (d(2020, 12, 31), dd(days=365), d(2021, 12, 31)),
        (d(2020, 1, 1), dd(years=1, days=-10), d(2020, 12, 22)),
        (d(2020, 2, 29), dd(years=1, days=-10), d(2021, 2, 19)),
        (d(2020, 3, 1), dd(years=1, days=-10), d(2021, 2, 19)),
        (d(2020, 12, 31), dd(years=1, days=-10), d(2021, 12, 21)),
        (d(2021, 1, 1), dd(years=-1), d(2020, 1, 1)),
        (d(2021, 2, 28), dd(years=-1), d(2020, 2, 28)),
        (d(2021, 3, 1), dd(years=-1), d(2020, 3, 1)),
        (d(2021, 12, 31), dd(years=-1), d(2020, 12, 31)),
        (d(2021, 1, 1), dd(months=-12), d(2020, 1, 1)),
        (d(2021, 2, 28), dd(months=-12), d(2020, 2, 28)),
        (d(2021, 3, 1), dd(months=-12), d(2020, 3, 1)),
        (d(2021, 12, 31), dd(months=-12), d(2020, 12, 31)),
        (d(2021, 1, 1), dd(days=-365), d(2020, 1, 2)),
        (d(2021, 2, 28), dd(days=-365), d(2020, 2, 29)),
        (d(2021, 3, 1), dd(days=-365), d(2020, 3, 1)),
        (d(2021, 12, 31), dd(days=-365), d(2020, 12, 31)),
        (d(2021, 1, 1), dd(years=-1, days=10), d(2020, 1, 11)),
        (d(2021, 2, 28), dd(years=-1, days=10), d(2020, 3, 9)),
        (d(2021, 3, 1), dd(years=-1, days=10), d(2020, 3, 11)),
        (d(2021, 12, 31), dd(years=-1, days=10), d(2021, 1, 10)),
    ],
)
def test_add_datedelta_to_date(date_1, delta, date_2):
    assert date_1 + delta == date_2


# Subtracting a datedelta applies the inverse component operations; note
# that date - delta is not always the exact inverse of date + delta.
@pytest.mark.parametrize(
    ("date_1", "delta", "date_2"),
    [
        (d(2020, 1, 1), dd(), d(2020, 1, 1)),
        (d(2020, 2, 29), dd(), d(2020, 2, 29)),
        (d(2020, 3, 1), dd(), d(2020, 3, 1)),
        (d(2020, 12, 31), dd(), d(2020, 12, 31)),
        (d(2021, 1, 1), dd(years=1), d(2020, 1, 1)),
        (d(2021, 2, 28), dd(years=1), d(2020, 2, 28)),
        (d(2021, 3, 1), dd(years=1), d(2020, 3, 1)),
        (d(2021, 12, 31), dd(years=1), d(2020, 12, 31)),
        (d(2021, 1, 1), dd(months=12), d(2020, 1, 1)),
        (d(2021, 2, 28), dd(months=12), d(2020, 2, 28)),
        (d(2021, 3, 1), dd(months=12), d(2020, 3, 1)),
        (d(2021, 12, 31), dd(months=12), d(2020, 12, 31)),
        (d(2021, 1, 1), dd(days=365), d(2020, 1, 2)),
        (d(2021, 2, 28), dd(days=365), d(2020, 2, 29)),
        (d(2021, 3, 1), dd(days=365), d(2020, 3, 1)),
        (d(2021, 12, 31), dd(days=365), d(2020, 12, 31)),
        (d(2021, 1, 1), dd(years=1, days=-10), d(2020, 1, 11)),
        (d(2021, 2, 28), dd(years=1, days=-10), d(2020, 3, 9)),
        (d(2021, 3, 1), dd(years=1, days=-10), d(2020, 3, 11)),
        (d(2021, 12, 31), dd(years=1, days=-10), d(2021, 1, 10)),
        (d(2020, 1, 1), dd(years=-1), d(2021, 1, 1)),
        (d(2020, 2, 29), dd(years=-1), d(2021, 3, 1)),
        (d(2020, 3, 1), dd(years=-1), d(2021, 3, 1)),
        (d(2020, 12, 31), dd(years=-1), d(2021, 12, 31)),
        (d(2020, 1, 1), dd(months=-12), d(2021, 1, 1)),
        (d(2020, 2, 29), dd(months=-12), d(2021, 3, 1)),
        (d(2020, 3, 1), dd(months=-12), d(2021, 3, 1)),
        (d(2020, 12, 31), dd(months=-12), d(2021, 12, 31)),
        (d(2020, 1, 1), dd(days=-365), d(2020, 12, 31)),
        (d(2020, 2, 29), dd(days=-365), d(2021, 2, 28)),
        (d(2020, 3, 1), dd(days=-365), d(2021, 3, 1)),
        (d(2020, 12, 31), dd(days=-365), d(2021, 12, 31)),
        (d(2020, 1, 1), dd(years=-1, days=10), d(2020, 12, 22)),
        (d(2020, 2, 29), dd(years=-1, days=10), d(2021, 2, 19)),
        (d(2020, 3, 1), dd(years=-1, days=10), d(2021, 2, 19)),
        (d(2020, 12, 31), dd(years=-1, days=10), d(2021, 12, 21)),
    ],
)
def test_subtract_datedelta_from_date(date_1, delta, date_2):
    assert date_1 - delta == date_2
# Unary minus negates every component; unary plus returns the delta itself.
@pytest.mark.parametrize(
    ("delta", "minus_delta"),
    [
        (dd(), dd()),
        (dd(years=2), dd(years=-2)),
        (dd(months=3, days=6), dd(months=-3, days=-6)),
        (dd(years=2, months=3, days=6), dd(years=-2, months=-3, days=-6)),
        (dd(years=-2), dd(years=2)),
        (dd(months=-3, days=-6), dd(months=3, days=6)),
        (dd(years=-2, months=-3, days=-6), dd(years=2, months=3, days=6)),
        (dd(years=2, months=-3, days=6), dd(years=-2, months=3, days=-6)),
    ],
)
def test_minus_datedelta(delta, minus_delta):
    assert -delta == minus_delta


@pytest.mark.parametrize(
    "delta",
    [
        dd(),
        dd(years=2),
        dd(months=3, days=6),
        dd(years=2, months=3, days=6),
        dd(years=-2),
        dd(months=-3, days=-6),
        dd(years=-2, months=-3, days=-6),
        dd(years=2, months=-3, days=6),
    ],
)
def test_plus_datedelta(delta):
    assert +delta == delta


# Arithmetic with unrelated types must raise TypeError in both directions.
@pytest.mark.parametrize(
    ("delta", "other"),
    [
        (dd(), None),
        (dd(), 0),
        (dd(), "a"),
        (dd(), []),
        (dd(), d.today()),
    ],
)
def test_add_or_subtract_unsupported_type(delta, other):
    with pytest.raises(TypeError):
        delta + other
    with pytest.raises(TypeError):
        delta - other


@pytest.mark.parametrize(
    ("delta", "other"),
    [
        (dd(), None),
        (dd(), 0),
        (dd(), "a"),
        (dd(), []),
    ],
)
def test_add_to_or_subtract_from_unsupported_type(delta, other):
    with pytest.raises(TypeError):
        other + delta
    with pytest.raises(TypeError):
        other - delta


@pytest.mark.parametrize(
    ("delta", "other"),
    [
        (dd(), None),
        (dd(), "a"),
        (dd(), []),
        (dd(), dd()),
    ],
)
def test_multiply_unsupported_type(delta, other):
    with pytest.raises(TypeError):
        delta * other
    with pytest.raises(TypeError):
        other * delta
| python | BSD-3-Clause | 82df8cc33b270fe5b06870b2ca2591faf1d3241c | 2026-01-05T07:13:33.593315Z | false |
aaugustin/datedelta | https://github.com/aaugustin/datedelta/blob/82df8cc33b270fe5b06870b2ca2591faf1d3241c/datedelta.py | datedelta.py | import datetime
class datedelta:
    """A difference between two dates, in calendar units.

    Stores integer ``years``, ``months`` and ``days``. Adding a datedelta
    to a ``datetime.date`` applies years first, then months — rolling a
    nonexistent resulting day forward to the 1st of the next month — and
    finally days via ``datetime.timedelta``.
    """

    # __slots__ saves memory and, together with the setter-less properties
    # below, keeps instances effectively immutable.
    __slots__ = ["_years", "_months", "_days"]

    def __init__(self, *, years=0, months=0, days=0):
        # Accept any value that round-trips through int() unchanged
        # (e.g. 2 or 2.0); reject everything else with a clear message.
        int_years = int(years)
        int_months = int(months)
        int_days = int(days)
        if int_years != years:
            raise ValueError("years must be an integer value")
        if int_months != months:
            raise ValueError("months must be an integer value")
        if int_days != days:
            raise ValueError("days must be an integer value")
        self._years = int_years
        self._months = int_months
        self._days = int_days

    # datedelta must be immutable to be hashable.

    @property
    def years(self):
        return self._years

    @property
    def months(self):
        return self._months

    @property
    def days(self):
        return self._days

    def __repr__(self):
        # Only include nonzero components so the repr stays minimal yet
        # evaluable.
        args = []
        if self._years != 0:
            args.append(f"years={self._years}")
        if self._months != 0:
            args.append(f"months={self._months}")
        if self._days != 0:
            args.append(f"days={self._days}")
        return f"datedelta.datedelta({', '.join(args)})"

    def __str__(self):
        # Human-readable form with per-component pluralization via _s().
        bits = []
        if self._years != 0:
            bits.append(f"{self._years} year{_s(self._years)}")
        if self._months != 0:
            bits.append(f"{self._months} month{_s(self._months)}")
        if self._days != 0:
            bits.append(f"{self._days} day{_s(self._days)}")
        return ", ".join(bits) or "0 days"

    def __eq__(self, other):
        # Component-wise equality; defer to the other operand for other types.
        if isinstance(other, datedelta):
            return (
                self._years == other._years
                and self._months == other._months
                and self._days == other._days
            )
        return NotImplemented

    def __ne__(self, other):
        if isinstance(other, datedelta):
            return (
                self._years != other._years
                or self._months != other._months
                or self._days != other._days
            )
        return NotImplemented

    def __hash__(self):
        return hash((self._years, self._months, self._days))

    def __add__(self, other):
        # Refuse to combine components with opposite signs (the matching
        # error message is part of the public contract, see tests).
        if isinstance(other, datedelta):
            if (
                self._years * other._years >= 0
                and self._months * other._months >= 0
                and self._days * other._days >= 0
            ):
                return self.__class__(
                    years=self._years + other._years,
                    months=self._months + other._months,
                    days=self._days + other._days,
                )
            else:
                raise ValueError("cannot add datedeltas with opposite signs")
        return NotImplemented

    def __sub__(self, other):
        # Mirror image of __add__: subtraction must not flip any component
        # past zero, so same-sign component pairs are refused.
        if isinstance(other, datedelta):
            if (
                self._years * other._years <= 0
                and self._months * other._months <= 0
                and self._days * other._days <= 0
            ):
                return self.__class__(
                    years=self._years - other._years,
                    months=self._months - other._months,
                    days=self._days - other._days,
                )
            else:
                raise ValueError("cannot subtract datedeltas with same signs")
        return NotImplemented

    def __mul__(self, other):
        # Scale every component by an integer factor.
        if isinstance(other, int):
            return self.__class__(
                years=self._years * other,
                months=self._months * other,
                days=self._days * other,
            )
        return NotImplemented

    def __radd__(self, other):
        # date + datedelta. This also matches subclasses such as
        # datetime.datetime. We leave it up to users to figure out whether
        # that makes sense in their use case.
        if isinstance(other, datetime.date):
            year = other.year
            month = other.month
            day = other.day
            # Add years.
            if self._years:
                year += self._years
                # Adjust the month and day if the target day doesn't exist.
                if day > _days_in_month(year, month):
                    # This branch is never taken when month == 12 because day is
                    # always in 1..31 and because December has 31 days.
                    month += 1
                    day = 1
            # Add months.
            if self._months:
                month += self._months
                # Adjust the year if the target month isn't in 1..12.
                dyear, month0 = divmod(month - 1, 12)
                year += dyear
                month = month0 + 1
                # Adjust the month and day if the target day doesn't exist.
                if day > _days_in_month(year, month):
                    # This branch is never taken when month == 12 because day is
                    # always in 1..31 and because December has 31 days.
                    month += 1
                    day = 1
            result = other.replace(year, month, day)
            # Add days.
            if self._days:
                result += datetime.timedelta(days=self._days)
            return result
        return NotImplemented

    def __rsub__(self, other):
        # date - datedelta. This also matches subclasses such as
        # datetime.datetime. We leave it up to users to figure out whether
        # that makes sense in their use case.
        if isinstance(other, datetime.date):
            year = other.year
            month = other.month
            day = other.day
            # Subtract years.
            if self._years:
                year -= self._years
                # Adjust the month and day if the target day doesn't exist.
                if day > _days_in_month(year, month):
                    # This branch is never taken when month == 12 because day is
                    # always in 1..31 and because December has 31 days.
                    month += 1
                    day = 1
            # Subtract months.
            if self._months:
                month -= self._months
                # Adjust the year if the target month isn't in 1..12.
                dyear, month0 = divmod(month - 1, 12)
                year += dyear
                month = month0 + 1
                # Adjust the month and day if the target day doesn't exist.
                if day > _days_in_month(year, month):
                    # This branch is never taken when month == 12 because day is
                    # always in 1..31 and because December has 31 days.
                    month += 1
                    day = 1
            result = other.replace(year, month, day)
            # Subtract days.
            if self._days:
                result -= datetime.timedelta(days=self._days)
            return result
        return NotImplemented

    __rmul__ = __mul__

    def __neg__(self):
        return self.__class__(
            years=-self._years,
            months=-self._months,
            days=-self._days,
        )

    def __pos__(self):
        return self

    # Optimize pickling.

    def __getstate__(self):
        return self._years, self._months, self._days

    def __setstate__(self, state):
        self._years, self._months, self._days = state
# Public constants for convenience, e.g. ``date.today() + 2 * WEEK``.
YEAR = datedelta(years=1)
MONTH = datedelta(months=1)
WEEK = datedelta(days=7)
DAY = datedelta(days=1)
# There's a private implementation of the same logic in the datetime module.
_DAYS_IN_MONTH = [None, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
def _days_in_month(year, month):
assert 1 <= month <= 12
# Inline definition of calendar.isleap(year) for clarity and performance.
if month == 2 and (year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)):
return 29
return _DAYS_IN_MONTH[month]
def _s(value):
return "" if abs(value) == 1 else "s"
| python | BSD-3-Clause | 82df8cc33b270fe5b06870b2ca2591faf1d3241c | 2026-01-05T07:13:33.593315Z | false |
aantonop/wifiportal21 | https://github.com/aantonop/wifiportal21/blob/73c6e1120eeeb2b4a1c1684bc61062ab77fde85f/setup.py | setup.py | #!/usr/bin/env python
from setuptools import setup, find_packages

# Distribution metadata for the wifiportal21 captive-portal server.
setup(
    name='wifiportal21',
    version="0.1",
    author='Andreas M. Antonopoulos',
    author_email='andreas@antonopoulos.com',
    url='http://antonopoulos.com',
    license='http://opensource.org/licenses/MIT',
    description='A WiFi-for-bitcoin captive portal and authentication server for use with wifidog hotspots',
    # Ship the package plus its bundled templates and static assets.
    packages=['wifiportal21'],
    package_data={'wifiportal21': ['templates/*', 'static/*']},
    include_package_data=True,
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Topic :: Internet',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    install_requires=[
        'two1',
        'flask',
        'qrcode',
        'flask_sqlalchemy',
    ],
    # Console entry point: `wifiportal21` starts the auth server.
    entry_points={
        'console_scripts': [
            'wifiportal21=wifiportal21.auth_server:run_server',
        ],
    },
)
| python | MIT | 73c6e1120eeeb2b4a1c1684bc61062ab77fde85f | 2026-01-05T07:13:34.315735Z | false |
aantonop/wifiportal21 | https://github.com/aantonop/wifiportal21/blob/73c6e1120eeeb2b4a1c1684bc61062ab77fde85f/config.py | config.py | receiving_key = "xpub6F8dWKbomfy7qmQ9Ma16SAwL3H9xMyaEjAfsEhtRjt5Bx3MFHTgDjvp4eZfUZES4i4AgaVGzVPyCKbSufdVsFvfR4wNjKRGraJrv5nLVs4h" # m/44'/0'/0'/0
# Price of access: satoshis charged per minute of connectivity.
SATOSHIS_PER_MINUTE = 2000
| python | MIT | 73c6e1120eeeb2b4a1c1684bc61062ab77fde85f | 2026-01-05T07:13:34.315735Z | false |
aantonop/wifiportal21 | https://github.com/aantonop/wifiportal21/blob/73c6e1120eeeb2b4a1c1684bc61062ab77fde85f/wifiportal21/__init__.py | wifiportal21/__init__.py | python | MIT | 73c6e1120eeeb2b4a1c1684bc61062ab77fde85f | 2026-01-05T07:13:34.315735Z | false | |
aantonop/wifiportal21 | https://github.com/aantonop/wifiportal21/blob/73c6e1120eeeb2b4a1c1684bc61062ab77fde85f/wifiportal21/auth_server.py | wifiportal21/auth_server.py | #!/usr/bin/env python
import logging
import requests
import flask
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask import request, flash, redirect, render_template, url_for
from flask import jsonify
from two1.lib.bitcoin.crypto import HDKey, HDPublicKey
from two1.lib.wallet.hd_account import HDAccount
from two1.lib.wallet.cache_manager import CacheManager
from two1.lib.blockchain.twentyone_provider import TwentyOneProvider
import qrcode
import base64
import io
import uuid
# change the receiving_key in config.py in the root folder.
from config import receiving_key, SATOSHIS_PER_MINUTE
# logging.basicConfig(level=logging.DEBUG)
logging.basicConfig(level=logging.ERROR)
auth_app = Flask(__name__, static_folder='static')
auth_app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/wifiportal21.db'
db = SQLAlchemy(auth_app)

# Watch-only HD public key: the server derives fresh receiving addresses
# from it but never holds spending keys.
K = HDPublicKey.from_b58check(receiving_key)
blockchain_provider = TwentyOneProvider()
cache = CacheManager()
receiving_account = HDAccount(hd_key=K, name="hotspot receiving", index=0,data_provider=blockchain_provider, cache_manager=cache)

# Unit conversions (satoshis per milli-bitcoin / bitcoin).
SATOSHIS_PER_MBTC = 100*10**3
SATOSHIS_PER_BTC = 100*10**6

# Guest payment lifecycle states.
STATUS_NONE = 0  # never started a payment
STATUS_PAYREQ = 1  # payment request issued, awaiting payment
STATUS_PAID = 2  # paid; `minutes` tracks remaining access
class Guest(db.Model):
    """A hotspot client, keyed by the wifidog session token (uuid).

    Tracks the client's MAC address, the bitcoin address assigned for
    payment, the payment lifecycle status and the minutes of access left.
    """

    # Wifidog session token; refreshed on each login redirect.
    uuid = db.Column(db.String, primary_key=True)
    # Client hardware address; one row per device.
    mac = db.Column(db.String(17), unique=True)
    # Bitcoin receiving address assigned to this guest (None until requested).
    address = db.Column(db.String(40), unique=True)
    # One of STATUS_NONE / STATUS_PAYREQ / STATUS_PAID.
    status = db.Column(db.Integer())
    # Minutes of access remaining; -1 means never paid.
    minutes = db.Column(db.Integer())

    def __init__(self, uuid, mac):
        self.uuid = uuid
        self.mac = mac
        self.address = None
        self.status = STATUS_NONE
        self.minutes = -1

    def __repr__(self):
        return "UUID: {0}\nMAC: {1}\nStatus: {2}\nAddress: {3}\nMinutes: {4}".format(self.uuid,self.mac, self.status, self.address, self.minutes)
# Create tables at import time so the app works on a fresh database file.
db.create_all()
@auth_app.route('/wifidog/login/', methods=[ 'GET', 'POST' ])
def client_login():
    """Serve the captive-portal splash page wifidog redirects clients to."""
    # Gateway address/port and the client's originally requested URL are
    # passed along by wifidog in the query string.
    gw_address = flask.request.args.get('gw_address')
    gw_port = flask.request.args.get('gw_port')
    success_URL = flask.request.args.get('url')
    # Issue a fresh session token; the client presents it back on
    # /wifidog/auth via the gateway.
    token = uuid.uuid4()
    auth_URL = "http://{0}:{1}/wifidog/auth?token={2}".format(gw_address, gw_port, token)
    price = "The cost of this service is {0:1.6f} BTC, or {1:1.2f} mBTC or {2:,} satoshis per minute".format(SATOSHIS_PER_MINUTE/SATOSHIS_PER_BTC, SATOSHIS_PER_MINUTE/SATOSHIS_PER_MBTC, SATOSHIS_PER_MINUTE)
    portal_html = render_template('portal.html', auth_URL=auth_URL, token=token, price=price, success_URL=success_URL)
    return portal_html
@auth_app.route('/wifidog/auth/')
def client_auth():
    """Wifidog gateway callback: authorize clients and count down minutes.

    Called with stage=login when a client first connects and with
    stage=counters periodically while the client is online.
    """
    stage = flask.request.args.get('stage')
    mac = flask.request.args.get('mac')
    uuid = flask.request.args.get('token')
    guest = Guest.query.filter_by(mac=mac).first()
    if guest:  # Existing Guest
        if guest.uuid != uuid:  # Old UUID, update it
            guest.uuid = uuid  # Update UUID in guest
        if guest.status == STATUS_PAID and guest.minutes <= 0:  # Old guest without balance
            guest.status = STATUS_PAYREQ
        db.session.commit()
    else:  # New Guest
        guest = Guest(uuid, mac)
        db.session.add(guest)
        db.session.commit()
    if stage == "login":
        if guest.status == STATUS_NONE:
            return ("Auth: -1", 200)  # Auth - Invalid
        elif guest.status == STATUS_PAID:
            if guest.minutes > 0:
                return ("Auth: 1", 200)  # Paid, give access!
            else:
                # BUGFIX: this was ``guest.status == STATUS_NONE`` — a
                # comparison, not an assignment — so exhausted guests were
                # never reset. Assign and persist the new status.
                guest.status = STATUS_NONE
                db.session.commit()
                return ("Auth: -1", 200)  # Auth - Invalid
        elif guest.status == STATUS_PAYREQ:
            return ("Auth: -1", 200)  # Auth - Invalid
    elif stage == "counters":
        guest = Guest.query.filter_by(uuid=uuid).first()
        if guest.minutes > 0:
            guest.minutes -= 1
            db.session.commit()
            print("Guest accounting, {0} minutes remain".format(guest.minutes))
            return ("Auth: 1", 200)  # Paid, give access!
        else:
            if guest.status == STATUS_PAID:  # No more minutes left, restart payment request
                guest.status = STATUS_PAYREQ
                db.session.commit()  # persist so /get_payment_address sees PAYREQ
            return ("Auth: 0", 200)
    else:
        raise Exception("Unknown authorization stage {0}".format(stage))
@auth_app.route('/auth_status')
def auth_status():
    """Return the guest's payment status as JSON (polled by the portal page)."""
    uuid = flask.request.args.get('token')
    guest = Guest.query.filter_by(uuid=uuid).first()
    if not guest:
        return "Must register first", 404
    try:
        status_response = {'status': guest.status}
        return flask.json.dumps(status_response)
    except Exception as exc:
        # BUGFIX: was a bare ``except:`` that discarded the original error
        # (and would even swallow KeyboardInterrupt); chain the cause so the
        # traceback shows what actually failed.
        raise Exception("Error finding guest status {0}".format(uuid)) from exc
def inline_base64_qrcode(address):
    """Render a ``bitcoin:`` URI QR code and return it as a base64 PNG string."""
    image = qrcode.make(
        "bitcoin:{0}".format(address),
        error_correction=qrcode.constants.ERROR_CORRECT_L,
    )
    buffer = io.BytesIO()
    image.save(buffer, 'PNG')
    buffer.seek(0)
    return base64.b64encode(buffer.read()).decode()
def get_unconfirmed_balance(address):
    """Return the total UTXO value (satoshis) sitting on `address`.

    Queries blockchain.info's unspent-outputs endpoint. Returns 0 when the
    address has no UTXOs. Raises on unexpected HTTP responses.
    """
    # ROBUSTNESS: without a timeout a slow upstream would hang the request
    # handler (and the portal's payment polling) indefinitely.
    r = requests.get(
        'https://blockchain.info/unspent?active={0}'.format(address),
        timeout=30,
    )
    balance = 0
    if r.status_code == 200:
        utxo_response = r.json()
        if 'unspent_outputs' in utxo_response:
            for utxo in utxo_response['unspent_outputs']:
                if 'value' in utxo:
                    balance += utxo['value']
        return balance
    elif r.status_code == 500:  # blockchain.info answers 500 when no UTXO exists
        return balance
    else:
        raise Exception("Error checking balance, unexpected HTTP code: {0} {1}".format(r.status_code, r.text))
@auth_app.route('/static/<path:path>')
@auth_app.route('/js/<path:path>')
def static_jquery(path):
    """Serve bundled static assets (also mapped under /js/ for the templates)."""
    return flask.send_from_directory(auth_app.static_folder, path)
@auth_app.route('/get_payment_address')
def get_payment_address():
    """Hand the guest a payment address plus an inline base64 QR code.

    Moves the guest into the payment-requested state.  Responds 404 for
    unknown tokens or guests in any other state (e.g. already paid).
    """
    uuid = flask.request.args.get('token')
    guest = Guest.query.filter_by(uuid=uuid).first()
    if not guest:
        # Unknown token previously crashed with AttributeError on `guest.status`.
        return ('must register first', 404)
    if guest.status == STATUS_NONE or guest.status == STATUS_PAYREQ:
        guest.status = STATUS_PAYREQ
        if not guest.address:
            # Lazily allocate one receiving address per guest.
            guest.address = receiving_account.get_address(False)
        # Commit unconditionally so the status transition is persisted even
        # when the address already existed (previously only committed when a
        # new address was allocated).
        db.session.commit()
        qr = inline_base64_qrcode(guest.address)
        response = {'address': guest.address, 'qr': qr}
        return flask.json.dumps(response), 200
    else:
        return ('must register first', 404)
@auth_app.route('/check_payment')
def check_payment():
    """Poll the guest's payment address and credit minutes once coins arrive."""
    uuid = flask.request.args.get('token')
    guest = Guest.query.filter_by(uuid=uuid).first()
    if not guest or not guest.address:
        # Replaces the commented-out asserts: an unknown token or a guest who
        # never requested payment previously crashed with AttributeError.
        return ("Must register first", 404)
    unconf_balance = get_unconfirmed_balance(guest.address)
    if unconf_balance > 0:  # Payment detected on this address
        guest.status = STATUS_PAID
        # Integer division: partial minutes are not credited.
        guest.minutes = unconf_balance // SATOSHIS_PER_MINUTE
        db.session.commit()
        return ("Payment received", 200)
    else:
        return ("Waiting for payment", 402)
@auth_app.route('/wifidog/ping/')
def gw_ping():
    """Health-check endpoint polled by the wifidog gateway."""
    return ('Pong', 200)
def run_server(host='0.0.0.0', port=21142):
    # Start the Flask auth portal; binds all interfaces on port 21142 by default.
    auth_app.run(host=host, port=port)
if __name__ == '__main__':
    # Launch the auth server when this module is executed directly.
    run_server()
| python | MIT | 73c6e1120eeeb2b4a1c1684bc61062ab77fde85f | 2026-01-05T07:13:34.315735Z | false |
CuriousLearner/GeeksForGeeksScrapper | https://github.com/CuriousLearner/GeeksForGeeksScrapper/blob/1b683913d9eb3a1f60140920c33c46afc9c3de79/g4g.py | g4g.py | #!/usr/bin/python
import requests
from os import system
from sys import exit
from time import sleep
from requests.exceptions import ConnectionError
from bs4 import BeautifulSoup
from article import Article
BASE_URL = "http://www.geeksforgeeks.org/"
# Accumulates Article objects while scraping; consumed by
# save_articles_as_html_and_pdf().
articles = []
# Menu number -> geeksforgeeks.org category path segment.
CHOICE_TO_CATEGORY_MAPPING = {
    1: "c",
    2: "c-plus-plus",
    3: "java",
    4: "python",
    5: "fundamentals-of-algorithms",
    6: "data-structures",
}
def display_menu():
    """Print the numbered list of scrapeable categories."""
    menu_lines = (
        "Choose category to scrape: ",
        "1. C Language",
        "2. C++ Language",
        "3. Java",
        "4. Python",
        "5. Algorithms",
        "6. Data Structures",
    )
    for line in menu_lines:
        print(line)
def get_category_choice():
    """Prompt for a menu number and return the matching category slug.

    Exits with status 1 on a number outside the menu.  Uses ``raw_input``:
    this script targets Python 2 (see the bytes write elsewhere in the file).
    """
    choice = int(raw_input("Enter choice: "))
    if choice not in CHOICE_TO_CATEGORY_MAPPING:
        print("Wrong Choice Entered. Exiting!")
        exit(1)
    return CHOICE_TO_CATEGORY_MAPPING[choice]
def save_articles_as_html_and_pdf():
    """Assemble all scraped articles into one HTML archive, then shell out to
    wkhtmltopdf to produce a PDF.

    Reads the module-level globals ``articles`` (list of Article) and
    ``category_url`` (slug chosen by the user).
    """
    print("All links scraped, extracting articles")
    # Formatting the html for articles
    all_articles = (
        "<!DOCTYPE html>"
        "<html><head>"
        '<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />'
        '<link rel="stylesheet" href="style.min.css" type="text/css" media="all" />'
        '<script src="https://cdn.rawgit.com/google/code-prettify/master/loader/run_prettify.js"></script>'
        "</head><body>"
    )
    # Page title derived from the category slug, e.g. "Java Archive".
    all_articles += (
        '<h1 style="text-align:center;font-size:40px">'
        + category_url.title()
        + " Archive</h1><hr>"
    )
    # Clickable index: each entry anchors to the matching <hr id="..."> below.
    all_articles += '<h1 style="padding-left:5%;font-size:200%;">Index</h1><br/>'
    for x in range(len(articles)):
        all_articles += (
            '<a href ="#'
            + str(x + 1)
            + '">'
            + '<h1 style="padding-left:5%;font-size:20px;">'
            + str(x + 1)
            + ".\t\t"
            + articles[x].title
            + "</h1></a> <br/>"
        )
    # Article bodies; content is UTF-8 bytes (encoded in scrape_category).
    for x in range(len(articles)):
        all_articles += (
            '<hr id="' + str(x + 1) + '">' + articles[x].content.decode("utf-8")
        )
    all_articles += """</body></html>"""
    html_file_name = "G4G_" + category_url.title() + ".html"
    html_file = open(html_file_name, "w")
    # NOTE(review): writing encoded bytes to a text-mode file only works on
    # Python 2 (consistent with raw_input elsewhere) — confirm target version.
    html_file.write(all_articles.encode("utf-8"))
    html_file.close()
    pdf_file_name = "G4G_" + category_url.title() + ".pdf"
    print("Generating PDF " + pdf_file_name)
    # Requires the wkhtmltopdf binary to be on PATH.
    html_to_pdf_command = "wkhtmltopdf " + html_file_name + " " + pdf_file_name
    system(html_to_pdf_command)
def scrape_category(category_url):
    """Scrape every article linked from the given category page into the
    module-level ``articles`` list.

    Network failures on the category page abort the program; mid-scrape
    disconnects fall back to exporting whatever was collected so far.
    """
    try:
        soup = BeautifulSoup(requests.get(BASE_URL + category_url).text)
    except ConnectionError:
        print("Couldn't connect to Internet! Please check your connection & Try again.")
        exit(1)
    # Selecting links which are in the category page
    links = [a.attrs.get("href") for a in soup.select("article li a")]
    # Removing links for the categories with anchor on same page
    links = [link for link in links if not link.startswith("#")]
    print("Found: " + str(len(links)) + " links")
    i = 1
    # Traverse each link to find article and save it.
    for link in links:
        try:
            if i % 10 == 0:
                sleep(5)  # Sleep for 5 seconds before scraping every 10th link
            link = link.strip()
            print("Scraping link no: " + str(i) + " Link: " + link)
            i += 1
            link_soup = BeautifulSoup(requests.get(link).text)
            # Remove the space occupied by Google Ads (Drop script & ins node)
            [script.extract() for script in link_soup(["script", "ins"])]
            # Tag code snippets so the prettify script styles them in the export.
            for code_tag in link_soup.find_all("pre"):
                code_tag["class"] = code_tag.get("class", []) + ["prettyprint"]
            article = link_soup.find("article")
            # Now add this article to list of all articles
            page = Article(
                title=link_soup.title.string, content=article.encode("UTF-8")
            )
            articles.append(page)
        # Sometimes hanging. So Ctrl ^ C, and try the next link.
        # Find out the reason & improve this.
        except KeyboardInterrupt:
            continue
        except ConnectionError:
            print("Internet disconnected! Please check your connection & Try again.")
            if articles:
                # Best effort: export the partial archive instead of losing it.
                print("Making PDF of links scraped till now.")
                break
            else:
                exit(1)
if __name__ == "__main__":
    # Interactive entry point: pick a category, scrape it, then export.
    display_menu()
    category_url = get_category_choice()
    scrape_category(category_url)
    save_articles_as_html_and_pdf()
| python | MIT | 1b683913d9eb3a1f60140920c33c46afc9c3de79 | 2026-01-05T07:13:34.983476Z | false |
CuriousLearner/GeeksForGeeksScrapper | https://github.com/CuriousLearner/GeeksForGeeksScrapper/blob/1b683913d9eb3a1f60140920c33c46afc9c3de79/article.py | article.py | class Article(object):
""" This Class Contains the title and the content of the article.
The title can be use as key to the article link for the navigation purpose. """
def __init__(self, title, content):
self.title = title
self.content = content
| python | MIT | 1b683913d9eb3a1f60140920c33c46afc9c3de79 | 2026-01-05T07:13:34.983476Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/android_exp/client.py | exps/android_exp/client.py | from gradio_client import Client
from PIL import Image
from env import AndroidAction, ActionType
from typing import Dict, Union
from time import sleep
from abc import ABC, abstractmethod
class AbstractAgent(ABC):
    """Interface for policies that map a (task, screenshot) pair to an action."""

    @abstractmethod
    def act(self, task: str, image_path: str) -> Union[AndroidAction, Dict]:
        """Return the next action plus agent-specific info for the task."""
        pass
class CogAgent:
    """Client for a remote CogAgent gradio endpoint."""

    def __init__(self, url, temp=0.8, top_p=0.4, top_k=10):
        # Sampling parameters forwarded with every prediction request.
        self.client = Client(url)
        self.temp = temp
        self.top_p = top_p
        self.top_k = top_k

    def predict(self, text: str, image_path: str) -> str:
        """Query the remote model, retrying up to 3 times on failure.

        Raises:
            RuntimeError: when every attempt fails.  (Previously this fell
                through to an UnboundLocalError on ``out``.)
        """
        last_error = None
        for _ in range(3):
            try:
                return self.client.predict(text, image_path, self.temp, self.top_p, self.top_k)
            except Exception as e:  # narrowed from a bare except
                last_error = e
                sleep(1)
        raise RuntimeError("CogAgent prediction failed after 3 attempts") from last_error

    def act(self, task: str, image_path: str) -> Union[AndroidAction, Dict]:
        """Ask the model for the next grounded step and parse it into an action."""
        prompt = f'What steps do I need to take to "{task}"?(with grounding)'
        out = self.predict(prompt, image_path)
        return self._translate_action(out), {"prompt": prompt, "output": out}

    def _translate_action(self, raw_action):
        """Parse CogAgent's 'Grounded Operation:' text into an AndroidAction.

        Unparseable or unsupported output degrades to an Idle action.
        """
        raw_action = raw_action.split('Grounded Operation:')[1]
        try:
            action = raw_action.split(" ")[0]
            if action == 'tap':
                # Tap coordinates arrive as "[[x,y]]" on a 0-1000 grid.
                numbers = raw_action.split('[[')[1].split(',')
                x = int(numbers[0])
                y = int(numbers[1].split(']]')[0])
                touch_point = (x/1000, y/1000)
                return AndroidAction(action_type=ActionType.DualPoint, touch_point=touch_point, lift_point=touch_point)
            elif "type" in action:
                text = raw_action.split('"')[1]
                return AndroidAction(action_type=ActionType.Type, typed_text=text)
            elif "press home" in raw_action:
                return AndroidAction(action_type=ActionType.GoHome)
            elif "press back" in raw_action:
                return AndroidAction(action_type=ActionType.GoBack)
            elif "press enter" in raw_action:
                return AndroidAction(action_type=ActionType.Enter)
            elif "task complete" in raw_action:
                return AndroidAction(action_type=ActionType.TaskComplete)
            elif "task impossible" in raw_action:
                return AndroidAction(action_type=ActionType.TaskImpossible)
            # Swipes use fixed screen-fraction gestures.
            elif "swipe up" in raw_action:
                return AndroidAction(action_type=ActionType.DualPoint, touch_point=(0.5, 0.5), lift_point=(0.5, 0.2))
            elif "swipe down" in raw_action:
                return AndroidAction(action_type=ActionType.DualPoint, touch_point=(0.5, 0.2), lift_point=(0.5, 0.5))
            elif "swipe left" in raw_action:
                return AndroidAction(action_type=ActionType.DualPoint, touch_point=(0.8, 0.5), lift_point=(0.2, 0.5))
            elif "swipe right" in raw_action:
                return AndroidAction(action_type=ActionType.DualPoint, touch_point=(0.2, 0.5), lift_point=(0.8, 0.5))
            else:
                print(f"Action {raw_action} not supported yet.")
                return AndroidAction(action_type=ActionType.Idle)
        except Exception as e:
            print(f"Action {raw_action} Parsing Error: {e}")
            return AndroidAction(action_type=ActionType.Idle)
class AutoUI:
    """Client for a remote AutoUI gradio endpoint.

    Keeps a rolling history of its own actions so each prompt can include
    the previous steps in the AutoUI serialization format.
    """

    def __init__(self, url):
        self.client = Client(url)
        self.reset_history()

    def predict(self, text: str, image_path: str) -> str:
        """Query the remote model, retrying up to 3 times on failure.

        Raises:
            RuntimeError: when every attempt fails.  (Previously this fell
                through to an UnboundLocalError on ``out``.)
        """
        last_error = None
        for _ in range(3):
            try:
                return self.client.predict(text, image_path)
            except Exception as e:  # narrowed from a bare except
                last_error = e
                sleep(1)
        raise RuntimeError("AutoUI prediction failed after 3 attempts") from last_error

    @classmethod
    def to_autoui(cls, act: AndroidAction):
        """Serialize an AndroidAction into the AutoUI action-string format.

        Note: points are emitted (y, x), the reverse of AndroidAction's (x, y).
        """
        if act.action_type == ActionType.DualPoint:
            return f'"action_type": "DUAL_POINT", "touch_point": "[{act.touch_point[1]:.4f}, {act.touch_point[0]:.4f}]", "lift_point": "[{act.lift_point[1]:.4f}, {act.lift_point[0]:.4f}]", "typed_text": ""'
        elif act.action_type == ActionType.Type:
            return f'"action_type": "TYPE", "touch_point": "[-1.0, -1.0]", "lift_point": "[-1.0, -1.0]", "typed_text": "{act.typed_text}"'
        elif act.action_type == ActionType.GoBack:
            return f'"action_type": "PRESS_BACK", "touch_point": "[-1.0, -1.0]", "lift_point": "[-1.0, -1.0]", "typed_text": ""'
        elif act.action_type == ActionType.GoHome:
            return f'"action_type": "PRESS_HOME", "touch_point": "[-1.0, -1.0]", "lift_point": "[-1.0, -1.0]", "typed_text": ""'
        elif act.action_type == ActionType.Enter:
            return f'"action_type": "PRESS_ENTER", "touch_point": "[-1.0, -1.0]", "lift_point": "[-1.0, -1.0]", "typed_text": ""'
        elif act.action_type == ActionType.TaskComplete or act.action_type == ActionType.TaskImpossible:
            return f'"action_type": "STATUS_TASK_COMPLETE", "touch_point": "[-1.0, -1.0]", "lift_point": "[-1.0, -1.0]", "typed_text": ""'
        else:
            print(f"Action {act} not supported yet.")
            return ""

    def act(self, task: str, image_path: str) -> Union[AndroidAction, Dict]:
        """Predict the next action and record it in the prompt history."""
        prompt = self.prepare_prompts(task)
        out = self.predict(prompt, image_path)
        translated_action = self._translate_action(out)
        self.history_acts.append(translated_action)
        return translated_action, {"prompt": prompt, "output": out}

    def reset_history(self):
        """Clear the per-episode action history."""
        self.history_acts = []

    def prepare_prompts(self, task: str):
        """Build the model prompt from the task plus up to 8 previous actions."""
        prompt = "Previous Actions: "
        for act in self.history_acts[-8:]:
            prompt += f"{AutoUI.to_autoui(act)} "
        prompt += f"Goal: {task}</s>"
        return prompt

    def _translate_action(self, out):
        """Parse the model's 'Action Decision: ...' string into an AndroidAction.

        Unparseable or unsupported output degrades to an Idle action; points
        are converted back from the model's (y, x) order to (x, y).
        """
        action_str = out.split("Action Decision: ")[1]
        action_type, touch_point_1, touch_point_2, lift_point_1, lift_point_2, typed_text = action_str.split(", ")
        touch_point = touch_point_1 + ", " + touch_point_2
        lift_point = lift_point_1 + ", " + lift_point_2
        try:
            action_type = action_type.split(": ")[1].strip('"')
            if action_type == 'DUAL_POINT':
                touch_point_yx = touch_point.split(": ")[1].strip('[]"')
                touch_point_yx = [float(num) for num in touch_point_yx.split(", ")]
                lift_point_yx = lift_point.split(": ")[1].strip('[]"')
                lift_point_yx = [float(num) for num in lift_point_yx.split(", ")]
                return AndroidAction(action_type=ActionType.DualPoint, touch_point=touch_point_yx[::-1], lift_point=lift_point_yx[::-1])
            elif action_type == 'TYPE':
                text = typed_text.split(": ")[1].strip('"')
                return AndroidAction(action_type=ActionType.Type, typed_text=text)
            elif action_type == 'PRESS_HOME':
                return AndroidAction(action_type=ActionType.GoHome)
            elif action_type == 'PRESS_BACK':
                return AndroidAction(action_type=ActionType.GoBack)
            elif action_type == 'PRESS_ENTER':
                return AndroidAction(action_type=ActionType.Enter)
            elif action_type == 'STATUS_TASK_COMPLETE':
                return AndroidAction(action_type=ActionType.TaskComplete)
            elif action_type == 'TASK_IMPOSSIBLE':
                return AndroidAction(action_type=ActionType.TaskImpossible)
            else:
                print(f"Action {out} not supported yet.")
                return AndroidAction(action_type=ActionType.Idle)
        except Exception as e:
            print(f"Action {out} Parsing Error: {e}")
            return AndroidAction(action_type=ActionType.Idle)
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/android_exp/runner.py | exps/android_exp/runner.py | from env import AndroidEnv
from client import AbstractAgent
import time
import time
import uuid
import os
import json
from termcolor import colored, cprint
def create_human_friendly_uid():
    """Build a readable unique id of the form ``MMDD_HHMMSS-xxxxxxxx``.

    The prefix is the local time; the suffix is the first segment of a
    random UUID4 (8 hex chars).
    """
    timestamp = time.strftime("%m%d_%H%M%S")
    suffix = str(uuid.uuid4()).split("-")[0]
    return "{0}-{1}".format(timestamp, suffix)
class Runner:
    """Drives one episode: repeatedly screenshots the env, queries the agent,
    applies the action, and logs every step under ``log_path``.
    """

    def __init__(self, task_prompt: str, env: AndroidEnv, agent: AbstractAgent, log_path: str, verbose: bool = True):
        self.task_prompt = task_prompt
        self.env = env
        self.agent = agent
        self.log_path = log_path
        # Per-step screenshots go into a sibling "images" directory.
        self.image_path = os.path.join(self.log_path, "images")
        if not os.path.exists(self.log_path):
            os.makedirs(self.log_path)
        if not os.path.exists(self.image_path):
            os.makedirs(self.image_path)
        self.uid = create_human_friendly_uid()
        self.current_step = 0
        self.log = []
        self.verbose = verbose

    def _save_img(self, obs):
        """Save the observation image to disk; return its path (or None)."""
        if obs:
            img_path = os.path.join(self.image_path, f"{self.uid}_{self.current_step}.jpg")
            obs.save(img_path)
            return img_path
        else:
            return None

    def _save_log(self):
        """Persist the per-step log for this episode as <uid>.json."""
        with open(os.path.join(self.log_path, f"{self.uid}.json"), "w") as f:
            json.dump(self.log, f, indent=2)

    def run(self):
        """Run the episode until the env terminates or an action fails,
        then record SUCCESS/FAILED in the shared log.json."""
        obs = self.env.reset()
        if self.verbose:
            cprint(f"Task: {self.task_prompt}", "blue")
        while True:
            img_path = self._save_img(obs)
            cprint(f"Step {self.current_step}, Query the Agent...", "green")
            action, actor_info = self.agent.act(self.task_prompt, img_path)
            if self.verbose:
                cprint(f"Agent: {actor_info}\n Actual Action: {action}", "blue")
            next_obs, terminated, action_success, info = self.env.step(action)
            this_step_info = {
                "step": self.current_step,
                "obs": img_path,
                "action": str(action),
                "actor_info": actor_info,
                "action_success": action_success,
                "info": info,
                "task": self.task_prompt,
            }
            if self.verbose:
                if action_success:
                    cprint(f"Action Success!", "green")
                else:
                    cprint(f"Action Failed!\n{info}", "red")
                # cprint(f"Info: {this_step_info}", "black")
            # Re-save the full log after every step so crashes lose nothing.
            self.log.append(this_step_info)
            self._save_log()
            self.current_step += 1
            obs = next_obs
            if terminated or not action_success:
                # Record the episode outcome in the shared per-task summary.
                # NOTE(review): assumes log.json already exists (main() creates
                # it) — confirm for other callers.
                with open(f"{self.log_path}/log.json", "r") as f:
                    log = json.load(f)
                if self.task_prompt not in log:
                    log[self.task_prompt] = {}
                log[self.task_prompt][self.uid] = "SUCCESS" if action_success else "FAILED"
                with open(f"{self.log_path}/log.json", "w") as f:
                    json.dump(log, f, indent=2)
                break
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/android_exp/main.py | exps/android_exp/main.py | from env import AndroidEnv, AndroidAction, ActionType
from runner import Runner
from client import CogAgent, AutoUI
import argparse
import json
import os
def main(args):
    """Run one Android task with the selected remote agent.

    Skips the task entirely when a prior run in log.json already succeeded.
    """
    # Each agent name maps to a fixed (redacted) gradio endpoint URL.
    if args.agent == "cogagent":
        agent = CogAgent("https://6cbe60874cce4c4f3e<removed>/")
    elif args.agent == "autoui-large":
        agent = AutoUI("https://23e7c6268fe5a815b0<removed>/")
    elif args.agent == "autoui-base":
        # agent = AutoUI("https://b46720d1e607b54a73<removed>/")
        agent = AutoUI("https://d6be99e10fc03a8204<removed>/")
    else:
        raise NotImplementedError(f"Agent {args.agent} not supported yet.")
    task_prompt = args.task
    output_dir = args.output_dir
    # Ensure the shared summary log exists before the Runner reads it.
    if not os.path.exists(f"{output_dir}/log.json"):
        with open(f"{output_dir}/log.json", "w") as f:
            json.dump({}, f)
    with open(f"{output_dir}/log.json") as f:
        log = json.load(f)
    # Idempotency: don't re-run a task that already has a SUCCESS entry.
    if task_prompt in log:
        for uid, status in log[task_prompt].items():
            if status == "SUCCESS":
                print(f'Task "{task_prompt}"" already completed.')
                return
    env = AndroidEnv(
        avd_name="Pixel_4_API_33",
        cache_avd_name="cache_Pixel_4_API_33",
        android_avd_home="/home/<user>/.android/avd",
        emulator_path="/home/<user>/Android/Sdk/emulator/emulator",
        adb_path="/home/<user>/Android/Sdk/platform-tools/adb",
    )
    runner = Runner(task_prompt, env, agent, output_dir)
    runner.run()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run an Android task.")
parser.add_argument("--task", type=str, help="Task prompt to run.")
parser.add_argument("--output_dir", type=str, help="output_path")
parser.add_argument("--agent", type=str, help="agent type")
args = parser.parse_args()
main(args)
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/android_exp/env.py | exps/android_exp/env.py | import os
import shutil
from dataclasses import dataclass
from typing import List, Tuple, Union
from enum import Enum
import subprocess, signal
import re
from time import sleep
from appium import webdriver
from appium.options.android import UiAutomator2Options
import base64
from PIL import Image
from io import BytesIO
from termcolor import colored, cprint
def escape_shell_text(text):
    """Backslash-escape shell metacharacters and replace spaces with '%s'.

    NOTE(review): '%s' appears to be the space encoding expected by
    `adb shell input text` — confirm against the adb docs.
    """
    # Backslash is escaped first so the escapes added below are not doubled.
    for special in ('\\', '"', "'", '`', '$'):
        text = text.replace(special, '\\' + special)
    return text.replace(" ", "%s")
def kill_all_emulators(adb_path):
    """Shut down every running Android emulator visible to adb."""
    # `adb devices` lists serials like "emulator-5554" for running emulators.
    listing = subprocess.run([adb_path, 'devices'], stdout=subprocess.PIPE)
    devices_output = listing.stdout.decode('utf-8')
    running = re.findall(r'emulator-\d+', devices_output)
    for emulator in running:
        subprocess.run([adb_path, '-s', emulator, 'emu', 'kill'])
        print(f'{emulator} has been shut down.')
    if not running:
        print("No running emulators found.")
def clone_avd(src_avd_name, tar_avd_name, android_avd_home):
    """Duplicate an AVD (its .avd folder and .ini file) under a new name.

    Parameters:
    - src_avd_name: The name of the source AVD folder.
    - tar_avd_name: The name of the target AVD folder.
    - android_avd_home: The path to the .android/avd directory.

    Every occurrence of the source name inside the copied .ini / config
    files is rewritten to the target name so paths and ids stay consistent.
    """
    src_dir = os.path.join(android_avd_home, src_avd_name + '.avd')
    tar_dir = os.path.join(android_avd_home, tar_avd_name + '.avd')
    src_ini = os.path.join(android_avd_home, src_avd_name + '.ini')
    tar_ini = os.path.join(android_avd_home, tar_avd_name + '.ini')

    # Copy the whole AVD folder.
    shutil.copytree(src_dir, tar_dir)

    # Copy the top-level .ini, rewriting the AVD name as we go.
    with open(src_ini, 'r') as fin, open(tar_ini, 'w') as fout:
        for line in fin:
            fout.write(line.replace(src_avd_name, tar_avd_name))

    def _rewrite_in_place(path):
        # Substitute the AVD name in-place (covers both paths and ids);
        # silently skip files that don't exist.
        if os.path.exists(path):
            with open(path, 'r') as f:
                contents = f.readlines()
            with open(path, 'w') as f:
                for line in contents:
                    f.write(line.replace(src_avd_name, tar_avd_name))

    for ini_name in ['config.ini', 'hardware-qemu.ini']:
        _rewrite_in_place(os.path.join(tar_dir, ini_name))
    # The snapshot's hardware.ini also embeds the AVD name, when present.
    _rewrite_in_place(os.path.join(tar_dir, 'snapshots', 'default_boot', 'hardware.ini'))
class ActionType(Enum):
    # Discrete action space for the Android environment.  DualPoint covers
    # both taps and swipes (touch point + lift point); see AndroidEmulator.step
    # for how each value is executed on the device.
    Idle=0
    DualPoint=1
    Type=2
    GoBack=3
    GoHome=4
    Enter=5
    TaskComplete=6
    TaskImpossible=7
@dataclass
class AndroidAction():
    """A single device interaction.

    Touch/lift coordinates are (x, y) fractions of the screen size (they are
    multiplied by the resolution in AndroidEmulator.step).
    """
    action_type: ActionType
    touch_point: Tuple[float, float] = None
    lift_point: Tuple[float, float] = None
    typed_text: str = None

    def __str__(self):
        # Render only the fields that are set, comma-joined.
        parts = [f"Action Type: {self.action_type.name}"]
        if self.touch_point:
            parts.append(f"Touch Point: ({self.touch_point[0]:.4f}, {self.touch_point[1]:.4f})")
        if self.lift_point:
            parts.append(f"Lift Point: ({self.lift_point[0]:.4f}, {self.lift_point[1]:.4f})")
        if self.typed_text:
            parts.append(f"Typed Text: '{self.typed_text}'")
        return ", ".join(parts)

    def to_act(self):
        # Placeholder retained from the original implementation.
        pass
class AndroidEmulator():
    """Owns one emulator process plus an Appium session against it.

    Constructed per-episode by AndroidEnv.reset(); terminates itself once the
    episode ends or an action fails.
    """

    def __init__(self, avd_name, max_steps, emulator_path="~/Android/Sdk/emulator/emulator", appium_server_url='http://localhost:4723'):
        # Boot the named AVD headlessly-ish (no snapshot save, no audio) and
        # give it a fixed 30s to come up before attaching Appium.
        self.emulator_path = os.path.expanduser(emulator_path)
        self.avd_name = avd_name
        cprint(colored(f"Starting the Emulator", "green"))
        self.emulator_process = subprocess.Popen(f"{self.emulator_path} -avd {self.avd_name} -no-boot-anim -gpu auto -no-snapshot-save -no-audio -netfast -partition-size 4096 -cache-size 4000", shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        sleep(30)
        capabilities = dict(
            platformName='Android',
            automationName='uiautomator2',
            deviceName='Android',
            newCommandTimeout="1200"
        )
        self.options = UiAutomator2Options().load_capabilities(capabilities)
        self.appium_server_url = appium_server_url
        self.driver = webdriver.Remote(self.appium_server_url, options=self.options)
        self.terminated = False
        self.max_steps = max_steps
        self.steps = 0
        # Pixel resolution, used to scale fractional action coordinates.
        screen_size = self.driver.get_window_size()
        self.screen_size = (screen_size["width"], screen_size["height"])

    def terminate(self):
        """Stop the emulator process, escalating to kill after 20s."""
        self.emulator_process.terminate()
        try:
            self.emulator_process.wait(timeout=20)
        except subprocess.TimeoutExpired:
            self.emulator_process.kill()
            self.emulator_process.wait()

    def refresh_driver(self):
        """Tear down and re-create the Appium session."""
        self.driver.quit()
        self.driver = webdriver.Remote(self.appium_server_url, options=self.options)

    def get_obs(self):
        """Return the current screen as an RGB PIL image."""
        screenshot_str = self.driver.get_screenshot_as_base64()
        imgdata = base64.b64decode(screenshot_str)
        image = Image.open(BytesIO(imgdata))
        # Assuming 'image' is your PIL Image object in RGBA mode
        if image.mode == 'RGBA':
            image = image.convert('RGB')
        return image

    def step(self, action: AndroidAction):
        """Execute one action; returns (screenshot, terminated, success, info).

        Any driver error marks the action as failed and shuts the emulator
        down (screenshot stays None in that case).
        """
        self.steps += 1
        if self.steps > self.max_steps:
            # Force-terminate overlong episodes.
            action = AndroidAction(action_type=ActionType.TaskImpossible)
            cprint(colored(f"Terminate the Emulator: Max Steps Exceeded {self.max_steps}.", "red"))
        if self.terminated:
            raise Exception("The emulator is terminated.")
        screenshot = None
        info = {}
        try:
            if action.action_type == ActionType.DualPoint:
                # Fractional coordinates are scaled to pixels; a swipe with
                # identical touch/lift points acts as a tap.
                assert len(action.touch_point) == 2
                assert len(action.lift_point) == 2
                touch_x = action.touch_point[0] * self.screen_size[0]
                touch_y = action.touch_point[1] * self.screen_size[1]
                lift_x = action.lift_point[0] * self.screen_size[0]
                lift_y = action.lift_point[1] * self.screen_size[1]
                self.driver.swipe(touch_x, touch_y, lift_x, lift_y)
            elif action.action_type == ActionType.Type:
                # This doesn't work well because of active element
                # element = self.driver.switch_to.active_element
                # try:
                #     element.send_keys(action.typed_text)
                # except Exception as e:
                #     cprint(f"Type Error: {e}", "red")
                # Fall back to `adb shell input text` with escaped payload.
                t = escape_shell_text(action.typed_text)
                self.driver.execute_script('mobile: shell', {
                    'command': 'input',
                    'args': ['text', t],
                    'includeStderr': True,
                    'timeout': 5000
                })
            elif action.action_type == ActionType.GoBack:
                self.driver.back()
            elif action.action_type == ActionType.GoHome:
                self.driver.press_keycode(3)
            elif action.action_type == ActionType.Enter:
                self.driver.press_keycode(66)
            elif action.action_type == ActionType.TaskComplete:
                self.terminated = True
            elif action.action_type == ActionType.TaskImpossible:
                self.terminated = True
            elif action.action_type == ActionType.Idle:
                pass
            else:
                raise Exception(f"Unknown action type: {action.action_type}")
            action_success = True
            screenshot = self.get_obs()
            if self.terminated:
                self.driver.quit()
                self.terminate()
        except Exception as e:
            action_success = False
            info["error"] = str(e)
            self.driver.quit()
            self.terminate()
        return screenshot, self.terminated, action_success, info
class AndroidEnv():
    """
    This class wraps around the android emulator and provides a more infrastructure for free-form GUI navigation
    """
    def __init__(self,
                 avd_name,
                 cache_avd_name,
                 android_avd_home: str = '~/.android/avd',
                 emulator_path: str = '~/Android/Sdk/emulator/emulator',
                 adb_path: str = "~/Library/Android/sdk/platform-tools/adb",
                 run_headless: bool = False,
                 max_steps: int = 20):
        """Remember paths/names; the emulator itself is started by reset().

        Each reset clones `avd_name` into a throwaway `cache_avd_name` so the
        source image is never mutated.
        """
        self.android_avd_home = os.path.expanduser(android_avd_home)
        self.emulator_path = os.path.expanduser(emulator_path)
        self.adb_path = os.path.expanduser(adb_path)
        self.avd_name = avd_name
        self.cache_avd_name = cache_avd_name
        self.run_headless = run_headless
        self.max_steps = max_steps
        # Fix: initialize so step() before reset() raises the intended
        # Exception instead of an AttributeError.
        self.emulator = None

    def reset(self):
        """
        Reset the emulator to a clean state
        """
        # If the emulator is already running, kill it,
        # Then delete the cache AVD
        kill_all_emulators(self.adb_path)
        if hasattr(self, "emulator_process"):
            # NOTE(review): emulator_process is never set on this object
            # (AndroidEmulator owns its own) — confirm this branch is needed.
            self.emulator_process.send_signal(signal.SIGINT)
            self.emulator_process.wait()
        cache_avd_path = os.path.join(self.android_avd_home, self.cache_avd_name + ".avd")
        cache_avd_ini_path = os.path.join(self.android_avd_home, self.cache_avd_name + ".ini")
        if os.path.exists(cache_avd_path):
            shutil.rmtree(cache_avd_path)
        if os.path.exists(cache_avd_ini_path):
            os.remove(cache_avd_ini_path)
        sleep(2)
        # Clone the source AVD and start the emulator
        clone_avd(self.avd_name, self.cache_avd_name, self.android_avd_home)
        self.emulator = AndroidEmulator(avd_name=self.cache_avd_name, emulator_path=self.emulator_path, max_steps=self.max_steps)
        return self.emulator.get_obs()

    def step(self, action):
        """Forward the action to the running emulator (reset() must run first)."""
        if not self.emulator:
            raise Exception("Please call reset() before calling step()")
        return self.emulator.step(action)
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/android_exp/models/Auto-UI/action_matching.py | exps/android_exp/models/Auto-UI/action_matching.py | '''
Adapted from https://github.com/google-research/google-research/tree/master/android_in_the_wild
'''
# import jax
# import jax.numpy as jnp
import numpy as np
import action_type as action_type_lib
_TAP_DISTANCE_THRESHOLD = 0.14 # Fraction of the screen
# Bounding boxes are grown by these fractions before containment checks.
ANNOTATION_WIDTH_AUGMENT_FRACTION = 1.4
ANNOTATION_HEIGHT_AUGMENT_FRACTION = 1.4
# Interval determining if an action is a tap or a swipe.
_SWIPE_DISTANCE_THRESHOLD = 0.04
def _yx_in_bounding_boxes(
yx, bounding_boxes
):
"""Check if the (y,x) point is contained in each bounding box.
Args:
yx: The (y, x) coordinate in pixels of the point.
bounding_boxes: A 2D int array of shape (num_bboxes, 4), where each row
represents a bounding box: (y_top_left, x_top_left, box_height,
box_width). Note: containment is inclusive of the bounding box edges.
Returns:
is_inside: A 1D bool array where each element specifies if the point is
contained within the respective box.
"""
y, x = yx
# `bounding_boxes` has shape (n_elements, 4); we extract each array along the
# last axis into shape (n_elements, 1), then squeeze unneeded dimension.
top, left, height, width = [
jnp.squeeze(v, axis=-1) for v in jnp.split(bounding_boxes, 4, axis=-1)
]
# The y-axis is inverted for AndroidEnv, so bottom = top + height.
bottom, right = top + height, left + width
return jnp.logical_and(y >= top, y <= bottom) & jnp.logical_and(
x >= left, x <= right)
def _resize_annotation_bounding_boxes(
annotation_positions, annotation_width_augment_fraction,
annotation_height_augment_fraction):
"""Resize the bounding boxes by the given fractions.
Args:
annotation_positions: Array of shape (N, 4), where each row represents the
(y, x, height, width) of the bounding boxes.
annotation_width_augment_fraction: The fraction to augment the box widths,
E.g., 1.4 == 240% total increase.
annotation_height_augment_fraction: Same as described for width, but for box
height.
Returns:
Resized bounding box.
"""
height_change = (
annotation_height_augment_fraction * annotation_positions[:, 2])
width_change = (
annotation_width_augment_fraction * annotation_positions[:, 3])
# Limit bounding box positions to the screen.
resized_annotations = jnp.stack([
jnp.maximum(0, annotation_positions[:, 0] - (height_change / 2)),
jnp.maximum(0, annotation_positions[:, 1] - (width_change / 2)),
jnp.minimum(1, annotation_positions[:, 2] + height_change),
jnp.minimum(1, annotation_positions[:, 3] + width_change),
],
axis=1)
return resized_annotations
def is_tap_action(normalized_start_yx,
normalized_end_yx):
distance = jnp.linalg.norm(
jnp.array(normalized_start_yx) - jnp.array(normalized_end_yx))
return distance <= _SWIPE_DISTANCE_THRESHOLD
def _is_non_dual_point_action(action_type):
  """True for any action type other than DUAL_POINT.

  Uses numpy instead of the (commented-out) jax import, whose absence made
  `jnp` an undefined name here.
  """
  return np.not_equal(action_type, action_type_lib.ActionType.DUAL_POINT)
def _check_tap_actions_match(
    tap_1_yx,
    tap_2_yx,
    annotation_positions,
    matching_tap_distance_threshold_screen_percentage,
    annotation_width_augment_fraction,
    annotation_height_augment_fraction,
):
  """Determines if two tap actions are the same.

  Uses numpy instead of the (commented-out) jax import, whose absence made
  `jnp` an undefined name here.
  """
  resized_annotation_positions = _resize_annotation_bounding_boxes(
      annotation_positions,
      annotation_width_augment_fraction,
      annotation_height_augment_fraction,
  )

  # Check if the ground truth tap action falls in an annotation's bounding box.
  tap1_in_box = _yx_in_bounding_boxes(tap_1_yx, resized_annotation_positions)
  tap2_in_box = _yx_in_bounding_boxes(tap_2_yx, resized_annotation_positions)
  both_in_box = np.max(tap1_in_box & tap2_in_box)

  # If the ground-truth tap action falls outside any of the annotation
  # bounding boxes or one of the actions is inside a bounding box and the other
  # is outside bounding box or vice versa, compare the points using Euclidean
  # distance.
  within_threshold = (
      np.linalg.norm(np.array(tap_1_yx) - np.array(tap_2_yx))
      <= matching_tap_distance_threshold_screen_percentage
  )
  return np.logical_or(both_in_box, within_threshold)
def _check_drag_actions_match(
drag_1_touch_yx,
drag_1_lift_yx,
drag_2_touch_yx,
drag_2_lift_yx,
):
"""Determines if two drag actions are the same."""
# Store drag deltas (the change in the y and x coordinates from touch to
# lift), magnitudes, and the index of the main axis, which is the axis with
# the greatest change in coordinate value (e.g. a drag starting at (0, 0) and
# ending at (0.3, 0.5) has a main axis index of 1).
drag_1_deltas = drag_1_lift_yx - drag_1_touch_yx
drag_1_magnitudes = jnp.abs(drag_1_deltas)
drag_1_main_axis = np.argmax(drag_1_magnitudes)
drag_2_deltas = drag_2_lift_yx - drag_2_touch_yx
drag_2_magnitudes = jnp.abs(drag_2_deltas)
drag_2_main_axis = np.argmax(drag_2_magnitudes)
return jnp.equal(drag_1_main_axis, drag_2_main_axis)
def check_actions_match(
    action_1_touch_yx,
    action_1_lift_yx,
    action_1_action_type,
    action_2_touch_yx,
    action_2_lift_yx,
    action_2_action_type,
    annotation_positions,
    tap_distance_threshold = _TAP_DISTANCE_THRESHOLD,
    annotation_width_augment_fraction = ANNOTATION_WIDTH_AUGMENT_FRACTION,
    annotation_height_augment_fraction = ANNOTATION_HEIGHT_AUGMENT_FRACTION,
):
    """Determines if two actions are considered to be the same.

    Two actions being "the same" is defined here as two actions that would result
    in a similar screen state.

    Args:
      action_1_touch_yx: The (y, x) coordinates of the first action's touch.
      action_1_lift_yx: The (y, x) coordinates of the first action's lift.
      action_1_action_type: The action type of the first action.
      action_2_touch_yx: The (y, x) coordinates of the second action's touch.
      action_2_lift_yx: The (y, x) coordinates of the second action's lift.
      action_2_action_type: The action type of the second action.
      annotation_positions: The positions of the UI annotations for the screen. It
        is A 2D int array of shape (num_bboxes, 4), where each row represents a
        bounding box: (y_top_left, x_top_left, box_height, box_width). Note that
        containment is inclusive of the bounding box edges.
      tap_distance_threshold: The threshold that determines if two taps result in
        a matching screen state if they don't fall the same bounding boxes.
      annotation_width_augment_fraction: The fraction to increase the width of the
        bounding box by.
      annotation_height_augment_fraction: The fraction to increase the height of
        the bounding box by.

    Returns:
      A boolean representing whether the two given actions are the same or not.
    """
    action_1_touch_yx = jnp.asarray(action_1_touch_yx)
    action_1_lift_yx = jnp.asarray(action_1_lift_yx)
    action_2_touch_yx = jnp.asarray(action_2_touch_yx)
    action_2_lift_yx = jnp.asarray(action_2_lift_yx)
    # Checks if at least one of the actions is global (i.e. not DUAL_POINT),
    # because if that is the case, only the actions' types need to be compared.
    has_non_dual_point_action = jnp.logical_or(
        _is_non_dual_point_action(action_1_action_type),
        _is_non_dual_point_action(action_2_action_type),
    )
    # A tap can never match a swipe: XOR of the two tap/swipe classifications.
    different_dual_point_types = jnp.logical_xor(
        is_tap_action(action_1_touch_yx, action_1_lift_yx),
        is_tap_action(action_2_touch_yx, action_2_lift_yx),
    )
    is_tap = jnp.logical_and(
        is_tap_action(action_1_touch_yx, action_1_lift_yx),
        is_tap_action(action_2_touch_yx, action_2_lift_yx),
    )
    taps_match = _check_tap_actions_match(
        action_1_touch_yx,
        action_2_touch_yx,
        annotation_positions,
        tap_distance_threshold,
        annotation_width_augment_fraction,
        annotation_height_augment_fraction,
    )
    # Only count a tap match when both actions actually are taps.
    taps_match = jnp.logical_and(is_tap, taps_match)
    drags_match = _check_drag_actions_match(
        action_1_touch_yx, action_1_lift_yx, action_2_touch_yx, action_2_lift_yx
    )
    # Drag matching only applies when neither action is a tap.
    drags_match = jnp.where(is_tap, False, drags_match)
    # Decision tree: global actions compare types only; mixed tap/swipe pairs
    # never match; otherwise defer to the tap/drag comparisons above.
    return jnp.where(
        has_non_dual_point_action,
        jnp.equal(action_1_action_type, action_2_action_type),
        jnp.where(
            different_dual_point_types,
            False,
            jnp.logical_or(taps_match, drags_match),
        ),
    )
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/android_exp/models/Auto-UI/action_type.py | exps/android_exp/models/Auto-UI/action_type.py | '''
Adapted from https://github.com/google-research/google-research/tree/master/android_in_the_wild
'''
import enum
class ActionType(enum.IntEnum):
    """Integer action codes used by the Android-in-the-Wild (AitW) episodes."""

    # Placeholders for unused enum values
    UNUSED_0 = 0
    UNUSED_1 = 1
    UNUSED_2 = 2
    UNUSED_8 = 8
    UNUSED_9 = 9

    ########### Agent actions ###########

    # A type action that sends text to the emulator. Note that this simply sends
    # text and does not perform any clicks for element focus or enter presses for
    # submitting text.
    TYPE = 3

    # The dual point action used to represent all gestures.
    DUAL_POINT = 4

    # These actions differentiate pressing the home and back button from touches.
    # They represent explicit presses of back and home performed using ADB.
    PRESS_BACK = 5
    PRESS_HOME = 6

    # An action representing that ADB command for hitting enter was performed.
    PRESS_ENTER = 7

    ########### Episode status actions ###########

    # An action used to indicate the desired task has been completed and resets
    # the environment. This action should also be used in the case that the task
    # has already been completed and there is nothing to do.
    # e.g. The task is to turn on the Wi-Fi when it is already on
    STATUS_TASK_COMPLETE = 10

    # An action used to indicate that desired task is impossible to complete and
    # resets the environment. This can be a result of many different things
    # including UI changes, Android version differences, etc.
    STATUS_TASK_IMPOSSIBLE = 11
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/android_exp/models/Auto-UI/serve_base.py | exps/android_exp/models/Auto-UI/serve_base.py | import os
import numpy as np
import torch
import os
import re
import json
import argparse
import random
from transformers import AutoTokenizer, DataCollatorForSeq2Seq, Seq2SeqTrainingArguments, Seq2SeqTrainer
from model import T5ForMultimodalGeneration
from infer_utils import ImageFeatureExtractor
from PIL import Image
# Dimensionality of the BLIP-2 visual features the model consumes.
img_dim = 1408
# NOTE(review): hard-coded absolute paths to weights/checkpoint — these only
# resolve on the original author's machine; consider CLI flags or env vars.
model_path = "/home/jiayipan/code/ICML_GUI/Auto-UI/weights/Auto-UI-Base"
checkpoint_path = "/home/jiayipan/yifei/logs/debug-endsuccess/trainer.pt"
# Load the base multimodal T5, then overwrite with the fine-tuned checkpoint.
model = T5ForMultimodalGeneration.from_pretrained(model_path, img_dim).cuda()
model.load_state_dict(torch.load(checkpoint_path)["model_state_dict"])
tokenizer = AutoTokenizer.from_pretrained(model_path)
img_extractor = ImageFeatureExtractor()
def generate(text: str, imgage: Image):
    """Run the Auto-UI model on a textual prompt plus a screenshot.

    The (misspelled) ``imgage`` parameter name is kept as-is: the gradio
    interface is wired against this signature.
    """
    encoded = tokenizer(text, return_tensors="pt")
    features = img_extractor.to_feat(imgage).float().cuda()
    generated = model.generate(
        input_ids=encoded["input_ids"].to("cuda"),
        attention_mask=encoded["attention_mask"].to("cuda"),
        image_ids=features.unsqueeze(0).to("cuda"),
        max_length=128,
    )
    return tokenizer.decode(generated[0], skip_special_tokens=True)
import gradio as gr
def main():
    """Launch a publicly shared gradio demo wrapping generate()."""
    interface = gr.Interface(
        fn=generate,
        inputs=["textbox", "image"],
        outputs="text",
    )
    interface.launch(share=True)
if __name__ == "__main__":
main()
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/android_exp/models/Auto-UI/utils_data.py | exps/android_exp/models/Auto-UI/utils_data.py | from torch.utils.data import Dataset
import torch
import pickle
from tqdm import tqdm
import action_matching, action_type
import numpy as np
import numpy as jnp
# import jax.numpy as jnp
import random
import re
# (num_tokens, feature_dim) of the frozen image features emitted by each
# supported vision backbone; used to size the model's image inputs.
img_shape = {
    "resnet": (512, 2048),
    "clip": (49, 2048),
    "detr": (100, 256),
    "vit": (577, 768),
    "vit-large": (145, 1024),
    "vit-global": (1, 768),
    "vit-merge": (578, 768),
}
def load_data(args, split):
    """Flatten parsed AitW episodes into aligned per-step example lists.

    Reads pickled episodes (as produced by the feature-extraction script) and
    turns every step into one example: a textual prompt (goal, optional action
    history, optional serialized screen layout), the step's image features, a
    target action string (optionally prefixed with a future "Action Plan"),
    and the step's UI annotation boxes.

    Args:
        args: Parsed CLI namespace (data_root, all_data, data_ratio,
            eval_subset, use_history, use_img_history, use_future, use_layout,
            transform_axis, img_dim, debug_num).
        split: One of "train", "val" or "test".

    Returns:
        Tuple (source_text, source_image, target_text, anno_positions),
        aligned index-for-index per step.
    """
    target_text = []
    source_text = []
    source_image = []
    anno_positions = []
    if args.all_data:
        if split == "train":
            # Train on every subset; google_apps is subsampled by the ratio in
            # args.all_data to keep compute manageable.
            data = []
            for subdir in ["general", "google_apps", "install", "single", "web_shopping"]:
                print(f"loading {subdir}", len(data))
                with open(f"dataset/blip/{subdir}_{args.data_root}_{split}.obj", "rb") as rp:
                    sub_data = pickle.load(rp)
                if subdir == "google_apps":
                    sub_data = random.sample(sub_data, int(len(sub_data) * args.all_data))
                data.extend(sub_data)
        else:
            # we use general subset for dev/test
            with open(f"{args.eval_subset}_{split}.obj", "rb") as rp:
                data = pickle.load(rp)
    else:
        with open(f"{args.data_root}_{split}.obj", "rb") as rp:
            data = pickle.load(rp)
        if args.data_ratio:
            data = random.sample(data, int(len(data) * args.data_ratio))
    for qid, episode in enumerate(tqdm(data)):
        episode_id = episode["episode_id"]
        episode_data = episode["data"]
        if args.use_history:
            history_action = []
            if args.use_img_history:
                # Zero-pad the screen history so early steps still carry
                # args.use_history image slots.
                history_image = [torch.zeros(args.img_dim)] * args.use_history
        for step_idx, step_data in enumerate(episode_data):
            question = step_data["goal"]
            question = f"Goal: {question}"
            image = step_data["image"]
            ui_positions = step_data["ui_positions"]
            ui_text = step_data["ui_text"]
            ui_type = step_data["ui_type"]
            if args.use_layout:
                # Serialize annotated UI elements into pseudo-HTML, with the
                # (top, left, bottom, right) box carried in the alt attribute.
                icon_string = ""
                for ui_idx, ui_type_i in enumerate(ui_type):
                    ui_axis = ui_positions[ui_idx]
                    top, left, height, width = ui_axis
                    # The y-axis is inverted for AndroidEnv, so bottom = top + height.
                    bottom, right = top + height, left + width
                    ui_axis = [top, left, bottom, right]
                    ui_axis = ["{:.4f}".format(axis) for axis in ui_axis]
                    ui_axis = f"({ui_axis[0]}, {ui_axis[1]}, {ui_axis[2]}, {ui_axis[3]})"
                    if ui_type_i == "TEXT":
                        icon_string += f'<p id={ui_idx} class="text" alt="{ui_axis}">{ui_text[ui_idx]}</p>\n'
                    elif "ICON" in ui_type_i:
                        icon_string += f'<img id={ui_idx} class={ui_type_i} alt="{ui_axis}">{ui_text[ui_idx]}</p>\n'
                    else:
                        print(icon_string)
                        # NOTE(review): this assert is a no-op — a non-empty
                        # string is always truthy, so it can never fail.
                        assert "parsing ui failed!!!"
                question = f"{question}\nScreen: {icon_string}"
                # print(question)
            result_touch_yx = step_data["result_touch_yx"]
            result_lift_yx = step_data["result_lift_yx"]
            result_action = step_data["result_action"][0]
            result_text = step_data["result_action"][1]
            result_text = result_text.replace("\\", "").replace('"','').replace("'","")
            if args.transform_axis:
                # Normalize gestures: taps get rounded coordinates with
                # lift == touch; swipes snap to one of four canonical drags.
                scroll_map = {
                    "up": [[0.8000, 0.5000], [0.2000, 0.5000]],
                    "down": [[0.2000, 0.5000], [0.8000, 0.5000]],
                    "left": [[0.8000, 0.5000], [0.2000, 0.5000]],
                    "right": [[0.2000, 0.5000], [0.8000, 0.5000]]
                }
                action_touch_yx = jnp.asarray(result_touch_yx)
                action_lift_yx = jnp.asarray(result_lift_yx)
                if result_action == "DUAL_POINT":
                    if is_tap_action(action_touch_yx, action_lift_yx):
                        result_touch_yx = [round(axis, 4) for axis in result_touch_yx]
                        # if touching, the lift can be the same as touch
                        result_lift_yx = result_touch_yx
                    else:
                        drags_match = _check_drag_actions_match(
                            action_touch_yx, action_lift_yx
                        )
                        result_touch_yx, result_lift_yx = scroll_map[drags_match]
            target_action = f'"action_type": "{result_action}", "touch_point": "{result_touch_yx}", "lift_point": "{result_lift_yx}", "typed_text": "{result_text}"'
            if args.use_history:
                prev_actions = "\n".join(history_action)
                question = f"Previous Actions: {prev_actions}\n{question}"
                if args.use_img_history:
                    image = history_image + [image]
                    image = torch.stack(image)
            # NOTE(review): target_action_label is only assigned when
            # args.use_future is truthy — appears to assume use_future is
            # always set (its default is 16); confirm before changing defaults.
            if args.use_future:
                # Prefix the label with a short plan of upcoming action types.
                future_actions = episode_data[step_idx:]
                if len(future_actions) > args.use_future:
                    future_actions = future_actions[:args.use_future]
                future_actions = "[" + ",".join([action_t["result_action"][0] for action_t in future_actions]) + "]\n"
                target_action_label = "Action Plan: " + future_actions + "; Action Decision: " + target_action
            source_text.append(question)
            source_image.append(image)
            target_text.append(target_action_label)
            anno_positions.append(ui_positions)
            if args.use_history:
                history_action.append(target_action)
                if args.use_img_history:
                    history_image.append(image[-1])
                    history_image.pop(0)
                if len(history_action) > args.use_history:
                    history_action.pop(0)
        if args.debug_num:
            if int(qid) > args.debug_num:
                break
    return source_text, source_image, target_text, anno_positions
_SWIPE_DISTANCE_THRESHOLD = 0.04


def is_tap_action(normalized_start_yx, normalized_end_yx):
    """Classify a dual-point gesture: True for a tap, False for a swipe.

    A gesture counts as a tap when the Euclidean distance between its
    normalized touch-down and lift points is at most the swipe threshold.
    """
    gap = jnp.linalg.norm(
        jnp.array(normalized_start_yx) - jnp.array(normalized_end_yx)
    )
    return gap <= _SWIPE_DISTANCE_THRESHOLD
def _check_drag_actions_match(
drag_touch_yx,
drag_lift_yx,
):
"""Determines if two drag actions are the same."""
# Store drag deltas (the change in the y and x coordinates from touch to
# lift), magnitudes, and the index of the main axis, which is the axis with
# the greatest change in coordinate value (e.g. a drag starting at (0, 0) and
# ending at (0.3, 0.5) has a main axis index of 1).
drag_1_deltas = drag_lift_yx - drag_touch_yx
drag_1_magnitudes = jnp.abs(drag_1_deltas)
drag_1_main_axis = np.argmax(drag_1_magnitudes)
# y axis
if drag_1_main_axis == 0:
if drag_1_deltas[0] < 0:
scroll = "up"
else:
scroll = "down"
elif drag_1_main_axis == 1:
if drag_1_deltas[1] < 0:
scroll = "left"
else:
scroll = "right"
return scroll
class AITWDatasetImg(Dataset):
    """
    Creating a custom dataset for reading the dataset and
    loading it into the dataloader to pass it to the
    neural network for finetuning the model
    """

    def __init__(
        self, data, tokenizer, source_len, target_len
    ):
        """
        Initializes a Dataset class

        Args:
            data: Tuple of (source_text, source_image, target_text,
                anno_positions) as produced by load_data()
            tokenizer (transformers.tokenizer): Transformers tokenizer
            source_len (int): Max length of source text
            target_len (int): Max length of target text
        """
        self.tokenizer = tokenizer
        self.source_len = source_len
        # Target length is stored under the historical name `summ_len`.
        self.summ_len = target_len
        self.source_text = data[0]
        self.source_image = data[1]
        self.target_text = data[2]
        self.anno_positions = data[3]

    def __len__(self):
        """returns the number of examples (steps) in the dataset"""
        return len(self.target_text)

    def __getitem__(self, index):
        """return the input ids, attention masks and target ids"""
        source_text = str(self.source_text[index])
        source_image = self.source_image[index]
        target_text_org = str(self.target_text[index])
        # abc = self.tokenizer.tokenize(target_text)
        # print(len(abc))
        # Extract just the action-decision fragment after "Action Decision:".
        pattern = r'(?<=Action Decision:\s).*'
        result = re.search(pattern, target_text_org)
        target_text = result.group(0)
        target_text = target_text.strip()
        # NOTE(review): eval() on label text — acceptable only because labels
        # are generated locally by load_data(); never feed untrusted data here.
        target_dict = eval("{" + target_text + "}")
        action = action_type.ActionType[target_dict["action_type"]].value
        touch_point = eval(target_dict["touch_point"])
        lift_point = eval(target_dict["lift_point"])
        # cleaning data so as to ensure data is in string type
        source_text = " ".join(source_text.split())
        target_text_org = " ".join(target_text_org.split())
        # NOTE(review): pad_to_max_length is deprecated and redundant with
        # padding="max_length"; kept byte-identical here.
        source = self.tokenizer.batch_encode_plus(
            [source_text],
            max_length=self.source_len,
            pad_to_max_length=True,
            truncation=True,
            padding="max_length",
            return_tensors="pt",
        )
        target = self.tokenizer.batch_encode_plus(
            [target_text_org],
            max_length=self.summ_len,
            pad_to_max_length=True,
            truncation=True,
            padding="max_length",
            return_tensors="pt",
        )
        source_ids = source["input_ids"].squeeze()
        source_mask = source["attention_mask"].squeeze()
        target_ids = target["input_ids"].squeeze()
        image_ids = torch.tensor(source_image).squeeze()
        # NOTE(review): vis_attention_mask is built but never returned —
        # presumably a leftover; confirm before removing.
        vis_attention_mask = torch.tensor([1]).squeeze()
        act_ids = torch.tensor(action).squeeze()
        touch_point = torch.tensor(touch_point).squeeze()
        lift_point = torch.tensor(lift_point).squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "image_ids": image_ids,
            "labels": target_ids,
            "target_act": act_ids,
            "target_touch": touch_point,
            "target_lift": lift_point
        }
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/android_exp/models/Auto-UI/fetch_features.py | exps/android_exp/models/Auto-UI/fetch_features.py | import action_type, action_matching
import tensorflow as tf
import numpy as np
from tqdm import tqdm
import json
import jax.numpy as jnp
import argparse
import pickle
import torch
import tensorflow as tf
from PIL import Image
from transformers import AutoProcessor, Blip2Model
# Run BLIP-2 image-feature extraction on GPU when available.
device = "cuda" if torch.cuda.is_available() else "cpu"
model = Blip2Model.from_pretrained("Salesforce/blip2-opt-2.7b", torch_dtype=torch.float16)
model.to(device)
processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
# Public GCS locations of the raw AitW TFRecord shards, keyed by subset name.
dataset_directories = {
    'general': 'gs://gresearch/android-in-the-wild/general/*',
    'google_apps': 'gs://gresearch/android-in-the-wild/google_apps/*',
    'install': 'gs://gresearch/android-in-the-wild/install/*',
    'single': 'gs://gresearch/android-in-the-wild/single/*',
    'web_shopping': 'gs://gresearch/android-in-the-wild/web_shopping/*',
}
def _decode_image(
    example,
    image_height,
    image_width,
    image_channels,
):
    """Decodes image from example and reshapes.

    Args:
      example: Example which contains encoded image.
      image_height: The height of the raw image.
      image_width: The width of the raw image.
      image_channels: The number of channels in the raw image.

    Returns:
      Decoded and reshaped image tensor.
    """
    raw = tf.io.decode_raw(
        example.features.feature['image/encoded'].bytes_list.value[0],
        out_type=tf.uint8,
    )
    # Cast the stored dimensions to int32 so they can form a reshape target.
    shape = (
        tf.cast(image_height, tf.int32),
        tf.cast(image_width, tf.int32),
        tf.cast(image_channels, tf.int32),
    )
    return tf.reshape(raw, shape)
def parse_episode(
    episode,
    get_images = False,
    get_annotations = False,
    get_actions = False,
):
    """Convert one AitW episode (list of tf.train.Example) into plain dicts.

    Each step dict always carries "goal" and "step_id". When requested, it
    also gets pooled BLIP-2 image features ("image"), UI annotations
    ("ui_positions"/"ui_text"/"ui_type"), and the recorded action
    ("result_touch_yx"/"result_lift_yx"/"result_action").
    """
    parsed_episode = []
    for i, ex in enumerate(episode):
        goal = ex.features.feature['goal_info'].bytes_list.value[0].decode('utf-8')
        step_id = ex.features.feature['step_id'].int64_list.value[0]
        # episode_id = ex.features.feature['episode_id'].bytes_list.value[0].decode('utf-8')
        output_ep = {
            "goal": goal,
            "step_id": step_id
        }
        image_height = ex.features.feature['image/height'].int64_list.value[0]
        image_width = ex.features.feature['image/width'].int64_list.value[0]
        image_channels = ex.features.feature['image/channels'].int64_list.value[0]
        if get_images:
            # Decode the raw screenshot and embed it with the (frozen) BLIP-2
            # vision tower; only the pooled feature vector is kept.
            image = _decode_image(ex, image_height, image_width, image_channels)
            image = image.numpy()
            image = Image.fromarray(image).convert('RGB')
            with torch.no_grad():
                inputs = processor(images=image, return_tensors="pt").to(device, torch.float16)
                image_features = model.get_image_features(**inputs).pooler_output[0]
                image_features = image_features.detach().cpu()
            output_ep["image"] = image_features
        if get_annotations:
            flattened_positions = np.array(
                ex.features.feature['image/ui_annotations_positions'].float_list.value
            )
            ui_text = ex.features.feature['image/ui_annotations_text'].bytes_list.value
            ui_text = [value.decode('utf-8') for value in ui_text]
            ui_type = ex.features.feature['image/ui_annotations_ui_types'].bytes_list.value
            ui_type = [value.decode('utf-8') for value in ui_type]
            positions = np.reshape(flattened_positions, (-1, 4)) #(y, x, height, width)
            output_ep["ui_positions"] = positions
            output_ep["ui_text"] = ui_text
            output_ep["ui_type"] = ui_type
        if get_actions:
            touch_y, touch_x = ex.features.feature['results/yx_touch'].float_list.value
            lift_y, lift_x = ex.features.feature['results/yx_lift'].float_list.value
            ex_action_type = ex.features.feature['results/action_type'].int64_list.value[0]
            # Store the action type by name (e.g. "DUAL_POINT") rather than code.
            ex_action_type = action_type.ActionType(ex_action_type).name
            type_text = (ex.features.feature['results/type_action'].bytes_list.value[0].decode('utf-8'))
            output_ep["result_touch_yx"] = [touch_y, touch_x]
            output_ep["result_lift_yx"] = [lift_y, lift_x]
            output_ep["result_action"] = [ex_action_type, type_text]
        parsed_episode.append(output_ep)
    return parsed_episode
def fetch_episode(dataset_name, data_split, get_images, get_annotations, get_actions):
    """Stream AitW TFRecords for one subset and bucket parsed episodes by split.

    Args:
        dataset_name: Key into `dataset_directories` (e.g. "general").
        data_split: Path to a JSON file with "train"/"val"/"test" episode-id lists.
        get_images: Forwarded to parse_episode (extract BLIP-2 features).
        get_annotations: Forwarded to parse_episode (extract UI annotations).
        get_actions: Forwarded to parse_episode (extract recorded actions).

    Returns:
        Dict with keys "train"/"val"/"test", each a list of
        {"episode_id": ..., "data": parsed_steps} entries.
    """
    filenames = tf.io.gfile.glob(dataset_directories[dataset_name])
    dataset = tf.data.TFRecordDataset(filenames, compression_type='GZIP').as_numpy_iterator()
    with open(data_split, "r") as rp:
        split_data = json.load(rp)
    train_data = split_data["train"]
    val_data = split_data["val"]
    test_data = split_data["test"]
    print(f"train_data size: {len(train_data)}, val_data size: {len(val_data)}, test_data size: {len(test_data)}")
    all_parsed_episode = {
        "train": [],
        "val": [],
        "test": [],
    }
    total_screens = {
        "train": 0,
        "val": 0,
        "test": 0,
    }

    def _commit(done_id, examples):
        # Parse one finished episode and append it to its split's bucket.
        # Bug fix vs. the original: on a parse failure the old code fell
        # through and appended a stale `output` under a stale `curr_split`;
        # bad episodes are now skipped cleanly.
        try:
            output = parse_episode(
                examples,
                get_images=get_images,
                get_annotations=get_annotations,
                get_actions=get_actions,
            )
        except Exception as exc:
            print(exc)
            return
        if done_id in train_data:
            curr_split = "train"
        elif done_id in val_data:
            curr_split = "val"
        elif done_id in test_data:
            curr_split = "test"
        else:
            # The original `assert "error episode"` could never fire (a
            # non-empty string is always truthy); skip unknown episodes loudly.
            print(f"episode {done_id} not found in split file; skipped")
            return
        all_parsed_episode[curr_split].append({"episode_id": done_id, "data": output})
        total_screens[curr_split] += len(examples)

    episode = []
    episode_id = None
    for d in tqdm(dataset):
        ex = tf.train.Example()
        ex.ParseFromString(d)
        ep_id = ex.features.feature['episode_id'].bytes_list.value[0].decode('utf-8')
        if episode_id is None:
            episode_id = ep_id
            episode.append(ex)
        elif ep_id == episode_id:
            episode.append(ex)
        else:
            # Episode boundary reached: commit the finished one, start the next.
            _commit(episode_id, episode)
            episode_id = ep_id
            episode = [ex]
    # Commit the trailing episode (the loop only commits on boundaries).
    if len(episode) > 0:
        _commit(episode_id, episode)
    print(len(all_parsed_episode["train"]), total_screens["train"], len(all_parsed_episode["val"]), total_screens["val"], len(all_parsed_episode["test"]), total_screens["test"])
    return all_parsed_episode
def parse_args():
    """Build and parse the CLI options for the feature-extraction script."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', type=str, default='general')
    parser.add_argument("--split_file", type=str, default="dataset/general_texts_splits.json")
    parser.add_argument('--output_dir', type=str, default='dataset/t5/general_parsed_episode_t5_clip')
    # NOTE: these default to True, so passing the flag is a no-op — they
    # cannot be switched off from the command line.
    parser.add_argument('--get_images', default=True, action='store_true')
    parser.add_argument('--get_annotations', default=True, action='store_true')
    parser.add_argument('--get_actions', default=True, action='store_true')
    return parser.parse_args()
if __name__ == '__main__':
    args = parse_args()
    print('====Input Arguments====')
    print(json.dumps(vars(args), indent=2, sort_keys=False))
    # Parse every episode once, then persist each split to its own pickle.
    all_parsed_episode = fetch_episode(args.dataset, args.split_file, args.get_images, args.get_annotations, args.get_actions)
    with open(f"{args.output_dir}_train.obj", "wb") as wp:
        pickle.dump(all_parsed_episode["train"],wp)
    with open(f"{args.output_dir}_val.obj", "wb") as wp:
        pickle.dump(all_parsed_episode["val"],wp)
    with open(f"{args.output_dir}_test.obj", "wb") as wp:
        pickle.dump(all_parsed_episode["test"],wp)
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/android_exp/models/Auto-UI/model.py | exps/android_exp/models/Auto-UI/model.py | '''
Adapted from https://github.com/huggingface/transformers
'''
from transformers import T5Config, T5ForConditionalGeneration
from transformers.models.t5.modeling_t5 import T5Stack, __HEAD_MASK_WARNING_MSG
import copy
from transformers.modeling_outputs import BaseModelOutput, Seq2SeqLMOutput
from typing import Optional, Tuple
import warnings
from typing import Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
class T5ForMultimodalGeneration(T5ForConditionalGeneration):
    """T5 seq2seq model that fuses image features into the encoder output via
    single-head cross-attention plus a learned sigmoid gate (Auto-UI)."""

    _keys_to_ignore_on_load_missing = [
        r"encoder.embed_tokens.weight",
        r"decoder.embed_tokens.weight",
        r"lm_head.weight",
    ]
    _keys_to_ignore_on_load_unexpected = [
        r"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight",
    ]

    def __init__(self, config: T5Config, img_dim, num_actions=12):
        """
        Args:
            config: Standard T5 configuration.
            img_dim: Dimensionality of the incoming image features; projected
                down to config.d_model by `image_dense`.
            num_actions: Unused in this class — presumably kept for
                signature/checkpoint compatibility; TODO confirm before removal.
        """
        super().__init__(config)
        self.model_dim = config.d_model
        self.shared = nn.Embedding(config.vocab_size, config.d_model)
        # Projects image features into the text hidden space.
        self.image_dense = nn.Linear(img_dim, config.d_model)
        # Single-head attention: text hidden states attend over image tokens.
        self.mha_layer = torch.nn.MultiheadAttention(embed_dim=config.hidden_size, kdim=config.hidden_size, vdim=config.hidden_size, num_heads=1, batch_first=True)
        # Per-position gate deciding how much image context to mix in.
        self.gate_dense = nn.Linear(2*config.hidden_size, config.hidden_size)
        self.sigmoid = nn.Sigmoid()

        encoder_config = copy.deepcopy(config)
        encoder_config.is_decoder = False
        encoder_config.use_cache = False
        encoder_config.is_encoder_decoder = False
        self.encoder = T5Stack(encoder_config, self.shared)

        decoder_config = copy.deepcopy(config)
        decoder_config.is_decoder = True
        decoder_config.is_encoder_decoder = False
        decoder_config.num_layers = config.num_decoder_layers
        self.decoder = T5Stack(decoder_config, self.shared)

        self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

        # Model parallel
        self.model_parallel = False
        self.device_map = None
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        image_ids=None,
        attention_mask: Optional[torch.FloatTensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.BoolTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        decoder_head_mask: Optional[torch.FloatTensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:
        """Standard T5 seq2seq forward pass, extended with image fusion:
        after encoding the text, `image_ids` are projected, attended over by
        the text hidden states, and gated into the encoder output before
        decoding. Adapted from HF's T5ForConditionalGeneration.forward."""
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
        if head_mask is not None and decoder_head_mask is None:
            if self.config.num_layers == self.config.num_decoder_layers:
                warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)
                decoder_head_mask = head_mask
        # Encode if needed (training, first prediction pass)
        if encoder_outputs is None:
            # Convert encoder inputs in embeddings if needed
            encoder_outputs = self.encoder(
                input_ids=input_ids,
                attention_mask=attention_mask,
                inputs_embeds=inputs_embeds,
                head_mask=head_mask,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
        elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
            encoder_outputs = BaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )
        hidden_states = encoder_outputs[0]

        # --- Image fusion: project, attend, then gate into the text states. ---
        image_embedding = self.image_dense(image_ids)
        # use pooled image features
        if len(image_embedding.size()) == 2:
            image_embedding = image_embedding.unsqueeze(1)
        image_att, _ = self.mha_layer(hidden_states, image_embedding, image_embedding)
        merge = torch.cat([hidden_states, image_att], dim=-1)
        gate = self.sigmoid(self.gate_dense(merge))
        # Convex combination: gate=0 keeps text-only states, gate=1 uses image attention.
        hidden_states = (1 - gate) * hidden_states + gate * image_att

        if self.model_parallel:
            torch.cuda.set_device(self.decoder.first_device)
        if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
            # get decoder inputs from shifting lm labels to the right
            decoder_input_ids = self._shift_right(labels)
        # Set device for model parallelism
        if self.model_parallel:
            torch.cuda.set_device(self.decoder.first_device)
            hidden_states = hidden_states.to(self.decoder.first_device)
            if decoder_input_ids is not None:
                decoder_input_ids = decoder_input_ids.to(self.decoder.first_device)
            if attention_mask is not None:
                attention_mask = attention_mask.to(self.decoder.first_device)
            if decoder_attention_mask is not None:
                decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device)
        # Decode
        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            inputs_embeds=decoder_inputs_embeds,
            past_key_values=past_key_values,
            encoder_hidden_states=hidden_states,
            encoder_attention_mask=attention_mask,
            head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = decoder_outputs[0]
        # Set device for model parallelism
        if self.model_parallel:
            torch.cuda.set_device(self.encoder.first_device)
            self.lm_head = self.lm_head.to(self.encoder.first_device)
            sequence_output = sequence_output.to(self.lm_head.weight.device)
        if self.config.tie_word_embeddings:
            # Rescale output before projecting on vocab
            # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
            sequence_output = sequence_output * (self.model_dim**-0.5)
        lm_logits = self.lm_head(sequence_output)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss(ignore_index=-100)
            loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
            # TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666
        if not return_dict:
            output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs
            return ((loss,) + output) if loss is not None else output
        return Seq2SeqLMOutput(
            loss=loss,
            logits=lm_logits,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )
def prepare_inputs_for_generation(
self, decoder_input_ids, past=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs
):
# cut decoder_input_ids if past is used
if past is not None:
decoder_input_ids = decoder_input_ids[:, -1:]
output = {
"input_ids": None, # encoder_outputs is defined. input_ids not needed
"encoder_outputs": encoder_outputs,
"past_key_values": past,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"use_cache": use_cache, # change this to avoid caching (presumably for debugging)
}
output["image_ids"] = kwargs['image_ids']
return output
def test_step(self, tokenizer, batch, **kwargs):
device = next(self.parameters()).device
input_ids = batch['input_ids'].to(device)
image_ids = batch['image_ids'].to(device)
output = self.generate(
input_ids=input_ids,
image_ids=image_ids,
**kwargs
)
generated_sents = tokenizer.batch_decode(output, skip_special_tokens=True)
targets = tokenizer.batch_decode(batch['labels'], skip_special_tokens=True)
result = {}
result['preds'] = generated_sents
result['targets'] = targets
return result | python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/android_exp/models/Auto-UI/main.py | exps/android_exp/models/Auto-UI/main.py | import os
import numpy as np
import torch
import os
import re
import json
import argparse
import random
from transformers import AutoTokenizer, DataCollatorForSeq2Seq, Seq2SeqTrainingArguments, Seq2SeqTrainer
from model import T5ForMultimodalGeneration
from utils_data import AITWDatasetImg, load_data
from rich.table import Column, Table
from rich import box
from rich.console import Console
console = Console(record=True)
import action_matching, action_type
import evaluate
def parse_args():
    """Build and parse command-line arguments for Auto-UI training/evaluation."""
    p = argparse.ArgumentParser()
    # Data and model locations.
    p.add_argument('--data_root', type=str, default='dataset/blip/general_blip')
    p.add_argument('--output_dir', type=str, default='experiments')
    p.add_argument('--model', type=str, default='declare-lab/flan-alpaca-base')
    p.add_argument('--data_ratio', type=float, default=None)
    p.add_argument('--eval_name', type=str, default=None, help='the saved subset name used for evaluation')
    p.add_argument('--local_rank', type=int, default=-1)
    # Optimization hyper-parameters.
    p.add_argument('--epoch', type=int, default=2)
    p.add_argument('--lr', type=float, default=5e-5)
    p.add_argument('--warmup_ratio', type=float, default=0.1)
    p.add_argument('--bs', type=int, default=1)
    p.add_argument('--debug_num', type=int, default=None)
    p.add_argument('--input_len', type=int, default=512)
    p.add_argument('--output_len', type=int, default=256)
    p.add_argument('--img_dim', type=int, default=1408)
    p.add_argument('--eval_bs', type=int, default=16)
    p.add_argument('--eval_acc', type=int, default=None, help='evaluate accumulation step')
    p.add_argument('--all_data', type=float, default=None, help='whether using all the data for training. Set the ratio for google apps to save computation')
    p.add_argument('--eval_subset', type=str, default=None, help='use which subset for evaluation/test when training with all data')
    # Input-construction options.
    p.add_argument('--use_history', type=int, default=8, help='use textual action history')
    p.add_argument('--use_img_history', action='store_true', help='use screen history')
    p.add_argument('--use_future', type=int, default=16, help='planning the future actions before giving the current action')
    p.add_argument('--use_layout', action='store_true', help='use annotated layout information')
    p.add_argument('--transform_axis', default=True, action='store_true', help='use coordinate normalization')
    p.add_argument('--use_generate', default=True, action='store_true', help='only for baseline to improve inference speed')
    # Evaluation / bookkeeping.
    p.add_argument('--final_eval', action='store_true', help='only evaluate the model at the final epoch')
    p.add_argument('--user_msg', type=str, default="debug", help='experiment type in the save_dir')
    p.add_argument('--img_type', type=str, default="blip", help='type of image features')
    p.add_argument('--evaluate_dir', type=str, default=None, help='the directory of model for evaluation')
    p.add_argument('--seed', type=int, default=42, help='random seed')
    return p.parse_args()
if __name__ == '__main__':
    # Rich table used to pretty-print training progress to the console.
    training_logger = Table(
        Column("Epoch", justify="center"),
        Column("Steps", justify="center"),
        Column("Loss", justify="center"),
        title="Training Status",
        pad_edge=False,
        box=box.ASCII,
    )
    args = parse_args()
    print("args",args)
    print('====Input Arguments====')
    print(json.dumps(vars(args), indent=2, sort_keys=False))
    # Seed all RNGs for reproducibility.
    random.seed(args.seed)
    torch.manual_seed(args.seed)  # pytorch random seed
    np.random.seed(args.seed)  # numpy random seed
    torch.backends.cudnn.deterministic = True
    # NOTE(review): os.mkdir fails if parent dirs are missing — makedirs would be safer; confirm.
    if not os.path.exists(args.output_dir):
        os.mkdir(args.output_dir)
    # In evaluation mode, load the model to evaluate instead of the base model.
    if args.evaluate_dir is not None:
        args.model = args.evaluate_dir
    # NOTE(review): hardcoded local weights path; this tokenizer is overwritten
    # a few lines below by AutoTokenizer.from_pretrained(args.model).
    tokenizer = AutoTokenizer.from_pretrained("/home/jiayipan/code/ICML_GUI/Auto-UI/weights/Auto-UI-Base")
    training_data = load_data(args, "train")
    train_set = AITWDatasetImg(
        training_data,
        tokenizer,
        args.input_len,
        args.output_len
    )
    # NOTE(review): leftover interactive debugging — this blocks execution at an
    # IPython prompt and should presumably be removed before running unattended.
    from IPython import embed; embed()
    tokenizer = AutoTokenizer.from_pretrained(args.model)
    console.log(f"""[Model]: Loading {args.model}...\n""")
    console.log(f"[Data]: Reading data...\n")
    # Reuse the evaluation directory as save_dir, or derive one from the config.
    if args.evaluate_dir is not None:
        save_dir = args.evaluate_dir
    else:
        model_name = args.model.replace("/","-")
        gpu_count = torch.cuda.device_count()
        save_dir = f"{args.output_dir}/{args.user_msg}_{model_name}_{args.img_type}_lr{args.lr}_bs{args.bs * gpu_count}_ip{args.input_len}_op{args.output_len}_ep{args.epoch}"
        if not os.path.exists(save_dir):
            os.mkdir(save_dir)
    print(save_dir)
model = T5ForMultimodalGeneration.from_pretrained(args.model, args.img_dim)
eval_data = load_data(args, "val")
eval_set = AITWDatasetImg(
eval_data,
tokenizer,
args.input_len,
args.output_len
)
test_data = load_data(args, "test")
test_set = AITWDatasetImg(
test_data,
tokenizer,
args.input_len,
args.output_len
)
datacollator = DataCollatorForSeq2Seq(tokenizer)
print("model parameters: ", model.num_parameters())
# rougel for rationale generation
metric = evaluate.load("rouge")
def compute_metrics_rouge(eval_preds):
preds, targets = eval_preds
if isinstance(preds, tuple):
preds = preds[0]
preds= np.where(preds != -100, preds, tokenizer.pad_token_id)
preds = tokenizer.batch_decode(preds, skip_special_tokens=True, clean_up_tokenization_spaces=True)
targets = tokenizer.batch_decode(targets, skip_special_tokens=True, clean_up_tokenization_spaces=True)
result = metric.compute(predictions=preds, references=targets)
result = {k: round(v * 100, 4) for k, v in result.items()}
prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds]
result["gen_len"] = np.mean(prediction_lens)
return result
    # only use the last model for evaluation to save time
    if args.final_eval:
        # Final-eval mode: no per-epoch evaluation; keep the last checkpoints.
        training_args = Seq2SeqTrainingArguments(
            save_dir,
            do_train=True if args.evaluate_dir is None else False,
            do_eval=False,
            warmup_ratio=args.warmup_ratio,
            evaluation_strategy="no",
            logging_strategy="steps",
            save_strategy="epoch",
            save_total_limit = 2,
            learning_rate= args.lr,
            eval_accumulation_steps=args.eval_acc,
            per_device_train_batch_size=args.bs,
            per_device_eval_batch_size=args.eval_bs,
            weight_decay=0.01,
            num_train_epochs=args.epoch,
            predict_with_generate=args.use_generate,
            generation_max_length=args.output_len,
            report_to="none",
            local_rank=args.local_rank
        )
    # evaluate at each epoch
    else:
        # Per-epoch evaluation, tracking rougeL to reload the best checkpoint
        # at the end of training.
        training_args = Seq2SeqTrainingArguments(
            save_dir,
            do_train=True if args.evaluate_dir is None else False,
            do_eval=True,
            warmup_ratio=args.warmup_ratio,
            evaluation_strategy="epoch",
            logging_strategy="steps",
            save_strategy="epoch",
            save_total_limit = 2,
            learning_rate= args.lr,
            eval_accumulation_steps=args.eval_acc,
            per_device_train_batch_size=args.bs,
            per_device_eval_batch_size=args.eval_bs,
            weight_decay=0.01,
            num_train_epochs=args.epoch,
            metric_for_best_model="rougeL",
            predict_with_generate=args.use_generate,
            generation_max_length=args.output_len,
            load_best_model_at_end=True,
            report_to="none",
            local_rank=args.local_rank
        )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        train_dataset=train_set,
        eval_dataset=eval_set,
        data_collator=datacollator,
        tokenizer=tokenizer,
        compute_metrics = compute_metrics_rouge
    )
    # Train only when not in pure-evaluation mode.
    if args.evaluate_dir is None:
        trainer.train()
        trainer.save_model(save_dir)
    # Run generation on the test set and score action-matching accuracy.
    metrics = {}
    predict_results = trainer.predict(test_dataset=test_set, max_length=args.output_len)
    # Only the main process decodes and writes results in distributed runs.
    if trainer.is_world_process_zero():
        preds, targets = predict_results.predictions, predict_results.label_ids
        preds= np.where(preds != -100, preds, tokenizer.pad_token_id)
        preds = tokenizer.batch_decode(preds, skip_special_tokens=True, clean_up_tokenization_spaces=True)
        targets = tokenizer.batch_decode(targets, skip_special_tokens=True, clean_up_tokenization_spaces=True)
        action_correct = 0  # full action match (type + coordinates)
        text_correct = 0    # action match AND typed text overlaps
        type_correct = 0    # action type match only
        reference_test_positions = test_set.anno_positions
        output_data = []
        # Everything after the "Action Decision:" marker holds the action dict.
        pattern = r'(?<=Action Decision:\s).*'
        assert len(preds) == len(targets) == len(reference_test_positions)
        for idx, pred in enumerate(preds):
            # Parse the ground-truth action; skip samples whose target cannot
            # be parsed.
            # NOTE(review): eval() on text is fragile/unsafe; the text comes
            # from the dataset/model here, but ast.literal_eval would be safer.
            try:
                result = re.search(pattern, targets[idx])
                target_text = result.group(0)
                target_text = target_text.strip()
                reference = eval("{" + target_text + "}")
            except:
                print("reference error")
                continue
            # Parse the predicted action.
            # NOTE(review): on failure this only rebinds `pred` to a fallback
            # string — action_1_touch_yx / action_1_wrap etc. keep their values
            # from the previous iteration (or are undefined on the first one),
            # which skews scoring; confirm this is intended.
            try:
                result = re.search(pattern, preds[idx])
                pred_text = result.group(0)
                pred_text = pred_text.strip()
                pred = eval("{" + pred_text + "}")
                action_1_touch_yx = eval(pred["touch_point"])
                action_1_lift_yx = eval(pred["lift_point"])
                action_1_action_type = action_type.ActionType[pred["action_type"]].value
                action_1_typed_text = pred["typed_text"].lower()
                action_1_typed_text = action_1_typed_text.strip()
                action_1_wrap = f'"action_type": "{action_1_action_type}", "touch_point": "{action_1_touch_yx}", "lift_point": "{action_1_lift_yx}", "typed_text": "{action_1_typed_text}"'
                action_1_wrap = action_1_wrap.replace('"', "'")
            except:
                pred = '{ "action_type": "TYPE", "touch_point": "[-1.0, -1.0]", "lift_point": "[-1.0, -1.0]", "typed_text": "Invalid"}'
            action_2_touch_yx = eval(reference["touch_point"])
            action_2_lift_yx = eval(reference["lift_point"])
            action_2_action_type = action_type.ActionType[reference["action_type"]].value
            action_2_typed_text = reference["typed_text"].lower()
            action_2_wrap = f'"action_type": "{action_2_action_type}", "touch_point": "{action_2_touch_yx}", "lift_point": "{action_2_lift_yx}", "typed_text": "{action_2_typed_text}"'
            action_2_wrap = action_2_wrap.replace('"', "'")
            annotation_positions = reference_test_positions[idx]
            # Compare predicted vs reference action using the AitW matching rules.
            try:
                check_match = action_matching.check_actions_match(
                    action_1_touch_yx,
                    action_1_lift_yx,
                    action_1_action_type,
                    action_2_touch_yx,
                    action_2_lift_yx,
                    action_2_action_type,
                    annotation_positions
                )
            except Exception as exc:
                print(idx, action_1_touch_yx, action_1_lift_yx)
                check_match = False
                match_label = "invalid"
            if check_match:
                action_correct += 1
                match_label = 1
            else:
                match_label = 0
            # Text is counted correct when the action matches and either typed
            # string contains the other.
            if check_match and (action_1_typed_text in action_2_typed_text or action_2_typed_text in action_1_typed_text):
                text_correct += 1
            if action_1_action_type == action_2_action_type:
                type_correct += 1
            action_data = {"pred": action_1_wrap, "target": action_2_wrap, "match_label": match_label}
            output_data.append(action_data)
        # Aggregate accuracies as percentage strings plus raw counts.
        metrics["accuracy"] = "{:.2f}".format(action_correct/len(targets) * 100)
        metrics["text_acc"] = "{:.2f}".format(text_correct/len(targets) * 100)
        metrics["type_acc"] = "{:.2f}".format(type_correct/len(targets) * 100)
        metrics["action_correct"] = action_correct
        metrics["text_correct"] = text_correct
        metrics["type_correct"] = type_correct
        metrics["total_num"] = len(targets)
        print(metrics)
        output_data = {
            "metrics": metrics,
            "data": output_data
        }
        print(save_dir)
        # Write per-sample predictions and metrics next to the checkpoint.
        if args.eval_name:
            output_prediction_file = os.path.join(save_dir,f"predictions_ans_test_{args.eval_name}.json")
        else:
            output_prediction_file = os.path.join(save_dir,"predictions_ans_test.json")
        with open(output_prediction_file, "w") as writer:
            writer.write(json.dumps(output_data, indent=4))
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/android_exp/models/Auto-UI/serve_large.py | exps/android_exp/models/Auto-UI/serve_large.py | import os
import numpy as np
import torch
import os
import re
import json
import argparse
import random
from transformers import AutoTokenizer, DataCollatorForSeq2Seq, Seq2SeqTrainingArguments, Seq2SeqTrainer
from model import T5ForMultimodalGeneration
from infer_utils import ImageFeatureExtractor
from PIL import Image
# Module-level model setup: loading happens at import time.
img_dim = 1408  # image-feature dimension expected by T5ForMultimodalGeneration
# NOTE(review): hardcoded local weights path — parameterize before sharing.
model_path = "/home/jiayipan/code/ICML_GUI/Auto-UI/weights/Auto-UI-Large"
model = T5ForMultimodalGeneration.from_pretrained(model_path, img_dim).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_path)
img_extractor = ImageFeatureExtractor()
def generate(text: str, imgage: Image):
    """Run Auto-UI on one (prompt, screenshot) pair and return the decoded action.

    Args:
        text: full prompt string (history + goal).
        imgage: screenshot as a PIL image (parameter name kept for
            interface compatibility despite the typo).
    """
    encoded = tokenizer(text, return_tensors="pt")
    # BLIP-2 features for the screenshot, moved to the GPU as float32.
    feats = img_extractor.to_feat(imgage).float().cuda()
    generated = model.generate(
        input_ids=encoded["input_ids"].to("cuda"),
        attention_mask=encoded["attention_mask"].to("cuda"),
        image_ids=feats.unsqueeze(0).to("cuda"),
        max_length=128,
    )
    return tokenizer.decode(generated[0], skip_special_tokens=True)
import gradio as gr
def main():
    """Launch a public Gradio demo wrapping `generate`."""
    interface = gr.Interface(
        fn=generate,
        inputs=["textbox", "image"],
        outputs="text",
    )
    interface.launch(share=True)
if __name__ == "__main__":
    main()
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/android_exp/models/Auto-UI/infer_utils.py | exps/android_exp/models/Auto-UI/infer_utils.py | import torch
from PIL import Image
from transformers import AutoProcessor, Blip2Model, AutoTokenizer
from model import T5ForMultimodalGeneration
class ImageFeatureExtractor:
    """Frozen BLIP-2 encoder that turns PIL images into pooled feature tensors."""

    def __init__(self):
        # Prefer the GPU when one is available.
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        # Load the BLIP-2 model and its matching processor.
        self.model = Blip2Model.from_pretrained("Salesforce/blip2-opt-2.7b").to(self.device)
        self.processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")

    def to_feat(self, image: Image.Image):
        """Converts a PIL image to a feature representation using the BLIP2 model.
        Args:
            image: A PIL.Image object representing the image to convert.
        Returns:
            A tensor representing the image feature (detached, on CPU).
        """
        with torch.no_grad():
            batch = self.processor(images=image, return_tensors="pt").to(self.device)
            pooled = self.model.get_image_features(**batch).pooler_output[0]
        # Detach from the graph and hand back a CPU tensor.
        return pooled.detach().cpu()
class AutoUI:
    """Convenience wrapper bundling the BLIP-2 feature extractor with Auto-UI-Base."""

    def __init__(self):
        # Initialize the image feature extractor and the multimodal T5 model.
        img_dim = 1408  # feature dimension the checkpoint was trained with
        # NOTE(review): hardcoded local weights path — parameterize before sharing.
        model_path = "/home/jiayipan/code/ICML_GUI/Auto-UI/weights/Auto-UI-Base"
        self.image_feature_extractor = ImageFeatureExtractor()
        self.tokenizer = AutoTokenizer.from_pretrained(model_path)
        self.model = T5ForMultimodalGeneration.from_pretrained(model_path, img_dim)

    def generate(self, image: Image.Image, text: str):
        """Generate and decode one action string for a (screenshot, prompt) pair."""
        # Convert the image to a feature representation.
        image_features = self.image_feature_extractor.to_feat(image)
        # Encode the text prompt.
        inputs = self.tokenizer(text, return_tensors="pt")
        # Generate the output.
        # BUGFIX: the model consumes the image tensor via the `image_ids` kwarg
        # (see prepare_inputs_for_generation and the serving scripts); the
        # previous `image_features=` keyword was never picked up.
        outputs = self.model.generate(
            input_ids=inputs["input_ids"].to(self.model.device),
            attention_mask=inputs["attention_mask"].to(self.model.device),
            image_ids=image_features.unsqueeze(0).to(self.model.device),
            max_length=128,
            num_beams=4,
            early_stopping=True,
            do_sample=True,
            top_k=50,
            top_p=0.95,
            temperature=1.0,
            num_return_sequences=1,
            decoder_start_token_id=self.tokenizer.pad_token_id,
        )
        # Decode the first (and only) returned sequence.
        return self.tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
from enum import Enum
from dataclasses import dataclass
from typing import Tuple
class ActionType(Enum):
    """Discrete action categories emitted by the agent."""
    Idle=0
    DualPoint=1
    Type=2
    GoBack=3
    GoHome=4
    Enter=5
    TaskComplete=6
    TaskImpossible=7
@dataclass
class AndroidAction():
    """One UI action: a type plus optional coordinates / typed text.

    Coordinates are normalized (x, y) pairs; fields are None when not
    applicable to the action type.
    """
    action_type: ActionType
    touch_point: Tuple[float, float] = None
    lift_point: Tuple[float, float] = None
    typed_text: str = None
    def __str__(self):
        """Human-readable summary; omits fields that are None."""
        components = [f"Action Type: {self.action_type.name}"]
        if self.touch_point:
            touch_point_str = f"({self.touch_point[0]:.4f}, {self.touch_point[1]:.4f})"
            components.append(f"Touch Point: {touch_point_str}")
        if self.lift_point:
            lift_point_str = f"({self.lift_point[0]:.4f}, {self.lift_point[1]:.4f})"
            components.append(f"Lift Point: {lift_point_str}")
        if self.typed_text:
            components.append(f"Typed Text: '{self.typed_text}'")
        return ", ".join(components)
    def to_act(self):
        # Placeholder: no conversion implemented yet.
        pass
    def to_autoui(self):
        """Serialize to the Auto-UI action-string format.

        Returns None for action types without a mapping (e.g. Idle).
        """
        if self.action_type == ActionType.DualPoint:
            # BUGFIX: the lift point previously repeated lift_point[0] for both
            # coordinates; the y coordinate must come from lift_point[1].
            return f'"action_type": "DUAL_POINT", "touch_point": "[{self.touch_point[0]:.4f}, {self.touch_point[1]:.4f}]", "lift_point": "[{self.lift_point[0]:.4f}, {self.lift_point[1]:.4f}]", "typed_text": ""'
        elif self.action_type == ActionType.Type:
            return f'"action_type": "TYPE", "touch_point": "[-1.0, -1.0]", "lift_point": "[-1.0, -1.0]", "typed_text": "{self.typed_text}"'
        elif self.action_type == ActionType.GoBack:
            return f'"action_type": "PRESS_BACK", "touch_point": "[-1.0, -1.0]", "lift_point": "[-1.0, -1.0]", "typed_text": ""'
        elif self.action_type == ActionType.GoHome:
            return f'"action_type": "PRESS_HOME", "touch_point": "[-1.0, -1.0]", "lift_point": "[-1.0, -1.0]", "typed_text": ""'
        elif self.action_type == ActionType.Enter:
            return f'"action_type": "PRESS_ENTER", "touch_point": "[-1.0, -1.0]", "lift_point": "[-1.0, -1.0]", "typed_text": ""'
        elif self.action_type == ActionType.TaskComplete or self.action_type == ActionType.TaskImpossible:
            return f'"action_type": "TASK_COMPLETE", "touch_point": "[-1.0, -1.0]", "lift_point": "[-1.0, -1.0]", "typed_text": ""'
def format_prompt(history, goal):
    """Serialize past actions and the goal into the Auto-UI prompt format."""
    serialized = [f"{act.to_autoui()} " for act in history]
    return "Previous Actions: " + "".join(serialized) + f"Goal: {goal}</s>"
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/android_exp/models/CogAgent/web_demo_simple.py | exps/android_exp/models/CogAgent/web_demo_simple.py | """
This script is a simple web demo of the CogVLM and CogAgent models, designed for easy and quick demonstrations.
For a more sophisticated user interface, users are encouraged to refer to the 'composite_demo',
which is built with a more aesthetically pleasing Streamlit framework.
Usage:
- Use the interface to upload images and enter text prompts to interact with the models.
Requirements:
- Gradio (3.x only; 4.x is not supported) and other necessary Python dependencies must be installed.
- Proper model checkpoints should be accessible as specified in the script.
Note: This demo is ideal for a quick showcase of the CogVLM and CogAgent models. For a more comprehensive and interactive
experience, refer to the 'composite_demo'.
"""
import gradio as gr
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from PIL import Image
import torch
import time
from sat.model.mixins import CachedAutoregressiveMixin
from sat.mpu import get_model_parallel_world_size
from sat.model import AutoModel
from utils.utils import chat, llama2_tokenizer, llama2_text_processor_inference, get_image_processor, parse_response
from utils.models import CogAgentModel, CogVLMModel
# Static HTML snippets shown in the Gradio UI.
DESCRIPTION = '''<h1 style='text-align: center'> <a href="https://github.com/THUDM/CogVLM">CogVLM / CogAgent</a> </h1>'''
NOTES = '<h3> This app is adapted from <a href="https://github.com/THUDM/CogVLM">https://github.com/THUDM/CogVLM</a>. It would be recommended to check out the repo if you want to see the detail of our model, CogVLM & CogAgent. </h3>'
MAINTENANCE_NOTICE1 = 'Hint 1: If the app report "Something went wrong, connection error out", please turn off your proxy and retry.<br>Hint 2: If you upload a large size of image like 10MB, it may take some time to upload and process. Please be patient and wait.'
AGENT_NOTICE = 'Hint 1: To use <strong>Agent</strong> function, please use the <a href="https://github.com/THUDM/CogVLM/blob/main/utils/utils/template.py#L761">prompts for agents</a>.'
GROUNDING_NOTICE = 'Hint 2: To use <strong>Grounding</strong> function, please use the <a href="https://github.com/THUDM/CogVLM/blob/main/utils/utils/template.py#L344">prompts for grounding</a>.'
# Module-level handles populated by main() so post() can reach them.
# NOTE(review): cross_image_processor is also used as a global in post() but is
# not initialized here — fine as long as main() always runs first; confirm.
model = image_processor = text_processor_infer = None
is_grounding = False
def process_image_without_resize(image_prompt):
    """Open the image at *image_prompt* and build a timestamped grounding-output path.

    Returns:
        (PIL.Image, str): the opened image and a path under examples/ where a
        grounded visualization may be written.
    """
    img = Image.open(image_prompt)
    stamp = int(time.time())
    ext = os.path.splitext(image_prompt)[1]
    grounding_path = f"examples/{stamp}_grounding{ext}"
    return img, grounding_path
from sat.quantization.kernels import quantize
import argparse
def load_model(args):
    """Load the CogVLM/CogAgent SAT checkpoint plus its tokenizer and processors."""
    # NOTE(review): because world_size is assigned in the except branch it is a
    # *local* variable, so this bare reference always raises UnboundLocalError
    # (a NameError subclass) and world_size is always 1 here — the module-level
    # WORLD_SIZE set in __main__ is never read. Confirm whether multi-GPU
    # model parallelism is actually supported by this demo.
    try:
        world_size
    except NameError:
        world_size = 1
    model, model_args = AutoModel.from_pretrained(
        args.from_pretrained,
        args=argparse.Namespace(
            deepspeed=None,
            local_rank=0,
            rank=0,
            world_size=world_size,
            model_parallel_size=world_size,
            mode='inference',
            fp16=args.fp16,
            bf16=args.bf16,
            skip_init=True,
            # Initialize directly on GPU only when not quantizing.
            use_gpu_initialization=True if (torch.cuda.is_available() and args.quant is None) else False,
            device='cpu' if args.quant else 'cuda'),
        overwrite_args={'model_parallel_size': world_size} if world_size != 1 else {}
    )
    model = model.eval()
    assert world_size == get_model_parallel_world_size(), "world size must equal to model parallel size for cli_demo!"
    # Prefer the text-processor version recorded in the checkpoint config.
    language_processor_version = model_args.text_processor_version if 'text_processor_version' in model_args else args.version
    tokenizer = llama2_tokenizer(args.local_tokenizer, signal_type=language_processor_version)
    image_processor = get_image_processor(model_args.eva_args["image_size"][0])
    # CogAgent checkpoints additionally define a low-res cross-attention image path.
    cross_image_processor = get_image_processor(model_args.cross_image_pix) if "cross_image_pix" in model_args else None
    # Quantize on CPU first, then move to GPU.
    if args.quant:
        quantize(model, args.quant)
    if torch.cuda.is_available():
        model = model.cuda()
    model.add_mixin('auto-regressive', CachedAutoregressiveMixin())
    text_processor_infer = llama2_text_processor_inference(tokenizer, args.max_length, model.image_length)
    return model, image_processor, cross_image_processor, text_processor_infer
def post(
        input_text,
        temperature,
        top_p,
        top_k,
        image_prompt,
        result_previous,
        hidden_image,
        state
):
    """Handle one chat turn: run the model on (text, image) and extend the history.

    Returns:
        ("", updated_history, hidden_image) — the empty string clears the textbox.
    """
    # Copy the history, dropping entries with an empty/None user message.
    result_text = [(ele[0], ele[1]) for ele in result_previous]
    for i in range(len(result_text)-1, -1, -1):
        if result_text[i][0] == "" or result_text[i][0] == None:
            del result_text[i]
    print(f"history {result_text}")
    global model, image_processor, cross_image_processor, text_processor_infer, is_grounding
    try:
        with torch.no_grad():
            pil_img, image_path_grounding = process_image_without_resize(image_prompt)
            response, _, cache_image = chat(
                image_path="",
                model=model,
                text_processor=text_processor_infer,
                img_processor=image_processor,
                query=input_text,
                history=None,
                cross_img_processor=cross_image_processor,
                image=pil_img,
                max_length=2048,
                top_p=top_p,
                temperature=temperature,
                top_k=top_k,
                invalid_slices=text_processor_infer.invalid_slices if hasattr(text_processor_infer, "invalid_slices") else [],
                no_prompt=False,
                args=state['args']
            )
    except Exception as e:
        # Any inference failure is surfaced to the user as a retry hint.
        print("error message", e)
        result_text.append((input_text, 'Timeout! Please wait a few minutes and retry.'))
        return "", result_text, hidden_image
    answer = response
    if is_grounding:
        # Grounding checkpoints: render boxes onto the image and attach it
        # to the chat history alongside the text answer.
        parse_response(pil_img, answer, image_path_grounding)
        new_answer = answer.replace(input_text, "")
        result_text.append((input_text, new_answer))
        result_text.append((None, (image_path_grounding,)))
    else:
        result_text.append((input_text, answer))
    print(result_text)
    print('finished')
    return "", result_text, hidden_image
def main(args):
    """Load the model and launch a minimal single-turn Gradio interface."""
    global model, image_processor, cross_image_processor, text_processor_infer, is_grounding
    model, image_processor, cross_image_processor, text_processor_infer = load_model(args)
    # Grounding behavior is inferred from the checkpoint name.
    is_grounding = 'grounding' in args.from_pretrained
    gr.close_all()
    # NOTE(review): these three locals and the functools.partial import are
    # unused — the sampling values come from the Gradio number inputs instead.
    temperature = 0.8
    top_p = 0.4
    top_k = 10
    from functools import partial
    def easy_submit(input_text, image_prompt, temperature, top_p, top_k):
        """Adapter: call post() with a fresh empty history and return only the answer text."""
        top_k = int(top_k)
        state = {"args": args}
        return post(input_text, temperature, top_p, top_k, image_prompt, "", "", state)[1][0][1]
    demo = gr.Interface(
        fn=easy_submit,
        inputs=[
            gr.Textbox(label='Input Text', placeholder='Please enter text prompt below and press ENTER.'),
            gr.Image(type="filepath", label="Image Prompt", value=None),
            gr.Number(value=0.8, label="Temperature", minimum=0, maximum=10),
            gr.Number(value=0.4, label="Top P", minimum=0, maximum=1),
            gr.Number(value=10, label="Top K(int)", minimum=0, maximum=100),],
        outputs="text"
    )
    demo.launch(share=True)
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--max_length", type=int, default=2048, help='max length of the total sequence')
    parser.add_argument("--top_p", type=float, default=0.4, help='top p for nucleus sampling')
    parser.add_argument("--top_k", type=int, default=1, help='top k for top k sampling')
    parser.add_argument("--temperature", type=float, default=.8, help='temperature for sampling')
    parser.add_argument("--version", type=str, default="chat", choices=['chat', 'vqa', 'chat_old', 'base'], help='version of language process. if there is \"text_processor_version\" in model_config.json, this option will be overwritten')
    parser.add_argument("--quant", choices=[8, 4], type=int, default=None, help='quantization bits')
    parser.add_argument("--from_pretrained", type=str, default="cogagent-chat", help='pretrained ckpt')
    parser.add_argument("--local_tokenizer", type=str, default="lmsys/vicuna-7b-v1.5", help='tokenizer path')
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--bf16", action="store_true")
    parser.add_argument("--stream_chat", action="store_true")
    args = parser.parse_args()
    # Distributed launch info from the environment (single process by default).
    rank = int(os.environ.get('RANK', 0))
    world_size = int(os.environ.get('WORLD_SIZE', 1))
    # NOTE(review): parse_args() is called a second time here — redundant
    # duplicate of the call above; harmless but should be removed.
    args = parser.parse_args()
    main(args)
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/ios_exp/collect_trajectories.py | exps/ios_exp/collect_trajectories.py | from gradio_client import Client
import argparse
from env import IOSEnv, ALL_TASKS
from utils import translate_action
from tqdm import tqdm
import json
import random
def main(args):
    """Roll out the remote CogAgent policy in the iOS simulator and log trajectories.

    Each episode is a list of transition dicts; the full episode list is
    re-written to args.output_path after every completed episode.
    """
    # Remote Gradio endpoint serving the policy.
    client = Client(args.gardio_http)
    env = IOSEnv(save_path=args.save_path, udid=args.udid, device_path=args.device_path)
    episodes = []
    for _ in range(1):
        random.shuffle(ALL_TASKS)
        for current_task in tqdm(ALL_TASKS):
            # Retry the whole episode until it completes without a client error.
            # NOTE(review): only ValueError is retried; this loops forever if
            # the endpoint keeps failing — consider a retry cap.
            while True:
                try:
                    episode = []
                    obs, _ = env.reset()
                    done = False
                    while not done:
                        # Query the remote policy with the task and current screenshot.
                        raw_action = client.predict(
                            current_task, obs, 1.5, 1, 100, api_name="/predict"
                        )
                        translated_action, idb_action = translate_action(raw_action)
                        next_obs, _, done, _ = env.step(idb_action)
                        episode.append(
                            {
                                "observation": obs,
                                "raw_action": raw_action,
                                "translated_action": translated_action,
                                "idb_action": idb_action,
                                "next_observation": next_obs,
                                "done": done,
                                "task": current_task,
                            }
                        )
                        obs = next_obs
                    episodes.append(episode)
                    # Checkpoint all episodes after each successful rollout.
                    with open(args.output_path, "w") as fb:
                        json.dump(episodes, fb)
                    break
                except ValueError:
                    print("error")
                    env.kill()
                    continue
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # NOTE(review): "gardio" is a typo for "gradio" but renaming the flag would
    # break existing invocations; the endpoint URL below is a placeholder.
    parser.add_argument("--gardio-http", default="https://a66ecf765b66e00e21<removed>/")
    parser.add_argument(
        "--udid", default="16199A9E-A005-449E-92B1-10755C359799", type=str
    )
    parser.add_argument(
        "--save-path",
        default="/Users/<user>/Desktop/idb-test/zeroshot_train/images/",
        type=str,
    )
    parser.add_argument(
        "--device-path",
        default="/Users/<user>/Library/Developer/CoreSimulator/Devices",
        type=str,
    )
    # Where the collected trajectories (JSON) are written.
    parser.add_argument("--output-path", type=str)
    args = parser.parse_args()
    main(args)
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/ios_exp/utils.py | exps/ios_exp/utils.py | import random
# Android-style action name -> idb shell command template ({udid} filled in later).
COMMAND_MAP = {
    "swipe right": "idb ui swipe --udid {udid} 200 200 300 200 --duration 0.1",
    "swipe left": "idb ui swipe --udid {udid} 400 200 300 200 --duration 0.1",
    "swipe up": "idb ui swipe --udid {udid} 200 400 200 200 --duration 0.1",
    "swipe down": "idb ui swipe --udid {udid} 200 200 200 400 --duration 0.1",
    "press home": "idb ui button --udid {udid} HOME",
    "press back": "idb ui button --udid {udid} HOME",  # there is no back button so do press home
    "task complete": "end",
}

# Screen size the coordinates are scaled to (iPhone 14 points); CogAgent
# emits coordinates normalized to [0, 1000].
WIDTH = 390
HEIGHT = 844


def translate_action(raw_action):
    """Translate a raw CogAgent (Android-style) action string into an idb command.

    Supported actions: swipe right/left/down, press home, press back,
    task complete, tap, type. Because of the difference between Android and
    iOS, "swipe up" is interpreted as swipe left or swipe right with equal
    probability. Invalid/unparseable actions are interpreted as task complete.

    Returns:
        translated_action: one of the supported action names (or the raw
            tap/type string, or "Invalid action").
        idb_action: the corresponding idb command template ("end" terminates).
    """
    try:
        raw_action = raw_action.split('Grounded Operation:')[1]
        lowered = raw_action.lower()
        # Android "swipe up" has no direct iOS equivalent here; pick a random
        # horizontal swipe. Checked before the table lookup because "swipe up"
        # is itself a COMMAND_MAP key.
        if "swipe up" in lowered:
            if random.random() < 0.5:
                return "swipe left", COMMAND_MAP["swipe left"]
            return "swipe right", COMMAND_MAP["swipe right"]
        # Simple commands: first table entry whose name appears in the action.
        for name, command in COMMAND_MAP.items():
            if name in lowered:
                return name, command
        # Tap: parse the "[[x, y]]" coordinates and scale to screen points.
        if "tap" in raw_action:
            numbers = raw_action.split('[[')[1].split(',')
            x = int(numbers[0])
            y = int(numbers[1].split(']]')[0])
            X = int(x * WIDTH / 1000)
            Y = int(y * HEIGHT / 1000)
            return raw_action, f"idb ui tap --udid {{udid}} {X} {Y}"
        # Type: the text to enter is the first double-quoted substring.
        if "type" in raw_action:
            to_type = raw_action.split('"')[1]
            return raw_action, f'idb ui text --udid {{udid}} "{to_type}"'
        return "Invalid action", COMMAND_MAP["task complete"]
    except Exception:
        # Missing marker or malformed coordinates/text: end the episode.
        return "Invalid action", COMMAND_MAP["task complete"]
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/ios_exp/env.py | exps/ios_exp/env.py | import time
import os
import subprocess
import signal
import cv2
import random
from PIL import Image
import json
# Task list is loaded at import time from a JSON file in the working directory,
# then each task is wrapped into the CogAgent grounding prompt template.
with open("train_tasks.json", "r") as fb:
    ALL_TASKS = json.load(fb)
TASK_PROMPT = 'What steps do I need to take to "{task}"?(with grounding)'
ALL_TASKS = [TASK_PROMPT.format(task=task) for task in ALL_TASKS]
class IOSEnv:
def __init__(
self,
save_path="/Users/<user>/Desktop/idb-test/images/",
device_path="/Users/<user>/Library/Developer/CoreSimulator/Devices",
udid="6E44D401-6183-4F8E-9232-DD84BD9AC821",
max_steps=10,
):
"""_summary_
Args:
save_path (str): The path where the images observations should be saved. Defaults to "/Users/<user>/Desktop/idb-test/images/".
device_path (str): The path to the simulator devices. Defaults to "/Users/<user>/Library/Developer/CoreSimulator/Devices".
udid (str): The udid of the simulator to be used. Defaults to "6E44D401-6183-4F8E-9232-DD84BD9AC821".
max_steps (int, optional): maximum number of steps. Defaults to 10.
"""
self.save_path = save_path
# if id is not None:
# self.id = id
# else:
self.udid = udid
self.device_path = device_path
self.max_steps = max_steps
self.steps = 0
self.output_time = None
def get_observation(self):
time.sleep(3)
output_path = os.path.join(
self.save_path, f"{self.output_time}-{str(self.steps)}.jpg"
)
os.system(f"xcrun simctl io {self.udid} screenshot {output_path}")
# os.system(f"idb screenshot {output_path} --udid {self.udid}")
# # process = subprocess.Popen(["idb", "record", "video", "--udid", self.udid, output_path])
# # # process.communicate("sleep 1")
# # process.send_signal(signal.SIGINT)
# # process.wait(i
# # breakpoint()
# # import IPython; IPython.embed()
# os.system(f"timeout 4s idb record video --udid {self.udid} {output_path}")
# time.sleep(2)
# for i in range(5):
# try:
# vidcap = cv2.VideoCapture(output_path)
# _, image = vidcap.read()
# img = Image.fromarray(image)
# r,g,b = img.split()
# corrected_img = Image.merge("RGB", (b, g, r))
# corrected_img.save(output_path.replace("mp4", "jpg"))
# break
# except Exception as e:
# print(e)
# print("waiting for recording to complete")
# time.sleep(2)
# os.system(f'rm {output_path}')
return output_path
def reset(self, options=None):
# self.kill()
self.steps = 0
# self.udid = os.popen(f'xcrun simctl clone {self.back_udid} {self.id}').read().replace('\n', '')
os.system(f"idb boot {self.udid}")
os.system(f"idb set-location --udid {self.udid} 40.7128 -74.0060")
time.sleep(10)
self.output_time = str(time.time())
return self.get_observation(), None
def step(self, action):
    """Execute one environment step.

    Args:
        action (str): An idb shell command template (may contain a
            ``{udid}`` placeholder), or the literal string ``"end"`` to
            terminate the episode immediately.

    Returns:
        tuple: ``(obs, reward, done, info)`` where ``obs`` is the path of
        the screenshot taken after the action, ``reward`` is always 0,
        ``done`` flags episode termination, and ``info`` is an empty dict.
    """
    # NOTE(review): assert is stripped under `python -O`; callers must not
    # step past max_steps.
    assert self.steps < self.max_steps
    self.steps += 1
    reward = 0
    if action == "end":
        self.steps = self.max_steps  # set time limit to end the episode
    else:
        # Substitute this simulator's udid into the command template.
        os.system(action.format(udid=self.udid))
        time.sleep(1)
    obs = self.get_observation()
    done = self.steps >= self.max_steps
    if done:
        self.kill()
    return obs, reward, done, {}
def kill(self):
    """
    Kill the idb process and reset the environment
    """
    # print("killing")
    os.system(f"idb shutdown {self.udid}")
    # restore to initial state
    # Replace the simulator's on-disk state with the pristine 'back'
    # snapshot directory so the next episode starts from a clean device.
    # breakpoint()
    os.system(f"rm -r {os.path.join(self.device_path, self.udid)}")
    os.system(
        f"cp -rH {os.path.join(self.device_path, 'back')} {os.path.join(self.device_path, self.udid)}"
    )

# Alternative implementation kept for reference: deletes the simulator via
# `xcrun simctl` instead of restoring the snapshot directory.
# def kill(self):
#     """
#     Kill the idb process and reset the environment
#     """
#     # print("killing")
#     os.system(f"idb shutdown {self.udid}")
#     os.system(f"xcrun simctl delete {self.udid}")
#     # #restore to initial state
#     # os.system(f"rm -r {os.path.join(self.device_path, self.udid)}")
#     # os.system(f"cp -rH {os.path.join(self.device_path, 'back')} {os.path.join(self.device_path, self.udid)}")
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/run.py | exps/webarena_exp/run.py | """Script to run end-to-end evaluation on the benchmark"""
import argparse
import glob
import json
import logging
import os
import random
import subprocess
import tempfile
import time
from pathlib import Path
import openai
from agent import (
Agent,
PromptAgent,
TeacherForcingAgent,
construct_agent,
)
from agent.prompts import *
from browser_env import (
Action,
ActionTypes,
ScriptBrowserEnv,
StateInfo,
Trajectory,
create_stop_action,
)
from browser_env.actions import is_equivalent
from browser_env.auto_login import get_site_comb_from_filepath
from browser_env.helper_functions import (
RenderHelper,
get_action_description,
)
from evaluation_harness import evaluator_router
LOG_FOLDER = "log_files"
Path(LOG_FOLDER).mkdir(parents=True, exist_ok=True)
# Unique per-run log file: timestamp plus a random suffix to avoid
# collisions when several runs start within the same second.
LOG_FILE_NAME = f"{LOG_FOLDER}/log_{time.strftime('%Y%m%d%H%M%S', time.localtime())}_{random.randint(0, 10000)}.log"

logger = logging.getLogger("logger")
logger.setLevel(logging.INFO)

# Mirror log records to both the console and the per-run log file.
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
logger.addHandler(console_handler)

file_handler = logging.FileHandler(LOG_FILE_NAME)
file_handler.setLevel(logging.DEBUG)
logger.addHandler(file_handler)

# Set the log format
formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
console_handler.setFormatter(formatter)
file_handler.setFormatter(formatter)
def config() -> argparse.Namespace:
    """Parse command-line arguments for the benchmark run.

    Returns:
        argparse.Namespace: The parsed arguments.

    Raises:
        ValueError: If the chosen action set is incompatible with the
            chosen observation type.
    """
    parser = argparse.ArgumentParser(
        description="Run end-to-end evaluation on the benchmark"
    )
    parser.add_argument(
        "--render", action="store_true", help="Render the browser"
    )
    parser.add_argument(
        "--slow_mo",
        type=int,
        default=0,
        help="Slow down the browser by the specified amount",
    )
    parser.add_argument(
        "--action_set_tag", default="id_accessibility_tree", help="Action type"
    )
    parser.add_argument(
        "--observation_type",
        choices=["accessibility_tree", "html", "image"],
        default="accessibility_tree",
        help="Observation type",
    )
    parser.add_argument(
        "--current_viewport_only",
        action="store_true",
        help="Only use the current viewport for the observation",
    )
    parser.add_argument("--viewport_width", type=int, default=1280)
    parser.add_argument("--viewport_height", type=int, default=720)
    parser.add_argument("--save_trace_enabled", action="store_true")
    parser.add_argument("--sleep_after_execution", type=float, default=0.0)

    parser.add_argument("--max_steps", type=int, default=30)

    # agent config
    parser.add_argument("--agent_type", type=str, default="prompt")
    parser.add_argument(
        "--instruction_path",
        type=str,
        default="agents/prompts/state_action_agent.json",
    )
    # Typo fixed in the two help strings below: "concesecutive" -> "consecutive".
    parser.add_argument(
        "--parsing_failure_th",
        help="When consecutive parsing failure exceeds this threshold, the agent will stop",
        type=int,
        default=3,
    )
    parser.add_argument(
        "--repeating_action_failure_th",
        help="When consecutive repeating action exceeds this threshold, the agent will stop",
        type=int,
        default=3,
    )

    # lm config
    parser.add_argument("--provider", type=str, default="openai")
    parser.add_argument("--model", type=str, default="gpt-3.5-turbo-0613")
    parser.add_argument("--mode", type=str, default="chat")
    parser.add_argument("--temperature", type=float, default=1.0)
    parser.add_argument("--top_p", type=float, default=0.9)
    parser.add_argument("--context_length", type=int, default=0)
    parser.add_argument("--max_tokens", type=int, default=384)
    parser.add_argument("--stop_token", type=str, default=None)
    parser.add_argument(
        "--max_retry",
        type=int,
        help="max retry times to perform generations when parsing fails",
        default=1,
    )
    parser.add_argument(
        "--max_obs_length",
        type=int,
        help="when not zero, will truncate the observation to this length before feeding to the model",
        default=1920,
    )
    parser.add_argument(
        "--model_endpoint",
        help="huggingface model endpoint",
        type=str,
        default="",
    )

    # example config
    parser.add_argument("--test_start_idx", type=int, default=0)
    parser.add_argument("--test_end_idx", type=int, default=1000)

    # logging related
    parser.add_argument("--result_dir", type=str, default="")
    args = parser.parse_args()

    # Check whether the action space is compatible with the observation space.
    if (
        args.action_set_tag == "id_accessibility_tree"
        and args.observation_type != "accessibility_tree"
    ):
        raise ValueError(
            f"Action type {args.action_set_tag} is incompatible with the observation type {args.observation_type}"
        )
    return args
def early_stop(
    trajectory: Trajectory, max_steps: int, thresholds: dict[str, int]
) -> tuple[bool, str]:
    """Decide whether the episode should be stopped early.

    Returns a ``(should_stop, reason)`` pair. Stopping conditions: the
    step budget is exhausted, the agent failed to produce a parseable
    action several times in a row, or it keeps repeating the same action.
    """
    # The trajectory alternates state/action entries, so the step count
    # is half of the remaining length after the initial state.
    if (len(trajectory) - 1) / 2 >= max_steps:
        return True, f"Reach max steps {max_steps}"

    # Actions occupy the odd indices of the trajectory.
    actions: list[Action] = trajectory[1::2]  # type: ignore[assignment]

    # Case: the last k actions all failed to parse.
    k = thresholds["parsing_failure"]
    recent = actions[-k:]
    if len(recent) >= k and all(
        a["action_type"] == ActionTypes.NONE for a in recent
    ):
        return True, f"Failed to parse actions for {k} times"

    # Case: the same action keeps being issued.
    k = thresholds["repeating_action"]
    if not actions:
        return False, ""
    newest: Action = actions[-1]
    if newest["action_type"] == ActionTypes.TYPE:
        # Typing actions are counted over the whole history rather than
        # only the most recent window.
        if sum(is_equivalent(a, newest) for a in actions) >= k:
            return True, f"Same typing action for {k} times"
    else:
        recent = actions[-k:]
        if len(recent) >= k and all(
            is_equivalent(a, newest) for a in recent
        ):
            return True, f"Same action for {k} times"

    return False, ""
def test(
    args: argparse.Namespace,
    agent: Agent | PromptAgent | TeacherForcingAgent,
    config_file_list: list[str],
) -> None:
    """Run the agent on each task config and log per-task PASS/FAIL.

    For every config file: renew login cookies when the task needs them,
    roll the agent out in a ScriptBrowserEnv until it stops (or an
    early-stop rule fires), score the trajectory with the task evaluator,
    and save renders/traces. A failure in one task is logged to
    ``error.txt`` and does not abort the remaining tasks.
    """
    scores = []
    max_steps = args.max_steps

    early_stop_thresholds = {
        "parsing_failure": args.parsing_failure_th,
        "repeating_action": args.repeating_action_failure_th,
    }

    env = ScriptBrowserEnv(
        headless=not args.render,
        slow_mo=args.slow_mo,
        observation_type=args.observation_type,
        current_viewport_only=args.current_viewport_only,
        viewport_size={
            "width": args.viewport_width,
            "height": args.viewport_height,
        },
        save_trace_enabled=args.save_trace_enabled,
        sleep_after_execution=args.sleep_after_execution,
    )

    for config_file in config_file_list:
        # Initialized to None so cleanup below cannot hit a NameError when
        # RenderHelper construction itself fails (mirrors run_reflexion.py).
        render_helper = None
        try:
            render_helper = RenderHelper(
                config_file, args.result_dir, args.action_set_tag
            )

            # get intent
            with open(config_file) as f:
                _c = json.load(f)
                intent = _c["intent"]
                task_id = _c["task_id"]
                # automatically login
                if _c["storage_state"]:
                    cookie_file_name = os.path.basename(_c["storage_state"])
                    comb = get_site_comb_from_filepath(cookie_file_name)
                    temp_dir = tempfile.mkdtemp()
                    # subprocess to renew the cookie
                    subprocess.run(
                        [
                            "python",
                            "browser_env/auto_login.py",
                            "--auth_folder",
                            temp_dir,
                            "--site_list",
                            *comb,
                        ]
                    )
                    _c["storage_state"] = f"{temp_dir}/{cookie_file_name}"
                    assert os.path.exists(_c["storage_state"])
                    # update the config file to point at the fresh cookies
                    config_file = f"{temp_dir}/{os.path.basename(config_file)}"
                    with open(config_file, "w") as f:
                        json.dump(_c, f)

            logger.info(f"[Config file]: {config_file}")
            logger.info(f"[Intent]: {intent}")

            agent.reset(config_file)
            trajectory: Trajectory = []
            obs, info = env.reset(options={"config_file": config_file})
            state_info: StateInfo = {"observation": obs, "info": info}
            trajectory.append(state_info)

            meta_data = {"action_history": ["None"]}
            while True:
                early_stop_flag, stop_info = early_stop(
                    trajectory, max_steps, early_stop_thresholds
                )

                if early_stop_flag:
                    action = create_stop_action(f"Early stop: {stop_info}")
                else:
                    try:
                        action = agent.next_action(
                            trajectory, intent, meta_data=meta_data
                        )
                    except ValueError as e:
                        # get the error message
                        action = create_stop_action(f"ERROR: {str(e)}")

                trajectory.append(action)

                action_str = get_action_description(
                    action,
                    state_info["info"]["observation_metadata"],
                    action_set_tag=args.action_set_tag,
                    prompt_constructor=agent.prompt_constructor
                    if isinstance(agent, PromptAgent)
                    else None,
                )
                render_helper.render(
                    action, state_info, meta_data, args.render_screenshot
                )
                meta_data["action_history"].append(action_str)
                print(meta_data)
                if action["action_type"] == ActionTypes.STOP:
                    break

                obs, _, terminated, _, info = env.step(action)
                state_info = {"observation": obs, "info": info}
                trajectory.append(state_info)

                if terminated:
                    # add a action place holder
                    trajectory.append(create_stop_action(""))
                    break

            evaluator = evaluator_router(config_file)
            score = evaluator(
                trajectory=trajectory,
                config_file=config_file,
                page=env.page,
                client=env.get_page_client(env.page),
            )

            scores.append(score)

            if score == 1:
                logger.info(f"[Result] (PASS) {config_file}")
            else:
                logger.info(f"[Result] (FAIL) {config_file}")

            if args.save_trace_enabled:
                env.save_trace(
                    Path(args.result_dir) / "traces" / f"{task_id}.zip"
                )

        except openai.error.OpenAIError as e:
            logger.info(f"[OpenAI Error] {repr(e)}")
        except Exception as e:
            logger.info(f"[Unhandled Error] {repr(e)}]")
            import traceback

            # write to error file
            with open(Path(args.result_dir) / "error.txt", "a") as f:
                f.write(f"[Config file]: {config_file}\n")
                f.write(f"[Unhandled Error] {repr(e)}\n")
                f.write(traceback.format_exc())  # write stack trace to file

        if render_helper is not None:
            render_helper.close()

    env.close()
    # Guard against an empty task list (consistent with run_reflexion.py);
    # the unguarded division previously raised ZeroDivisionError.
    if scores:
        logger.info(f"Average score: {sum(scores) / len(scores)}")
def prepare(args: argparse.Namespace) -> None:
    """Materialize prompt JSONs and set up the result directory tree."""
    # convert prompt python files to json
    from agent.prompts import to_json

    to_json.run()

    # Fall back to a timestamped cache directory when none was given.
    out_dir = args.result_dir
    if not out_dir:
        out_dir = (
            f"cache/results_{time.strftime('%Y%m%d%H%M%S', time.localtime())}"
        )
    out_path = Path(out_dir)
    if not out_path.exists():
        out_path.mkdir(parents=True, exist_ok=True)
    args.result_dir = out_dir
    logger.info(f"Create result dir: {out_dir}")

    traces_dir = out_path / "traces"
    if not traces_dir.exists():
        traces_dir.mkdir(parents=True)

    # Record which log file belongs to this run.
    with open(os.path.join(out_dir, "log_files.txt"), "a+") as f:
        f.write(f"{LOG_FILE_NAME}\n")
def get_unfinished(config_files: list[str], result_dir: str) -> list[str]:
    """Return the config files whose task has no render_*.html result yet."""
    # A finished task leaves a render_<task_id>.html file in result_dir.
    done_ids = {
        os.path.basename(path).split(".")[0].split("_")[1]
        for path in glob.glob(f"{result_dir}/*.html")
    }
    return [
        cfg
        for cfg in config_files
        if os.path.basename(cfg).split(".")[0] not in done_ids
    ]
def dump_config(args: argparse.Namespace) -> None:
    """Write the run arguments to result_dir/config.json (first run only)."""
    target = Path(args.result_dir) / "config.json"
    # Never clobber the config left behind by a previous (resumed) run.
    if target.exists():
        return
    with open(target, "w") as f:
        json.dump(vars(args), f, indent=4)
        logger.info(f"Dump config to {target}")
if __name__ == "__main__":
    args = config()
    # Fixed pacing between browser actions, regardless of CLI input.
    args.sleep_after_execution = 2.0
    prepare(args)

    test_file_list = []
    st_idx = args.test_start_idx
    ed_idx = args.test_end_idx
    for i in range(st_idx, ed_idx):
        test_file_list.append(f"config_files/{i}.json")
    # Skip tasks that already have results, unless this is a debug run.
    if "debug" not in args.result_dir:
        test_file_list = get_unfinished(test_file_list, args.result_dir)

    if len(test_file_list) == 0:
        logger.info("No task left to run")
    else:
        print(f"Total {len(test_file_list)} tasks left")
        # Hard-coded run settings override whatever was passed on the CLI.
        args.render = False
        args.render_screenshot = True
        args.save_trace_enabled = True

        args.current_viewport_only = True
        dump_config(args)

        agent = construct_agent(args)
        test(args, agent, test_file_list)
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/setup.py | exps/webarena_exp/setup.py | from setuptools import setup
if __name__ == "__main__":
    # All package metadata comes from setup.cfg / pyproject.toml.
    setup()
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/minimal_example.py | exps/webarena_exp/minimal_example.py | #!/usr/bin/env python3
# type: ignore
# Minimal end-to-end WebArena demo: sets up site URLs, generates task
# configs, manually drives one GitLab task, and checks the evaluator
# scores the hand-played trajectory as correct. Requires the demo
# servers below to be reachable.
import json
import os
import re
import subprocess
import time

SLEEP = 1.5
# set the URLs of each website, we use the demo sites as an example
os.environ[
    "SHOPPING"
] = "http://ec2-3-131-244-37.us-east-2.compute.amazonaws.com:7770"
os.environ[
    "SHOPPING_ADMIN"
] = "http://ec2-3-131-244-37.us-east-2.compute.amazonaws.com:7780/admin"
os.environ[
    "REDDIT"
] = "http://ec2-3-131-244-37.us-east-2.compute.amazonaws.com:9999"
os.environ[
    "GITLAB"
] = "http://ec2-3-131-244-37.us-east-2.compute.amazonaws.com:8023"
os.environ[
    "MAP"
] = "http://ec2-3-131-244-37.us-east-2.compute.amazonaws.com:3000"
os.environ[
    "WIKIPEDIA"
] = "http://ec2-3-131-244-37.us-east-2.compute.amazonaws.com:8888/wikipedia_en_all_maxi_2022-05/A/User:The_other_Kiwix_guy/Landing"
os.environ[
    "HOMEPAGE"
] = "PASS"  # The home page is not currently hosted in the demo site
print("Done setting up URLs")

# First, run `python scripts/generate_test_data.py` to generate the config files
p = subprocess.run(
    ["python", "scripts/generate_test_data.py"], capture_output=True
)

# It will generate individual config file for each test example in config_files
assert os.path.exists("config_files/0.json")

# Make sure the URLs in the config files are replaced properly
with open("config_files/0.json", "r") as f:
    config = json.load(f)
    assert os.environ["SHOPPING_ADMIN"] in config["start_url"], (
        os.environ["SHOPPING_ADMIN"],
        config["start_url"],
    )

print("Done generating config files with the correct URLs")

# run bash prepare.sh to save all account cookies, this only needs to be done once
subprocess.run(["bash", "prepare.sh"])
print("Done saving account cookies")

# Init an environment
from browser_env import (
    Action,
    ActionTypes,
    ObservationMetadata,
    ScriptBrowserEnv,
    StateInfo,
    Trajectory,
    action2str,
    create_id_based_action,
    create_stop_action,
)
from evaluation_harness.evaluators import evaluator_router

# Init the environment
env = ScriptBrowserEnv(
    headless=False,
    slow_mo=100,
    observation_type="accessibility_tree",
    current_viewport_only=True,
    viewport_size={"width": 1280, "height": 720},
)

# example 156 as an example
config_file = "config_files/156.json"
# maintain a trajectory
trajectory: Trajectory = []

# set the environment for the current example
obs, info = env.reset(options={"config_file": config_file})
actree_obs = obs["text"]
print(actree_obs)

# You should see some output like this:
"""
[4] RootWebArea 'Projects · Dashboard · GitLab' focused: True
[12] link 'Skip to content'
[28] link 'Dashboard'
[2266] button '' hasPopup: menu expanded: False
[63] textbox 'Search GitLab' required: False
[61] generic 'Use the shortcut key <kbd>/</kbd> to start a search'
[79] link 'Create new...'
[95] link 'Issues'
[97] generic '13 assigned issues'
[101] link 'Merge requests'
[104] generic '8 merge requests'"""

# save the state info to the trajectory
state_info: StateInfo = {"observation": obs, "info": info}
trajectory.append(state_info)

# Now let's try to perform the action of clicking the "Merge request" link
# As the element ID is dynamic each time, we use regex to match the element as the demo
match = re.search(r"\[(\d+)\] link 'Merge requests'", actree_obs).group(1)
# Create the action click [ELEMENT_ID]
click_action = create_id_based_action(f"click [{match}]")
# Add the action to the trajectory
trajectory.append(click_action)

# Step and get the new observation
obs, _, terminated, _, info = env.step(click_action)
# New observation
actree_obs = obs["text"]
print(actree_obs)
time.sleep(SLEEP)

state_info = {"observation": obs, "info": info}
trajectory.append(state_info)

# Next click "assign to you"
match = re.search(r"\[(\d+)\] link 'Assigned to you", actree_obs).group(1)
click_action = create_id_based_action(f"click [{match}]")
trajectory.append(click_action)
obs, _, terminated, _, info = env.step(click_action)
actree_obs = obs["text"]
print(actree_obs)
time.sleep(SLEEP)

state_info = {"observation": obs, "info": info}
trajectory.append(state_info)

# add a stop action to mark the end of the trajectory
trajectory.append(create_stop_action(""))

# Demo evaluation
evaluator = evaluator_router(config_file)
score = evaluator(
    trajectory=trajectory,
    config_file=config_file,
    page=env.page,
    client=env.get_page_client(env.page),
)

# as we manually perform the task, the task should be judged as correct
assert score == 1.0
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/run_reflexion.py | exps/webarena_exp/run_reflexion.py | """Script to run end-to-end evaluation on the benchmark"""
import argparse
import glob
import json
import logging
import os
import copy
import random
import subprocess
import tempfile
import time
from pathlib import Path
import openai
import openai.error
from agent import (
Agent,
PromptAgent,
TeacherForcingAgent,
construct_agent,
)
from agent.prompts import *
from browser_env import (
Action,
ActionTypes,
ScriptBrowserEnv,
StateInfo,
Trajectory,
create_stop_action,
)
from browser_env.actions import is_equivalent
from browser_env.auto_login import get_site_comb_from_filepath
from browser_env.helper_functions import (
RenderHelper,
get_action_description,
save_img
)
from evaluation_harness import evaluator_router
from pprint import pprint
LOG_FOLDER = "log_files"
Path(LOG_FOLDER).mkdir(parents=True, exist_ok=True)
# Unique per-run log file: timestamp plus a random suffix to avoid
# collisions when several runs start within the same second.
LOG_FILE_NAME = f"{LOG_FOLDER}/log_{time.strftime('%Y%m%d%H%M%S', time.localtime())}_{random.randint(0, 10000)}.log"

logger = logging.getLogger("logger")
logger.setLevel(logging.INFO)

# Mirror log records to both the console and the per-run log file.
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
logger.addHandler(console_handler)

file_handler = logging.FileHandler(LOG_FILE_NAME)
file_handler.setLevel(logging.DEBUG)
logger.addHandler(file_handler)

# Set the log format
formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
console_handler.setFormatter(formatter)
file_handler.setFormatter(formatter)
def config() -> argparse.Namespace:
    """Parse command-line arguments for the Reflexion benchmark run.

    Returns:
        argparse.Namespace: The parsed arguments.

    Raises:
        ValueError: If the chosen action set is incompatible with the
            chosen observation type.
    """
    parser = argparse.ArgumentParser(
        description="Run end-to-end evaluation on the benchmark"
    )
    parser.add_argument(
        "--render", action="store_true", help="Render the browser"
    )
    parser.add_argument(
        "--slow_mo",
        type=int,
        default=0,
        help="Slow down the browser by the specified amount",
    )
    parser.add_argument(
        "--action_set_tag", default="id_accessibility_tree", help="Action type"
    )
    parser.add_argument(
        "--observation_type",
        choices=["accessibility_tree", "html", "image"],
        default="accessibility_tree",
        help="Observation type",
    )
    parser.add_argument(
        "--current_viewport_only",
        action="store_true",
        help="Only use the current viewport for the observation",
    )
    parser.add_argument("--viewport_width", type=int, default=1280)
    parser.add_argument("--viewport_height", type=int, default=720)
    parser.add_argument("--save_trace_enabled", action="store_true")
    parser.add_argument("--sleep_after_execution", type=float, default=0.0)

    parser.add_argument("--max_steps", type=int, default=30)

    # agent config
    parser.add_argument("--agent_type", type=str, default="reflexion", choices=["reflexion"])
    parser.add_argument(
        "--instruction_path",
        type=str,
        default="agent/prompts/jsons/p_cot_id_actree_2s_reflexion.json",
    )
    # Typo fixed in the two help strings below: "concesecutive" -> "consecutive".
    parser.add_argument(
        "--parsing_failure_th",
        help="When consecutive parsing failure exceeds this threshold, the agent will stop",
        type=int,
        default=3,
    )
    parser.add_argument(
        "--repeating_action_failure_th",
        help="When consecutive repeating action exceeds this threshold, the agent will stop",
        type=int,
        default=3,
    )
    parser.add_argument(
        "--max_num_attempts",
        type=int,
        default=1,
    )
    parser.add_argument(
        "--reflexion_evaluator",
        type=str,
        default="oracle",
        choices=["oracle", "model"],
    )

    # lm config
    parser.add_argument("--provider", type=str, default="openai")
    parser.add_argument("--model", type=str, default="gpt-3.5-turbo-0613")
    parser.add_argument("--mode", type=str, default="chat")
    parser.add_argument("--temperature", type=float, default=0.0)
    parser.add_argument("--top_p", type=float, default=0.9)
    parser.add_argument("--context_length", type=int, default=0)
    parser.add_argument("--max_tokens", type=int, default=384)
    parser.add_argument("--stop_token", type=str, default=None)
    parser.add_argument(
        "--max_retry",
        type=int,
        help="max retry times to perform generations when parsing fails",
        default=1,
    )
    parser.add_argument(
        "--max_obs_length",
        type=int,
        help="when not zero, will truncate the observation to this length before feeding to the model",
        default=1920,
    )
    parser.add_argument(
        "--model_endpoint",
        help="huggingface model endpoint",
        type=str,
        default="",
    )
    parser.add_argument(
        "--eval_lm_model",
        type=str,
        default="mixtral",
        choices=["gpt-3.5", "gpt-4", "mixtral", "gpt-4v"],
    )
    parser.add_argument(
        "--eval_prompt_version",
        type=str,
        default="final-v3",
        choices=["final-v2", "final-v3", "final-v3-gpt4v"],
    )
    parser.add_argument(
        "--eval_status_for_reflexion",
        type=str,
        default="language",
        choices=["binary", "language"],
    )

    # example config
    parser.add_argument("--test_start_idx", type=int, default=0)
    parser.add_argument("--test_end_idx", type=int, default=1000)
    parser.add_argument("--test_indexes", type=int, nargs="+", default=[])
    parser.add_argument("--test_file", type=str, default="")

    # logging related
    parser.add_argument("--result_dir", type=str, default="")
    parser.add_argument("--baseline_dir", type=str, default="")
    args = parser.parse_args()

    # Check whether the action space is compatible with the observation space.
    if (
        args.action_set_tag == "id_accessibility_tree"
        and args.observation_type != "accessibility_tree"
    ):
        raise ValueError(
            f"Action type {args.action_set_tag} is incompatible with the observation type {args.observation_type}"
        )
    return args
def early_stop(
    trajectory: Trajectory, max_steps: int, thresholds: dict[str, int]
) -> tuple[bool, str]:
    """Decide whether the episode should be stopped early.

    Returns a ``(should_stop, reason)`` pair. Stopping conditions: the
    step budget is exhausted, the agent failed to produce a parseable
    action several times in a row, or it keeps repeating the same action.
    """
    # The trajectory alternates state/action entries, so the step count
    # is half of the remaining length after the initial state.
    if (len(trajectory) - 1) / 2 >= max_steps:
        return True, f"Reach max steps {max_steps}"

    # Actions occupy the odd indices of the trajectory.
    actions: list[Action] = trajectory[1::2]  # type: ignore[assignment]

    # Case: the last k actions all failed to parse.
    k = thresholds["parsing_failure"]
    recent = actions[-k:]
    if len(recent) >= k and all(
        a["action_type"] == ActionTypes.NONE for a in recent
    ):
        return True, f"Failed to parse actions for {k} times"

    # Case: the same action keeps being issued.
    k = thresholds["repeating_action"]
    if not actions:
        return False, ""
    newest: Action = actions[-1]
    if newest["action_type"] == ActionTypes.TYPE:
        # Typing actions are counted over the whole history rather than
        # only the most recent window.
        if sum(is_equivalent(a, newest) for a in actions) >= k:
            return True, f"Same typing action for {k} times"
    else:
        recent = actions[-k:]
        if len(recent) >= k and all(
            is_equivalent(a, newest) for a in recent
        ):
            return True, f"Same action for {k} times"

    return False, ""
def test(
    args: argparse.Namespace,
    agent: Agent | PromptAgent | TeacherForcingAgent,
    config_file_list: list[str],
) -> None:
    """Run Reflexion-style evaluation over the given task configs.

    Each task gets up to ``args.max_num_attempts`` trails; after a failed
    trail the agent generates a reflection which is added to its memory
    for the next trail. Per-trail records, renders, memories and the
    aggregate results file are written under ``args.result_dir``. When
    ``args.baseline_dir`` is set, records from a previous run are reused
    instead of re-rolling the environment.

    NOTE(review): "trail" throughout appears to be a misspelling of
    "trial", but it is baked into record keys and file names, so it must
    stay as-is for compatibility with existing result files.
    """
    scores = []
    results = {}
    max_steps = args.max_steps
    assert args.agent_type == "reflexion"
    max_num_attempts = args.max_num_attempts

    early_stop_thresholds = {
        "parsing_failure": args.parsing_failure_th,
        "repeating_action": args.repeating_action_failure_th,
    }

    env = ScriptBrowserEnv(
        headless=not args.render,
        slow_mo=args.slow_mo,
        observation_type=args.observation_type,
        current_viewport_only=args.current_viewport_only,
        viewport_size={
            "width": args.viewport_width,
            "height": args.viewport_height,
        },
        save_trace_enabled=args.save_trace_enabled,
        sleep_after_execution=args.sleep_after_execution,
    )

    for config_file in config_file_list:
        # with open(config_file) as f:
        #     cfg = json.load(f)
        # if "map" in cfg["sites"]:
        #     logger.info("Skip map domain: " + config_file)
        #     continue
        # None until a fresh rollout constructs one; guarded at cleanup.
        render_helper = None
        try:
            # Memory (reflections) is shared across trails of the same task.
            meta_data = {
                "action_history": ["None"],
                "memory": []
            }
            # iterate over the max_num_attempts
            for trail_idx in range(max_num_attempts):
                render_save_dir = Path(args.result_dir) / "renders"
                if not render_save_dir.exists():
                    render_save_dir.mkdir(parents=True)
                record_save_dir = Path(args.result_dir) / "records"
                if not record_save_dir.exists():
                    record_save_dir.mkdir(parents=True)
                # get intent
                with open(config_file) as f:
                    _c = json.load(f)
                    intent = _c["intent"]
                    task_id = _c["task_id"]
                    # automatically login
                    if _c["storage_state"]:
                        cookie_file_name = os.path.basename(_c["storage_state"])
                        comb = get_site_comb_from_filepath(cookie_file_name)
                        temp_dir = tempfile.mkdtemp()
                        # subprocess to renew the cookie
                        subprocess.run(
                            [
                                "python",
                                "browser_env/auto_login.py",
                                "--auth_folder",
                                temp_dir,
                                "--site_list",
                                *comb,
                            ]
                        )
                        _c["storage_state"] = f"{temp_dir}/{cookie_file_name}"
                        assert os.path.exists(_c["storage_state"]), _c["storage_state"]
                        # update the config file
                        config_file = f"{temp_dir}/{os.path.basename(config_file)}"
                        with open(config_file, "w") as f:
                            json.dump(_c, f)
                if task_id not in results:
                    results[task_id] = {"intent": intent, "trails": []}
                logger.info(f"[Config file]: {config_file}")
                logger.info(f"[Intent]: {intent}")
                logger.info(f"#### Start trail: {trail_idx}")
                # See whether there is a baseline experiment and the result can be directly used
                baseline_dir = args.baseline_dir
                baseline_records = {}
                baseline_reflections = []
                if baseline_dir:
                    # load the baseline records
                    baseline_file = f"{baseline_dir}/records/{task_id}_{trail_idx}.json"
                    if os.path.exists(baseline_file):
                        with open(baseline_file) as f:
                            baseline_records = json.load(f)
                        logger.info(f"Loaded a baseline record from {baseline_file}")
                        # also copy the render file to the target directory
                        src_render_file = f"{baseline_dir}/renders/render_{task_id}_{trail_idx}.html"
                        dst_render_file = f"{args.result_dir}/renders/render_{task_id}_{trail_idx}.html"
                        assert os.path.exists(src_render_file), src_render_file
                        if src_render_file != dst_render_file:
                            os.system(f"cp {src_render_file} {dst_render_file}")
                    baseline_memory_file = f"{baseline_dir}/memory/memory_{task_id}.json"
                    if os.path.exists(baseline_memory_file):
                        with open(baseline_memory_file) as f:
                            baseline_memory = json.load(f)
                        baseline_reflections = baseline_memory["memory"]
                trajectory: Trajectory = []
                if baseline_records:
                    # use the baseline and no need to rerun
                    records = copy.deepcopy(baseline_records)
                else:
                    render_helper = RenderHelper(
                        config_file, render_save_dir, args.action_set_tag, trail_idx
                    )
                    # reset the records
                    records = {
                        "uid": task_id,
                        "trail_idx": trail_idx,
                        "memory": meta_data["memory"],
                        "intent": intent,
                        "response": "",
                        "steps": [],
                        "other": {"config": _c},
                    }
                    agent.reset(config_file)
                    obs, info = env.reset(options={"config_file": config_file})
                    state_info: StateInfo = {"observation": obs, "info": info}
                    trajectory.append(state_info)
                    step_idx = 0
                    img_name = save_img(obs["image"], Path(args.result_dir) / "images", task_id, step_idx, trail_idx)
                    records["steps"].append(
                        {
                            "img": img_name,
                            "accessibility_tree": obs["text"],
                            "url": info["page"].url
                        }
                    )
                    meta_data["action_history"] = ["None"]
                    while True:
                        early_stop_flag, stop_info = early_stop(
                            trajectory, max_steps, early_stop_thresholds
                        )
                        if early_stop_flag:
                            action = create_stop_action(f"Early stop: {stop_info}")
                        else:
                            try:
                                action = agent.next_action(
                                    trajectory, intent, meta_data=meta_data
                                )
                            except ValueError as e:
                                # get the error message
                                action = create_stop_action(f"ERROR: {str(e)}")
                        trajectory.append(action)
                        action_str = get_action_description(
                            action,
                            state_info["info"]["observation_metadata"],
                            action_set_tag=args.action_set_tag,
                            prompt_constructor=agent.prompt_constructor
                            if isinstance(agent, PromptAgent)
                            else None,
                        )
                        render_helper.render(
                            action, state_info, meta_data, args.render_screenshot
                        )
                        meta_data["action_history"].append(action_str)
                        records["steps"][-1]["other"] = {"raw_action": action_str}
                        if action["action_type"] == ActionTypes.STOP:
                            break
                        obs, _, terminated, _, info = env.step(action)
                        state_info = {"observation": obs, "info": info}
                        trajectory.append(state_info)
                        step_idx += 1
                        img_name = save_img(obs["image"], Path(args.result_dir) / "images", task_id, step_idx, trail_idx)
                        records["steps"].append(
                            {
                                "img": img_name,
                                "accessibility_tree": obs["text"],
                                "url": info["page"].url
                            }
                        )
                        if terminated:
                            # add a action place holder
                            trajectory.append(create_stop_action(""))
                            records["steps"][-1]["other"] = {"raw_action": "stop []"}
                            break
                    evaluator = evaluator_router(config_file)
                    oracle_score = evaluator(
                        trajectory=trajectory,
                        config_file=config_file,
                        page=env.page,
                        client=env.get_page_client(env.page),
                    )
                    records["response"] = action['answer']
                    records["oracle_score"] = oracle_score
                ### END OF TRAIL ###
                # start reflection
                if args.reflexion_evaluator == "oracle":
                    score_source = "gt"
                    score = records["oracle_score"]
                    status = "PASSED" if score == 1 else "FAILED"
                    logger.info(f"[Trail {trail_idx}] GT eval: {score} | {status}")
                else:
                    print("Running GAE evaluation ...")
                    score_source = "gae-nl"
                    score, status = agent.evaluator(records)
                    if args.eval_status_for_reflexion == "binary":
                        score_source = "gae-binary"
                        status = "PASSED" if score == 1 else "FAILED"
                    logger.info(f"[Trail {trail_idx}] GAE eval: {score} | {status}")
                # save the captions to the original baseline records
                if args.baseline_dir and os.path.exists(baseline_file):
                    with open(baseline_file, "w") as f:
                        json.dump(records, f, indent=4)
                records["score"] = score
                records["status"] = status
                records["score_source"] = score_source
                # pprint(records)
                # save the records
                with open(record_save_dir / f"{task_id}_{trail_idx}.json", "w") as f:
                    json.dump(records, f, indent=4)
                results[task_id]["trails"].append({
                    "trail_idx": trail_idx,
                    "response": records["response"],
                    "score": score,
                    "oracle_score": records["oracle_score"],
                    "status": status,
                })
                # early stop if succeed
                if score == 1:
                    break
                # no need to reflect for the last trail
                if trail_idx == (max_num_attempts - 1):
                    break
                # add a reflection to be used in the next trail
                if len(baseline_reflections) > trail_idx and baseline_records["status"] == status:
                    print("Reuse the reflection from baseline")
                    reflection = baseline_reflections[trail_idx]
                else:
                    print("Generating a reflection ...")
                    reflection = agent.generate_reflection(records)
                meta_data["memory"].append(reflection)
            ### END OF TASK ###
            if not trajectory:
                # if the final trajectory is obtained from the baseline
                score = records["oracle_score"]
            else:
                # evaluate the final, fresh trajectory
                evaluator = evaluator_router(config_file)
                score = evaluator(
                    trajectory=trajectory,
                    config_file=config_file,
                    page=env.page,
                    client=env.get_page_client(env.page),
                )
            if args.save_trace_enabled:
                env.save_trace(
                    Path(args.result_dir) / "traces" / f"{task_id}.zip"
                )
            scores.append(score)
            if score == 1:
                logger.info(f"[Result] (PASS) {config_file}")
            else:
                logger.info(f"[Result] (FAIL) {config_file}")
            # save the memory
            memory_save_dir = Path(args.result_dir) / "memory"
            if not memory_save_dir.exists():
                memory_save_dir.mkdir(parents=True)
            with open(f"{memory_save_dir}/memory_{task_id}.json", "w") as f:
                memory_save = {
                    "memory": meta_data["memory"],
                    "score": score
                }
                json.dump(memory_save, f, indent=4)
            results[task_id]["score"] = score
            results[task_id]["final_trail_idx"] = trail_idx
        except openai.error.OpenAIError as e:
            logger.info(f"[OpenAI Error] {repr(e)}")
        except Exception as e:
            logger.info(f"[Unhandled Error] {repr(e)}]")
            import traceback
            # write to error file
            with open(Path(args.result_dir) / "error.txt", "a") as f:
                f.write(f"[Config file]: {config_file}\n")
                f.write(f"[Unhandled Error] {repr(e)}\n")
                f.write(traceback.format_exc())  # write stack trace to file
        if render_helper is not None:
            render_helper.close()
    env.close()
    if scores:
        logger.info(f"Average score: {sum(scores) / len(scores)}")
    result_fn = f"results_{args.max_num_attempts}.json"
    # Merge with any results file left by a previous (resumed) run so
    # earlier tasks are not lost when writing the aggregate file.
    if os.path.exists(Path(args.result_dir) / result_fn):
        with open(Path(args.result_dir) / result_fn) as f:
            existing_results = json.load(f)
        existing_results.update(results)
        results = existing_results
    with open(Path(args.result_dir) / result_fn, "w") as f:
        json.dump(results, f, indent=4)
def prepare(args: argparse.Namespace) -> None:
    """One-time setup before a run: render prompt files to JSON and create the result dir."""
    # Convert the prompt python files into their JSON form.
    from agent.prompts import to_json

    to_json.run()
    # Fall back to a timestamped cache directory when none was given.
    result_dir = args.result_dir
    if not result_dir:
        timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
        result_dir = f"cache/results_{timestamp}"
    result_path = Path(result_dir)
    if not result_path.exists():
        result_path.mkdir(parents=True, exist_ok=True)
    args.result_dir = result_dir
    logger.info(f"Create result dir: {result_dir}")
    traces_path = result_path / "traces"
    if not traces_path.exists():
        traces_path.mkdir(parents=True)
    # Remember which log file belongs to this run.
    with open(os.path.join(result_dir, "log_files.txt"), "a+") as f:
        f.write(f"{LOG_FILE_NAME}\n")
def get_unfinished(config_files: list[str], result_dir: str, max_num_attempts: int) -> list[str]:
    """Return the subset of config files whose tasks have no recorded score yet.

    A task counts as finished when ``<result_dir>/results_<max_num_attempts>.json``
    exists and the task's entry contains a "score" key; everything else is
    returned for (re-)execution.  A missing results file means nothing is
    finished, so all config files are returned.
    """
    results = {}
    results_path = f"{result_dir}/results_{max_num_attempts}.json"
    if os.path.exists(results_path):
        with open(results_path) as f:
            results = json.load(f)
    # Set membership keeps the filtering loop O(1) per config file
    # (the original list made this O(n*m)).
    finished_task_ids = {
        task_id for task_id, record in results.items() if "score" in record
    }
    unfinished_configs = []
    for config_file in config_files:
        # Task id is the config's basename without extension, e.g. "42" for "42.json".
        task_id = os.path.basename(config_file).split(".")[0]
        if task_id not in finished_task_ids:
            unfinished_configs.append(config_file)
    return unfinished_configs
def dump_config(args: argparse.Namespace) -> None:
    """Persist the run arguments to <result_dir>/config.json (first run only)."""
    config_file = Path(args.result_dir) / "config.json"
    if config_file.exists():
        # An earlier run already dumped its config; keep it untouched.
        return
    with open(config_file, "w") as f:
        json.dump(vars(args), f, indent=4)
    logger.info(f"Dump config to {config_file}")
if __name__ == "__main__":
    args = config()
    # Fixed pause between actions so pages settle before the next observation.
    args.sleep_after_execution = 2.0
    prepare(args)
    # Resolve the list of task config files from mutually exclusive CLI options.
    test_file_list = []
    if args.test_indexes:
        assert not args.test_file, "Cannot specify both test_indexes and test_file"
        for i in args.test_indexes:
            test_file_list.append(f"config_files/{i}.json")
    elif args.test_file:
        # test_file is a JSON list of config paths; slice to the requested range.
        with open(args.test_file) as f:
            test_file_list = json.load(f)
        st_idx = args.test_start_idx
        ed_idx = args.test_end_idx
        test_file_list = test_file_list[st_idx:ed_idx]
    else:
        # Default: consecutive task ids in [test_start_idx, test_end_idx).
        st_idx = args.test_start_idx
        ed_idx = args.test_end_idx
        for i in range(st_idx, ed_idx):
            test_file_list.append(f"config_files/{i}.json")
    # Outside of debug runs, drop tasks that already have a recorded score.
    if "debug" not in args.result_dir:
        test_file_list = get_unfinished(test_file_list, args.result_dir, args.max_num_attempts)
    if len(test_file_list) == 0:
        logger.info("No task left to run")
    else:
        print(f"Total {len(test_file_list)} tasks left")
        # Headless run with screenshots + traces for later inspection.
        args.render = False
        args.render_screenshot = True
        args.save_trace_enabled = True
        args.current_viewport_only = True
        dump_config(args)
        agent = construct_agent(args)
        test(args, agent, test_file_list)
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
"""Replace the website placeholders with website domains from env_config
Generate the test data"""
import json
from browser_env.env_config import *
def main() -> None:
    """Materialize config_files/test.json (and per-task files) from the raw template."""
    with open("config_files/test.raw.json", "r") as f:
        raw = f.read()
    # Substitute every site placeholder with its domain from env_config.
    placeholder_to_domain = {
        "__GITLAB__": GITLAB,
        "__REDDIT__": REDDIT,
        "__SHOPPING__": SHOPPING,
        "__SHOPPING_ADMIN__": SHOPPING_ADMIN,
        "__WIKIPEDIA__": WIKIPEDIA,
        "__MAP__": MAP,
    }
    for placeholder, domain in placeholder_to_domain.items():
        raw = raw.replace(placeholder, domain)
    with open("config_files/test.json", "w") as f:
        f.write(raw)
    # Split the resolved list into one config file per task.
    data = json.loads(raw)
    for idx, item in enumerate(data):
        with open(f"config_files/{idx}.json", "w") as f:
            json.dump(item, f, indent=2)
if __name__ == "__main__":
main()
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
"""Some executions may fail.
This script checks the recordings, print the task ids.
It deletes the recordings if needed."""
import argparse
import glob
import os
import shutil
import sys
def merge_logs(result_folder: str, args: argparse.Namespace) -> str:
    """Merge the per-run log files listed in log_files.txt into one file.

    Keeps only entries that look complete (at least 3 log lines) and whose
    render_<id>.html recording still exists on disk; optionally deletes
    recordings that have no matching log entry.  Returns the merged log path.
    """
    if not os.path.exists(f"{result_folder}/log_files.txt"):
        sys.exit(1)
    with open(f"{result_folder}/log_files.txt", "r") as f:
        log_files = f.readlines()
    merged_results = {}
    for file in log_files:
        with open(file.strip(), "r") as f:
            lines = f.readlines()
        cur_log: list[str] = []
        index = None
        for line in lines:
            if "[Config file]" in line:
                # A new example starts; flush the previous one if it is
                # complete and its recording still exists.
                if (
                    cur_log
                    and index
                    and os.path.exists(f"{result_folder}/render_{index}.html")
                    and len(cur_log) >= 3
                ):
                    merged_results[index] = cur_log
                # update index and log
                index = line.split("/")[-1].split(".")[0]
                cur_log = [line]
            else:
                cur_log.append(line)
        # Flush the trailing example of this log file (same checks as above).
        if (
            cur_log
            and index
            and os.path.exists(f"{result_folder}/render_{index}.html")
            and len(cur_log) >= 3
        ):
            merged_results[index] = cur_log
    # sort by the (numeric) task id key
    merged_results = dict(
        sorted(merged_results.items(), key=lambda x: int(x[0]))
    )
    merged_log_path = f"{result_folder}/tmp_merged_log.txt"
    with open(merged_log_path, "w") as f:
        for k, v in merged_results.items():
            for line in v:
                f.write(line)
    print(f"Number of examples: {len(merged_results)}")
    # Recordings on disk that never made it into the merged log.
    # NOTE(review): 812 is presumably the full WebArena test-set size — confirm.
    unlog_examples = []
    for i in range(812):
        if (
            os.path.exists(f"{result_folder}/render_{i}.html")
            and str(i) not in merged_results
        ):
            unlog_examples.append(i)
    print(f"Number of unlogged examples: {len(unlog_examples)}")
    print(unlog_examples)
    # Delete unlogged recordings, either unconditionally (--delete_errors)
    # or after interactive confirmation.
    if (
        args.delete_errors
        or input("Do you want to delete these examples? (y/n)") == "y"
    ):
        for idx in unlog_examples:
            os.remove(f"{args.result_folder}/render_{idx}.html")
    unifinished_examples = [
        i for i in range(0, 812) if str(i) not in merged_results
    ]
    print(f"Number of unfinished examples: {len(unifinished_examples)}")
    print(unifinished_examples)
    return merged_log_path
def check_unhandled_errors(args: argparse.Namespace) -> int:
    """Count (and optionally delete) examples whose merged log shows an error.

    Returns the number of examples containing an unhandled/OpenAI error line.
    """
    merged_path = merge_logs(args.result_folder, args)
    with open(merged_path, "r") as fp:
        merged_text = fp.read()
    failed_ids = []
    current_id = None
    for log_line in merged_text.split("\n"):
        if "[Config file]" in log_line:
            # Track which example the following lines belong to.
            current_id = log_line.split("/")[-1].split(".")[0]
        if "[Unhandled Error]" in log_line or "[OpenAI Error]" in log_line:
            failed_ids.append(int(current_id))
    print(f"Number of unhandled errors: {len(failed_ids)}")
    print(failed_ids)
    if (
        args.delete_errors
        or input("Do you want to delete these examples? (y/n)") == "y"
    ):
        for example_id in failed_ids:
            html_path = f"{args.result_folder}/render_{example_id}.html"
            if os.path.exists(html_path):
                os.remove(html_path)
    return len(failed_ids)
def check_unexpected_logout(args: argparse.Namespace) -> int:
    """Count (and optionally delete) recordings that show a logged-out page.

    Returns the number of render_*.html files containing a login-page marker.
    """
    logout_markers = {
        "Creating an account has many benefits: check out faster",
        "Welcome, please sign in",
        "Username or email",
        "Keep me logged in",
    }
    affected_ids = []
    for html_file in glob.glob(f"{args.result_folder}/render_*.html"):
        with open(html_file, "r") as fp:
            page_html = fp.read()
        if any(marker in page_html for marker in logout_markers):
            # Recordings are named render_<task_id>.html.
            example_id = int(
                html_file.split("/")[-1].split(".")[0].split("_")[-1]
            )
            affected_ids.append(example_id)
    print(f"Number of unexpected logout: {len(affected_ids)}")
    print(affected_ids)
    if (
        args.delete_errors
        or input("Do you want to delete these examples? (y/n)") == "y"
    ):
        for example_id in affected_ids:
            target = f"{args.result_folder}/render_{example_id}.html"
            if os.path.exists(target):
                os.remove(target)
    return len(affected_ids)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("result_folder", type=str)
    # Skip the interactive confirmation and delete bad recordings directly.
    parser.add_argument("--delete_errors", action="store_true")
    # Maximum number of bad examples tolerated before exiting with failure.
    parser.add_argument("--tolerance", type=int, default=0)
    args = parser.parse_args()
    n1 = check_unhandled_errors(args)
    n2 = check_unexpected_logout(args)
    # Non-zero exit status signals callers (e.g. CI wrappers) to re-run.
    if n1 + n2 > args.tolerance:
        sys.exit(1)
    else:
        sys.exit(0)
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
import argparse
import base64
import glob
import json
import os
from collections import defaultdict
from typing import Any
from bs4 import BeautifulSoup
def main(result_folder: str, config_json: str) -> None:
    """Convert render_*.html recordings plus the raw task configs into a single
    JSON dump (json_dump.json) with per-step messages and extracted images."""
    all_data = {}
    # Maps each intent-template string to a dense 0-based id: the defaultdict
    # assigns len(template_to_id) to every template seen for the first time.
    template_to_id: dict[str, Any] = defaultdict(lambda: len(template_to_id))
    with open(config_json, "r") as f:
        data_configs = json.load(f)
    data_configs = {int(item["task_id"]): item for item in data_configs}
    # Strip run-time-only fields from each task config and re-derive the rest.
    for k, v in data_configs.items():
        v.pop("require_login")
        v.pop("storage_state")
        v.pop("start_url")
        v.pop("geolocation")
        v.pop("require_reset")
        v.pop("intent_template_id")
        v["intent_template_id"] = template_to_id[v["intent_template"]]
        v["eval_types"] = v["eval"].pop("eval_types")
        if v["eval"]["reference_answers"]:
            v["reference_answers"] = v["eval"].pop("reference_answers")
        if v["eval"]["reference_url"]:
            v["reference_url"] = v["eval"].pop("reference_url")
        v.pop("eval")
        # An exact-match reference answer of "N/A" marks the task unachievable.
        if v.get("reference_answers", {}).get("exact_match", "") == "N/A":
            v["achievable"] = False
        else:
            v["achievable"] = True
    # PASS/FAIL verdict per task id, parsed from the merged log.
    with open(f"{result_folder}/merged_log.txt", "r") as f:
        results = {}
        for line in f:
            if "[Result]" in line:
                id = line.strip().split(".")[-2].split("/")[-1]
                results[int(id)] = True if "(PASS)" in line else False
    files = list(glob.glob(f"{result_folder}/render_*.html"))
    files = [x for x in files if os.path.exists(x)]
    print(f"Total number of files: {len(files)}")
    for render_file in files:
        task_id = int(render_file.split("_")[-1].split(".")[0])
        with open(render_file, "r") as f:
            try:
                content = f.read()
                soup = BeautifulSoup(content, "html.parser")
                # One text observation per step, taken from the <pre> inside
                # each div.state_obv.
                observations = [
                    obv.find("pre").text
                    for obv in soup.find_all("div", {"class": "state_obv"})
                ]
                # Screenshots are inlined as data URIs; keep the base64 payload.
                base64_images = [
                    img["src"].split(",")[1] for img in soup.find_all("img")
                ]
                image_observations = []
                # save image to file and change the value to be path
                image_folder = f"images/{os.path.basename(result_folder)}"
                os.makedirs(image_folder, exist_ok=True)
                for i, image in enumerate(base64_images):
                    image_data = base64.b64decode(image)
                    filename = f"{image_folder}/image_{task_id}_{i}.png"
                    # NOTE(review): this rebinds the outer file handle `f`;
                    # harmless because `content` was already read, but a
                    # distinct name would be clearer.
                    with open(filename, "wb") as f:  # type: ignore[assignment]
                        f.write(image_data)  # type: ignore[arg-type]
                    image_observations.append(filename)
                urls = [
                    url.get_text()
                    for url in soup.find_all("h3", {"class": "url"})
                ]
                actions = [
                    action.get_text()
                    for action in soup.find_all(
                        "div", {"class": "raw_parsed_prediction"}
                    )
                ]
                parsed_actions = [
                    action.get_text()
                    for action in soup.find_all(
                        "div", {"class": "parsed_action"}
                    )
                ]
                # fill action with parsed action if action is empty
                for i in range(len(actions)):
                    if actions[i] == "":
                        actions[i] = parsed_actions[i]
                # Interleave user turns (url + observation + image) with the
                # agent's action as the assistant turn.
                messages = []
                for o, u, a, image in zip(
                    observations, urls, actions, image_observations
                ):
                    messages.append(
                        {
                            "user": f"{u}\n\nobservation:\n{o}",
                            "image": image,
                        }
                    )
                    messages.append({"assistant": a})
                all_data[f"example_{task_id}"] = {
                    **data_configs[task_id],
                    "messages": messages,
                    "success": results.get(task_id, False),
                }
            except Exception as e:
                # Best-effort: a malformed recording is reported and skipped.
                print(e)
                print(f"Error in {render_file}")
    with open(f"{result_folder}/json_dump.json", "w+") as f:
        json.dump(all_data, f, indent=4)
if __name__ == "__main__":
    # CLI entry point: convert render_*.html recordings into a JSON dump.
    cli = argparse.ArgumentParser()
    cli.add_argument("--result_folder", type=str)
    cli.add_argument(
        "--config_json", type=str, default="config_files/test.raw.json"
    )
    parsed = cli.parse_args()
    main(parsed.result_folder, parsed.config_json)
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
"""Simple script to quickly get the observation of a page"""
import json
import re
import time
from typing import Dict, Optional, Tuple, Type, Union, cast
import pytest
from playwright.sync_api import Page, expect
from browser_env import (
ScriptBrowserEnv,
create_id_based_action,
create_key_press_action,
create_playwright_action,
create_scroll_action,
)
from browser_env.env_config import *
HEADLESS = False
def gen_tmp_storage_state() -> None:
    """Write a throwaway config pointing at the shopping-admin auth state."""
    payload = {"storage_state": ".auth/shopping_admin_state.json"}
    with open("scripts/tmp_storage_state.json", "w") as f:
        json.dump(payload, f)
def get_observation(
    observation_type: str, current_viewport_only: bool
) -> None:
    """Replay a short hard-coded action script and print each observation,
    pausing for a human to inspect the output between steps."""
    env = ScriptBrowserEnv(
        observation_type=observation_type,
        current_viewport_only=current_viewport_only,
        headless=HEADLESS,
        sleep_after_execution=2.0,
    )
    env.reset(options={"config_file": f"scripts/tmp_storage_state.json"})
    # NOTE(review): the last line ("page.scroll(down)") is not standard
    # Playwright syntax — confirm create_playwright_action accepts it.
    s = f"""page.goto("http://ec2-3-131-244-37.us-east-2.compute.amazonaws.com:7780/admin/admin/dashboard/")
page.get_by_label("", exact=True).fill("reviews")
page.get_by_label("", exact=True).press("Enter")
page.scroll(down)"""
    action_seq = s.split("\n")
    for action in action_seq:
        action = action.strip()
        obs, success, _, _, info = env.step(create_playwright_action(action))
        print(obs["text"])
        # Pause so a human can inspect the printed observation.
        _ = input("Press enter to continue")
if __name__ == "__main__":
    # Reuse the shopping-admin login state, then dump accessibility-tree
    # observations restricted to the current viewport.
    gen_tmp_storage_state()
    obs_type = "accessibility_tree"
    get_observation(obs_type, current_viewport_only=True)
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
from typing import AsyncGenerator, Generator
import pytest
import pytest_asyncio
from browser_env import AsyncScriptBrowserEnv, ScriptBrowserEnv
HEADLESS = True
SLOW_MO = 0
@pytest.fixture(scope="function")
def script_browser_env() -> Generator[ScriptBrowserEnv, None, None]:
    """Create a ScriptBrowserEnv instance for testing.

    It is automatically closed after the test session.
    This is helpful when the test failed and the browser is still open.
    """
    browser_env = ScriptBrowserEnv(headless=HEADLESS, slow_mo=SLOW_MO)
    yield browser_env
    browser_env.close()
@pytest.fixture(scope="function")
def current_viewport_script_browser_env() -> Generator[
    ScriptBrowserEnv, None, None
]:
    """Yield a viewport-limited ScriptBrowserEnv; closed after the test."""
    browser_env = ScriptBrowserEnv(
        headless=HEADLESS, slow_mo=SLOW_MO, current_viewport_only=True
    )
    yield browser_env
    browser_env.close()
@pytest.fixture(scope="function")
def accessibility_tree_script_browser_env() -> Generator[
    ScriptBrowserEnv, None, None
]:
    """Yield a ScriptBrowserEnv with accessibility-tree observations; closed after the test."""
    browser_env = ScriptBrowserEnv(
        headless=HEADLESS, slow_mo=SLOW_MO, observation_type="accessibility_tree"
    )
    yield browser_env
    browser_env.close()
@pytest.fixture(scope="function")
def accessibility_tree_current_viewport_script_browser_env() -> Generator[
    ScriptBrowserEnv, None, None
]:
    """Yield a viewport-limited, accessibility-tree ScriptBrowserEnv; closed after the test."""
    browser_env = ScriptBrowserEnv(
        headless=HEADLESS,
        slow_mo=SLOW_MO,
        observation_type="accessibility_tree",
        current_viewport_only=True,
    )
    yield browser_env
    browser_env.close()
@pytest_asyncio.fixture(scope="function", autouse=True)
async def async_script_browser_env() -> AsyncGenerator[
    AsyncScriptBrowserEnv, None
]:
    """Yield an AsyncScriptBrowserEnv; closed asynchronously after the test."""
    browser_env = AsyncScriptBrowserEnv(headless=HEADLESS, slow_mo=SLOW_MO)
    yield browser_env
    await browser_env.aclose()
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
import numpy as np
from browser_env import *
def test_is_equivalent() -> None:
    """is_equivalent must compare exactly the identifying fields of each
    action type and ignore all other (randomized) fields."""
    for action_type in ActionTypes.__members__.values():
        action_a = create_random_action()
        action_b = create_random_action()
        if action_a["action_type"] != action_b["action_type"]:
            assert not is_equivalent(action_a, action_b)
        # Force both actions to the type under test, then align the
        # identifying fields one by one.
        action_a["action_type"] = action_type
        action_b["action_type"] = action_type
        match action_type:
            case ActionTypes.MOUSE_CLICK | ActionTypes.MOUSE_HOVER:
                if not np.allclose(action_a["coords"], action_b["coords"]):
                    assert not is_equivalent(action_a, action_b)
                action_a["coords"] = action_b["coords"]
                assert is_equivalent(action_a, action_b)
            case ActionTypes.KEYBOARD_TYPE:
                if action_a["text"] != action_b["text"]:
                    assert not is_equivalent(action_a, action_b)
                action_a["text"] = action_b["text"]
                assert is_equivalent(action_a, action_b)
            case ActionTypes.CLICK | ActionTypes.HOVER | ActionTypes.TYPE:
                if action_a["element_id"] and action_b["element_id"]:
                    if action_a["element_id"] != action_b["element_id"]:
                        assert not is_equivalent(action_a, action_b)
                    action_a["element_id"] = action_b["element_id"]
                    assert is_equivalent(action_a, action_b)
                # NOTE(review): this condition duplicates the one above, so
                # this branch is unreachable dead code; judging by its body it
                # was probably meant to gate on element_role/element_name —
                # confirm against is_equivalent's implementation before fixing.
                elif action_a["element_id"] and action_b["element_id"]:
                    if action_a["element_role"] != action_b["element_role"]:
                        assert not is_equivalent(action_a, action_b)
                    action_a["element_role"] = action_b["element_role"]
                    if action_a["element_name"] != action_b["element_name"]:
                        assert not is_equivalent(action_a, action_b)
                    action_a["element_name"] = action_b["element_name"]
                    assert is_equivalent(action_a, action_b)
                elif action_a["pw_code"] and action_b["pw_code"]:
                    if action_a["pw_code"] != action_b["pw_code"]:
                        assert not is_equivalent(action_a, action_b)
                    action_a["pw_code"] = action_b["pw_code"]
                    assert is_equivalent(action_a, action_b)
                else:
                    action_a["element_id"] = action_b["element_id"]
                    assert is_equivalent(action_a, action_b)
            case ActionTypes.GOTO_URL:
                if action_a["url"] != action_b["url"]:
                    assert not is_equivalent(action_a, action_b)
                action_a["url"] = action_b["url"]
                assert is_equivalent(action_a, action_b)
            case ActionTypes.PAGE_FOCUS:
                if action_a["page_number"] != action_b["page_number"]:
                    assert not is_equivalent(action_a, action_b)
                action_a["page_number"] = action_b["page_number"]
                assert is_equivalent(action_a, action_b)
            case ActionTypes.SCROLL:
                # Only the up/down sense matters, not the exact string.
                da = "up" if "up" in action_a["direction"] else "down"
                db = "up" if "up" in action_b["direction"] else "down"
                if da != db:
                    assert not is_equivalent(action_a, action_b)
                action_a["direction"] = action_b["direction"]
                assert is_equivalent(action_a, action_b)
            case ActionTypes.KEY_PRESS:
                if action_a["key_comb"] != action_b["key_comb"]:
                    assert not is_equivalent(action_a, action_b)
                action_a["key_comb"] = action_b["key_comb"]
                assert is_equivalent(action_a, action_b)
            case ActionTypes.CHECK | ActionTypes.SELECT_OPTION:
                if action_a["pw_code"] != action_b["pw_code"]:
                    assert not is_equivalent(action_a, action_b)
                action_a["pw_code"] = action_b["pw_code"]
                assert is_equivalent(action_a, action_b)
            case ActionTypes.STOP:
                if action_a["answer"] != action_b["answer"]:
                    assert not is_equivalent(action_a, action_b)
                action_a["answer"] = action_b["answer"]
                assert is_equivalent(action_a, action_b)
            case _:
                assert is_equivalent(action_a, action_b)
def test_action2create_function() -> None:
    """Round-trip: the emitted create-call must rebuild an equivalent action."""
    for _ in range(1000):
        sampled = create_random_action()
        rebuilt = eval(action2create_function(sampled))
        assert is_equivalent(sampled, rebuilt)
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
import asyncio
import json
from browser_env import *
# Pre-authenticated cookie jar for www.saucedemo.com (standard_user session),
# in Playwright storage_state format.
auth_json = {
    "cookies": [
        {
            "name": "session-username",
            "value": "standard_user",
            "domain": "www.saucedemo.com",
            "path": "/",
            "httpOnly": False,
            "secure": False,
            "sameSite": "Lax",
        }
    ],
    "origins": [],
}
def test_auth_cookie() -> None:
    """Without cookies saucedemo redirects to login; with the stored cookie it does not."""
    env = ScriptBrowserEnv()
    env.reset()
    _, reward, _, _, info = env.step(
        create_goto_url_action("https://www.saucedemo.com/inventory.html"),
    )
    assert reward == 1
    assert "page" in info and isinstance(info["page"], DetachedPage)
    # Unauthenticated visit is bounced back to the login page.
    assert info["page"].url == "https://www.saucedemo.com/"
    # Use `with` so the handles are closed deterministically (the original
    # `json.dump(..., open(...))` leaked the file objects).
    with open("/tmp/auth.json", "w") as f:
        json.dump(auth_json, f)
    instance_config = {"storage_state": "/tmp/auth.json"}
    with open("/tmp/config.json", "w") as f:
        json.dump(instance_config, f)
    env.reset(options={"config_file": "/tmp/config.json"})
    _, reward, _, _, info = env.step(
        create_goto_url_action("https://www.saucedemo.com/inventory.html"),
    )
    assert reward == 1
    assert "page" in info and isinstance(info["page"], DetachedPage)
    # With the session cookie the inventory page stays accessible.
    assert info["page"].url == "https://www.saucedemo.com/inventory.html"
    env.close()
def test_async_auth_cookie() -> None:
    """Async variant of test_auth_cookie, driven through asyncio.run."""
    env = AsyncScriptBrowserEnv()

    async def _test() -> None:
        await env.areset()
        _, reward, _, _, info = await env.astep(
            create_goto_url_action("https://www.saucedemo.com/inventory.html"),
        )
        assert reward == 1
        assert "page" in info and isinstance(info["page"], DetachedPage)
        # Unauthenticated visit is bounced back to the login page.
        assert info["page"].url == "https://www.saucedemo.com/"
        # Close the temp files deterministically instead of leaking the
        # handles via `json.dump(..., open(...))`.
        with open("/tmp/auth.json", "w") as f:
            json.dump(auth_json, f)
        instance_config = {"storage_state": "/tmp/auth.json"}
        with open("/tmp/config.json", "w") as f:
            json.dump(instance_config, f)
        await env.areset(options={"config_file": "/tmp/config.json"})
        _, reward, _, _, info = await env.astep(
            create_goto_url_action("https://www.saucedemo.com/inventory.html"),
        )
        assert reward == 1
        assert "page" in info and isinstance(info["page"], DetachedPage)
        # With the session cookie the inventory page stays accessible.
        assert info["page"].url == "https://www.saucedemo.com/inventory.html"
        await env.aclose()

    asyncio.run(_test())
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
import re
from typing import Dict, Optional, Tuple, Type, Union, cast
import pytest
from playwright.sync_api import Page, expect
from browser_env import (
ScriptBrowserEnv,
create_id_based_action,
create_key_press_action,
create_playwright_action,
create_scroll_action,
)
HEADLESS = True
SLOW_MO = 0
def test_frame_locator(script_browser_env: ScriptBrowserEnv) -> None:
    """Actions targeting elements inside an iframe execute successfully."""
    env = script_browser_env
    env.reset()
    script = """page.goto("https://www.littlewebhut.com/articles/html_iframe_example/")
page.frame_locator("iframe[name=\\"imgbox\\"]").get_by_role("img").click()"""
    for raw_step in script.split("\n"):
        _, success, _, _, info = env.step(
            create_playwright_action(raw_step.strip())
        )
        assert success
def test_basic(script_browser_env: ScriptBrowserEnv) -> None:
    """Basic single-page actions all succeed on the TodoMVC demo."""
    # click, fill, press, check, goto
    env = script_browser_env
    seq = """page.goto("https://demo.playwright.dev/todomvc/")
page.get_by_placeholder("What needs to be done?").click()
page.get_by_placeholder("What needs to be done?").fill("hello")
page.get_by_placeholder("What needs to be done?").press("Enter")
page.get_by_placeholder("What needs to be done?").fill("world")
page.get_by_placeholder("What needs to be done?").press("Enter")
page.get_by_placeholder("What needs to be done?").fill("yes")
page.get_by_placeholder("What needs to be done?").press("Enter")
page.get_by_placeholder("What needs to be done?").fill("no")
page.get_by_placeholder("What needs to be done?").press("Enter")
page.get_by_role("listitem").filter(has_text="world").get_by_role("checkbox", name="Toggle Todo").check()
page.get_by_role("button", name="Clear completed").click()"""
    env.reset()
    # Each line of the script is executed as one environment step.
    for action in seq.split("\n"):
        action = action.strip()
        _, success, _, _, info = env.step(create_playwright_action(action))
        assert success
def test_hover(script_browser_env: ScriptBrowserEnv) -> None:
    """Hovering an element via a playwright-style action reports success."""
    env = script_browser_env
    env.reset()
    script = """page.goto("https://ianlunn.github.io/Hover/")
page.get_by_role("link", name="Download on GitHub").hover()"""
    for raw_step in script.split("\n"):
        _, success, _, _, info = env.step(
            create_playwright_action(raw_step.strip())
        )
        assert success
def test_select_option(script_browser_env: ScriptBrowserEnv) -> None:
    """Selecting a combobox option via a playwright-style action succeeds."""
    env = script_browser_env
    env.reset()
    script = """page.goto("https://russmaxdesign.github.io/exercise/#link-two")
page.get_by_role("combobox", name="Favourite mammal").select_option("African Wild Dog")"""
    for raw_step in script.split("\n"):
        _, success, _, _, info = env.step(
            create_playwright_action(raw_step.strip())
        )
        assert success
def test_xpath(script_browser_env: ScriptBrowserEnv) -> None:
    """Locating elements by XPath (absolute and attribute-based) works."""
    env = script_browser_env
    seq = """page.goto("https://demo.playwright.dev/todomvc/")
page.goto("https://demo.playwright.dev/todomvc/#/")
page.get_by_placeholder("What needs to be done?").click()
page.get_by_placeholder("What needs to be done?").fill("hello")
page.get_by_placeholder("What needs to be done?").press("Enter")
page.get_by_role("link", name="Completed").click()
page.locator("xpath=/html/body/section/div/header/input").fill("no")
page.get_by_placeholder("What needs to be done?").press("Enter")
page.goto("https://bic-berkeley.github.io/psych-214-fall-2016/string_literals.html")
page.locator("xpath=//*[@id=\'searchbox\']/div/form/input[1]").fill("type")"""
    env.reset()
    # Each line of the script is executed as one environment step.
    for action in seq.split("\n"):
        action = action.strip()
        _, success, _, _, info = env.step(create_playwright_action(action))
        assert success
def test_inter_page_actions(
    script_browser_env: ScriptBrowserEnv,
) -> None:
    """Tab management and history navigation end on the expected page."""
    env = script_browser_env
    seq = """page.goto("https://demo.playwright.dev/todomvc/")
browser.new_tab()
browser.page_focus(0)
browser.page_focus(1)
page.page_close()
page.goto("https://google.com")
page.goto("https://demo.playwright.dev/todomvc/")
page.go_back()
page.go_forward()"""
    env.reset()
    for action in seq.split("\n"):
        action = action.strip()
        _, success, _, _, info = env.step(create_playwright_action(action))
        assert success
    # After go_back + go_forward we should be back on the TodoMVC page.
    assert "https://demo.playwright.dev/todomvc" in info["page"].url
def test_scroll(
    current_viewport_script_browser_env: ScriptBrowserEnv,
) -> None:
    """Scrolling down and then back up should both report success."""
    env = current_viewport_script_browser_env
    env.reset()
    for direction in ("down", "up"):
        _, success, _, _, _ = env.step(create_scroll_action(direction))
        assert success
def test_id_click(
    accessibility_tree_current_viewport_script_browser_env: ScriptBrowserEnv,
) -> None:
    """Clicking elements by accessibility-tree id navigates/toggles as expected."""
    env = accessibility_tree_current_viewport_script_browser_env
    env.reset()
    obs, success, _, _, info = env.step(
        create_playwright_action(
            'page.goto("https://russmaxdesign.github.io/exercise/")'
        )
    )
    assert success
    assert "link 'McKenna/Bell'" in obs["text"]
    # get the id of the link
    element_id = re.search(r"\[(\d+)\] link 'McKenna/Bell'", obs["text"]).group(1)  # type: ignore
    obs, success, _, _, info = env.step(
        create_id_based_action(f"click [{element_id}]")
    )
    assert success
    assert (
        info["page"].url
        == "https://russmaxdesign.github.io/exercise/#link-four"
    )
    # The 'Classification' link only enters the viewport after scrolling.
    obs, success, _, _, info = env.step(create_scroll_action("down"))
    assert "link 'Classification'" in obs["text"]
    element_id = re.search(r"\[(\d+)\] link 'Classification'", obs["text"]).group(1)  # type: ignore
    obs, success, _, _, info = env.step(
        create_id_based_action(f"click [{element_id}]")
    )
    assert success
    assert (
        info["page"].url
        == "https://russmaxdesign.github.io/exercise/#link-two"
    )
    assert "radio 'Weekly'" in obs["text"]
    element_id = re.search(r"\[(\d+)\] radio 'Weekly'", obs["text"]).group(1)  # type: ignore
    obs, success, _, _, info = env.step(
        create_id_based_action(f"click [{element_id}]")
    )
    assert success
    assert "radio 'Weekly'" in obs["text"]
def test_id_hover(
    accessibility_tree_current_viewport_script_browser_env: ScriptBrowserEnv,
) -> None:
    """Hovering an element referenced by its accessibility-tree id succeeds."""
    env = accessibility_tree_current_viewport_script_browser_env
    env.reset()
    obs, success, _, _, info = env.step(
        create_playwright_action(
            'page.goto("https://ianlunn.github.io/Hover/")'
        )
    )
    assert success
    assert "link 'Download on GitHub'" in obs["text"]
    # Look up the element id the tree assigned to the link.
    match = re.search(r"\[(\d+)\] link 'Download on GitHub'", obs["text"])
    obs, success, _, _, info = env.step(
        create_id_based_action(f"hover [{match.group(1)}]")  # type: ignore
    )
    assert success
def test_key_press(
    accessibility_tree_current_viewport_script_browser_env: ScriptBrowserEnv,
) -> None:
    """Typing into a field and pressing key combinations behave as in a browser."""
    env = accessibility_tree_current_viewport_script_browser_env
    env.reset()
    obs, success, _, _, info = env.step(
        create_playwright_action(
            'page.goto("https://russmaxdesign.github.io/exercise/")'
        )
    )
    assert success
    assert "textbox 'Full name'" in obs["text"]
    element_id = re.search(r"\[(\d+)\] textbox 'Full name'", obs["text"]).group(1)  # type: ignore
    s = "My Name IS XYZ"
    # NOTE(review): trailing [0] presumably means "don't press Enter after
    # typing" — confirm against create_id_based_action's grammar.
    obs, success, _, _, info = env.step(
        create_id_based_action(f"type [{element_id}] [{s}] [0]")
    )
    assert success
    expect(env.page.get_by_label("Full name")).to_be_focused()
    expect(env.page.get_by_label("Full name")).to_have_value(s)
    obs, success, _, _, info = env.step(
        create_id_based_action("press [meta+a]")
    )
    assert success
    env.page.get_by_label("Full name").type(s)
    expect(env.page.get_by_label("Full name")).to_have_value(s)
    obs, success, _, _, info = env.step(create_key_press_action("Enter"))
    assert success
    # Enter moves focus to the next field of the form.
    expect(env.page.get_by_label("Email")).to_be_focused()
def test_id_type(
    accessibility_tree_current_viewport_script_browser_env: ScriptBrowserEnv,
) -> None:
    """`type [id] [text]` fills the referenced textbox with the given text."""
    env = accessibility_tree_current_viewport_script_browser_env
    env.reset()
    obs, success, _, _, info = env.step(
        create_playwright_action(
            'page.goto("https://russmaxdesign.github.io/exercise/")'
        )
    )
    assert success
    assert "textbox 'Full name'" in obs["text"]
    s = "My Name IS XYZ"
    element_id = re.search(r"\[(\d+)\] textbox 'Full name'", obs["text"]).group(1)  # type: ignore
    obs, success, _, _, info = env.step(
        create_id_based_action(f"type [{element_id}] [{s}]")
    )
    assert success
    locator = env.page.get_by_label("Full name")
    expect(locator).to_have_value(s)
def test_e2e_id_based_actions(
    accessibility_tree_script_browser_env: ScriptBrowserEnv,
) -> None:
    """End-to-end smoke test chaining id-based actions across tabs and history."""
    env = accessibility_tree_script_browser_env
    env.reset()
    obs, *_ = env.step(
        create_id_based_action(
            "goto [https://russmaxdesign.github.io/exercise/]"
        )
    )
    element_id = re.search(r"\[(\d+)\] link 'What are mammals\?'", obs["text"]).group(1)  # type: ignore
    obs, *_ = env.step(create_id_based_action(f"click [{element_id}]"))
    element_id = re.search(r"\[(\d+)\] textbox 'Email'", obs["text"]).group(1)  # type: ignore
    env.step(
        create_id_based_action(f"type [{element_id}] [test@gmail.com] [0]")
    )
    env.step(create_id_based_action("scroll [down]"))
    env.step(create_id_based_action("scroll [up]"))
    env.step(create_id_based_action("new_tab"))
    env.step(create_id_based_action("tab_focus [0]"))
    env.step(create_id_based_action("tab_focus [1]"))
    env.step(create_id_based_action("goto [https://example.com/]"))
    env.step(create_id_based_action("go_back"))
    # go_forward returns to the page visited before go_back.
    x = env.step(create_id_based_action("go_forward"))
    assert x[-1]["page"].url == "https://example.com/"
    # The first tab kept its own navigation state.
    x = env.step(create_id_based_action("tab_focus [0]"))
    assert (
        x[-1]["page"].url
        == "https://russmaxdesign.github.io/exercise/#link-one"
    )
def test_id_delete_input(
    accessibility_tree_current_viewport_script_browser_env: ScriptBrowserEnv,
) -> None:
    """Fill a textbox, clear it via select-all + backspace, then retype.

    Regression test for replacing (not appending to) existing input text.
    """
    env = accessibility_tree_current_viewport_script_browser_env
    env.reset()
    obs, success, _, _, info = env.step(
        create_playwright_action(
            'page.goto("https://russmaxdesign.github.io/exercise/")'
        )
    )
    assert success
    assert "textbox 'Full name'" in obs["text"]
    s = "My Name IS XYZ"
    element_id = re.search(r"\[(\d+)\] textbox 'Full name'", obs["text"]).group(1)  # type: ignore
    obs, success, _, _, info = env.step(
        create_id_based_action(f"type [{element_id}] [{s}]")
    )
    assert success
    expect(env.page.get_by_label("Full name")).to_have_value(s)
    # Focus the field so keyboard chords target it.
    obs, success, _, _, info = env.step(
        create_id_based_action(f"click [{element_id}]")
    )
    assert success
    # NOTE(review): "Meta+a" is the macOS select-all chord; presumably the
    # env/browser maps it portably — confirm this passes on Linux CI.
    obs, success, _, _, info = env.step(
        create_id_based_action("press [Meta+a]")  # was a placeholder-free f-string
    )
    assert success
    obs, success, _, _, info = env.step(
        create_id_based_action("press [backspace]")
    )
    assert success
    new_s = "NEW"
    obs, success, _, _, info = env.step(
        create_id_based_action(f"type [{element_id}] [{new_s}]")
    )
    assert success  # was missing: keep the per-step check pattern consistent
    expect(env.page.get_by_label("Full name")).to_have_value(new_s)
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/tests/test_browser_env/test_script_browser_env.py | exps/webarena_exp/tests/test_browser_env/test_script_browser_env.py | import asyncio
import collections
import json
import tempfile
from typing import Callable, Dict, Optional, Tuple, Type, Union, cast
import pytest
from gymnasium.vector import AsyncVectorEnv
from playwright.sync_api import Page
from browser_env import (
Action,
AsyncScriptBrowserEnv,
DetachedPage,
ScriptBrowserEnv,
create_focus_and_click_action,
create_goto_url_action,
create_keyboard_type_action,
create_playwright_action,
create_scroll_action,
)
from browser_env.actions import create_id_based_action
from browser_env.env_config import (
ACCOUNTS,
GITLAB,
REDDIT,
SHOPPING,
SHOPPING_ADMIN,
)
def test_script_browser_env(script_browser_env: ScriptBrowserEnv) -> None:
    """Navigate example.com and follow links via focus-and-click actions."""
    env = script_browser_env
    env.reset()
    navigation = [
        create_goto_url_action("http://www.example.com"),
        create_focus_and_click_action(
            element_role="link",
            element_name="More",
        ),
        create_focus_and_click_action(
            element_role="link",
            element_name="2606",
        ),
    ]
    info = None
    for action in navigation:
        _, _, _, _, info = env.step(action)
    # The final click must land on the RFC 2606 document.
    assert isinstance(info["page"], DetachedPage)
    assert info["page"].url == "https://www.rfc-editor.org/rfc/rfc2606.html"
@pytest.mark.asyncio
async def test_async_script_browser_env(
    async_script_browser_env: AsyncScriptBrowserEnv,
) -> None:
    """Async variant of the example.com link-navigation smoke test."""
    env = async_script_browser_env
    await env.areset()
    info = None
    for action in (
        create_goto_url_action("http://www.example.com"),
        create_focus_and_click_action(
            element_role="link",
            element_name="More",
        ),
        create_focus_and_click_action(
            element_role="link",
            element_name="2606",
        ),
    ):
        _, _, _, _, info = await env.astep(action)
    assert isinstance(info["page"], DetachedPage)
    assert info["page"].url == "https://www.rfc-editor.org/rfc/rfc2606.html"
def collate_actions(actions: list[Action]) -> dict[str, list[object]]:
    """Transpose a list of action dicts into a dict of per-key value lists."""
    batched: dict[str, list[object]] = collections.defaultdict(list)
    for single_action in actions:
        for field, field_value in single_action.items():
            batched[field].append(field_value)
    return batched
@pytest.mark.skip(reason="Gym doesn't support self-defined observations")
def test_parallel_script_browser_env() -> None:
    """Drive two ScriptBrowserEnvs in lockstep through an AsyncVectorEnv.

    Skipped: gymnasium's vector envs cannot batch the custom observation
    dicts this environment produces.
    """
    vector_env = AsyncVectorEnv(
        [
            lambda: ScriptBrowserEnv(),
            lambda: ScriptBrowserEnv(),
        ],
        shared_memory=True,
    )
    vector_env.reset()
    # Both envs open the same start page.
    vector_env.step(
        collate_actions(
            [
                create_goto_url_action("http://www.example.com"),
            ]
            * 2
        )
    )
    # Both envs follow the "More" link.
    vector_env.step(
        collate_actions(
            [
                create_focus_and_click_action(
                    element_role="link",
                    element_name="More",
                ),
            ]
            * 2
        )
    )
    # The envs now diverge: one clicks RFC 2606, the other RFC 6761.
    _, _, _, _, info = vector_env.step(
        collate_actions(
            [
                create_focus_and_click_action(
                    element_role="link",
                    element_name="2606",
                ),
                create_focus_and_click_action(
                    element_role="link",
                    element_name="6761",
                ),
            ]
        )
    )
    # assert is_bearable(info["page"].tolist(), list[DetachedPage])
    assert info["page"][0].url == "https://www.rfc-editor.org/rfc/rfc2606.html"
    assert info["page"][1].url == "https://www.rfc-editor.org/rfc/rfc6761.html"
    vector_env.close() # type: ignore[no-untyped-call]
def test_focus_placeholder_and_label(
    script_browser_env: ScriptBrowserEnv,
) -> None:
    """Log into the applitools demo using placeholder/label-based targeting."""
    env = script_browser_env
    env.reset()
    login_sequence = (
        create_goto_url_action("https://demo.applitools.com"),
        create_focus_and_click_action("placeholder", "Enter your username"),
        create_keyboard_type_action("abc"),
        create_focus_and_click_action("placeholder", "Enter your password"),
        create_keyboard_type_action("123"),
        create_focus_and_click_action("label", "Remember Me"),
        create_focus_and_click_action("link", "Sign in"),
    )
    info = None
    for action in login_sequence:
        _, ok, _, _, info = env.step(action)
        assert ok
    # Successful login redirects to the app page.
    assert info["page"].url == "https://demo.applitools.com/app.html"
def test_html_current_viewport(
    current_viewport_script_browser_env: ScriptBrowserEnv,
) -> None:
    """The HTML observation only covers the currently visible viewport."""
    above_fold = "detailed information about how mammals could be classified."
    below_fold = "Types of mammals"
    env = current_viewport_script_browser_env
    env.reset()
    obs, ok, _, _, info = env.step(
        create_playwright_action(
            'page.goto("https://russmaxdesign.github.io/exercise/")'
        )
    )
    assert ok
    # Before scrolling only the top-of-page text is observable.
    assert above_fold in obs["text"]
    assert below_fold not in obs["text"]
    obs, ok, _, _, info = env.step(create_scroll_action("down"))
    assert ok
    # After scrolling the visible region has moved past the intro text.
    assert above_fold not in obs["text"]
    assert below_fold in obs["text"]
def test_accessibility_tree(
    accessibility_tree_script_browser_env: ScriptBrowserEnv,
) -> None:
    """The full accessibility tree exposes nodes from the whole page."""
    env = accessibility_tree_script_browser_env
    env.reset()
    obs, ok, _, _, info = env.step(
        create_playwright_action(
            'page.goto("https://russmaxdesign.github.io/exercise/")'
        )
    )
    assert ok
    for expected_node in ("checkbox 'Yes'", "button 'Submit'"):
        assert expected_node in obs["text"]
def test_accessibility_tree_viewport(
    accessibility_tree_current_viewport_script_browser_env: ScriptBrowserEnv,
) -> None:
    """Viewport-restricted accessibility trees track the scroll position."""
    top_marker = "combobox 'Favourite mammal'"
    middle_marker = "gridcell 'Canyon bat'"
    bottom_marker = "heading 'Useful links'"
    env = accessibility_tree_current_viewport_script_browser_env
    env.reset()

    def visibility(text: str) -> tuple[bool, bool, bool]:
        # (top, middle, bottom) marker presence in the observation.
        return (top_marker in text, middle_marker in text, bottom_marker in text)

    obs, ok, _, _, info = env.step(
        create_playwright_action(
            'page.goto("https://russmaxdesign.github.io/exercise/")'
        )
    )
    assert ok
    assert visibility(obs["text"]) == (True, False, False)
    obs, ok, _, _, info = env.step(create_scroll_action("down"))
    assert ok
    assert visibility(obs["text"]) == (False, True, False)
    obs, ok, _, _, info = env.step(create_scroll_action("down"))
    assert ok
    assert visibility(obs["text"]) == (False, True, True)
def test_multiple_start_url(script_browser_env: ScriptBrowserEnv) -> None:
    """`start_url` entries joined by " |AND| " each open in their own tab."""
    import os

    temp_config = tempfile.NamedTemporaryFile("w", delete=False)
    config = {
        "require_login": False,
        "start_url": f"{REDDIT} |AND| {REDDIT}/forums",
    }
    json.dump(config, temp_config)
    temp_config.close()
    try:
        env = script_browser_env
        env.reset(options={"config_file": temp_config.name})
        assert len(env.context.pages) == 2
        assert env.context.pages[0].url == f"{REDDIT}/"
        assert env.context.pages[1].url == f"{REDDIT}/forums", env.context.pages[
            1
        ].url
    finally:
        # The file was created with delete=False; remove it ourselves so
        # repeated runs do not accumulate temp files (previously leaked).
        os.remove(temp_config.name)
def test_observation_tab_information(
    accessibility_tree_current_viewport_script_browser_env: ScriptBrowserEnv,
) -> None:
    """The observation text is prefixed with the titles of all open tabs."""
    env = accessibility_tree_current_viewport_script_browser_env
    env.reset()
    obs, *_ = env.step(
        create_id_based_action(
            "goto [https://russmaxdesign.github.io/exercise/]"
        )
    )
    obs, *_ = env.step(create_id_based_action("new_tab"))
    # Fixed malformed "https:///www.google.com" (triple slash) that only
    # worked because browsers normalize the empty authority.
    obs, *_ = env.step(
        create_id_based_action("goto [https://www.google.com]")
    )
    assert obs["text"].startswith(  # type: ignore[union-attr]
        "Tab 0: Exercise page for keyboard and screen reader use | Tab 1 (current): Google"
    )
    obs, *_ = env.step(create_id_based_action("tab_focus [0]"))
    assert obs["text"].startswith(  # type: ignore[union-attr]
        "Tab 0 (current): Exercise page for keyboard and screen reader use | Tab 1: Google"
    )
def test_accessibility_tree_observation_update(
    accessibility_tree_current_viewport_script_browser_env: ScriptBrowserEnv,
) -> None:
    """Filling a field shows up in the next accessibility-tree observation."""
    env = accessibility_tree_current_viewport_script_browser_env
    env.reset()
    env.step(
        create_playwright_action(
            "page.goto('https://russmaxdesign.github.io/exercise/')"
        )
    )
    obs, *_ = env.step(
        create_playwright_action(
            'page.get_by_label("Full name").fill("UNIQUE_NAME")'
        )
    )
    assert "UNIQUE_NAME" in obs["text"]
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/tests/test_browser_env/test_playwright_actions.py | exps/webarena_exp/tests/test_browser_env/test_playwright_actions.py | from typing import Dict, Generator, Optional, Tuple, Type, Union, cast
import pytest
from playwright.sync_api import Page
from browser_env import ScriptBrowserEnv, create_playwright_action
HEADLESS = True
SLOW_MO = 0
def test_frame_locator(script_browser_env: ScriptBrowserEnv) -> None:
    """Click an element inside an iframe through a frame locator."""
    env = script_browser_env
    seq = """page.goto("https://www.littlewebhut.com/articles/html_iframe_example/")
    page.frame_locator("iframe[name=\\"imgbox\\"]").get_by_role("img").click()"""
    env.reset()
    for scripted_line in seq.split("\n"):
        _, ok, _, _, _ = env.step(create_playwright_action(scripted_line.strip()))
        assert ok
def test_basic(script_browser_env: ScriptBrowserEnv) -> None:
    """Exercise click, fill, press, check and goto on the TodoMVC demo."""
    env = script_browser_env
    seq = """page.goto("https://demo.playwright.dev/todomvc/")
    page.get_by_placeholder("What needs to be done?").click()
    page.get_by_placeholder("What needs to be done?").fill("hello")
    page.get_by_placeholder("What needs to be done?").press("Enter")
    page.get_by_placeholder("What needs to be done?").fill("world")
    page.get_by_placeholder("What needs to be done?").press("Enter")
    page.get_by_placeholder("What needs to be done?").fill("yes")
    page.get_by_placeholder("What needs to be done?").press("Enter")
    page.get_by_placeholder("What needs to be done?").fill("no")
    page.get_by_placeholder("What needs to be done?").press("Enter")
    page.get_by_role("listitem").filter(has_text="world").get_by_role("checkbox", name="Toggle Todo").check()
    page.get_by_role("button", name="Clear completed").click()"""
    env.reset()
    for scripted_line in seq.split("\n"):
        _, ok, _, _, _ = env.step(create_playwright_action(scripted_line.strip()))
        assert ok
@pytest.mark.skip(reason="not important, but the site is flaky")
def test_hover(script_browser_env: ScriptBrowserEnv) -> None:
    """Hover over a link inside the w3schools result iframe."""
    env = script_browser_env
    seq = """page.goto("https://www.w3schools.com/cssref/tryit.php?filename=trycss_sel_hover")
    page.frame_locator("iframe[name=\\'iframeResult\\']").get_by_role("link", name="w3schools.com").hover()"""
    env.reset()
    for scripted_line in seq.split("\n"):
        _, ok, _, _, _ = env.step(create_playwright_action(scripted_line.strip()))
        assert ok
@pytest.mark.skip(reason="not important, but the site is flaky")
def test_select_option(script_browser_env: ScriptBrowserEnv) -> None:
    """Select a combobox option inside the w3schools result iframe."""
    env = script_browser_env
    seq = """page.goto("https://www.w3schools.com/tags/tryit.asp?filename=tryhtml_select")
    page.frame_locator("iframe[name=\\'iframeResult\\']").get_by_role("combobox", name="Choose a car:").select_option("opel")"""
    env.reset()
    for scripted_line in seq.split("\n"):
        _, ok, _, _, _ = env.step(create_playwright_action(scripted_line.strip()))
        assert ok
def test_xpath(script_browser_env: ScriptBrowserEnv) -> None:
    """Locate elements via xpath= locators in playwright actions."""
    env = script_browser_env
    seq = """page.goto("https://demo.playwright.dev/todomvc/")
    page.goto("https://demo.playwright.dev/todomvc/#/")
    page.get_by_placeholder("What needs to be done?").click()
    page.get_by_placeholder("What needs to be done?").fill("hello")
    page.get_by_placeholder("What needs to be done?").press("Enter")
    page.get_by_role("link", name="Completed").click()
    page.locator("xpath=/html/body/section/div/header/input").fill("no")
    page.get_by_placeholder("What needs to be done?").press("Enter")
    page.goto("https://bic-berkeley.github.io/psych-214-fall-2016/string_literals.html")
    page.locator("xpath=//*[@id=\'searchbox\']/div/form/input[1]").fill("type")"""
    env.reset()
    for scripted_line in seq.split("\n"):
        _, ok, _, _, _ = env.step(create_playwright_action(scripted_line.strip()))
        assert ok
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/tests/test_evaluation_harness/test_evaluators.py | exps/webarena_exp/tests/test_evaluation_harness/test_evaluators.py | import json
import os
import random
from glob import glob
from pathlib import Path
from typing import Any
import pytest
from py import test
from agent import Agent, TeacherForcingAgent
from browser_env import ActionTypes, ScriptBrowserEnv
from browser_env.env_config import *
from evaluation_harness import (
HTMLContentEvaluator,
StringEvaluator,
URLEvaluator,
)
from evaluation_harness.evaluators import EvaluatorComb
IN_GITHUB_ACTIONS = os.getenv("GITHUB_ACTIONS") == "true"
HEADLESS = True
config_file_folder = "tests/test_evaluation_harness/configs"
def tf_roll_out(
    agent: Agent, env: ScriptBrowserEnv, config_file: str
) -> list[Any]:
    """Roll the agent out with teacher-forced actions until it emits STOP.

    Returns the alternating [state, action, state, ...] trajectory.
    """
    first_obs, first_info = env.reset(options={"config_file": config_file})
    trajectory: list[Any] = [{"observation": first_obs, "info": first_info}]
    while True:
        next_action = agent.next_action(
            trajectory=trajectory, intent="", meta_data={}
        )
        trajectory.append(next_action)
        if next_action["action_type"] == ActionTypes.STOP:
            return trajectory
        # Proceed to the next step and record the resulting state.
        obs, _, _, _, info = env.step(next_action)
        trajectory.append({"observation": obs, "info": info})
def test_string_match_success(
    script_browser_env: ScriptBrowserEnv,
) -> None:
    """A stop answer equal to the reference scores 1.0."""
    env = script_browser_env
    config_file = f"{config_file_folder}/string_match.json"
    agent = TeacherForcingAgent()
    agent.set_action_set_tag(tag="playwright")
    agent.set_actions("""page.stop("The date is 1985/04/18")""")
    trajectory = tf_roll_out(agent, env, config_file)
    evaluator = StringEvaluator()
    score = evaluator(
        trajectory, config_file, env.page, env.get_page_client(env.page)
    )
    assert score == 1.0
def test_string_match_fail(script_browser_env: ScriptBrowserEnv) -> None:
    """A stop answer that differs from the reference scores 0.0."""
    env = script_browser_env
    config_file = f"{config_file_folder}/string_match.json"
    agent = TeacherForcingAgent()
    agent.set_action_set_tag(tag="playwright")
    agent.set_actions("""page.stop("The date is 1936/04/18")""")
    trajectory = tf_roll_out(agent, env, config_file)
    evaluator = StringEvaluator()
    score = evaluator(
        trajectory, config_file, env.page, env.get_page_client(env.page)
    )
    assert score == 0.0
def test_url_exact_match_success(script_browser_env: ScriptBrowserEnv) -> None:
    """Ending on the reference URL scores 1.0."""
    env = script_browser_env
    config_file = f"{config_file_folder}/url_exact_match.json"
    agent = TeacherForcingAgent()
    agent.set_action_set_tag(tag="playwright")
    action_seq = f"""page.goto("https://www.google.com/")
    page.stop()"""
    agent.set_actions(action_seq)
    trajectory = tf_roll_out(agent, env, config_file)
    evaluator = URLEvaluator()
    score = evaluator(
        trajectory, config_file, env.page, env.get_page_client(env.page)
    )
    assert score == 1.0
def test_url_exact_match_fail(script_browser_env: ScriptBrowserEnv) -> None:
    """Ending on an unrelated URL scores 0.0."""
    env = script_browser_env
    config_file = f"{config_file_folder}/url_exact_match.json"
    agent = TeacherForcingAgent()
    agent.set_action_set_tag(tag="playwright")
    action_seq = f"""page.goto("{GITLAB}")
    page.stop()"""
    agent.set_actions(action_seq)
    trajectory = tf_roll_out(agent, env, config_file)
    evaluator = URLEvaluator()
    score = evaluator(
        trajectory, config_file, env.page, env.get_page_client(env.page)
    )
    # Aid debugging when the env unexpectedly lands elsewhere.
    print(env.page.url)
    assert score == 0.0
def test_html_content_match_success(
    script_browser_env: ScriptBrowserEnv,
) -> None:
    """Required content present in the page HTML scores 1.0."""
    env = script_browser_env
    config_file = f"{config_file_folder}/html_content_exact_match.json"
    agent = TeacherForcingAgent()
    agent.set_action_set_tag(tag="playwright")
    action_seq = f"""page.goto("https://russmaxdesign.github.io/exercise")
    page.stop()"""
    agent.set_actions(action_seq)
    trajectory = tf_roll_out(agent, env, config_file)
    evaluator = HTMLContentEvaluator()
    score = evaluator(
        trajectory, config_file, env.page, env.get_page_client(env.page)
    )
    assert score == 1.0
def test_html_content_match_fail(script_browser_env: ScriptBrowserEnv) -> None:
    """Required content absent from the page HTML scores 0.0."""
    env = script_browser_env
    config_file = f"{config_file_folder}/html_content_exact_match.json"
    agent = TeacherForcingAgent()
    agent.set_action_set_tag(tag="playwright")
    action_seq = """page.goto("https://www.google.com/")
    page.stop()"""
    agent.set_actions(action_seq)
    trajectory = tf_roll_out(agent, env, config_file)
    evaluator = HTMLContentEvaluator()
    score = evaluator(
        trajectory, config_file, env.page, env.get_page_client(env.page)
    )
    assert score == 0.0
def test_html_content_element_match_success(
    script_browser_env: ScriptBrowserEnv,
) -> None:
    """Element-scoped content checks pass when the form is filled correctly."""
    env = script_browser_env
    config_file = f"{config_file_folder}/html_content_element_exact_match.json"
    agent = TeacherForcingAgent()
    agent.set_action_set_tag(tag="playwright")
    action_seq = f"""page.goto("https://russmaxdesign.github.io/exercise/")
    page.get_by_label("Full name").fill("Hello World")
    page.get_by_label("Email").click()
    page.get_by_label("Email").fill("alexisxy@hotmail.com")
    page.stop()"""
    agent.set_actions(action_seq)
    trajectory = tf_roll_out(agent, env, config_file)
    evaluator = HTMLContentEvaluator()
    score = evaluator(
        trajectory, config_file, env.page, env.get_page_client(env.page)
    )
    assert score == 1.0
def test_html_content_element_match_fail(
    script_browser_env: ScriptBrowserEnv,
) -> None:
    """Element-scoped content checks fail when the filled value is wrong."""
    env = script_browser_env
    config_file = f"{config_file_folder}/html_content_element_exact_match.json"
    agent = TeacherForcingAgent()
    agent.set_action_set_tag(tag="playwright")
    action_seq = f"""page.goto("https://russmaxdesign.github.io/exercise/")
    page.get_by_label("Full name").fill("Hello")
    page.get_by_label("Email").click()
    page.get_by_label("Email").fill("alexisxy@hotmail.com")
    page.stop()"""
    agent.set_actions(action_seq)
    trajectory = tf_roll_out(agent, env, config_file)
    evaluator = HTMLContentEvaluator()
    score = evaluator(
        trajectory, config_file, env.page, env.get_page_client(env.page)
    )
    assert score == 0.0
def test_html_content_url_comb_success(
    script_browser_env: ScriptBrowserEnv,
) -> None:
    """URL and HTML-content evaluators combined multiply to 1.0 on success."""
    env = script_browser_env
    config_file = f"{config_file_folder}/html_content_url_comb.json"
    agent = TeacherForcingAgent()
    agent.set_action_set_tag(tag="playwright")
    action_seq = f"""page.goto("https://russmaxdesign.github.io/exercise/")
    page.get_by_label("Full name").fill("Hello World")
    page.get_by_label("Email").click()
    page.get_by_label("Email").fill("alexisxy@hotmail.com")
    page.stop()"""
    agent.set_actions(action_seq)
    trajectory = tf_roll_out(agent, env, config_file)
    combined = EvaluatorComb([URLEvaluator(), HTMLContentEvaluator()])
    score = combined(
        trajectory, config_file, env.page, env.get_page_client(env.page)
    )
    assert score == 1.0
@pytest.mark.skipif(
    IN_GITHUB_ACTIONS, reason="Won't work using the demo sites"
)
def test_func_success(
    script_browser_env: ScriptBrowserEnv,
) -> None:
    """HTMLContentEvaluator passes on the func_eval_success config."""
    env = script_browser_env
    config_file = f"{config_file_folder}/func_eval_success.json"
    agent = TeacherForcingAgent()
    agent.set_action_set_tag(tag="playwright")
    agent.set_actions("""page.stop()""")
    trajectory = tf_roll_out(agent, env, config_file)
    evaluator = HTMLContentEvaluator()
    score = evaluator(
        trajectory, config_file, env.page, env.get_page_client(env.page)
    )
    assert score == 1.0
@pytest.mark.skipif(
    IN_GITHUB_ACTIONS, reason="Won't work using the demo sites"
)
def test_func_fail(
    script_browser_env: ScriptBrowserEnv,
) -> None:
    """HTMLContentEvaluator scores 0.0 on the func_eval_fail config."""
    env = script_browser_env
    config_file = f"{config_file_folder}/func_eval_fail.json"
    agent = TeacherForcingAgent()
    agent.set_action_set_tag(tag="playwright")
    agent.set_actions("""page.stop()""")
    trajectory = tf_roll_out(agent, env, config_file)
    evaluator = HTMLContentEvaluator()
    score = evaluator(
        trajectory, config_file, env.page, env.get_page_client(env.page)
    )
    assert score == 0.0
def test_func_url_func_last_success(
    script_browser_env: ScriptBrowserEnv,
) -> None:
    """func_url_func_1 config evaluates to 1.0 after visiting the comment page."""
    env = script_browser_env
    config_file = f"{config_file_folder}/func_url_func_1.json"
    agent = TeacherForcingAgent()
    agent.set_action_set_tag(tag="playwright")
    action_seq = f"""page.goto("{REDDIT}/f/wallstreetbets/50431/-/comment/676875")
    page.stop()"""
    agent.set_actions(action_seq)
    trajectory = tf_roll_out(agent, env, config_file)
    evaluator = HTMLContentEvaluator()
    score = evaluator(
        trajectory, config_file, env.page, env.get_page_client(env.page)
    )
    assert score == 1.0
def test_func_url_func_page_success(
    script_browser_env: ScriptBrowserEnv,
) -> None:
    """func-locator evaluation passes against concrete GitLab URLs.

    The checked-in config uses a __GITLAB__ placeholder; materialize it into
    a temporary config, run the rollout, and always clean the temp file up.
    """
    config_file = f"{config_file_folder}/func_url_func_2.json"
    with open(config_file, "r") as f:
        configs = json.load(f)
    # Substitute the placeholder in every program_html target (was duplicated
    # per-index code that would silently skip any additional entries).
    for target in configs["eval"]["program_html"]:
        target["url"] = target["url"].replace("__GITLAB__", GITLAB)
    tmp_config = config_file.replace(".json", ".tmp.json")
    with open(tmp_config, "w") as f:  # "w+" was unnecessary: write-only
        json.dump(configs, f, indent=4)
    try:
        agent = TeacherForcingAgent()
        agent.set_action_set_tag(tag="playwright")
        agent.set_actions("""page.stop()""")
        env = script_browser_env
        trajectory = tf_roll_out(agent, env, tmp_config)
        evaluator = HTMLContentEvaluator()
        score = evaluator(
            trajectory, tmp_config, env.page, env.get_page_client(env.page)
        )
        assert score == 1.0
    finally:
        # Previously only removed on success, leaking the file on failure.
        os.remove(tmp_config)
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/tests/test_evaluation_harness/test_helper_functions.py | exps/webarena_exp/tests/test_evaluation_harness/test_helper_functions.py | import json
import os
from pathlib import Path
from browser_env import ScriptBrowserEnv
from browser_env.env_config import *
from evaluation_harness.helper_functions import (
gitlab_get_project_memeber_role,
)
HEADLESS = True
config_file_folder = "tests/test_evaluation_harness/configs"
def test_gitlab_get_project_memeber_role(
    script_browser_env: ScriptBrowserEnv,
) -> None:
    """gitlab_get_project_memeber_role reads member roles off the members page.

    (The test keeps the upstream helper's "memeber" spelling — it is part of
    the public API and cannot be renamed here.)
    """
    env = script_browser_env
    config_file = f"{config_file_folder}/tmp_config.json"
    with open(config_file, "w") as f:
        json.dump({"storage_state": ".auth/gitlab_state.json"}, f)
    try:
        env.reset(options={"config_file": config_file})
        env.page.goto(f"{GITLAB}/primer/design/-/project_members")
        assert gitlab_get_project_memeber_role(env.page, "byteblaze") == "Developer"
        assert gitlab_get_project_memeber_role(env.page, "primer") == "Owner"
    finally:
        # Remove the temp config even when an assertion fails (previously
        # leaked on failure).
        os.remove(config_file)
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/evaluation_harness/evaluators.py | exps/webarena_exp/evaluation_harness/evaluators.py | """base class for evaluation"""
# answer string match
import collections
import html
import importlib
import json
import time
import urllib
from pathlib import Path
from typing import Any, Tuple, Union
from beartype import beartype
from nltk.tokenize import word_tokenize # type: ignore
from playwright.sync_api import CDPSession, Page
from browser_env.actions import Action
from browser_env.utils import StateInfo
from evaluation_harness.helper_functions import (
PseudoPage,
gitlab_get_project_memeber_role,
llm_fuzzy_match,
llm_ua_match,
reddit_get_post_url,
shopping_get_latest_order_url,
shopping_get_sku_latest_review_author,
shopping_get_sku_latest_review_rating,
)
Trajectory = list[Union[Action, StateInfo]]
class Evaluator(object):
    """Base class for trajectory evaluators."""

    def __init__(self, eval_tag: str = "") -> None:
        self.eval_tag = eval_tag

    @beartype
    def __call__(
        self,
        trajectory: Trajectory,
        config_file: Path | str,
        page: Page | PseudoPage,
        client: CDPSession,
    ) -> float:
        raise NotImplementedError

    @staticmethod
    def get_last_action(trajectory: Trajectory) -> Action:
        """Return the trailing action; the trajectory must end with one."""
        try:
            # is_bearable(trajectory[-1], Action)
            final_entry = trajectory[-1]
        except Exception:
            raise ValueError(
                "The last element of trajectory should be an action, add a fake stop action if needed"
            )
        return final_entry  # type: ignore[return-value]

    @staticmethod
    def get_last_state(trajectory: Trajectory) -> StateInfo:
        """Return the state entry immediately preceding the final action."""
        try:
            # is_bearable(trajectory[-2], StateInfo)
            penultimate_entry = trajectory[-2]
        except Exception:
            raise ValueError(
                "The second last element of trajectory should be a state, add a fake stop action if needed"
            )
        return penultimate_entry  # type: ignore[return-value]
class StringEvaluator(Evaluator):
    """Check whether the answer is correct with:
    exact match: the answer is exactly the same as the reference answer
    must include: each phrase in the reference answer must be included in the answer
    fuzzy match: the answer is similar to the reference answer, using LLM judge
    """
    @staticmethod
    @beartype
    def clean_answer(answer: str) -> str:
        # Normalize: trim whitespace, drop one surrounding pair of single or
        # double quotes, and lowercase for case-insensitive comparison.
        answer = answer.strip()
        if answer.startswith("'") and answer.endswith("'"):
            answer = answer[1:-1]
        elif answer.startswith('"') and answer.endswith('"'):
            answer = answer[1:-1]
        return answer.lower()
    @staticmethod
    @beartype
    def exact_match(ref: str, pred: str) -> float:
        # 1.0 iff the normalized strings are identical.
        return float(
            StringEvaluator.clean_answer(pred)
            == StringEvaluator.clean_answer(ref)
        )
    @staticmethod
    @beartype
    def must_include(ref: str, pred: str, tokenize: bool = False) -> float:
        # 1.0 iff `ref` occurs in `pred` (plain substring by default).
        clean_ref = StringEvaluator.clean_answer(ref)
        clean_pred = StringEvaluator.clean_answer(pred)
        # tokenize the answer if the ref is a single word
        # prevent false positive (e.g, 0)
        # NOTE(review): len(clean_ref) == 1 tests for a single *character*,
        # not a single word as the comment above says — confirm intent.
        if (
            tokenize
            and len(clean_ref) == 1
            and len(word_tokenize(clean_ref)) == 1
        ):
            tok_pred = word_tokenize(clean_pred)
            return float(clean_ref in tok_pred)
        else:
            return float(clean_ref in clean_pred)
    @staticmethod
    @beartype
    def fuzzy_match(ref: str, pred: str, intent: str) -> float:
        # Similarity judgment delegated to an LLM judge.
        return llm_fuzzy_match(pred, ref, intent)
    @staticmethod
    @beartype
    def ua_match(ref: str, pred: str, intent: str) -> float:
        # LLM judge for "unachievable task" (N/A) explanations.
        return llm_ua_match(pred, ref, intent)
    def __call__(
        self,
        trajectory: Trajectory,
        config_file: Path | str,
        page: Page | PseudoPage | None = None,
        client: CDPSession | None = None,
    ) -> float:
        """Score the final stop-action answer against the config references.

        All configured approaches multiply into one score, so each must pass
        for a non-zero result.
        """
        with open(config_file, "r") as f:
            configs = json.load(f)
        last_action = self.get_last_action(trajectory)
        pred = self.clean_answer(last_action["answer"])
        score = 1.0
        for approach, value in configs["eval"]["reference_answers"].items():
            match approach:
                case "exact_match":
                    score *= self.exact_match(ref=value, pred=pred)
                case "must_include":
                    assert isinstance(value, list)
                    for must_value in value:
                        # Tokenized matching only when exactly one phrase is
                        # required.
                        score *= self.must_include(
                            ref=must_value,
                            pred=pred,
                            tokenize=(len(value) == 1),
                        )
                case "fuzzy_match":
                    intent = configs["intent"]
                    if value == "N/A":
                        # if the instruction only asks the model to generate N/A when encountering an unachievable task
                        # without more concrete reasons
                        score *= self.exact_match(ref=value, pred=pred)
                        # if the instruction also asks the model to generate the reason why the task is unachievable
                        # this should be the default as it will prevent false positive N/A`
                        if score != 1:
                            # NOTE(review): this assignment discards any score
                            # accumulated from earlier approaches — confirm
                            # N/A configs never combine with other checks.
                            score = 1.0 * self.ua_match(
                                intent=configs["intent"],
                                ref=configs["eval"]["string_note"],
                                pred=pred,
                            )
                    else:
                        assert isinstance(value, list)
                        for reference in value:
                            score *= self.fuzzy_match(
                                ref=reference, pred=pred, intent=intent
                            )
        return score
class URLEvaluator(Evaluator):
    """Check URL matching"""
    @beartype
    def __call__(
        self,
        trajectory: Trajectory,
        config_file: Path | str,
        page: Page | PseudoPage,
        client: CDPSession | None = None,
    ) -> float:
        """Score whether the final page URL matches the reference URL(s).

        Under the default "GOLD in PRED" rule, some reference base path must
        occur inside the final URL's base path, and every reference query key
        must be matched by one of its allowed values in the final URL.
        """
        with open(config_file, "r") as f:
            configs = json.load(f)
        def clean_url(url: str) -> str:
            # Strip trailing slashes so ".../forums/" equals ".../forums".
            url = str(url)
            url = url.rstrip("/")
            return url
        def parse_url(url: str) -> tuple[str, dict[str, list[str]]]:
            """Parse a URL into its base, path, and query components."""
            parsed_url = urllib.parse.urlparse(url)
            base_path = parsed_url.netloc + parsed_url.path
            query = urllib.parse.parse_qs(parsed_url.query)
            return base_path, query
        def parse_urls(
            urls: list[str],
        ) -> tuple[list[str], dict[str, set[str]]]:
            """Parse a list of URLs."""
            base_paths = []
            queries = collections.defaultdict(set)
            for url in urls:
                base_path, query = parse_url(url)
                base_paths.append(base_path)
                for k, v in query.items():
                    queries[k].update(v)
            return base_paths, queries
        pred = clean_url(page.url)
        # Multiple acceptable reference URLs are separated by " |OR| ".
        ref_urls = configs["eval"]["reference_url"].split(" |OR| ")
        ref_urls = [clean_url(url) for url in ref_urls]
        matching_rule = configs["eval"].get("url_note", "GOLD in PRED")
        if matching_rule == "GOLD in PRED":
            ref_base_paths, ref_queries = parse_urls(ref_urls)
            # NOTE: despite the plural name, pred_base_paths is the *single*
            # base-path string of the final URL, so the `in` below is a
            # substring containment test, not list membership.
            pred_base_paths, pred_query = parse_url(pred)
            base_score = float(
                any(
                    [
                        ref_base_path in pred_base_paths
                        for ref_base_path in ref_base_paths
                    ]
                )
            )
            # Every reference query key must be satisfied by an allowed value.
            query_score = 1.0
            for k, possible_values in ref_queries.items():
                query_score *= float(
                    any(
                        possible_ref_value in pred_query.get(k, [])
                        for possible_ref_value in possible_values
                    )
                )
            score = base_score * query_score
        else:
            raise ValueError(f"Unknown matching rule: {matching_rule}")
        return score
class HTMLContentEvaluator(Evaluator):
    """Check whether the contents appear in the page"""
    @beartype
    def __call__(
        self,
        trajectory: Trajectory,
        config_file: Path | str,
        page: Page | PseudoPage,
        client: CDPSession | None = None,
    ) -> float:
        """Score each `program_html` target and multiply the results.

        SECURITY NOTE(review): "func:" URL entries and "func:" locators are
        executed with eval() on strings read from the config file — config
        files must be trusted input.
        """
        with open(config_file, "r") as f:
            configs = json.load(f)
        targets = configs["eval"]["program_html"]
        score = 1.0
        for target in targets:
            target_url: str = target["url"] # which url to check
            if target_url.startswith("func"):
                # "func:<expr>" URLs are computed by evaluating <expr>, with
                # "__last_url__" replaced by the current page URL.
                func = target_url.split("func:")[1]
                func = func.replace("__last_url__", page.url)
                target_url = eval(func)
            locator: str = target["locator"] # js element locator
            # navigate to that url
            if target_url != "last":
                page.goto(target_url)
                time.sleep(3) # TODO [shuyanzh]: fix this hard-coded sleep
            # empty, use the full page
            if not locator.strip():
                selected_element = page.content()
            # use JS to select the element
            elif locator.startswith("document.") or locator.startswith(
                "[...document."
            ):
                if "prep_actions" in target:
                    # Best-effort setup scripts; failures are ignored.
                    try:
                        for prep_action in target["prep_actions"]:
                            page.evaluate(f"() => {prep_action}")
                    except Exception:
                        pass
                try:
                    selected_element = str(page.evaluate(f"() => {locator}"))
                    if not selected_element:
                        selected_element = ""
                except Exception:
                    # the page is wrong, return empty
                    selected_element = ""
            # run program to call API
            elif locator.startswith("func:"): # a helper function
                func = locator.split("func:")[1]
                func = func.replace("__page__", "page")
                selected_element = eval(func)
            else:
                raise ValueError(f"Unknown locator: {locator}")
            # Compare after unescaping entities so "&amp;" matches "&".
            selected_element = html.unescape(selected_element)
            if "exact_match" in target["required_contents"]:
                required_contents = target["required_contents"]["exact_match"]
                cur_score = StringEvaluator.exact_match(
                    ref=required_contents, pred=selected_element
                )
                score *= float(cur_score)
                # print(f"[exact match] {cur_score}, selected element: {selected_element}, required contents: {required_contents}")
            elif "must_include" in target["required_contents"]:
                required_contents = target["required_contents"]["must_include"]
                assert isinstance(required_contents, list)
                for content in required_contents:
                    # Entries may list alternatives joined by " |OR| "; any
                    # single alternative satisfies the entry.
                    content_or = content.split(" |OR| ")
                    cur_score = any(
                        [
                            StringEvaluator.must_include(
                                ref=content,
                                pred=selected_element,
                                tokenize=False,
                            )
                            for content in content_or
                        ]
                    )
                    score *= float(cur_score)
                    # print(f"[must include] {cur_score}, selected element: {selected_element}, required contents: {content_or}")
            else:
                raise ValueError(
                    f"Unknown required_contents: {target['required_contents'].keys()}"
                )
        return score
class EvaluatorComb:
    """Combine several evaluators into a single callable.

    The combined score is the product of every sub-evaluator's score,
    so any single zero score zeroes the overall result.
    """

    def __init__(self, evaluators: list[Evaluator]) -> None:
        self.evaluators = evaluators

    @beartype
    def __call__(
        self,
        trajectory: Trajectory,
        config_file: Path | str,
        page: Page | PseudoPage,
        client: CDPSession,
    ) -> float:
        combined = 1.0
        for sub_evaluator in self.evaluators:
            combined *= sub_evaluator(trajectory, config_file, page, client)
        return combined
@beartype
def evaluator_router(config_file: Path | str) -> EvaluatorComb:
    """Router to get the evaluator class"""
    with open(config_file, "r") as f:
        configs = json.load(f)

    # Dispatch table mapping config tags to evaluator classes
    # (equivalent to the previous match/case ladder).
    dispatch = {
        "string_match": StringEvaluator,
        "url_match": URLEvaluator,
        "program_html": HTMLContentEvaluator,
    }

    evaluators: list[Evaluator] = []
    for eval_type in configs["eval"]["eval_types"]:
        if eval_type not in dispatch:
            raise ValueError(f"eval_type {eval_type} is not supported")
        evaluators.append(dispatch[eval_type]())

    return EvaluatorComb(evaluators)
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/evaluation_harness/__init__.py | exps/webarena_exp/evaluation_harness/__init__.py | from .evaluators import *
from .helper_functions import (
shopping_get_latest_order_url,
shopping_get_sku_latest_review_author,
shopping_get_sku_latest_review_rating,
)
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/evaluation_harness/helper_functions.py | exps/webarena_exp/evaluation_harness/helper_functions.py | """Implements helper functions to assist evaluation cases where other evaluators are not suitable."""
import json
from typing import Any
from urllib.parse import urlparse
import requests
from playwright.sync_api import CDPSession, Page
from browser_env.env_config import (
ACCOUNTS,
GITLAB,
MAP,
REDDIT,
SHOPPING,
SHOPPING_ADMIN,
WIKIPEDIA,
)
from llms.providers.openai_utils import (
generate_from_openai_chat_completion,
)
def shopping_get_auth_token() -> str:
    """Request an admin REST API bearer token from the shopping site."""
    credentials = {
        "username": ACCOUNTS["shopping_site_admin"]["username"],
        "password": ACCOUNTS["shopping_site_admin"]["password"],
    }
    response = requests.post(
        url=f"{SHOPPING}/rest/default/V1/integration/admin/token",
        headers={"content-type": "application/json"},
        data=json.dumps(credentials),
    )
    token: str = response.json()
    return token
def shopping_get_latest_order_url() -> str:
    """Get the latest order url from the shopping website."""
    headers = {
        "Authorization": f"Bearer {shopping_get_auth_token()}",
        "Content-Type": "application/json",
    }
    # Ask the orders API for the single most recently created order.
    query = {
        "searchCriteria[sortOrders][0][field]": "created_at",
        "searchCriteria[sortOrders][0][direction]": "DESC",
        "searchCriteria[pageSize]": "1",
    }
    response = requests.get(
        f"{SHOPPING}/rest/V1/orders", params=query, headers=headers
    )
    assert response.status_code == 200
    latest_order = response.json()["items"][0]
    order_id = int(latest_order["increment_id"])
    return f"{SHOPPING}/sales/order/view/order_id/{order_id}/"
def shopping_get_sku_latest_review_author(sku: str) -> str:
    """Get the latest review for shopping admin."""
    headers = {
        "Authorization": f"Bearer {shopping_get_auth_token()}",
        "Content-Type": "application/json",
    }
    response = requests.get(
        f"{SHOPPING}/rest/V1/products/{sku}/reviews", headers=headers
    )
    assert response.status_code == 200
    reviews = response.json()
    # No reviews on record for this SKU -> empty author name.
    if not reviews:
        return ""
    author: str = reviews[-1]["nickname"]
    return author
def shopping_get_sku_latest_review_rating(sku: str) -> str:
    """Return the rating percent of the latest review for *sku*.

    Queries the shopping site's review API with an admin token and
    returns the rating of the most recent review (the last list entry),
    or "" when the product has no reviews.
    """
    header = {
        "Authorization": f"Bearer {shopping_get_auth_token()}",
        "Content-Type": "application/json",
    }
    response = requests.get(
        f"{SHOPPING}/rest/V1/products/{sku}/reviews", headers=header
    )
    assert response.status_code == 200
    response_obj = response.json()
    if len(response_obj) == 0:
        return ""
    # Validate the same review we return (index -1, the latest); the old
    # code checked index 0 while returning data from index -1.
    assert response_obj[-1]["ratings"][0]["rating_name"] == "Rating"
    rating: str = str(response_obj[-1]["ratings"][0]["percent"])
    return rating
def reddit_get_post_url(url: str) -> str:
    """Normalize a Reddit-style URL to its canonical post URL.

    A post/comment URL looks like ``http://domain/f/<subreddit>/<post_id>/...``;
    anything deeper (e.g. a comment permalink) is truncated to the post
    itself. URLs that do not match this shape are returned unchanged.
    """
    # Parse once instead of re-calling urlparse() for every component.
    parsed = urlparse(url)
    tok_url = parsed.path.split("/")
    # not a valid post/comment url, return the url as is
    if len(tok_url) < 4 or tok_url[1] != "f":
        return url
    subreddit = tok_url[2]
    post_id = tok_url[3]
    return f"{parsed.scheme}://{parsed.netloc}/f/{subreddit}/{post_id}/"
def gitlab_get_project_memeber_role(page: Page, account_name: str) -> str:
    """Read the role of *account_name* from the GitLab project-members
    page currently open in *page*, via in-page JS evaluation.

    Returns "" when the account or its role cell cannot be read.
    """
    # get the account index
    try:
        account_idx = page.evaluate(
            f"""(() => {{
                const elements = document.querySelectorAll("td[data-label='Account'] span.gl-avatar-labeled-sublabel");
                let index = -1;  // Default value if not found
                for(let i = 0; i < elements.length; i++) {{
                    if(elements[i].outerText === '@{account_name}') {{
                        index = i;
                        break;
                    }}
                }}
                return index;
            }})()"""
        )
        # get the role
        # NOTE(review): if the account was not found, account_idx is -1 and
        # indexing the NodeList yields undefined in JS, so `.outerText`
        # throws; that lands in the except below and returns "" — the
        # not-found path appears to rely on this fallthrough. Confirm.
        role: str = page.evaluate(
            f"""(() => {{
                return document.querySelectorAll("td.col-max-role span")[{account_idx}].outerText;
            }})()"""
        )
    except Exception:
        role = ""

    return role
def llm_fuzzy_match(pred: str, reference: str, question: str) -> float:
    """Check whether the prediction matches the reference with GPT4-turbo.

    Returns 1.0 when the model judges the student answer semantically
    equivalent to the reference, 0.0 for "incorrect" or "partially
    correct" verdicts. Raises AssertionError if the model's reply
    contains none of the expected verdict words.
    """
    # construct the question to ask
    message = "Help a teacher to grade the answer of a student given a question. Keep in mind that the student may use different phrasing or wording to answer the question. The goal is to evaluate whether the answer is semantically equivalent to the reference answer.\n"
    message += f"question: {question}\n"
    message += f"reference answer: {reference}\n"
    message += "all the string 'N/A' that you see is a special sequence that means 'not achievable'\n"
    message += f"student answer: {pred}\n"
    message += "Conclude the judgement by correct/incorrect/partially correct."
    # (The previous dead `messages = []` initialization was removed; the
    # list is built directly.)
    messages: list[dict[str, Any]] = [
        {"role": "system", "content": "You are a helpful assistant"},
        {"role": "user", "content": message},
    ]

    response = generate_from_openai_chat_completion(
        model="gpt-4-1106-preview",
        messages=messages,
        temperature=0,
        max_tokens=768,
        top_p=1.0,
        context_length=0,
    ).lower()
    # Check negatives first: "incorrect" contains "correct" as a substring.
    if "partially correct" in response or "incorrect" in response:
        return 0.0
    else:
        assert "correct" in response
        return 1.0
def llm_ua_match(pred: str, reference: str, question: str) -> float:
    """Check whether the prediction matches the reference with GPT-turbo.

    Judges whether a reported "unachievable" reason agrees with the known
    actual reason for *question*. Returns 1.0 when the model answers
    "same", 0.0 when it answers "different"; raises AssertionError if the
    reply contains neither word.
    """
    # construct the question to ask
    message = ""
    message += f"task: {question}\n"
    message += f"actual unachievable reason: {reference}\n"
    message += f"reported unachievable reason: {pred}\n"
    message += (
        "The task described above is inherently unachievable due to the reason specified under 'actual unachievable reason'. "
        "An individual previously attempted this task and was unable to complete it. They provided a reason for their failure, "
        "which is listed under 'reported unachievable reason'. Your role is to review both the actual and reported reasons. "
        "Determine if the reported reason aligns with the actual reason, even if implicitly. "
        "If the stated reason is in line with the actual reason, respond with 'same'. Otherwise, respond with 'different'."
    )
    # (The previous dead `messages = []` initialization was removed; the
    # list is built directly.)
    messages: list[dict[str, Any]] = [
        {"role": "system", "content": "You are a helpful assistant"},
        {"role": "user", "content": message},
    ]

    response = generate_from_openai_chat_completion(
        model="gpt-4-1106-preview",
        messages=messages,
        temperature=0,
        max_tokens=768,
        top_p=1.0,
        context_length=0,
    ).lower()
    if "different" in response:
        return 0.0
    else:
        assert "same" in response
        return 1.0
class PseudoPage:
    """Stand-in for a Playwright Page that reports a fixed ``url`` while
    delegating every other attribute to the wrapped page object."""

    def __init__(self, original_page: Page, url: str):
        self.url = url
        self.original_page = original_page

    def __getattr__(self, attr: str) -> Any:
        # Delegate attribute access to the original page object.
        # __getattr__ only fires for attributes *not* found on the
        # instance, so "url" and "original_page" (set in __init__) are
        # served normally and never reach this fallback. The old
        # `getattr(self, attr)` branch for "url" could recurse infinitely
        # if the attribute were ever missing.
        if attr == "original_page":
            # Guard against recursion if this runs before __init__ has
            # populated the instance (e.g. during copying/unpickling).
            raise AttributeError(attr)
        return getattr(self.original_page, attr)
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/browser_env/constants.py | exps/webarena_exp/browser_env/constants.py | from typing import Literal
# All ARIA roles accepted when locating an element by role.
ROLES = (
    "alert",
    "alertdialog",
    "application",
    "article",
    "banner",
    "blockquote",
    "button",
    "caption",
    "cell",
    "checkbox",
    "code",
    "columnheader",
    "combobox",
    "complementary",
    "contentinfo",
    "definition",
    "deletion",
    "dialog",
    "directory",
    "document",
    "emphasis",
    "feed",
    "figure",
    "form",
    "generic",
    "grid",
    "gridcell",
    "group",
    "heading",
    "img",
    "insertion",
    "link",
    "list",
    "listbox",
    "listitem",
    "log",
    "main",
    "marquee",
    "math",
    "meter",
    "menu",
    "menubar",
    "menuitem",
    "menuitemcheckbox",
    "menuitemradio",
    "navigation",
    "none",
    "note",
    "option",
    "paragraph",
    "presentation",
    "progressbar",
    "radio",
    "radiogroup",
    "region",
    "row",
    "rowgroup",
    "rowheader",
    "scrollbar",
    "search",
    "searchbox",
    "separator",
    "slider",
    "spinbutton",
    "status",
    "strong",
    "subscript",
    "superscript",
    "switch",
    "tab",
    "table",
    "tablist",
    "tabpanel",
    "term",
    "textbox",
    "time",
    "timer",
    "toolbar",
    "tooltip",
    "tree",
    "treegrid",
    "treeitem",
)

# Pseudo-locators that address an element by attribute rather than role.
SPECIAL_LOCATORS = (
    "alt_text",
    "label",
    "placeholder",
)
# Printable ASCII characters (codepoints 32-127).
ASCII_CHARSET = "".join(chr(x) for x in range(32, 128))
# A band of frequently used non-ASCII unicode characters.
FREQ_UNICODE_CHARSET = "".join(chr(x) for x in range(129, 1000))

# Length caps for various text fields in observations/actions.
UTTERANCE_MAX_LENGTH = 8192
ATTRIBUTE_MAX_LENGTH = 256
TEXT_MAX_LENGTH = 256
TYPING_MAX_LENGTH = 64
URL_MAX_LENGTH = 256
MAX_ELEMENT_INDEX_IN_VIEWPORT = 10
MAX_ELEMENT_ID = 1000
MAX_ANSWER_LENGTH = 512

# Valid range for element refs.
MIN_REF = -1000000
MAX_REF = 1000000

# Window / embedded-task geometry in pixels.
WINDOW_WIDTH = 500
WINDOW_HEIGHT = 240
TASK_WIDTH = 160
TASK_HEIGHT = 210

# Geometry for the flight-booking style tasks.
FLIGHT_WINDOW_WIDTH = 600
FLIGHT_WINDOW_HEIGHT = 700
FLIGHT_TASK_WIDTH = 375
FLIGHT_TASK_HEIGHT = 667
MAX_PAGE_NUMBER = 10

# Key names that may be sent verbatim with keyboard press actions.
SPECIAL_KEYS = (
    "Enter",
    "Tab",
    "Control",
    "Shift",
    "Meta",
    "Backspace",
    "Delete",
    "Escape",
    "ArrowUp",
    "ArrowDown",
    "ArrowLeft",
    "ArrowRight",
    "PageDown",
    "PageUp",
    "Meta+a",
)

# Lower-case aliases mapped to canonical key names.
SPECIAL_KEY_MAPPINGS = {
    "backquote": "Backquote",
    "minus": "Minus",
    "equal": "Equal",
    "backslash": "Backslash",
    "backspace": "Backspace",
    "meta": "Meta",
    "tab": "Tab",
    "delete": "Delete",
    "escape": "Escape",
    "arrowdown": "ArrowDown",
    "end": "End",
    "enter": "Enter",
    "home": "Home",
    "insert": "Insert",
    "pagedown": "PageDown",
    "pageup": "PageUp",
    "arrowright": "ArrowRight",
    "arrowup": "ArrowUp",
    "f1": "F1",
    "f2": "F2",
    "f3": "F3",
    "f4": "F4",
    "f5": "F5",
    "f6": "F6",
    "f7": "F7",
    "f8": "F8",
    "f9": "F9",
    "f10": "F10",
    "f11": "F11",
    "f12": "F12",
}
# Static-typing mirror of ROLES + SPECIAL_LOCATORS for annotation purposes.
RolesType = Literal[
    "alert",
    "alertdialog",
    "application",
    "article",
    "banner",
    "blockquote",
    "button",
    "caption",
    "cell",
    "checkbox",
    "code",
    "columnheader",
    "combobox",
    "complementary",
    "contentinfo",
    "definition",
    "deletion",
    "dialog",
    "directory",
    "document",
    "emphasis",
    "feed",
    "figure",
    "form",
    "generic",
    "grid",
    "gridcell",
    "group",
    "heading",
    "img",
    "insertion",
    "link",
    "list",
    "listbox",
    "listitem",
    "log",
    "main",
    "marquee",
    "math",
    "meter",
    "menu",
    "menubar",
    "menuitem",
    "menuitemcheckbox",
    "menuitemradio",
    "navigation",
    "none",
    "note",
    "option",
    "paragraph",
    "presentation",
    "progressbar",
    "radio",
    "radiogroup",
    "region",
    "row",
    "rowgroup",
    "rowheader",
    "scrollbar",
    "search",
    "searchbox",
    "separator",
    "slider",
    "spinbutton",
    "status",
    "strong",
    "subscript",
    "superscript",
    "switch",
    "tab",
    "table",
    "tablist",
    "tabpanel",
    "term",
    "textbox",
    "time",
    "timer",
    "toolbar",
    "tooltip",
    "tree",
    "treegrid",
    "treeitem",
    "alt_text",
    "label",
    "placeholder",
]
MAX_VANILLA_STR_LENGTH = 1000

# Playwright locator methods recognized when parsing locator expressions.
PLAYWRIGHT_LOCATORS = (
    "get_by_role",
    "get_by_text",
    "get_by_label",
    "get_by_placeholder",
    "get_by_alt_text",
    "get_by_title",
    "get_by_test_id",
    "filter",
    "frame_locator",
    "locator",
)

# Playwright page/locator actions recognized when parsing actions.
PLAYWRIGHT_ACTIONS = (
    "fill",
    "check",
    "select_option",
    "click",
    "hover",
    "dclick",
    "type",
    "focus",
    "goto",
    "press",
    "scroll",
)

# Accessibility-tree node properties omitted from rendered observations.
IGNORED_ACTREE_PROPERTIES = (
    "focusable",
    "editable",
    "readonly",
    "level",
    "settable",
    "multiline",
    "invalid",
)
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/browser_env/auto_login.py | exps/webarena_exp/browser_env/auto_login.py | """Script to automatically login each website"""
import argparse
import glob
import os
import time
from concurrent.futures import ThreadPoolExecutor
from itertools import combinations
from pathlib import Path
from playwright.sync_api import sync_playwright
from browser_env.env_config import (
ACCOUNTS,
GITLAB,
REDDIT,
SHOPPING,
SHOPPING_ADMIN,
)
# Browser launch options for the login sessions.
HEADLESS = True
SLOW_MO = 0

SITES = ["gitlab", "shopping", "shopping_admin", "reddit"]
# Per-site URL probed to check whether a stored cookie still logs in
# (parallel to SITES).
URLS = [
    f"{GITLAB}/-/profile",
    f"{SHOPPING}/wishlist/",
    f"{SHOPPING_ADMIN}/dashboard",
    f"{REDDIT}/user/{ACCOUNTS['reddit']['username']}/account",
]
# Whether the final URL must equal the probe URL exactly (see is_expired).
EXACT_MATCH = [True, True, True, True]
# Keyword expected in the page content when logged in; "" means the URL
# check is used instead (parallel to SITES).
KEYWORDS = ["", "", "Dashboard", "Delete"]
def is_expired(
    storage_state: Path, url: str, keyword: str, url_exact: bool = True
) -> bool:
    """Test whether the stored cookie is expired.

    Loads *url* in a headless browser using the cookies in
    *storage_state* and decides expiry by either the absence of
    *keyword* in the page content, or (when *keyword* is empty) by the
    final URL not matching *url* (exactly, or as a substring when
    *url_exact* is False). A missing storage file counts as expired.
    """
    if not storage_state.exists():
        return True

    # The `with` block guarantees the Playwright driver (and its browser)
    # is shut down even if navigation raises; the previous manual
    # __enter__/__exit__ pairing leaked it on exception.
    with sync_playwright() as playwright:
        browser = playwright.chromium.launch(headless=True, slow_mo=SLOW_MO)
        context = browser.new_context(storage_state=storage_state)
        page = context.new_page()
        page.goto(url)
        time.sleep(1)
        d_url = page.url
        content = page.content()

    if keyword:
        return keyword not in content
    else:
        if url_exact:
            return d_url != url
        else:
            return url not in d_url
def renew_comb(comb: list[str], auth_folder: str = "./.auth") -> None:
    """Log into every site named in *comb* inside one browser context and
    persist the combined cookies to
    ``<auth_folder>/<site1.site2...>_state.json``.

    The `with` block guarantees the Playwright driver is shut down even
    when a login step raises; the previous manual __enter__/__exit__
    pairing leaked the browser on failure.
    """
    with sync_playwright() as playwright:
        browser = playwright.chromium.launch(headless=HEADLESS)
        context = browser.new_context()
        page = context.new_page()

        if "shopping" in comb:
            username = ACCOUNTS["shopping"]["username"]
            password = ACCOUNTS["shopping"]["password"]
            page.goto(f"{SHOPPING}/customer/account/login/")
            page.get_by_label("Email", exact=True).fill(username)
            page.get_by_label("Password", exact=True).fill(password)
            page.get_by_role("button", name="Sign In").click()

        if "reddit" in comb:
            username = ACCOUNTS["reddit"]["username"]
            password = ACCOUNTS["reddit"]["password"]
            page.goto(f"{REDDIT}/login")
            page.get_by_label("Username").fill(username)
            page.get_by_label("Password").fill(password)
            page.get_by_role("button", name="Log in").click()

        if "shopping_admin" in comb:
            username = ACCOUNTS["shopping_admin"]["username"]
            password = ACCOUNTS["shopping_admin"]["password"]
            page.goto(f"{SHOPPING_ADMIN}")
            page.get_by_placeholder("user name").fill(username)
            page.get_by_placeholder("password").fill(password)
            page.get_by_role("button", name="Sign in").click()

        if "gitlab" in comb:
            username = ACCOUNTS["gitlab"]["username"]
            password = ACCOUNTS["gitlab"]["password"]
            page.goto(f"{GITLAB}/users/sign_in")
            page.get_by_test_id("username-field").click()
            page.get_by_test_id("username-field").fill(username)
            page.get_by_test_id("username-field").press("Tab")
            page.get_by_test_id("password-field").fill(password)
            page.get_by_test_id("sign-in-button").click()

        context.storage_state(
            path=f"{auth_folder}/{'.'.join(comb)}_state.json"
        )
def get_site_comb_from_filepath(file_path: str) -> list[str]:
    """Recover the site combination encoded in a cookie-file name, e.g.
    ``.auth/gitlab.reddit_state.json`` -> ``["gitlab", "reddit"]``."""
    stem = os.path.basename(file_path).rsplit("_", 1)[0]
    return stem.split(".")
def main(auth_folder: str = "./.auth") -> None:
    """Renew cookies for every site and every supported site pair, then
    verify that none of the saved cookie files is already expired."""
    pairs = list(combinations(SITES, 2))

    max_workers = 8
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        for pair in pairs:
            # TODO[shuyanzh] auth don't work on these two sites
            if "reddit" in pair and (
                "shopping" in pair or "shopping_admin" in pair
            ):
                continue
            executor.submit(
                renew_comb, list(sorted(pair)), auth_folder=auth_folder
            )

        for site in SITES:
            executor.submit(renew_comb, [site], auth_folder=auth_folder)

    # Verify every stored cookie is still valid. Keep the cookie file next
    # to its future: a combination file spawns one check per site, so
    # indexing the file list by the future's position (as before) named
    # the wrong file in the failure message or raised IndexError.
    checks = []  # list of (cookie_file, future) pairs
    cookie_files = list(glob.glob(f"{auth_folder}/*.json"))
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        for c_file in cookie_files:
            comb = get_site_comb_from_filepath(c_file)
            for cur_site in comb:
                url = URLS[SITES.index(cur_site)]
                keyword = KEYWORDS[SITES.index(cur_site)]
                match = EXACT_MATCH[SITES.index(cur_site)]
                future = executor.submit(
                    is_expired, Path(c_file), url, keyword, match
                )
                checks.append((c_file, future))

    for c_file, future in checks:
        assert not future.result(), f"Cookie {c_file} expired."
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Sites to renew; empty (default) renews everything, "all" does the
    # same but honors --auth_folder.
    parser.add_argument("--site_list", nargs="+", default=[])
    parser.add_argument("--auth_folder", type=str, default="./.auth")
    args = parser.parse_args()
    if not args.site_list:
        # NOTE(review): this branch ignores --auth_folder and always uses
        # the default "./.auth" — confirm whether that is intentional.
        main()
    else:
        if "all" in args.site_list:
            main(auth_folder=args.auth_folder)
        else:
            renew_comb(args.site_list, auth_folder=args.auth_folder)
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/browser_env/env_config.py | exps/webarena_exp/browser_env/env_config.py | # websites domain
import os
# Base URL for each self-hosted site, injected via environment variables
# (empty string when unset).
REDDIT = os.environ.get("REDDIT", "")
SHOPPING = os.environ.get("SHOPPING", "")
SHOPPING_ADMIN = os.environ.get("SHOPPING_ADMIN", "")
GITLAB = os.environ.get("GITLAB", "")
WIKIPEDIA = os.environ.get("WIKIPEDIA", "")
MAP = os.environ.get("MAP", "")
HOMEPAGE = os.environ.get("HOMEPAGE", "")

# Fail fast at import time when any site URL is missing.
# NOTE(review): `assert` is stripped under `python -O`; replace with an
# explicit raise if this check must survive optimized runs.
assert (
    REDDIT
    and SHOPPING
    and SHOPPING_ADMIN
    and GITLAB
    and WIKIPEDIA
    and MAP
    and HOMEPAGE
), (
    f"Please setup the URLs to each site. Current: \n"
    + f"Reddit: {REDDIT}\n"
    + f"Shopping: {SHOPPING}\n"
    + f"Shopping Admin: {SHOPPING_ADMIN}\n"
    + f"Gitlab: {GITLAB}\n"
    + f"Wikipedia: {WIKIPEDIA}\n"
    + f"Map: {MAP}\n"
    + f"Homepage: {HOMEPAGE}\n"
)

# Credentials for the pre-provisioned test accounts on each site.
ACCOUNTS = {
    "reddit": {"username": "MarvelsGrantMan136", "password": "test1234"},
    "gitlab": {"username": "byteblaze", "password": "hello1234"},
    "shopping": {
        "username": "emma.lopez@gmail.com",
        "password": "Password.123",
    },
    "shopping_admin": {"username": "admin", "password": "admin1234"},
    "shopping_site_admin": {"username": "admin", "password": "admin1234"},
}

# Maps each real (self-hosted) base URL to a public-looking alias —
# presumably used to rewrite URLs in prompts/logs; verify against callers.
URL_MAPPINGS = {
    REDDIT: "http://reddit.com",
    SHOPPING: "http://onestopmarket.com",
    SHOPPING_ADMIN: "http://luma.com/admin",
    GITLAB: "http://gitlab.com",
    WIKIPEDIA: "http://wikipedia.org",
    MAP: "http://openstreetmap.org",
    HOMEPAGE: "http://homepage.com",
}
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/browser_env/processors.py | exps/webarena_exp/browser_env/processors.py | import json
import re
from collections import defaultdict
from typing import Any, TypedDict, Union
import numpy as np
import numpy.typing as npt
from gymnasium import spaces
from playwright.sync_api import CDPSession, Page, ViewportSize
from browser_env.constants import (
ASCII_CHARSET,
FREQ_UNICODE_CHARSET,
IGNORED_ACTREE_PROPERTIES,
UTTERANCE_MAX_LENGTH,
)
from .utils import (
AccessibilityTree,
AccessibilityTreeNode,
BrowserConfig,
BrowserInfo,
DOMNode,
DOMTree,
Observation,
png_bytes_to_numpy,
)
# Minimum fraction of an element that must overlap the viewport for it to
# be kept when "current_viewport_only" filtering is enabled.
IN_VIEWPORT_RATIO_THRESHOLD = 0.6


class ObservationProcessor:
    """Interface: subclasses turn a live page into one observation."""

    def process(self, page: Page, client: CDPSession) -> Observation:
        raise NotImplementedError


class ObservationMetadata(TypedDict):
    # Per-observation bookkeeping: maps observation node ids to the
    # extracted info recorded while rendering the observation.
    obs_nodes_info: dict[str, Any]
def create_empty_metadata() -> ObservationMetadata:
    """Build a fresh metadata record with no observed nodes."""
    return {"obs_nodes_info": {}}
class TextObervationProcessor(ObservationProcessor):
    def __init__(
        self,
        observation_type: str,
        current_viewport_only: bool,
        viewport_size: ViewportSize,
    ):
        """Text observation extractor.

        Args:
            observation_type: "html" or "accessibility_tree"
                (selects the branch taken in process()).
            current_viewport_only: drop elements mostly outside the viewport.
            viewport_size: logical viewport size, used to calibrate the
                DOM snapshot bounds in fetch_browser_info().
        """
        self.observation_type = observation_type
        self.current_viewport_only = current_viewport_only
        self.viewport_size = viewport_size
        self.observation_tag = "text"
        self.meta_data = (
            create_empty_metadata()
        )  # use the store meta data of this observation type
    def fetch_browser_info(
        self,
        page: Page,
        client: CDPSession,
    ) -> BrowserInfo:
        """Capture a CDP DOM snapshot plus current window geometry.

        Returns a dict with the raw "DOMTree" snapshot (bounds rescaled to
        the configured viewport width) and a "config" of window bounds.
        Raises AssertionError if devicePixelRatio is not 1.0.
        """
        # extract domtree
        tree = client.send(
            "DOMSnapshot.captureSnapshot",
            {
                "computedStyles": [],
                "includeDOMRects": True,
                "includePaintOrder": True,
            },
        )

        # calibrate the bounds, in some cases, the bounds are scaled somehow
        bounds = tree["documents"][0]["layout"]["bounds"]
        b = bounds[0]
        # Scale factor derived from the first (root) bound's width vs the
        # configured viewport width.
        n = b[2] / self.viewport_size["width"]
        bounds = [[x / n for x in bound] for bound in bounds]
        tree["documents"][0]["layout"]["bounds"] = bounds

        # extract browser info
        win_top_bound = page.evaluate("window.pageYOffset")
        win_left_bound = page.evaluate("window.pageXOffset")
        win_width = page.evaluate("window.screen.width")
        win_height = page.evaluate("window.screen.height")
        win_right_bound = win_left_bound + win_width
        win_lower_bound = win_top_bound + win_height
        device_pixel_ratio = page.evaluate("window.devicePixelRatio")
        assert device_pixel_ratio == 1.0, "devicePixelRatio is not 1.0"

        config: BrowserConfig = {
            "win_top_bound": win_top_bound,
            "win_left_bound": win_left_bound,
            "win_width": win_width,
            "win_height": win_height,
            "win_right_bound": win_right_bound,
            "win_lower_bound": win_lower_bound,
            "device_pixel_ratio": device_pixel_ratio,
        }

        # assert len(tree['documents']) == 1, "More than one document in the DOM tree"
        info: BrowserInfo = {"DOMTree": tree, "config": config}

        return info
@staticmethod
def get_bounding_client_rect(
client: CDPSession, backend_node_id: str
) -> dict[str, Any]:
try:
remote_object = client.send(
"DOM.resolveNode", {"backendNodeId": int(backend_node_id)}
)
remote_object_id = remote_object["object"]["objectId"]
response = client.send(
"Runtime.callFunctionOn",
{
"objectId": remote_object_id,
"functionDeclaration": """
function() {
if (this.nodeType == 3) {
var range = document.createRange();
range.selectNode(this);
var rect = range.getBoundingClientRect().toJSON();
range.detach();
return rect;
} else {
return this.getBoundingClientRect().toJSON();
}
}
""",
"returnByValue": True,
},
)
return response
except Exception as e:
return {"result": {"subtype": "error"}}
@staticmethod
def get_element_in_viewport_ratio(
elem_left_bound: float,
elem_top_bound: float,
width: float,
height: float,
config: BrowserConfig,
) -> float:
elem_right_bound = elem_left_bound + width
elem_lower_bound = elem_top_bound + height
win_left_bound = 0
win_right_bound = config["win_width"]
win_top_bound = 0
win_lower_bound = config["win_height"]
# Compute the overlap in x and y axes
overlap_width = max(
0,
min(elem_right_bound, win_right_bound)
- max(elem_left_bound, win_left_bound),
)
overlap_height = max(
0,
min(elem_lower_bound, win_lower_bound)
- max(elem_top_bound, win_top_bound),
)
# Compute the overlap area
ratio = overlap_width * overlap_height / width * height
return ratio
    def fetch_page_html(
        self,
        info: BrowserInfo,
        page: Page,
        client: CDPSession,
        current_viewport_only: bool,
    ) -> DOMTree:
        """Convert the raw CDP DOM snapshot in *info* into a DOMTree: a
        flat list of DOMNode dicts with parent/child links and client-rect
        bounds, optionally pruned to nodes visible in the viewport."""
        # adopted from [natbot](https://github.com/nat/natbot)
        tree = info["DOMTree"]
        # Snapshot string table; node fields below are indices into it.
        strings = tree["strings"]
        document = tree["documents"][0]
        nodes = document["nodes"]

        # make a dom tree that is easier to navigate
        dom_tree: DOMTree = []
        graph = defaultdict(list)  # parentId -> [childId, ...]
        for node_idx in range(len(nodes["nodeName"])):
            cur_node: DOMNode = {
                "nodeId": "",
                "nodeType": "",
                "nodeName": "",
                "nodeValue": "",
                "attributes": "",
                "backendNodeId": "",
                "parentId": "",
                "childIds": [],
                "cursor": 0,
                "union_bound": None,
            }

            node_type_idx = nodes["nodeType"][node_idx]
            node_type = "generic"
            if node_type_idx >= 0 and node_type_idx < len(strings):
                node_type = strings[node_type_idx]

            node_name = strings[nodes["nodeName"][node_idx]]

            node_value_idx = nodes["nodeValue"][node_idx]
            node_value = ""
            if node_value_idx >= 0 and node_value_idx < len(strings):
                # Collapse internal whitespace runs to single spaces.
                node_value = " ".join(strings[node_value_idx].split())

            # Attributes arrive as a flat [name_idx, value_idx, ...] list.
            node_attributes = [
                strings[i] for i in nodes["attributes"][node_idx]
            ]
            node_attributes_str = ""
            for i in range(0, len(node_attributes), 2):
                a = node_attributes[i]
                b = node_attributes[i + 1]
                b = " ".join(b.split())
                node_attributes_str += f'{a}="{b}" '
            node_attributes_str = node_attributes_str.strip()

            cur_node["nodeId"] = str(node_idx)
            cur_node["nodeType"] = node_type
            cur_node["nodeName"] = node_name
            cur_node["nodeValue"] = node_value
            cur_node["attributes"] = node_attributes_str
            cur_node["backendNodeId"] = str(nodes["backendNodeId"][node_idx])
            cur_node["parentId"] = str(nodes["parentIndex"][node_idx])

            if cur_node["parentId"] != "-1":
                graph[cur_node["parentId"]].append(str(cur_node["nodeId"]))

            # get the bound
            if cur_node["parentId"] == "-1":
                # Root node: dummy non-empty bound so it is never pruned.
                cur_node["union_bound"] = [0.0, 0.0, 10.0, 10.0]
            else:
                response = self.get_bounding_client_rect(
                    client, cur_node["backendNodeId"]
                )
                if response.get("result", {}).get("subtype", "") == "error":
                    cur_node["union_bound"] = None
                else:
                    x = response["result"]["value"]["x"]
                    y = response["result"]["value"]["y"]
                    width = response["result"]["value"]["width"]
                    height = response["result"]["value"]["height"]
                    cur_node["union_bound"] = [x, y, width, height]

            dom_tree.append(cur_node)

        # add parent children index to the node
        for parent_id, child_ids in graph.items():
            dom_tree[int(parent_id)]["childIds"] = child_ids

        # remove the nodes that are not in the current viewport
        if current_viewport_only:

            def remove_node_in_graph(node: DOMNode) -> None:
                # Splice the node out: its children are re-attached to its
                # parent at the node's former position, then the node is
                # tombstoned via parentId = "[REMOVED]".
                node_id = node["nodeId"]
                parent_id = node["parentId"]
                child_ids = node["childIds"]

                # update the children of the parent node
                assert dom_tree[int(parent_id)]["parentId"] != "[REMOVED]"
                # remove the nodeid from parent
                index = dom_tree[int(parent_id)]["childIds"].index(node_id)
                dom_tree[int(parent_id)]["childIds"].pop(index)

                # Insert children_nodeids in the same location
                for child_id in child_ids:
                    dom_tree[int(parent_id)]["childIds"].insert(
                        index, child_id
                    )
                    index += 1

                # update children node's parent
                for child_id in child_ids:
                    dom_tree[int(child_id)]["parentId"] = parent_id
                # mark as removed
                dom_tree[int(node_id)]["parentId"] = "[REMOVED]"

            config = info["config"]
            for cursor, node in enumerate(dom_tree):
                if not node["union_bound"]:
                    remove_node_in_graph(node)
                    continue

                [x, y, width, height] = node["union_bound"]

                # invisible node
                if width == 0.0 or height == 0.0:
                    remove_node_in_graph(node)
                    continue

                in_viewport_ratio = self.get_element_in_viewport_ratio(
                    elem_left_bound=float(x),
                    elem_top_bound=float(y),
                    width=float(width),
                    height=float(height),
                    config=config,
                )

                if in_viewport_ratio < IN_VIEWPORT_RATIO_THRESHOLD:
                    remove_node_in_graph(node)

            # Drop the tombstoned nodes.
            dom_tree = [
                node
                for node in dom_tree
                if node.get("parentId", "-1") != "[REMOVED]"
            ]

        return dom_tree
    @staticmethod
    def parse_html(dom_tree: DOMTree) -> tuple[str, dict[str, Any]]:
        """Parse the html tree into a string text

        Returns the rendered text (one tab-indented line per kept node)
        and a mapping from node cursor -> {backend_id, union_bound, text}
        for the nodes that made it into the output.
        """
        obs_nodes_info = {}
        nodeid_to_cursor = {
            node["nodeId"]: idx for idx, node in enumerate(dom_tree)
        }

        def dfs(node_cursor: int, depth: int) -> str:
            # Render one node at *depth*, then recurse into its children.
            tree_str = ""
            node = dom_tree[node_cursor]
            indent = "\t" * depth
            valid_node = True
            try:
                node_str = f"[{node_cursor}] <{node['nodeName']}"
                if node["attributes"]:
                    node_str += f" {node['attributes']}"
                node_str += f"> {node['nodeValue']}"
                # Nodes with neither attributes nor text are skipped;
                # their children are promoted to this depth instead.
                valid_node = bool(node["attributes"] or node["nodeValue"])
                if valid_node:
                    obs_nodes_info[str(node_cursor)] = {
                        "backend_id": node["backendNodeId"],
                        "union_bound": node["union_bound"],
                        "text": node_str,
                    }
                    tree_str += f"{indent}{node_str}\n"

            except Exception as e:
                valid_node = False

            for child_ids in node["childIds"]:
                child_cursor = nodeid_to_cursor[child_ids]
                # Skipped nodes do not consume an indentation level.
                child_depth = depth + 1 if valid_node else depth
                child_str = dfs(child_cursor, child_depth)
                tree_str += child_str

            return tree_str

        html = dfs(0, 0)
        return html, obs_nodes_info
    def fetch_page_accessibility_tree(
        self,
        info: BrowserInfo,
        client: CDPSession,
        current_viewport_only: bool,
    ) -> AccessibilityTree:
        """Fetch the full accessibility tree over CDP, deduplicate nodes,
        attach client-rect bounds, and optionally prune nodes that fall
        mostly outside the current viewport."""
        accessibility_tree: AccessibilityTree = client.send(
            "Accessibility.getFullAXTree", {}
        )["nodes"]

        # a few nodes are repeated in the accessibility tree
        seen_ids = set()
        _accessibility_tree = []
        for node in accessibility_tree:
            if node["nodeId"] not in seen_ids:
                _accessibility_tree.append(node)
                seen_ids.add(node["nodeId"])
        accessibility_tree = _accessibility_tree

        nodeid_to_cursor = {}
        for cursor, node in enumerate(accessibility_tree):
            nodeid_to_cursor[node["nodeId"]] = cursor
            # usually because the node is not visible etc
            if "backendDOMNodeId" not in node:
                node["union_bound"] = None
                continue
            backend_node_id = str(node["backendDOMNodeId"])
            if node["role"]["value"] == "RootWebArea":
                # always inside the viewport
                node["union_bound"] = [0.0, 0.0, 10.0, 10.0]
            else:
                response = self.get_bounding_client_rect(
                    client, backend_node_id
                )
                if response.get("result", {}).get("subtype", "") == "error":
                    node["union_bound"] = None
                else:
                    x = response["result"]["value"]["x"]
                    y = response["result"]["value"]["y"]
                    width = response["result"]["value"]["width"]
                    height = response["result"]["value"]["height"]
                    node["union_bound"] = [x, y, width, height]

        # filter nodes that are not in the current viewport
        if current_viewport_only:

            def remove_node_in_graph(node: AccessibilityTreeNode) -> None:
                # Splice the node out: re-attach its children to its parent
                # at the node's former position, then tombstone the node
                # via parentId = "[REMOVED]".
                nodeid = node["nodeId"]
                node_cursor = nodeid_to_cursor[nodeid]
                parent_nodeid = node["parentId"]
                children_nodeids = node["childIds"]
                parent_cursor = nodeid_to_cursor[parent_nodeid]
                # update the children of the parent node
                assert (
                    accessibility_tree[parent_cursor].get("parentId", "Root")
                    is not None
                )
                # remove the nodeid from parent's childIds
                index = accessibility_tree[parent_cursor]["childIds"].index(
                    nodeid
                )
                accessibility_tree[parent_cursor]["childIds"].pop(index)
                # Insert children_nodeids in the same location
                for child_nodeid in children_nodeids:
                    accessibility_tree[parent_cursor]["childIds"].insert(
                        index, child_nodeid
                    )
                    index += 1
                # update children node's parent
                for child_nodeid in children_nodeids:
                    child_cursor = nodeid_to_cursor[child_nodeid]
                    accessibility_tree[child_cursor][
                        "parentId"
                    ] = parent_nodeid
                # mark as removed
                accessibility_tree[node_cursor]["parentId"] = "[REMOVED]"

            config = info["config"]
            for node in accessibility_tree:
                if not node["union_bound"]:
                    remove_node_in_graph(node)
                    continue

                [x, y, width, height] = node["union_bound"]

                # invisible node
                if width == 0 or height == 0:
                    remove_node_in_graph(node)
                    continue

                in_viewport_ratio = self.get_element_in_viewport_ratio(
                    elem_left_bound=float(x),
                    elem_top_bound=float(y),
                    width=float(width),
                    height=float(height),
                    config=config,
                )

                if in_viewport_ratio < IN_VIEWPORT_RATIO_THRESHOLD:
                    remove_node_in_graph(node)

            # Drop the tombstoned nodes.
            accessibility_tree = [
                node
                for node in accessibility_tree
                if node.get("parentId", "Root") != "[REMOVED]"
            ]

        return accessibility_tree
    @staticmethod
    def parse_accessibility_tree(
        accessibility_tree: AccessibilityTree,
    ) -> tuple[str, dict[str, Any]]:
        """Parse the accessibility tree into a string text

        Returns the rendered tree text (one tab-indented line per kept
        node, formatted "[id] role 'name' prop: value ...") and a map from
        observation node id -> {backend_id, union_bound, text}.
        """
        node_id_to_idx = {}
        for idx, node in enumerate(accessibility_tree):
            node_id_to_idx[node["nodeId"]] = idx

        obs_nodes_info = {}

        def dfs(idx: int, obs_node_id: str, depth: int) -> str:
            # Render one node, then recurse; "invalid" (uninformative)
            # nodes are omitted and their children promoted to this depth.
            tree_str = ""
            node = accessibility_tree[idx]
            indent = "\t" * depth
            valid_node = True
            try:
                role = node["role"]["value"]
                name = node["name"]["value"]
                node_str = f"[{obs_node_id}] {role} {repr(name)}"
                properties = []
                for property in node.get("properties", []):
                    try:
                        if property["name"] in IGNORED_ACTREE_PROPERTIES:
                            continue
                        properties.append(
                            f'{property["name"]}: {property["value"]["value"]}'
                        )
                    except KeyError:
                        pass

                if properties:
                    node_str += " " + " ".join(properties)

                # check valid
                if not node_str.strip():
                    valid_node = False

                # empty generic node
                if not name.strip():
                    if not properties:
                        if role in [
                            "generic",
                            "img",
                            "list",
                            "strong",
                            "paragraph",
                            "banner",
                            "navigation",
                            "Section",
                            "LabelText",
                            "Legend",
                            "listitem",
                        ]:
                            valid_node = False
                    elif role in ["listitem"]:
                        valid_node = False

                if valid_node:
                    tree_str += f"{indent}{node_str}"
                    obs_nodes_info[obs_node_id] = {
                        "backend_id": node["backendDOMNodeId"],
                        "union_bound": node["union_bound"],
                        "text": node_str,
                    }

            except Exception as e:
                valid_node = False

            for _, child_node_id in enumerate(node["childIds"]):
                if child_node_id not in node_id_to_idx:
                    continue
                # mark this to save some tokens
                child_depth = depth + 1 if valid_node else depth
                child_str = dfs(
                    node_id_to_idx[child_node_id], child_node_id, child_depth
                )
                if child_str.strip():
                    if tree_str.strip():
                        tree_str += "\n"
                    tree_str += child_str

            return tree_str

        tree_str = dfs(0, accessibility_tree[0]["nodeId"], 0)
        return tree_str, obs_nodes_info
@staticmethod
def clean_accesibility_tree(tree_str: str) -> str:
"""further clean accesibility tree"""
clean_lines: list[str] = []
for line in tree_str.split("\n"):
# remove statictext if the content already appears in the previous line
if "statictext" in line.lower():
prev_lines = clean_lines[-3:]
pattern = r"\[\d+\] StaticText (.+)"
match = re.search(pattern, line, re.DOTALL)
if match:
static_text = match.group(1)[1:-1] # remove the quotes
if static_text and all(
static_text not in prev_line
for prev_line in prev_lines
):
clean_lines.append(line)
else:
clean_lines.append(line)
return "\n".join(clean_lines)
def process(self, page: Page, client: CDPSession) -> str:
    """Build the text observation for the current page.

    The result is a tab summary line followed by either the parsed DOM
    tree ("html") or the cleaned accessibility tree
    ("accessibility_tree"), per self.observation_type. Side effects:
    refreshes self.obs_nodes_info, self.meta_data["obs_nodes_info"] and
    self.browser_config.
    """
    # get the tab info
    open_tabs = page.context.pages
    try:
        tab_titles = [tab.title() for tab in open_tabs]
        current_tab_idx = open_tabs.index(page)
        for idx in range(len(open_tabs)):
            if idx == current_tab_idx:
                tab_titles[
                    idx
                ] = f"Tab {idx} (current): {open_tabs[idx].title()}"
            else:
                tab_titles[idx] = f"Tab {idx}: {open_tabs[idx].title()}"
        tab_title_str = " | ".join(tab_titles)
    except Exception:
        # Fall back to index-only labels when a title is unavailable.
        # Fixed: this string was missing its f-prefix, so every tab
        # rendered literally as "Tab {idx}".
        tab_title_str = " | ".join(
            [f"Tab {idx}" for idx in range(len(open_tabs))]
        )
    try:
        browser_info = self.fetch_browser_info(page, client)
    except Exception:
        # The CDP snapshot can fail mid-load; wait briefly and retry once.
        page.wait_for_load_state("load", timeout=500)
        browser_info = self.fetch_browser_info(page, client)
    if self.observation_type == "html":
        dom_tree = self.fetch_page_html(
            browser_info,
            page,
            client,
            current_viewport_only=self.current_viewport_only,
        )
        content, obs_nodes_info = self.parse_html(dom_tree)
        self.obs_nodes_info = obs_nodes_info
        self.meta_data["obs_nodes_info"] = obs_nodes_info
    elif self.observation_type == "accessibility_tree":
        accessibility_tree = self.fetch_page_accessibility_tree(
            browser_info,
            client,
            current_viewport_only=self.current_viewport_only,
        )
        content, obs_nodes_info = self.parse_accessibility_tree(
            accessibility_tree
        )
        content = self.clean_accesibility_tree(content)
        self.obs_nodes_info = obs_nodes_info
        self.meta_data["obs_nodes_info"] = obs_nodes_info
    else:
        # Fixed typo in the error message ("observatrion").
        raise ValueError(
            f"Invalid observation type: {self.observation_type}"
        )
    self.browser_config = browser_info["config"]
    content = f"{tab_title_str}\n\n{content}"
    return content
def get_element_center(self, element_id: str) -> tuple[float, float]:
    """Return the element's center as viewport-relative (x, y) fractions."""
    left, top, width, height = self.obs_nodes_info[element_id][
        "union_bound"
    ]
    mid_x = left + width / 2
    mid_y = top + height / 2
    return (
        mid_x / self.viewport_size["width"],
        mid_y / self.viewport_size["height"],
    )
class ImageObservationProcessor(ObservationProcessor):
    """Produces the screenshot (image) observation for a page."""

    def __init__(self, observation_type: str):
        self.observation_type = observation_type
        self.observation_tag = "image"
        self.meta_data = create_empty_metadata()

    def process(self, page: Page, client: CDPSession) -> npt.NDArray[np.uint8]:
        """Return the current page screenshot as a numpy array.

        If the first capture fails (e.g. the page is still loading),
        wait for the load event and retry once.
        """
        try:
            screenshot = png_bytes_to_numpy(page.screenshot())
        except Exception:
            # Was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; catch only real errors.
            page.wait_for_event("load")
            screenshot = png_bytes_to_numpy(page.screenshot())
        return screenshot
class ObservationHandler:
    """Main entry point to access all observation processors."""

    def __init__(
        self,
        main_observation_type: str,
        text_observation_type: str,
        image_observation_type: str,
        current_viewport_only: bool,
        viewport_size: ViewportSize,
    ) -> None:
        self.main_observation_type = main_observation_type
        self.text_processor = TextObervationProcessor(
            text_observation_type, current_viewport_only, viewport_size
        )
        self.image_processor = ImageObservationProcessor(
            image_observation_type
        )
        self.viewport_size = viewport_size

    def get_observation_space(self) -> spaces.Dict:
        """Build the combined text + image observation space."""
        text_space = spaces.Text(
            min_length=0,
            max_length=UTTERANCE_MAX_LENGTH,
            charset=ASCII_CHARSET + FREQ_UNICODE_CHARSET,
        )
        height = self.viewport_size["height"]
        width = self.viewport_size["width"]
        # Each position stores the RGB values. Note the swapped axes
        # (height first). np.full gives an exact uint8 upper bound;
        # previously `np.ones(...) * 255.0` produced a float64 array for
        # a Box declared with dtype=np.uint8.
        image_space = spaces.Box(
            np.zeros((height, width, 3), dtype=np.uint8),
            np.full((height, width, 3), 255, dtype=np.uint8),
            dtype=np.uint8,
        )
        return spaces.Dict({"text": text_space, "image": image_space})

    def get_observation(
        self, page: Page, client: CDPSession
    ) -> dict[str, Observation]:
        """Run both processors and return their observations."""
        text_obs = self.text_processor.process(page, client)
        image_obs = self.image_processor.process(page, client)
        return {"text": text_obs, "image": image_obs}

    def get_observation_metadata(self) -> dict[str, ObservationMetadata]:
        """Return the per-modality metadata recorded by the processors."""
        return {
            "text": self.text_processor.meta_data,
            "image": self.image_processor.meta_data,
        }

    @property
    def action_processor(self) -> ObservationProcessor:
        """Return the main processor that is associated with the action space"""
        if self.main_observation_type == "text":
            return self.text_processor
        elif self.main_observation_type == "image":
            return self.image_processor
        else:
            raise ValueError("Invalid main observation type")
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/browser_env/utils.py | exps/webarena_exp/browser_env/utils.py | from dataclasses import dataclass
from io import BytesIO
from typing import Any, Dict, TypedDict, Union
import numpy as np
import numpy.typing as npt
from PIL import Image
@dataclass
class DetachedPage:
    """Immutable snapshot of a page (URL + HTML) that can outlive the
    live Playwright page object it was captured from."""

    url: str
    content: str  # html
def png_bytes_to_numpy(png: bytes) -> npt.NDArray[np.uint8]:
    """Decode raw PNG bytes into a numpy image array.

    Example:

    >>> fig = go.Figure(go.Scatter(x=[1], y=[1]))
    >>> plt.imshow(png_bytes_to_numpy(fig.to_image('png')))
    """
    buffer = BytesIO(png)
    decoded = Image.open(buffer)
    return np.array(decoded)
class AccessibilityTreeNode(TypedDict):
    """One node of the browser accessibility tree.

    NOTE(review): field names appear to mirror the Chrome DevTools
    Accessibility domain payload, with the *_bound fields added during
    processing — confirm against the fetch code.
    """

    nodeId: str
    ignored: bool
    role: dict[str, Any]  # accessed as role["value"] by the parser
    chromeRole: dict[str, Any]
    name: dict[str, Any]  # accessed as name["value"] by the parser
    properties: list[dict[str, Any]]
    childIds: list[str]
    parentId: str
    backendDOMNodeId: str
    frameId: str
    bound: list[float] | None
    union_bound: list[float] | None  # [x, y, width, height] or None
    offsetrect_bound: list[float] | None
class DOMNode(TypedDict):
    """One node of the flattened DOM snapshot."""

    nodeId: str
    nodeType: str
    nodeName: str
    nodeValue: str
    attributes: str
    backendNodeId: str
    parentId: str
    childIds: list[str]
    cursor: int  # presumably an index into the snapshot arrays — confirm
    union_bound: list[float] | None  # bounding box, None when unknown
class BrowserConfig(TypedDict):
    """Geometry of the browser window/viewport at capture time."""

    win_top_bound: float
    win_left_bound: float
    win_width: float
    win_height: float
    win_right_bound: float
    win_lower_bound: float
    device_pixel_ratio: float
class BrowserInfo(TypedDict):
    """A raw DOM snapshot paired with the window geometry it was taken in."""

    DOMTree: dict[str, Any]
    config: BrowserConfig
# Convenience aliases for the flattened tree representations.
AccessibilityTree = list[AccessibilityTreeNode]
DOMTree = list[DOMNode]

# An observation is either rendered text or an image array.
Observation = str | npt.NDArray[np.uint8]
class StateInfo(TypedDict):
    """One step's environment state: per-modality observations plus the
    auxiliary info dict returned by the env."""

    observation: dict[str, Observation]
    info: Dict[str, Any]
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/browser_env/async_envs.py | exps/webarena_exp/browser_env/async_envs.py | import asyncio
import json
from dataclasses import dataclass
from pathlib import Path
import numpy as np
import numpy.typing as npt
from gymnasium import Env
from gymnasium.spaces import Box, Text
from playwright.async_api import Page, ViewportSize, async_playwright
from .actions import Action, aexecute_action, get_action_space
from .utils import DetachedPage, png_bytes_to_numpy
class AsyncScriptBrowserEnv(Env[npt.NDArray[np.uint8], Action]):
    """
    The goal of this environment is to produce a prototype of a browser environment.
    In the end, we want to support a fully configurable browser environment with wide
    range of action spaces and observation spaces, both structured and unstructured.
    But in this prototype, we just support action space specified by Playwright script,
    and observation space is the html content of the page.
    """

    def __init__(
        self,
        max_page_length: int = 2048,
        headless: bool = True,
        slow_mo: int = 0,
        timeout: int = 30000,
        viewport_size: ViewportSize = {"width": 1280, "height": 720},
    ):
        # Screenshots come back with 4 channels (RGBA), hence the 4.
        self.observation_space = Box(
            0,
            255,
            (viewport_size["height"], viewport_size["width"], 4),
            np.uint8,
        )
        # TODO: make Space[Action] = ActionSpace
        self.action_space = get_action_space()  # type: ignore[assignment]
        self.headless = headless
        self.slow_mo = slow_mo
        self.reset_finished = False
        self.timeout = timeout
        self.viewport_size = viewport_size

    async def setup(self, config_file: Path | None = None) -> None:
        """Launch the browser and open the (optionally configured) page.

        config_file may provide "storage_state", "start_url" and
        "geolocation"; missing keys fall back to Playwright defaults.
        """
        self.context_manager = async_playwright()
        self.playwright = await self.context_manager.__aenter__()
        self.browser = await self.playwright.chromium.launch(
            headless=self.headless, slow_mo=self.slow_mo
        )
        if config_file:
            with open(config_file, "r") as f:
                instance_config = json.load(f)
        else:
            instance_config = {}
        storage_state = instance_config.get("storage_state", None)
        start_url = instance_config.get("start_url", None)
        geolocation = instance_config.get("geolocation", None)
        self.context = await self.browser.new_context(
            viewport=self.viewport_size,
            storage_state=storage_state,
            geolocation=geolocation,
            device_scale_factor=1,
        )
        self.page = await self.context.new_page()
        if start_url:
            await self.page.goto(start_url)

    async def areset(
        self,
        *,
        seed: int | None = None,
        options: dict[str, str] | None = None,
    ) -> tuple[npt.NDArray[np.uint8], dict[str, object]]:
        """
        Reset the environment.
        :param options: options for the environment. The options are:
        - storage_state: the path to the storage state file
        """
        super().reset(seed=seed, options=options)
        if self.reset_finished:
            # Tear down the previous browser session before starting anew.
            await self.context_manager.__aexit__()
        if options is not None and "config_file" in options:
            config_file = Path(options["config_file"])
            if config_file.exists():
                await self.setup(config_file=config_file)
            else:
                raise ValueError(f"Config state {config_file} does not exist.")
        else:
            await self.setup()
        self.reset_finished = True
        content = await self.page.content()
        screenshot = png_bytes_to_numpy(await self.page.screenshot())
        return (
            screenshot,
            {"page": DetachedPage(self.page.url, content)},
        )

    def reset(
        self,
        *,
        seed: int | None = None,
        options: dict[str, str] | None = None,
    ) -> tuple[npt.NDArray[np.uint8], dict[str, object]]:
        """Synchronous wrapper around areset()."""
        return asyncio.run(self.areset(seed=seed, options=options))

    async def aclose(self) -> None:
        if self.reset_finished:
            await self.context_manager.__aexit__()

    def close(self) -> None:
        """Synchronous wrapper around aclose()."""
        asyncio.run(self.aclose())

    async def astep(
        self, action: Action
    ) -> tuple[npt.NDArray[np.uint8], float, bool, bool, dict[str, object]]:
        """Execute one action; reward is 1.0 iff it ran without error."""
        if not self.reset_finished:
            raise RuntimeError("Call reset first before calling step.")
        success = False
        fail_error = ""
        try:
            self.page = await aexecute_action(action, self.page, self.context)
            success = True
        except Exception as e:
            # A failed action still produces an observation; the error
            # text is surfaced in the info dict.
            fail_error = str(e)
        try:
            content = await self.page.content()
            screenshot = png_bytes_to_numpy(await self.page.screenshot())
        except Exception:
            # Was a bare `except:` (also swallowed KeyboardInterrupt /
            # SystemExit). Wait for the page to finish loading and retry.
            await self.page.wait_for_load_state("load")
            content = await self.page.content()
            screenshot = png_bytes_to_numpy(await self.page.screenshot())
        return (
            screenshot,
            float(success),
            False,
            False,
            {
                "page": DetachedPage(self.page.url, content),
                "fail_error": fail_error,
            },
        )

    def step(
        self, action: Action
    ) -> tuple[npt.NDArray[np.uint8], float, bool, bool, dict[str, object]]:
        """Synchronous wrapper around astep().

        Removed the leftover `debug=True`: it enabled asyncio debug mode
        on every step, unlike reset(), and only added overhead.
        """
        return asyncio.run(self.astep(action))
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/browser_env/__init__.py | exps/webarena_exp/browser_env/__init__.py | import asyncio
from .actions import (
Action,
ActionParsingError,
ActionTypes,
action2create_function,
action2str,
create_check_action,
create_click_action,
create_focus_and_click_action,
create_focus_and_type_action,
create_go_back_action,
create_go_forward_action,
create_goto_url_action,
create_hover_action,
create_id_based_action,
create_key_press_action,
create_keyboard_type_action,
create_mouse_click_action,
create_mouse_hover_action,
create_new_tab_action,
create_none_action,
create_page_close_action,
create_page_focus_action,
create_playwright_action,
create_random_action,
create_scroll_action,
create_select_option_action,
create_stop_action,
create_type_action,
is_equivalent,
)
from .async_envs import AsyncScriptBrowserEnv
from .envs import ScriptBrowserEnv
from .processors import ObservationMetadata
from .trajectory import Trajectory
from .utils import DetachedPage, StateInfo
# Public API of the browser_env package; keep in sync with the imports above.
__all__ = [
    "ScriptBrowserEnv",
    "AsyncScriptBrowserEnv",
    "DetachedPage",
    "StateInfo",
    "ObservationMetadata",
    "Action",
    "ActionTypes",
    "action2str",
    "create_random_action",
    "create_focus_and_click_action",
    "create_focus_and_type_action",
    "is_equivalent",
    "create_mouse_click_action",
    "create_mouse_hover_action",
    "create_none_action",
    "create_keyboard_type_action",
    "create_page_focus_action",
    "create_new_tab_action",
    "create_go_back_action",
    "create_go_forward_action",
    "create_goto_url_action",
    "create_page_close_action",
    "action2create_function",
    "create_playwright_action",
    "create_id_based_action",
    "create_scroll_action",
    "create_key_press_action",
    "create_check_action",
    "create_click_action",
    "create_type_action",
    "create_hover_action",
    "create_select_option_action",
    "create_stop_action",
    "ActionParsingError",
    "Trajectory",
]
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/browser_env/envs.py | exps/webarena_exp/browser_env/envs.py | import json
import re
import time
from collections import defaultdict
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Union
import numpy as np
import numpy.typing as npt
from beartype import beartype
from beartype.door import is_bearable
from gymnasium import Env
from gymnasium.spaces import Box, Text
from playwright.sync_api import (
CDPSession,
Page,
Playwright,
ViewportSize,
expect,
sync_playwright,
)
from .actions import Action, execute_action, get_action_space
from .processors import ObservationHandler, ObservationMetadata
from .utils import (
AccessibilityTree,
DetachedPage,
Observation,
png_bytes_to_numpy,
)
@dataclass
class PlaywrightScript:
function: str # goto, get_by_role
destination: str # https://www.google.com/, combobox
name: str | None = None # Search, Avatar 2009
operation: str | None = None # click, fill, press
value: str | None = None # avatar movie, Enter
def parse_action(action: str) -> PlaywrightScript:
splitted = action.strip().split(" ")
assert len(splitted) >= 2
match splitted[:2]:
case ["goto", url]:
assert len(splitted) == 2
return PlaywrightScript("goto", url)
case ["get_by_role", destination]:
assert len(splitted) >= 4
match splitted[2:]:
case [name, operation]:
return PlaywrightScript(
"get_by_role", destination, name, operation
)
case [name, operation, value]:
return PlaywrightScript(
"get_by_role", destination, name, operation, value
)
case _:
raise ValueError("Invalid action")
case _:
raise ValueError(f"Invalid action {action}")
class ScriptBrowserEnv(Env[dict[str, Observation], Action]):
    """
    The goal of this environment is to produce a prototype of a browser environment.
    In the end, we want to support a fully configurable browser environment with wide
    range of action spaces and observation spaces, both structured and unstructured.
    But in this prototype, we just support action space specified by Playwright script,
    and observation space is the html content of the page.
    """

    @beartype
    def __init__(
        self,
        max_page_length: int = 8192,
        headless: bool = True,
        slow_mo: int = 0,
        observation_type: str = "html",
        current_viewport_only: bool = False,
        viewport_size: ViewportSize = {"width": 1280, "height": 720},
        save_trace_enabled: bool = False,
        sleep_after_execution: float = 0.0,
    ):
        # TODO: make Space[Action] = ActionSpace
        self.action_space = get_action_space()  # type: ignore[assignment]
        self.headless = headless
        self.slow_mo = slow_mo
        self.current_viewport_only = current_viewport_only
        self.reset_finished = False
        self.viewport_size = viewport_size
        self.save_trace_enabled = save_trace_enabled
        self.sleep_after_execution = sleep_after_execution

        # "html" and "accessibility_tree" are text observations; "image"
        # switches the main observation modality to screenshots.
        match observation_type:
            case "html" | "accessibility_tree":
                self.text_observation_type = observation_type
                self.image_observation_type = ""
                self.main_observation_type = "text"
            case "image":
                self.image_observation_type = observation_type
                self.text_observation_type = ""  # type: ignore[assignment]
                self.main_observation_type = "image"
            case _:
                raise ValueError(
                    f"Unsupported observation type: {observation_type}"
                )

        self.observation_handler = ObservationHandler(
            self.main_observation_type,
            self.text_observation_type,
            self.image_observation_type,
            self.current_viewport_only,
            self.viewport_size,
        )

        self.observation_space = (
            self.observation_handler.get_observation_space()
        )

    @beartype
    def setup(self, config_file: Path | None = None) -> None:
        """Launch the browser/context and open the configured start page(s).

        config_file may provide "storage_state", "start_url" (multiple
        URLs separated by " |AND| ") and "geolocation".
        """
        self.context_manager = sync_playwright()
        self.playwright = self.context_manager.__enter__()
        self.browser = self.playwright.chromium.launch(
            headless=self.headless, slow_mo=self.slow_mo
        )

        if config_file:
            with open(config_file, "r") as f:
                instance_config = json.load(f)
        else:
            instance_config = {}

        storage_state = instance_config.get("storage_state", None)
        start_url = instance_config.get("start_url", None)
        geolocation = instance_config.get("geolocation", None)

        self.context = self.browser.new_context(
            viewport=self.viewport_size,
            storage_state=storage_state,
            geolocation=geolocation,
            device_scale_factor=1,
        )
        if self.save_trace_enabled:
            self.context.tracing.start(screenshots=True, snapshots=True)
        if start_url:
            # One tab per URL; a CDP session is attached to each page so
            # the observation processors can query DevTools directly.
            start_urls = start_url.split(" |AND| ")
            for url in start_urls:
                page = self.context.new_page()
                client = page.context.new_cdp_session(
                    page
                )  # talk to chrome devtools
                if self.text_observation_type == "accessibility_tree":
                    client.send("Accessibility.enable")
                page.client = client  # type: ignore # TODO[shuyanzh], fix this hackey client
                page.goto(url)
            # set the first page as the current page
            self.page = self.context.pages[0]
            self.page.bring_to_front()
        else:
            self.page = self.context.new_page()
            client = self.page.context.new_cdp_session(self.page)
            if self.text_observation_type == "accessibility_tree":
                client.send("Accessibility.enable")
            self.page.client = client  # type: ignore

    def get_page_client(self, page: Page) -> CDPSession:
        # Retrieves the CDP session attached to the page in setup().
        return page.client  # type: ignore

    def _get_obs(self) -> dict[str, Observation]:
        obs = self.observation_handler.get_observation(
            self.page, self.get_page_client(self.page)
        )
        return obs

    def _get_obs_metadata(self) -> dict[str, ObservationMetadata]:
        metadata = self.observation_handler.get_observation_metadata()
        return metadata

    @beartype
    def reset(
        self,
        *,
        seed: int | None = None,
        options: dict[str, str] | None = None,
    ) -> tuple[dict[str, Observation], dict[str, Any]]:
        """
        Reset the environment.
        :param options: options for the environment. The current supported options are:
        - "storage_state": the storage state of the browser. It is a file path to a json file.
        """
        super().reset(seed=seed, options=options)
        # Tear down any previous browser session before re-launching.
        if self.reset_finished:
            self.context_manager.__exit__()

        if options is not None and "config_file" in options:
            config_file = Path(options["config_file"])
            if config_file.exists():
                self.setup(config_file=config_file)
            else:
                raise ValueError(f"Config file {config_file} does not exist.")
        else:
            self.setup()
        self.reset_finished = True

        if self.sleep_after_execution > 0:
            time.sleep(self.sleep_after_execution)

        observation = self._get_obs()
        observation_metadata = self._get_obs_metadata()
        info = {
            "page": DetachedPage(self.page.url, ""),
            "fail_error": "",
            "observation_metadata": observation_metadata,
        }

        return (observation, info)

    def save_trace(self, trace_path: str | Path) -> None:
        # No-op unless tracing was enabled at construction time.
        if self.save_trace_enabled:
            self.context.tracing.stop(path=trace_path)

    def close(self) -> None:
        if self.reset_finished:
            self.context_manager.__exit__()

    def step(
        self, action: Action
    ) -> tuple[dict[str, Observation], float, bool, bool, dict[str, Any]]:
        """Execute one action; reward is 1.0 iff it ran without error.

        The episode is never terminated/truncated by the env itself.
        """
        if not self.reset_finished:
            raise RuntimeError("Call reset first before calling step.")

        success = False
        fail_error = ""
        try:
            self.page = execute_action(
                action,
                self.page,
                self.context,
                self.observation_handler.action_processor,
            )
            success = True
        except Exception as e:
            # Failed actions still yield an observation; the error text
            # is surfaced via info["fail_error"].
            fail_error = str(e)

        # hard sleep TODO[shuyanzh] suboptimal, may need to check network
        if self.sleep_after_execution > 0:
            time.sleep(self.sleep_after_execution)

        observation = self._get_obs()
        observation_metadata = self._get_obs_metadata()

        info = {
            "page": DetachedPage(self.page.url, self.page.content()),
            "fail_error": fail_error,
            "observation_metadata": observation_metadata,
        }
        msg = (
            observation,
            float(success),  # reward
            False,  # terminated
            False,  # truncated
            info,
        )
        return msg
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/browser_env/actions.py | exps/webarena_exp/browser_env/actions.py | """
Browser Env action space.
Inspired by Farama-Foundation/miniwob-plusplus
"""
import ast
import random
import re
import string
from enum import IntEnum
from itertools import chain
from typing import Any, TypedDict, Union, cast
import numpy as np
import numpy.typing as npt
from beartype import beartype
from gymnasium import spaces
from playwright._impl._api_structures import ViewportSize
from playwright.async_api import BrowserContext as ABrowserContext
from playwright.async_api import Locator as ALocator
from playwright.async_api import Page as APage
from playwright.sync_api import BrowserContext, Locator, Page
from browser_env.constants import (
ASCII_CHARSET,
FREQ_UNICODE_CHARSET,
MAX_ANSWER_LENGTH,
MAX_ELEMENT_ID,
MAX_ELEMENT_INDEX_IN_VIEWPORT,
MAX_PAGE_NUMBER,
MAX_VANILLA_STR_LENGTH,
PLAYWRIGHT_ACTIONS,
PLAYWRIGHT_LOCATORS,
ROLES,
SPECIAL_KEY_MAPPINGS,
SPECIAL_KEYS,
SPECIAL_LOCATORS,
TEXT_MAX_LENGTH,
TYPING_MAX_LENGTH,
URL_MAX_LENGTH,
RolesType,
)
from browser_env.processors import ObservationProcessor
class ParsedPlaywrightCode(TypedDict):
    """Decomposition of a single Playwright call into its callable name,
    positional arguments and keyword arguments (presumably produced by
    AST-parsing generated Playwright code — the parser is not in view)."""

    function_name: str
    arguments: list[str]
    keywords: dict[str, Any]
from browser_env.processors import (
ObservationProcessor,
TextObervationProcessor,
)
def is_in_viewport(
    element: Locator, viewport: ViewportSize, threshold: float = 0.3
) -> bool:
    """Given a playwright locator, check if it is in the viewport.

    True when more than `threshold` of the element's bounding-box area
    intersects the viewport rectangle.
    """
    box = element.bounding_box()
    assert box is not None
    box_area = box["width"] * box["height"]
    if box_area == 0:
        # Hidden/collapsed elements have a zero-area box; previously this
        # raised ZeroDivisionError. Treat them as not visible.
        return False
    boxx0 = box["x"]
    boxx1 = box["x"] + box["width"]
    boxy0 = box["y"]
    boxy1 = box["y"] + box["height"]
    viewportx0, viewporty0 = 0, 0
    viewportx1, viewporty1 = viewport["width"], viewport["height"]
    inter = max(0, min(boxx1, viewportx1) - max(boxx0, viewportx0)) * max(
        0, min(boxy1, viewporty1) - max(boxy0, viewporty0)
    )
    ratio = inter / box_area
    return ratio > threshold
async def async_is_in_viewport(
    element: ALocator, viewport: ViewportSize, threshold: float = 0.3
) -> bool:
    """Async twin of is_in_viewport: True when more than `threshold` of
    the element's bounding-box area intersects the viewport."""
    box = await element.bounding_box()
    assert box is not None
    box_area = box["width"] * box["height"]
    if box_area == 0:
        # Hidden/collapsed elements have a zero-area box; previously this
        # raised ZeroDivisionError. Treat them as not visible.
        return False
    boxx0 = box["x"]
    boxx1 = box["x"] + box["width"]
    boxy0 = box["y"]
    boxy1 = box["y"] + box["height"]
    viewportx0, viewporty0 = 0, 0
    viewportx1, viewporty1 = viewport["width"], viewport["height"]
    inter = max(0, min(boxx1, viewportx1) - max(boxx0, viewportx0)) * max(
        0, min(boxy1, viewporty1) - max(boxy0, viewporty0)
    )
    ratio = inter / box_area
    return ratio > threshold
class Action(TypedDict):
    """Serialized browser action. Which fields are meaningful depends on
    action_type; the rest hold neutral defaults (see create_none_action)."""

    action_type: int
    coords: npt.NDArray[np.float32]  # (x, y) in [0, 1] for mouse actions
    element_role: int
    element_name: str
    text: list[int]  # typed characters encoded as key ids (see _key2id)
    page_number: int
    url: str
    nth: int
    element_id: str
    direction: str
    key_comb: str
    pw_code: str
    answer: str
    raw_prediction: str  # raw prediction from the model
@beartype
def action2str(
    action: Action, action_set_tag: str, semantic_element: str = ""
) -> str:
    """Return the string representation of an action.

    semantic_element: the semantic information of the element
    such as a line in an accessibility tree
    """
    # Only the id-based accessibility-tree action set is supported here.
    if action_set_tag == "id_accessibility_tree":
        element_id = action["element_id"]

        match action["action_type"]:
            case ActionTypes.CLICK:
                # [ID=X] xxxxx
                action_str = f"click [{element_id}] where [{element_id}] is {semantic_element}"
            case ActionTypes.TYPE:
                # Decode the typed characters from key ids back to text.
                text = "".join([_id2key[i] for i in action["text"]])
                text = text.replace("\n", " ")
                action_str = f"type [{element_id}] [{text}] where [{element_id}] is {semantic_element}"
            case ActionTypes.HOVER:
                action_str = f"hover [{element_id}] where [{element_id}] is {semantic_element}"
            case ActionTypes.SCROLL:
                action_str = f"scroll [{action['direction']}]"
            case ActionTypes.KEY_PRESS:
                action_str = f"press [{action['key_comb']}]"
            case ActionTypes.GOTO_URL:
                action_str = f"goto [{action['url']}]"
            case ActionTypes.NEW_TAB:
                action_str = "new_tab"
            case ActionTypes.PAGE_CLOSE:
                action_str = "close_tab"
            case ActionTypes.GO_BACK:
                action_str = "go_back"
            case ActionTypes.GO_FORWARD:
                action_str = "go_forward"
            case ActionTypes.PAGE_FOCUS:
                action_str = f"page_focus [{action['page_number']}]"
            case ActionTypes.STOP:
                action_str = f"stop [{action['answer']}]"
            case ActionTypes.NONE:
                action_str = "none"
            case _:
                raise ValueError(
                    f"Unknown action type {action['action_type']}"
                )
    else:
        raise NotImplementedError(f"Unknown action set tag {action_set_tag}")

    return action_str
@beartype
def action2create_function(action: Action) -> str:
    """Render an action as the Python source of the create_* call that
    would reconstruct it (used for serializing/replaying actions)."""
    match (action["action_type"]):
        case ActionTypes.NONE:
            return "create_none_action()"
        # mouse wheel and keyboard action
        case ActionTypes.SCROLL:
            # Direction is normalized to just "up"/"down".
            direction = "up" if "up" in action["direction"] else "down"
            return f"create_scroll_action({repr(direction)})"
        case ActionTypes.KEY_PRESS:
            return f"create_key_press_action({repr(action['key_comb'])})"
        # inter-page actions
        case ActionTypes.PAGE_FOCUS:
            return f"create_page_focus_action({action['page_number']})"
        case ActionTypes.NEW_TAB:
            return "create_new_tab_action()"
        case ActionTypes.GO_BACK:
            return "create_go_back_action()"
        case ActionTypes.GO_FORWARD:
            return "create_go_forward_action()"
        case ActionTypes.GOTO_URL:
            return f"create_goto_url_action({repr(action['url'])})"
        case ActionTypes.PAGE_CLOSE:
            return "create_page_close_action()"
        # low-level keyboard and mouse actions
        case ActionTypes.MOUSE_CLICK:
            return f"create_mouse_click_action({action['coords'][0]}, {action['coords'][1]})"
        case ActionTypes.MOUSE_HOVER:
            return f"create_mouse_hover_action({action['coords'][0]}, {action['coords'][1]})"
        case ActionTypes.KEYBOARD_TYPE:
            # Key ids are decoded back to their string keys.
            return f"create_keyboard_type_action({list(map(lambda x: _id2key[x], action['text']))})"
        # mid-level keyboard and mouse actions
        case ActionTypes.CLICK:
            args = []
            args.append(f"element_id={repr(action['element_id'])}")
            args.append(
                f"element_role={repr(_id2role[action['element_role']])}"
            )
            args.append(f"element_name={repr(action['element_name'])}")
            args.append(f"pw_code={repr(action['pw_code'])}")
            args_str = ", ".join(args)
            return f"create_click_action({args_str})"
        case ActionTypes.HOVER:
            args = []
            args.append(f"element_id={repr(action['element_id'])}")
            args.append(
                f"element_role={repr(_id2role[action['element_role']])}"
            )
            args.append(f"element_name={repr(action['element_name'])}")
            args.append(f"pw_code={repr(action['pw_code'])}")
            args_str = ", ".join(args)
            return f"create_hover_action({args_str})"
        case ActionTypes.TYPE:
            args = []
            text = "".join(map(lambda x: _id2key[x], action["text"]))
            args.append(f"text={repr(text)}")
            args.append(f"element_id={repr(action['element_id'])}")
            args.append(
                f"element_role={repr(_id2role[action['element_role']])}"
            )
            args.append(f"element_name={repr(action['element_name'])}")
            args.append(f"pw_code={repr(action['pw_code'])}")
            args_str = ", ".join(args)
            return f"create_type_action({args_str})"
        # high-level actions, only support locators from playwright
        case ActionTypes.CHECK:
            return f"create_check_action(pw_code={repr(action['pw_code'])})"
        case ActionTypes.SELECT_OPTION:
            return f"create_select_option_action(pw_code={repr(action['pw_code'])})"
        case ActionTypes.STOP:
            return f'create_stop_action({repr(action["answer"])})'
    raise ValueError(f"Invalid action type: {action['action_type']}")
class ActionTypes(IntEnum):
    """Valid action types for browser env."""

    NONE = 0

    # mouse wheel and keyboard, universal across all action spaces
    SCROLL = 1
    KEY_PRESS = 2

    # low level mouse and keyboard actions
    MOUSE_CLICK = 3
    KEYBOARD_TYPE = 4
    MOUSE_HOVER = 5

    # mid level mouse and keyboard actions
    CLICK = 6
    TYPE = 7
    HOVER = 8

    # page level actions, universal across all action spaces
    PAGE_FOCUS = 9
    NEW_TAB = 10
    GO_BACK = 11
    GO_FORWARD = 12
    GOTO_URL = 13
    PAGE_CLOSE = 14

    # high-level actions that playwright supports
    CHECK = 15
    SELECT_OPTION = 16
    STOP = 17

    def __str__(self) -> str:
        # Stable textual form used when actions are serialized/logged.
        return f"ACTION_TYPES.{self.name}"
@beartype
def is_equivalent(a: Action, b: Action) -> bool:
    """Return True if two actions are equal."""
    kind = a["action_type"]
    if kind != b["action_type"]:
        return False
    if kind == ActionTypes.NONE:
        return True
    if kind == ActionTypes.SCROLL:
        # Directions are compared after normalization to "up"/"down".
        norm_a = "up" if "up" in a["direction"] else "down"
        norm_b = "up" if "up" in b["direction"] else "down"
        return norm_a == norm_b
    if kind == ActionTypes.KEY_PRESS:
        return a["key_comb"] == b["key_comb"]
    if kind in (ActionTypes.MOUSE_CLICK, ActionTypes.MOUSE_HOVER):
        return np.allclose(a["coords"], b["coords"])
    if kind == ActionTypes.KEYBOARD_TYPE:
        return a["text"] == b["text"]
    if kind in (ActionTypes.CLICK, ActionTypes.HOVER, ActionTypes.TYPE):
        # TODO: can be further optimized
        if a["element_id"] and b["element_id"]:
            return a["element_id"] == b["element_id"]
        if a["element_role"] and b["element_role"]:
            return (
                a["element_role"] == b["element_role"]
                and a["element_name"] == b["element_name"]
            )
        if a["pw_code"] and b["pw_code"]:
            return a["pw_code"] == b["pw_code"]
        return False
    if kind == ActionTypes.PAGE_FOCUS:
        return a["page_number"] == b["page_number"]
    if kind in (
        ActionTypes.NEW_TAB,
        ActionTypes.GO_BACK,
        ActionTypes.GO_FORWARD,
        ActionTypes.PAGE_CLOSE,
    ):
        # These carry no payload: same type means equivalent.
        return True
    if kind == ActionTypes.GOTO_URL:
        return a["url"] == b["url"]
    if kind in (ActionTypes.CHECK, ActionTypes.SELECT_OPTION):
        return a["pw_code"] == b["pw_code"]
    if kind == ActionTypes.STOP:
        return a["answer"] == b["answer"]
    raise ValueError(f"Unknown action type: {a['action_type']}")
# Bidirectional mappings between keyboard keys / element roles and the
# integer ids used by the serialized action space.
_key2id: dict[str, int] = {
    key: i
    for i, key in enumerate(
        chain(SPECIAL_KEYS, ASCII_CHARSET, FREQ_UNICODE_CHARSET, ["\n"])
    )
}
# Inverse of _key2id: position i holds the key whose id is i.
_id2key: list[str] = sorted(_key2id, key=_key2id.get)  # type: ignore[arg-type]
_role2id: dict[RolesType, int] = {
    cast(RolesType, role): i
    for i, role in enumerate(chain(ROLES, SPECIAL_LOCATORS))
}
# Inverse of _role2id: position i holds the role whose id is i.
_id2role: list[RolesType] = sorted(_role2id, key=_role2id.get)  # type: ignore[arg-type]
def _keys2ids(keys: list[int | str] | str) -> list[int]:
    """Convert each key to its integer id; ints pass through unchanged."""
    return [
        _key2id[str(key)] if isinstance(key, str) else int(key)
        for key in keys
    ]
@beartype
def get_action_space() -> spaces.Dict:
    """Return the space of serialized actions."""
    # One entry per field of the Action TypedDict; bounds come from the
    # constants module.
    space = spaces.Dict(
        {
            "action_type": spaces.Discrete(len(ActionTypes)),
            # coords (left, top) is used for COORD_CLICK
            "coords": spaces.Box(
                np.array([0.0, 0.0], dtype=np.float32),
                np.array([1.0, 1.0], dtype=np.float32),
            ),
            # element role is used for FOCUS_AND_CLICK and FOCUS_AND_TYPE
            "element_role": spaces.Discrete(
                len(ROLES) + len(SPECIAL_LOCATORS)
            ),
            # element name is used with element role
            "element_name": spaces.Text(TEXT_MAX_LENGTH),
            "element_id": spaces.Text(TEXT_MAX_LENGTH),
            # text is only used for TYPE and FOCUS_AND_TYPE
            "text": spaces.MultiDiscrete(
                [
                    len(ASCII_CHARSET)
                    + len(SPECIAL_KEYS)
                    + len(FREQ_UNICODE_CHARSET)
                ]
                * TYPING_MAX_LENGTH
            ),
            "page_number": spaces.Discrete(MAX_PAGE_NUMBER),
            "url": spaces.Text(URL_MAX_LENGTH),
            "nth": spaces.Discrete(MAX_ELEMENT_INDEX_IN_VIEWPORT),
            "key_comb": spaces.Text(MAX_VANILLA_STR_LENGTH),
            "direction": spaces.Text(MAX_VANILLA_STR_LENGTH),
            "pw_code": spaces.Text(MAX_VANILLA_STR_LENGTH),
            "answer": spaces.Text(MAX_ANSWER_LENGTH),
        }
    )
    return space
@beartype
def create_random_action() -> Action:
    """Return a random action (every field randomized; useful for tests
    and for sampling from the action space)."""
    return {
        "action_type": np.random.randint(len(ActionTypes)),
        "coords": np.random.rand(2).astype(np.float32),
        "element_role": np.random.randint(len(ROLES) + len(SPECIAL_LOCATORS)),
        "element_name": "".join(
            random.choices(ASCII_CHARSET, k=np.random.randint(TEXT_MAX_LENGTH))
        ),
        "text": list(
            random.choices(
                list(range(len(ASCII_CHARSET))),
                k=np.random.randint(TYPING_MAX_LENGTH),
            )
        ),
        "page_number": np.random.randint(MAX_PAGE_NUMBER),
        "url": "".join(
            random.choices(ASCII_CHARSET, k=np.random.randint(URL_MAX_LENGTH))
        ),
        "nth": np.random.randint(MAX_ELEMENT_INDEX_IN_VIEWPORT),
        "element_id": str(np.random.randint(MAX_ELEMENT_ID)),
        "key_comb": "+".join(
            random.choices(SPECIAL_KEYS, k=np.random.randint(3))
        ),
        "direction": random.choice(["up", "down"]),
        "pw_code": "".join(
            random.choices(
                string.ascii_uppercase + string.digits,
                k=np.random.randint(MAX_VANILLA_STR_LENGTH),
            )
        ),
        "answer": str(np.random.randint(MAX_ANSWER_LENGTH)),
        "raw_prediction": str(np.random.randint(MAX_ANSWER_LENGTH)),
    }
@beartype
def create_none_action() -> Action:
    """Return a valid action object that does nothing.

    Every creator below starts from this template and overwrites only the
    fields relevant to its action type.
    """
    return {
        "action_type": ActionTypes.NONE,
        "coords": np.zeros(2, dtype=np.float32),
        "element_role": 0,
        "element_name": "",
        "text": [],
        "page_number": 0,
        "url": "",
        "nth": 0,
        "pw_code": "",  # str that requires further processing
        "element_id": "",
        "key_comb": "",
        "direction": "",
        "answer": "",
        "raw_prediction": "",
    }
@beartype
def create_stop_action(answer: str) -> Action:
    """Build a STOP action that ends the episode with *answer* as the result."""
    action = create_none_action()
    action["action_type"] = ActionTypes.STOP
    action["answer"] = answer
    return action
@beartype
def create_scroll_action(direction: str) -> Action:
    """Build a SCROLL action.

    Args:
        direction: Either ``"up"`` or ``"down"``.

    Raises:
        ValueError: If *direction* is not supported. (Previously an
            ``assert``, which is silently stripped under ``python -O``.)
    """
    if direction not in ("up", "down"):
        raise ValueError(f"Invalid scroll direction: {direction!r}")
    action = create_none_action()
    action.update(
        {
            "action_type": ActionTypes.SCROLL,
            "direction": direction,
        }
    )
    return action
@beartype
def create_mouse_hover_action(
    left: float | None = None, top: float | None = None
) -> Action:
    """Build a MOUSE_HOVER action at viewport-relative coordinates.

    Args:
        left: Fractional x position in [0, 1].
        top: Fractional y position in [0, 1].

    Raises:
        ValueError: If either coordinate is missing. (Previously this fell
            through to an opaque numpy error when building the float array,
            and the docstring wrongly described COORD_CLICK.)
    """
    if left is None or top is None:
        raise ValueError("left and top must both be provided for MOUSE_HOVER")
    action = create_none_action()
    action.update(
        {
            "action_type": ActionTypes.MOUSE_HOVER,
            "coords": np.array([left, top], dtype=np.float32),
        }
    )
    return action
@beartype
def create_key_press_action(key_comb: str) -> Action:
    """Build a KEY_PRESS action, normalizing special key names.

    Each "+"-separated component is remapped through SPECIAL_KEY_MAPPINGS
    (case-insensitively); unknown components are kept as-is.
    """

    def _normalize(comb: str) -> str:
        # e.g. "meta+enter" -> platform-appropriate names
        return "+".join(
            SPECIAL_KEY_MAPPINGS.get(part.lower(), part)
            for part in comb.split("+")
        )

    action = create_none_action()
    action["action_type"] = ActionTypes.KEY_PRESS
    action["key_comb"] = _normalize(key_comb)
    return action
@beartype
def create_page_focus_action(page_number: int) -> Action:
    """Build a PAGE_FOCUS action that switches to the tab at *page_number*."""
    action = create_none_action()
    action["action_type"] = ActionTypes.PAGE_FOCUS
    action["page_number"] = page_number
    return action
@beartype
def create_new_tab_action() -> Action:
    """Build a NEW_TAB action that opens a fresh browser tab."""
    action = create_none_action()
    action["action_type"] = ActionTypes.NEW_TAB
    return action
@beartype
def create_go_back_action() -> Action:
    """Build a GO_BACK action (browser history back)."""
    action = create_none_action()
    action["action_type"] = ActionTypes.GO_BACK
    return action
@beartype
def create_go_forward_action() -> Action:
    """Build a GO_FORWARD action (browser history forward)."""
    action = create_none_action()
    action["action_type"] = ActionTypes.GO_FORWARD
    return action
@beartype
def create_goto_url_action(url: str) -> Action:
    """Build a GOTO_URL action that navigates the active page to *url*."""
    action = create_none_action()
    action["action_type"] = ActionTypes.GOTO_URL
    action["url"] = url
    return action
@beartype
def create_page_close_action() -> Action:
    """Build a PAGE_CLOSE action that closes the active tab."""
    action = create_none_action()
    action["action_type"] = ActionTypes.PAGE_CLOSE
    return action
@beartype
def create_mouse_click_action(
    left: float | None = None, top: float | None = None
) -> Action:
    """Build a coordinate click (MOUSE_CLICK) when both coords are given,
    or an element click (CLICK) when neither is.

    Args:
        left: Fractional x position in [0, 1], or None.
        top: Fractional y position in [0, 1], or None.

    Raises:
        ValueError: If exactly one of *left*/*top* is provided.
    """
    action = create_none_action()
    # Bug fix: test against None rather than truthiness, so that a
    # legitimate 0.0 coordinate (viewport edge) is not treated as missing.
    if left is not None and top is not None:
        action.update(
            {
                "action_type": ActionTypes.MOUSE_CLICK,
                "coords": np.array([left, top], dtype=np.float32),
            }
        )
    elif left is None and top is None:
        action.update(
            {
                "action_type": ActionTypes.CLICK,
            }
        )
    else:
        raise ValueError("left and top must be both None or both not None")
    return action
@beartype
def create_keyboard_type_action(keys: list[int | str] | str) -> Action:
    """Build a KEYBOARD_TYPE action from a sequence of keys or a string."""
    action = create_none_action()
    action["action_type"] = ActionTypes.KEYBOARD_TYPE
    action["text"] = _keys2ids(keys)
    return action
@beartype
def create_click_action(
    element_id: str = "",
    element_role: RolesType = "link",
    element_name: str = "",
    pw_code: str = "",
    nth: int = 0,
) -> Action:
    """Build a CLICK action targeting an element by id, role/name, or raw
    playwright code."""
    action = create_none_action()
    action["action_type"] = ActionTypes.CLICK
    action["element_id"] = element_id
    action["element_role"] = _role2id[element_role]
    action["element_name"] = element_name
    action["nth"] = nth
    action["pw_code"] = pw_code
    return action
@beartype
def create_hover_action(
    element_id: str = "",
    element_role: RolesType = "link",
    element_name: str = "",
    pw_code: str = "",
    nth: int = 0,
) -> Action:
    """Build a HOVER action targeting an element by id, role/name, or raw
    playwright code."""
    action = create_none_action()
    action["action_type"] = ActionTypes.HOVER
    action["element_id"] = element_id
    action["element_role"] = _role2id[element_role]
    action["element_name"] = element_name
    action["nth"] = nth
    action["pw_code"] = pw_code
    return action
@beartype
def create_type_action(
    text: str,
    element_id: str = "",
    element_role: RolesType = "link",
    element_name: str = "",
    pw_code: str = "",
    nth: int = 0,
) -> Action:
    """Build a TYPE action that types *text* into the targeted element."""
    action = create_none_action()
    action["action_type"] = ActionTypes.TYPE
    action["element_id"] = element_id
    action["element_role"] = _role2id[element_role]
    action["element_name"] = element_name
    action["nth"] = nth
    action["text"] = _keys2ids(text)
    action["pw_code"] = pw_code
    return action
@beartype
def create_check_action(pw_code: str) -> Action:
    """Build a CHECK action executed via raw playwright code."""
    action = create_none_action()
    action["action_type"] = ActionTypes.CHECK
    action["pw_code"] = pw_code
    return action
@beartype
def create_select_option_action(
    pw_code: str,
) -> Action:
    """Build a SELECT_OPTION action executed via raw playwright code.

    Consistency fix: this was the only creator in the module missing the
    @beartype runtime type check applied to every sibling.
    """
    action = create_none_action()
    action.update(
        {
            "action_type": ActionTypes.SELECT_OPTION,
            "pw_code": pw_code,
        }
    )
    return action
@beartype
def create_focus_action(
    element_role: RolesType, element_name: str = "", nth: int = 0
) -> Action:
    """Build a CLICK action from role/name.

    Legacy entry point kept for backward compatibility with the old
    focus-based API.
    """
    action = create_none_action()
    action["action_type"] = ActionTypes.CLICK
    action["element_role"] = _role2id[element_role]
    action["element_name"] = element_name
    action["nth"] = nth
    return action
@beartype
def create_focus_and_click_action(
    element_role: RolesType, element_name: str = "", nth: int = 0
) -> Action:
    """Build a CLICK action from role/name.

    Legacy entry point kept for backward compatibility with the old
    focus-and-click API.
    """
    action = create_none_action()
    action["action_type"] = ActionTypes.CLICK
    action["element_role"] = _role2id[element_role]
    action["element_name"] = element_name
    action["nth"] = nth
    return action
@beartype
def create_focus_and_type_action(
    keys: list[int | str] | str,
    element_role: RolesType,
    element_name: str = "",
    nth: int = 0,
) -> Action:
    """Build a TYPE action from role/name and a key sequence.

    Legacy entry point kept for backward compatibility with the old
    focus-and-type API.
    """
    action = create_none_action()
    action["action_type"] = ActionTypes.TYPE
    action["element_role"] = _role2id[element_role]
    action["element_name"] = element_name
    action["text"] = _keys2ids(keys)
    action["nth"] = nth
    return action
def execute_scroll(direction: str, page: Page) -> None:
    """Scroll the page by one viewport height in the given direction.

    Any direction other than "up"/"down" is silently a no-op.
    """
    # perform the action
    # code from natbot
    if direction == "up":
        page.evaluate(
            "(document.scrollingElement || document.body).scrollTop = (document.scrollingElement || document.body).scrollTop - window.innerHeight;"
        )
    elif direction == "down":
        page.evaluate(
            "(document.scrollingElement || document.body).scrollTop = (document.scrollingElement || document.body).scrollTop + window.innerHeight;"
        )
async def aexecute_scroll(direction: str, page: APage) -> None:
    """Async twin of execute_scroll: scroll one viewport height up/down.

    Any direction other than "up"/"down" is silently a no-op.
    """
    # perform the action
    # code from natbot
    if direction == "up":
        await page.evaluate(
            "(document.scrollingElement || document.body).scrollTop = (document.scrollingElement || document.body).scrollTop - window.innerHeight;"
        )
    elif direction == "down":
        await page.evaluate(
            "(document.scrollingElement || document.body).scrollTop = (document.scrollingElement || document.body).scrollTop + window.innerHeight;"
        )
def execute_key_press(key: str, page: Page) -> None:
    """Press a key combination, remapping Meta to Control off macOS."""
    if "Meta" in key:
        platform = page.evaluate("navigator.platform")
        if "Mac" not in platform:
            key = key.replace("Meta", "Control")
    page.keyboard.press(key)
async def aexecute_key_press(key: str, page: APage) -> None:
    """Press a key combination, remapping Meta to Control off macOS (async)."""
    if "Meta" in key:
        platform = await page.evaluate("navigator.platform")
        if "Mac" not in platform:
            key = key.replace("Meta", "Control")
    await page.keyboard.press(key)
def execute_mouse_hover(left: float, top: float, page: Page) -> None:
    """Move the mouse to the viewport-relative coordinates (left, top).

    Coordinates are fractions in [0, 1], scaled by the viewport size.
    (The previous docstring wrongly said "Click".)
    """
    viewport_size = page.viewport_size
    assert viewport_size
    page.mouse.move(
        left * viewport_size["width"], top * viewport_size["height"]
    )
async def aexecute_mouse_hover(left: float, top: float, page: APage) -> None:
    """Move the mouse to the viewport-relative coordinates (left, top), async.

    Coordinates are fractions in [0, 1], scaled by the viewport size.
    (The previous docstring wrongly said "Click".)
    """
    viewport_size = page.viewport_size
    assert viewport_size
    await page.mouse.move(
        left * viewport_size["width"], top * viewport_size["height"]
    )
def execute_mouse_click(left: float, top: float, page: Page) -> None:
    """Click at viewport-relative coordinates (left, top).

    Coordinates are fractions in [0, 1], scaled by the viewport size.
    """
    viewport_size = page.viewport_size
    assert viewport_size
    page.mouse.click(
        left * viewport_size["width"], top * viewport_size["height"]
    )
async def aexecute_mouse_click(left: float, top: float, page: APage) -> None:
    """Click at viewport-relative coordinates (left, top), async.

    Coordinates are fractions in [0, 1], scaled by the viewport size.
    """
    viewport_size = page.viewport_size
    assert viewport_size
    await page.mouse.click(
        left * viewport_size["width"], top * viewport_size["height"]
    )
def execute_keyboard_type(text: str, page: Page) -> None:
    """Type *text* into whatever element currently has keyboard focus."""
    page.keyboard.type(text)
async def aexecute_keyboard_type(text: str, page: APage) -> None:
    """Type *text* into whatever element currently has keyboard focus (async)."""
    await page.keyboard.type(text)
def execute_click_current(page: Page) -> None:
    """Click the currently focused element, searching child frames if the
    main frame has no focused element.

    NOTE(review): the async twin also waits for the "load" state after
    clicking; presumably this sync path should too — confirm.
    """
    locators = page.locator("*:focus")
    if not locators.count():
        # page.frames[0] is the main frame, already checked above.
        for frame in page.frames[1:]:
            locators = frame.locator("*:focus")
            if locators.count():
                break
    locators.click()
async def aexecute_click_current(page: APage) -> None:
    """Click the currently focused element (searching child frames if the
    main frame has none), then wait for the page to finish loading."""
    locators = page.locator("*:focus")
    locator_count = await locators.count()
    if not locator_count:
        # page.frames[0] is the main frame, already checked above.
        for frame in page.frames[1:]:
            locators = frame.locator("*:focus")
            locator_count = await locators.count()
            if locator_count:
                break
    await locators.click()
    await page.wait_for_load_state("load")
def execute_type(keys: list[int], page: Page) -> None:
    """Decode key ids via _id2key and type them into the focused element."""
    page.keyboard.type("".join(_id2key[k] for k in keys))
async def aexecute_type(keys: list[int], page: APage) -> None:
    """Decode key ids via _id2key and type them into the focused element (async)."""
    await page.keyboard.type("".join(_id2key[k] for k in keys))
def execute_focus(
    element_role: int, element_name: str, nth: int, page: Page
) -> None:
    """Focus the nth in-viewport element matching the given role/name.

    Elements from all frames are collected, sorted in row-major (top-to-
    bottom, left-to-right) order, and the nth one receives ``.focus()``.
    (The previous docstring wrongly said "Click".)

    Raises:
        ValueError: If the page has no viewport size, or fewer than
            ``nth + 1`` matching elements are visible in the viewport.
    """
    element_role_str = _id2role[element_role]
    if page.viewport_size is None:
        raise ValueError("Viewport size is not set for the current page")
    element_location_list: list[tuple[Locator, float, float]] = []
    for frame in page.frames:
        # Special locator kinds use dedicated getters; everything else is
        # looked up by accessibility role + name.
        match element_role_str:
            case "alt_text":
                locators = frame.get_by_alt_text(element_name)
            case "label":
                locators = frame.get_by_label(element_name)
            case "placeholder":
                locators = frame.get_by_placeholder(element_name)
            case _:
                locators = frame.get_by_role(
                    role=element_role_str, name=element_name
                )
        for locator_idx in range(locators.count()):
            locator = locators.nth(locator_idx)
            if is_in_viewport(locator, page.viewport_size):
                bounding_box = locator.bounding_box()
                assert bounding_box
                element_location_list.append(
                    (locator, bounding_box["x"], bounding_box["y"])
                )
    if len(element_location_list) <= nth:
        raise ValueError(
            f"There are only {len(element_location_list)} elements found in viewport, but {nth + 1} is requested"
        )
    element_location_list.sort(key=lambda x: (x[2], x[1]))  # row major order
    element_location_list[nth][0].focus()
async def aexecute_focus(
    element_role: int, element_name: str, nth: int, page: APage
) -> None:
    """Async twin of execute_focus: focus the nth in-viewport element
    matching the given role/name, in row-major order.
    (The previous docstring wrongly said "Click".)

    Raises:
        ValueError: If the page has no viewport size, or fewer than
            ``nth + 1`` matching elements are visible in the viewport.
    """
    element_role_str = _id2role[element_role]
    if page.viewport_size is None:
        raise ValueError("Viewport size is not set for the current page")
    element_location_list: list[tuple[ALocator, float, float]] = []
    for frame in page.frames:
        # Special locator kinds use dedicated getters; everything else is
        # looked up by accessibility role + name.
        match element_role_str:
            case "alt_text":
                locators = frame.get_by_alt_text(element_name)
            case "label":
                locators = frame.get_by_label(element_name)
            case "placeholder":
                locators = frame.get_by_placeholder(element_name)
            case _:
                locators = frame.get_by_role(
                    role=element_role_str, name=element_name
                )
        for locator_idx in range(await locators.count()):
            locator = locators.nth(locator_idx)
            if await async_is_in_viewport(locator, page.viewport_size):
                bounding_box = await locator.bounding_box()
                assert bounding_box
                element_location_list.append(
                    (locator, bounding_box["x"], bounding_box["y"])
                )
    if len(element_location_list) <= nth:
        raise ValueError(
            f"There are only {len(element_location_list)} elements found in viewport, but {nth + 1} is requested"
        )
    element_location_list.sort(key=lambda x: (x[2], x[1]))  # row major order
    await element_location_list[nth][0].focus()
def locate(locator_calls: list[ParsedPlaywrightCode], page: Page) -> Locator:
    """Resolve a chain of parsed playwright locator calls against *page*."""
    target = page
    for step in locator_calls:
        method = getattr(target, step["function_name"])
        target = method(*step["arguments"], **step["keywords"])
    return target  # type: ignore[return-value]
async def alocate(
    locator_calls: list[ParsedPlaywrightCode], page: APage
) -> ALocator:
    """Async counterpart of ``locate``: resolve a chain of parsed playwright
    locator calls against *page*.

    Locator-building methods in the async playwright API (``get_by_role``,
    ``locator``, ``nth``, ...) return Locator objects synchronously; the
    previous code awaited every result unconditionally, which raises
    TypeError on those non-awaitables. Await only when awaitable.
    """
    import inspect  # local import: only needed for the awaitable check

    locator = page
    for call in locator_calls:
        result = getattr(locator, call["function_name"])(
            *call["arguments"], **call["keywords"]
        )
        locator = await result if inspect.isawaitable(result) else result
    return locator  # type: ignore[return-value]
def execute_playwright_click(
    locator_code: list[ParsedPlaywrightCode],
    page: Page,
    pw_action_args: list[str] | None = None,
    pw_action_kwargs: dict[str, Any] | None = None,
) -> None:
    """Click the element described by *locator_code*.

    Args:
        locator_code: Parsed chain of playwright locator calls.
        page: Target page.
        pw_action_args: Extra positional arguments forwarded to ``click``.
        pw_action_kwargs: Extra keyword arguments forwarded to ``click``.
    """
    # None defaults replace the previous mutable default arguments
    # (a shared list/dict across calls).
    locator = locate(locator_code, page)
    # perform the action
    locator.click(*(pw_action_args or []), **(pw_action_kwargs or {}))
async def aexecute_playwright_click(
    locator_code: list[ParsedPlaywrightCode],
    page: APage,
    pw_action_args: list[str] | None = None,
    pw_action_kwargs: dict[str, Any] | None = None,
) -> None:
    """Click the element described by *locator_code* (async).

    Args:
        locator_code: Parsed chain of playwright locator calls.
        page: Target page.
        pw_action_args: Extra positional arguments forwarded to ``click``.
        pw_action_kwargs: Extra keyword arguments forwarded to ``click``.
    """
    # None defaults replace the previous mutable default arguments
    # (a shared list/dict across calls).
    locator = await alocate(locator_code, page)
    # perform the action
    await locator.click(*(pw_action_args or []), **(pw_action_kwargs or {}))
def execute_playwright_hover(
    locator_code: list[ParsedPlaywrightCode], page: Page
) -> None:
    """Hover over the element described by *locator_code*."""
    locate(locator_code, page).hover()
async def aexecute_playwright_hover(
    locator_code: list[ParsedPlaywrightCode], page: APage
) -> None:
    """Hover over the element described by *locator_code* (async)."""
    target = await alocate(locator_code, page)
    await target.hover()
def execute_playwright_type(
    text: str,
    locator_code: list[ParsedPlaywrightCode],
    page: Page,
    pw_action_args: list[str] | None = None,
    pw_action_kwargs: dict[str, Any] | None = None,
) -> None:
    """Type *text* into the element described by *locator_code*.

    ``text`` is passed as the first positional argument of ``Locator.type``,
    followed by any extra ``pw_action_args``.
    """
    # None defaults replace the previous mutable default arguments
    # (a shared list/dict across calls).
    args = [text] + (pw_action_args or [])  # text is the first argument
    kwargs = pw_action_kwargs or {}
    locator = locate(locator_code, page)
    # perform the action
    locator.type(*args, **kwargs)
async def aexecute_playwright_type(
text: str,
locator_code: list[ParsedPlaywrightCode],
page: APage,
pw_action_args: list[str] = [],
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | true |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/browser_env/helper_functions.py | exps/webarena_exp/browser_env/helper_functions.py | import base64
import io
import json
import re
from pathlib import Path
from typing import Any, Optional
import numpy as np
from PIL import Image
from agent.prompts import *
from browser_env import (
Action,
ActionTypes,
ObservationMetadata,
StateInfo,
action2str,
)
# Skeleton for the rendered-trajectory HTML page; `{body}` is filled (and
# re-filled incrementally) by RenderHelper.render. The doubled braces escape
# literal CSS braces for str.format.
HTML_TEMPLATE = """
<!DOCTYPE html>
<head>
<style>
pre {{
white-space: pre-wrap;
word-wrap: break-word;
}}
</style>
</head>
<html>
<body>
{body}
</body>
</html>
"""
def get_render_action(
action: Action,
observation_metadata: dict[str, ObservationMetadata],
action_set_tag: str,
) -> str:
"""Parse the predicted actions for rendering purpose. More comprehensive information"""
match action_set_tag:
case "id_accessibility_tree":
text_meta_data = observation_metadata["text"]
if action["element_id"] in text_meta_data["obs_nodes_info"]:
node_content = text_meta_data["obs_nodes_info"][action["element_id"]][
"text"
]
else:
node_content = "No match found"
action_str = f"<div class='raw_parsed_prediction' style='background-color:grey'><pre>{action['raw_prediction']}</pre></div>"
action_str += f"<div class='action_object' style='background-color:grey'><pre>{repr(action)}</pre></div>"
action_str += f"<div class='parsed_action' style='background-color:black'><pre>{action2str(action, action_set_tag, node_content)}</pre></div>"
case "playwright":
action_str = action["pw_code"]
case _:
raise ValueError(f"Unknown action type {action['action_type']}")
return action_str
def get_action_description(
action: Action,
observation_metadata: dict[str, ObservationMetadata],
action_set_tag: str,
prompt_constructor: PromptConstructor | None,
) -> str:
"""Generate the text version of the predicted actions to store in action history for prompt use.
May contain hint information to recover from the failures"""
match action_set_tag:
case "id_accessibility_tree":
text_meta_data = observation_metadata["text"]
if action["action_type"] in [
ActionTypes.CLICK,
ActionTypes.HOVER,
ActionTypes.TYPE,
]:
action_name = str(action["action_type"]).split(".")[1].lower()
if action["element_id"] in text_meta_data["obs_nodes_info"]:
node_content = text_meta_data["obs_nodes_info"][
action["element_id"]
]["text"]
node_content = " ".join(node_content.split()[1:])
action_str = action2str(action, action_set_tag, node_content)
else:
action_str = f"Attempt to perfom \"{action_name}\" on element \"[{action['element_id']}]\" but no matching element found. Please check the observation more carefully."
else:
if (
action["action_type"] == ActionTypes.NONE
and prompt_constructor is not None
):
action_splitter = prompt_constructor.instruction["meta_data"][
"action_splitter"
]
action_str = f'The previous prediction you issued was "{action["raw_prediction"]}". However, the format was incorrect. Ensure that the action is wrapped inside a pair of {action_splitter} and enclose arguments within [] as follows: {action_splitter}action [arg] ...{action_splitter}.'
else:
action_str = action2str(action, action_set_tag, "")
case "playwright":
action_str = action["pw_code"]
case _:
raise ValueError(f"Unknown action type {action['action_type']}")
return action_str
class RenderHelper(object):
    """Helper class to render text and image observations and meta data in the trajectory"""
    def __init__(
        self, config_file: str, result_dir: str, action_set_tag: str, trail_idx: int = 0
    ) -> None:
        # Render the task config as a <pre> block at the top of the page.
        with open(config_file, "r") as f:
            _config = json.load(f)
            _config_str = ""
            for k, v in _config.items():
                _config_str += f"{k}: {v}\n"
            _config_str = f"<pre>{_config_str}</pre>\n"
            task_id = _config["task_id"]
        self.action_set_tag = action_set_tag
        # "a+" so the file is readable; truncate(0) resets any previous run.
        self.render_file = open(
            Path(result_dir) / f"render_{task_id}_{trail_idx}.html", "a+"
        )
        self.render_file.truncate(0)
        # write init template
        self.render_file.write(HTML_TEMPLATE.format(body=f"{_config_str}"))
        self.render_file.read()
        self.render_file.flush()
    def render(
        self,
        action: Action,
        state_info: StateInfo,
        meta_data: dict[str, Any],
        render_screenshot: bool = False,
    ) -> None:
        """Render the trajectory"""
        # text observation
        observation = state_info["observation"]
        text_obs = observation["text"]
        info = state_info["info"]
        new_content = f"<h2>New Page</h2>\n"
        new_content += f"<h3 class='url'><a href={state_info['info']['page'].url}>URL: {state_info['info']['page'].url}</a></h3>\n"
        new_content += f"<div class='state_obv'><pre>{text_obs}</pre><div>\n"
        if render_screenshot:
            # image observation: inline the screenshot as a base64 PNG
            img_obs = observation["image"]
            image = Image.fromarray(img_obs)
            byte_io = io.BytesIO()
            image.save(byte_io, format="PNG")
            byte_io.seek(0)
            image_bytes = base64.b64encode(byte_io.read())
            image_str = image_bytes.decode("utf-8")
            new_content += f"<img src='data:image/png;base64,{image_str}' style='width:50vw; height:auto;'/>\n"
        # meta data
        new_content += f"<div class='prev_action' style='background-color:brown'>{meta_data['action_history'][-1]}</div>\n"
        # reflection
        if "memory" in meta_data:
            memory = "\n\n".join(meta_data["memory"])
            new_content += (
                f"<div class='memory' style='background-color:red'>{memory}</div>\n"
            )
        # action
        action_str = get_render_action(
            action,
            info["observation_metadata"],
            action_set_tag=self.action_set_tag,
        )
        # with yellow background
        action_str = f"<div class='predict_action'>{action_str}</div>"
        new_content += f"{action_str}\n"
        # add new content: re-read the whole file, extract the current <body>,
        # append, and rewrite the full page in place.
        self.render_file.seek(0)
        html = self.render_file.read()
        html_body = re.findall(r"<body>(.*?)</body>", html, re.DOTALL)[0]
        html_body += new_content
        html = HTML_TEMPLATE.format(body=html_body)
        self.render_file.seek(0)
        self.render_file.truncate()
        self.render_file.write(html)
        self.render_file.flush()
    def close(self) -> None:
        # Flushes and releases the render file handle.
        self.render_file.close()
def save_img(
    img_obs: np.ndarray, save_dir: str, task_id: int, step_idx: int, trail_idx: int = 0
) -> str:
    """Save the image observation as a PNG file and return its path.

    Bug fix: the return annotation said ``None`` although the function
    returns the saved file path as a string.
    """
    if not Path(save_dir).exists():
        Path(save_dir).mkdir(parents=True)
    image = Image.fromarray(img_obs)
    img_path = Path(save_dir) / f"{task_id}_{trail_idx}_{step_idx}.png"
    image.save(img_path, format="PNG")
    return str(img_path)
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/browser_env/trajectory.py | exps/webarena_exp/browser_env/trajectory.py | from typing import Union
from .actions import Action
from .utils import StateInfo
# A trajectory alternates observed states and the actions taken from them.
Trajectory = list[Union[StateInfo, Action]]
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/llms/utils.py | exps/webarena_exp/llms/utils.py | import argparse
from typing import Any
import time
from llms import (
generate_from_huggingface_completion,
generate_from_openai_chat_completion,
generate_from_openai_completion,
lm_config,
)
# Prompt payload accepted by call_llm: a completion string, a chat message
# list, or a provider-specific dict.
APIInput = str | list[Any] | dict[str, Any]
def call_llm(
    lm_config: lm_config.LMConfig,
    prompt: APIInput,
) -> str:
    """Dispatch *prompt* to the provider/mode configured in *lm_config*
    and return the generated text. Logs the wall-clock latency.

    Raises:
        ValueError: For an unsupported OpenAI mode.
        NotImplementedError: For an unknown provider.
    """
    response: str
    start_t = time.time()
    if lm_config.provider == "openai":
        # Chat mode expects a message list; completion mode a raw string.
        if lm_config.mode == "chat":
            assert isinstance(prompt, list)
            response = generate_from_openai_chat_completion(
                messages=prompt,
                model=lm_config.model,
                temperature=lm_config.gen_config["temperature"],
                top_p=lm_config.gen_config["top_p"],
                context_length=lm_config.gen_config["context_length"],
                max_tokens=lm_config.gen_config["max_tokens"],
                stop_token=None,
            )
        elif lm_config.mode == "completion":
            assert isinstance(prompt, str)
            response = generate_from_openai_completion(
                prompt=prompt,
                engine=lm_config.model,
                temperature=lm_config.gen_config["temperature"],
                max_tokens=lm_config.gen_config["max_tokens"],
                top_p=lm_config.gen_config["top_p"],
                stop_token=lm_config.gen_config["stop_token"],
            )
        else:
            raise ValueError(
                f"OpenAI models do not support mode {lm_config.mode}"
            )
    elif lm_config.provider == "huggingface":
        assert isinstance(prompt, str)
        response = generate_from_huggingface_completion(
            prompt=prompt,
            model_endpoint=lm_config.gen_config["model_endpoint"],
            temperature=lm_config.gen_config["temperature"],
            top_p=lm_config.gen_config["top_p"],
            stop_sequences=lm_config.gen_config["stop_sequences"],
            max_new_tokens=lm_config.gen_config["max_new_tokens"],
        )
    else:
        raise NotImplementedError(
            f"Provider {lm_config.provider} not implemented"
        )
    print(f"Calling LLM takes: {time.time() - start_t:.2f}s")
    return response
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/llms/__init__.py | exps/webarena_exp/llms/__init__.py | """This module is adapt from https://github.com/zeno-ml/zeno-build"""
from .providers.hf_utils import generate_from_huggingface_completion
from .providers.openai_utils import (
generate_from_openai_chat_completion,
generate_from_openai_completion,
)
from .utils import call_llm
__all__ = [
"generate_from_openai_completion",
"generate_from_openai_chat_completion",
"generate_from_huggingface_completion",
"call_llm",
]
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/llms/tokenizers.py | exps/webarena_exp/llms/tokenizers.py | from typing import Any
import tiktoken
from transformers import LlamaTokenizer # type: ignore
class Tokenizer(object):
    """Thin provider-agnostic wrapper around tiktoken (OpenAI) or a
    LlamaTokenizer (Hugging Face) with a uniform encode/decode interface."""
    def __init__(self, provider: str, model_name: str) -> None:
        if provider == "openai":
            self.tokenizer = tiktoken.encoding_for_model(model_name)
        elif provider == "huggingface":
            self.tokenizer = LlamaTokenizer.from_pretrained(model_name)
            # turn off adding special tokens automatically
            self.tokenizer.add_special_tokens = False  # type: ignore[attr-defined]
            self.tokenizer.add_bos_token = False  # type: ignore[attr-defined]
            self.tokenizer.add_eos_token = False  # type: ignore[attr-defined]
        else:
            raise NotImplementedError
    def encode(self, text: str) -> list[int]:
        """Convert *text* to a list of token ids."""
        return self.tokenizer.encode(text)
    def decode(self, ids: list[int]) -> str:
        """Convert a list of token ids back to text."""
        return self.tokenizer.decode(ids)
    def __call__(self, text: str) -> list[int]:
        # Calling the tokenizer is shorthand for encode().
        return self.tokenizer.encode(text)
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/llms/lm_config.py | exps/webarena_exp/llms/lm_config.py | """Config for language models."""
from __future__ import annotations
import argparse
import dataclasses
from dataclasses import dataclass
from typing import Any
@dataclass(frozen=True)
class LMConfig:
    """A config for a language model.

    Attributes:
        provider: The name of the API provider.
        model: The name of the model.
        model_cls: The Python class corresponding to the model, mostly for
            Hugging Face transformers.
        tokenizer_cls: The Python class corresponding to the tokenizer, mostly
            for Hugging Face transformers.
        mode: The mode of the API calls, e.g., "chat" or "generation".
        gen_config: Free-form generation settings (temperature, top_p, ...);
            the dict itself stays mutable despite frozen=True.
    """
    provider: str
    model: str
    model_cls: type | None = None
    tokenizer_cls: type | None = None
    mode: str | None = None
    gen_config: dict[str, Any] = dataclasses.field(default_factory=dict)
def construct_llm_config(args: argparse.Namespace) -> LMConfig:
    """Build an LMConfig from parsed CLI args for the chosen provider.

    Note: LMConfig is frozen, but gen_config is a plain dict, so filling it
    in after construction is allowed.

    Raises:
        NotImplementedError: For an unknown provider.
    """
    llm_config = LMConfig(
        provider=args.provider, model=args.model, mode=args.mode
    )
    if args.provider == "openai":
        llm_config.gen_config["temperature"] = args.temperature
        llm_config.gen_config["top_p"] = args.top_p
        llm_config.gen_config["context_length"] = args.context_length
        llm_config.gen_config["max_tokens"] = args.max_tokens
        llm_config.gen_config["stop_token"] = args.stop_token
        llm_config.gen_config["max_obs_length"] = args.max_obs_length
        llm_config.gen_config["max_retry"] = args.max_retry
    elif args.provider == "huggingface":
        llm_config.gen_config["temperature"] = args.temperature
        llm_config.gen_config["top_p"] = args.top_p
        llm_config.gen_config["max_new_tokens"] = args.max_tokens
        # HF text-generation expects a list of stop sequences (or None).
        llm_config.gen_config["stop_sequences"] = (
            [args.stop_token] if args.stop_token else None
        )
        llm_config.gen_config["max_obs_length"] = args.max_obs_length
        llm_config.gen_config["model_endpoint"] = args.model_endpoint
        llm_config.gen_config["max_retry"] = args.max_retry
    else:
        raise NotImplementedError(f"provider {args.provider} not implemented")
    return llm_config
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/llms/providers/hf_utils.py | exps/webarena_exp/llms/providers/hf_utils.py | from text_generation import Client # type: ignore
def generate_from_huggingface_completion(
    prompt: str,
    model_endpoint: str,
    temperature: float,
    top_p: float,
    max_new_tokens: int,
    stop_sequences: list[str] | None = None,
) -> str:
    """Generate a completion from a Hugging Face text-generation-inference
    endpoint and return the generated text.

    A fresh Client is created per call with a 60s request timeout.
    """
    client = Client(model_endpoint, timeout=60)
    generation: str = client.generate(
        prompt=prompt,
        temperature=temperature,
        top_p=top_p,
        max_new_tokens=max_new_tokens,
        stop_sequences=stop_sequences,
    ).generated_text
    return generation
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/llms/providers/openai_utils.py | exps/webarena_exp/llms/providers/openai_utils.py | """Tools to generate from OpenAI prompts.
Adopted from https://github.com/zeno-ml/zeno-build/"""
import asyncio
import logging
import os
import random
import time
from typing import Any
import aiolimiter
import openai
import openai.error
from tqdm.asyncio import tqdm_asyncio
def retry_with_exponential_backoff(  # type: ignore
    func,
    initial_delay: float = 1,
    exponential_base: float = 2,
    jitter: bool = True,
    max_retries: int = 0,
    errors: tuple[Any] = (openai.error.RateLimitError,),
):
    """Retry a function with exponential backoff.

    Args:
        func: The callable to wrap.
        initial_delay: Seconds before the first retry.
        exponential_base: Multiplier applied to the delay on each retry.
        jitter: Randomize each delay to avoid synchronized retries.
        max_retries: Retries allowed before giving up.
        errors: Exception types that trigger a retry; anything else
            propagates unchanged.
    """
    import functools  # local: keeps the file's import block untouched

    @functools.wraps(func)  # fix: preserve the wrapped function's metadata
    def wrapper(*args, **kwargs):  # type: ignore
        # Initialize variables
        num_retries = 0
        delay = initial_delay
        # Loop until a successful response or max_retries is hit or an exception is raised
        while True:
            try:
                return func(*args, **kwargs)
            # Retry on specified errors
            except errors:
                # Increment retries
                num_retries += 1
                # Check if max retries has been reached
                if num_retries > max_retries:
                    raise Exception(
                        f"Maximum number of retries ({max_retries}) exceeded."
                    )
                # Increment the delay
                delay *= exponential_base * (1 + jitter * random.random())
                print(f"Retrying in {delay} seconds.")
                # Sleep for the delay
                time.sleep(delay)
            # Any other exception propagates as-is (the previous
            # `except Exception as e: raise e` added nothing).

    return wrapper
async def _throttled_openai_completion_acreate(
    engine: str,
    prompt: str,
    temperature: float,
    max_tokens: int,
    top_p: float,
    limiter: aiolimiter.AsyncLimiter,
) -> dict[str, Any]:
    """Call the OpenAI Completion API under a rate limiter.

    Retries up to 3 times on rate limiting; on persistent failure or an API
    error, returns an empty completion-shaped response.
    """
    async with limiter:
        for _ in range(3):
            try:
                return await openai.Completion.acreate(  # type: ignore
                    engine=engine,
                    prompt=prompt,
                    temperature=temperature,
                    max_tokens=max_tokens,
                    top_p=top_p,
                )
            except openai.error.RateLimitError:
                logging.warning(
                    "OpenAI API rate limit exceeded. Sleeping for 10 seconds."
                )
                await asyncio.sleep(10)
            except openai.error.APIError as e:
                logging.warning(f"OpenAI API error: {e}")
                break
    # Bug fix: callers read response["choices"][0]["text"] (completion API
    # shape); the old chat-shaped fallback {"message": {"content": ""}}
    # raised KeyError downstream.
    return {"choices": [{"text": ""}]}
async def agenerate_from_openai_completion(
    prompts: list[str],
    engine: str,
    temperature: float,
    max_tokens: int,
    top_p: float,
    context_length: int,
    requests_per_minute: int = 300,
) -> list[str]:
    """Generate completions for a batch of prompts via the OpenAI API.

    Args:
        prompts: Prompts to complete.
        engine: Name of the completion engine to use.
        temperature: Sampling temperature.
        max_tokens: Maximum number of tokens to generate per prompt.
        top_p: Nucleus-sampling parameter.
        context_length: Context length to use (not consumed here).
        requests_per_minute: Request-rate cap shared across all prompts.

    Returns:
        The generated text for each prompt, in input order.
    """
    if "OPENAI_API_KEY" not in os.environ:
        raise ValueError(
            "OPENAI_API_KEY environment variable must be set when using OpenAI API."
        )
    openai.api_key = os.environ["OPENAI_API_KEY"]
    openai.organization = os.environ.get("OPENAI_ORGANIZATION", "")

    rate_limiter = aiolimiter.AsyncLimiter(requests_per_minute)
    pending = []
    for single_prompt in prompts:
        pending.append(
            _throttled_openai_completion_acreate(
                engine=engine,
                prompt=single_prompt,
                temperature=temperature,
                max_tokens=max_tokens,
                top_p=top_p,
                limiter=rate_limiter,
            )
        )
    completed = await tqdm_asyncio.gather(*pending)
    return [item["choices"][0]["text"] for item in completed]
@retry_with_exponential_backoff
def generate_from_openai_completion(
    prompt: str,
    engine: str,
    temperature: float,
    max_tokens: int,
    top_p: float,
    context_length: int,
    stop_token: str | None = None,
) -> str:
    """Synchronously generate one completion, with exponential-backoff retry.

    Args:
        prompt: The prompt string.
        engine: Name of the completion engine to use.
        temperature: Sampling temperature.
        max_tokens: Maximum number of tokens to generate.
        top_p: Nucleus-sampling parameter.
        context_length: Context length to use (not consumed here).
        stop_token: Optional stop sequence; generation halts when it appears.

    Returns:
        The generated completion text.

    Raises:
        ValueError: If OPENAI_API_KEY is not set in the environment.
    """
    if "OPENAI_API_KEY" not in os.environ:
        raise ValueError(
            "OPENAI_API_KEY environment variable must be set when using OpenAI API."
        )
    openai.api_key = os.environ["OPENAI_API_KEY"]
    openai.organization = os.environ.get("OPENAI_ORGANIZATION", "")
    response = openai.Completion.create(  # type: ignore
        prompt=prompt,
        engine=engine,
        temperature=temperature,
        max_tokens=max_tokens,
        top_p=top_p,
        # Only pass `stop` when a stop token was provided; the original
        # unconditionally sent [None] to the API, unlike the
        # chat-completion variant below.
        stop=[stop_token] if stop_token else None,
    )
    answer: str = response["choices"][0]["text"]
    return answer
async def _throttled_openai_chat_completion_acreate(
    model: str,
    messages: list[dict[str, str]],
    temperature: float,
    max_tokens: int,
    top_p: float,
    limiter: aiolimiter.AsyncLimiter,
) -> dict[str, Any]:
    """Call the OpenAI ChatCompletion API under a shared rate limiter.

    Retries up to 3 times on rate-limit or timeout errors; on an API error
    (or after exhausting retries) falls back to an empty chat response.
    """
    async with limiter:
        for _ in range(3):
            try:
                return await openai.ChatCompletion.acreate(  # type: ignore
                    model=model,
                    messages=messages,
                    temperature=temperature,
                    max_tokens=max_tokens,
                    top_p=top_p,
                )
            except openai.error.RateLimitError:
                logging.warning(
                    "OpenAI API rate limit exceeded. Sleeping for 10 seconds."
                )
                await asyncio.sleep(10)
            except asyncio.exceptions.TimeoutError:
                logging.warning("OpenAI API timeout. Sleeping for 10 seconds.")
                await asyncio.sleep(10)
            except openai.error.APIError as e:
                logging.warning(f"OpenAI API error: {e}")
                break
        # Empty chat-shaped fallback so callers can still index
        # ["choices"][0]["message"]["content"].
        return {"choices": [{"message": {"content": ""}}]}
async def agenerate_from_openai_chat_completion(
    messages_list: list[list[dict[str, str]]],
    engine: str,
    temperature: float,
    max_tokens: int,
    top_p: float,
    context_length: int,
    requests_per_minute: int = 300,
) -> list[str]:
    """Generate chat completions for a batch of conversations.

    Args:
        messages_list: One chat-message list per request.
        engine: Model name passed to the ChatCompletion API.
        temperature: Sampling temperature.
        max_tokens: Maximum number of tokens to generate per request.
        top_p: Nucleus-sampling parameter.
        context_length: Context length to use (not consumed here).
        requests_per_minute: Request-rate cap shared across all requests.

    Returns:
        The generated reply text for each conversation, in input order.
    """
    if "OPENAI_API_KEY" not in os.environ:
        raise ValueError(
            "OPENAI_API_KEY environment variable must be set when using OpenAI API."
        )
    openai.api_key = os.environ["OPENAI_API_KEY"]
    openai.organization = os.environ.get("OPENAI_ORGANIZATION", "")

    rate_limiter = aiolimiter.AsyncLimiter(requests_per_minute)
    pending = []
    for conversation in messages_list:
        pending.append(
            _throttled_openai_chat_completion_acreate(
                model=engine,
                messages=conversation,
                temperature=temperature,
                max_tokens=max_tokens,
                top_p=top_p,
                limiter=rate_limiter,
            )
        )
    completed = await tqdm_asyncio.gather(*pending)
    return [reply["choices"][0]["message"]["content"] for reply in completed]
@retry_with_exponential_backoff
def generate_from_openai_chat_completion(
    messages: list[dict[str, str]],
    model: str,
    temperature: float,
    max_tokens: int,
    top_p: float,
    context_length: int,
    stop_token: str | None = None,
) -> str:
    """Synchronously generate one chat completion, with backoff retry.

    Raises:
        ValueError: If OPENAI_API_KEY is not set in the environment.
    """
    if "OPENAI_API_KEY" not in os.environ:
        raise ValueError(
            "OPENAI_API_KEY environment variable must be set when using OpenAI API."
        )
    openai.api_key = os.environ["OPENAI_API_KEY"]
    openai.organization = os.environ.get("OPENAI_ORGANIZATION", "")

    # Only forward a stop sequence when one was actually supplied.
    stop_sequences = [stop_token] if stop_token else None
    completion = openai.ChatCompletion.create(  # type: ignore
        model=model,
        messages=messages,
        temperature=temperature,
        max_tokens=max_tokens,
        top_p=top_p,
        stop=stop_sequences,
    )
    result: str = completion["choices"][0]["message"]["content"]
    return result
@retry_with_exponential_backoff
def fake_generate_from_openai_chat_completion(
    messages: list[dict[str, str]],
    model: str,
    temperature: float,
    max_tokens: int,
    top_p: float,
    context_length: int,
    stop_token: str | None = None,
) -> str:
    """Debug-only stand-in for the chat API.

    Performs the same credential setup as the real call, then returns a
    fixed canned reply without contacting OpenAI.
    """
    if "OPENAI_API_KEY" not in os.environ:
        raise ValueError(
            "OPENAI_API_KEY environment variable must be set when using OpenAI API."
        )
    openai.api_key = os.environ["OPENAI_API_KEY"]
    openai.organization = os.environ.get("OPENAI_ORGANIZATION", "")

    canned_reply = "Let's think step-by-step. This page shows a list of links and buttons. There is a search box with the label 'Search query'. I will click on the search box to type the query. So the action I will perform is \"click [60]\"."
    return canned_reply
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/environment_docker/webarena-homepage/app.py | exps/webarena_exp/environment_docker/webarena-homepage/app.py | from flask import Flask, render_template
app = Flask(__name__)
@app.route("/")
def index() -> str:
    """Serve the rendered index.html template."""
    return render_template("index.html")
@app.route("/scratchpad.html")
def scratchpad() -> str:
    """Serve the rendered scratchpad.html template."""
    return render_template("scratchpad.html")
@app.route("/calculator.html")
def calculator() -> str:
    """Serve the rendered calculator.html template."""
    return render_template("calculator.html")
@app.route("/password.html")
def password() -> str:
    """Serve the rendered password.html template."""
    return render_template("password.html")
if __name__ == "__main__":
    # Listen on all network interfaces, port 4399.
    app.run(host="0.0.0.0", port=4399)
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/agent/evaluator.py | exps/webarena_exp/agent/evaluator.py | import os
from typing import Any
from gradio_client import Client
from browser_env import Trajectory
import numpy as np
import tempfile
from PIL import Image
from typing import Union, Literal
import time
from agent_eval.clients import LM_Client, GPT4V_Client
from agent_eval.eval.evaluator import Evaluator
import multiprocessing as mp
import re
import random
# OpenAI API key, read from the environment at import time (may be None).
OAI_KEY = os.getenv("OPENAI_API_KEY")
# Space-separated list of Gradio captioning-server URLs; parsed into a list
# (empty when the variable is unset).
CAPTION_CLIENT_URLS = os.getenv("CAPTION_CLIENT_URLS")
CAPTION_CLIENT_URLS = CAPTION_CLIENT_URLS.split(" ") if CAPTION_CLIENT_URLS else []
class GUIAgentEvaluator:
    """LM-based success judge for GUI-agent trajectory records.

    Screenshots are captioned through remote Gradio captioning servers, then
    a language-model judge (via ``agent_eval``) decides whether the
    trajectory succeeded.
    """

    def __init__(
        self,
        result_path: str,
        model_type: str = "mixtral",
        prompt_version: str = "final-v2",
    ):
        """
        Args:
            result_path: Directory where the judge writes its logs.
            model_type: Judge backend; one of "gpt-3.5", "gpt-4",
                "mixtral", or "gpt-4v".
            prompt_version: Prompt template version passed to the judge.
        """
        self.model_type = model_type
        self.prompt_version = prompt_version
        self.client_urls = CAPTION_CLIENT_URLS
        self.num_caption_clients = len(CAPTION_CLIENT_URLS)
        self.caption_clients = [Client(url) for url in CAPTION_CLIENT_URLS]
        self.lm_clients = {
            "gpt-3.5": LM_Client(api_key=OAI_KEY, model_name="gpt-3.5"),
            "gpt-4": LM_Client(api_key=OAI_KEY, model_name="gpt-4"),
            "mixtral": LM_Client(api_key="<removed>", model_name="mixtral"),
            "gpt-4v": GPT4V_Client(api_key=OAI_KEY),
        }
        self.evaluator = Evaluator(self.lm_clients, log_save_path=result_path)

    # NOTE: the original annotated the array alternative as `np.array`,
    # which is the constructor function, not a type; `np.ndarray` is correct.
    def caption(self, image: Union[str, Image.Image, np.ndarray], idx: int = -1) -> str:
        """Caption one screenshot via a remote captioning server.

        Args:
            image: Path to an image file, a PIL image, or a numpy array.
            idx: Non-negative values pick a server round-robin by index;
                negative values pick one at random.

        Returns:
            The caption string produced by the server.
        """
        start_t = time.time()
        if idx < 0:
            client = random.choice(self.caption_clients)
        else:
            client = self.caption_clients[idx % self.num_caption_clients]
        if isinstance(image, str):
            caption = client.predict(image, api_name="/predict")
        else:
            # In-memory images are round-tripped through a temp file
            # because the Gradio client expects a file path.
            if isinstance(image, np.ndarray):
                image = Image.fromarray(image)
            with tempfile.NamedTemporaryFile(suffix=".png") as f:
                print(f.name)
                image.save(f.name)
                caption = client.predict(f.name, api_name="/predict")
        print(f"captioning took {time.time() - start_t}")
        return caption

    def __call__(self, records: dict) -> tuple[float, str]:
        """Judge one trajectory record.

        Returns:
            (score, rationale): 1 with the judge's reasoning when the judge
            reports success, else 0 with the reasoning.
        """
        # Only the last two observations are captioned; text-only judges
        # consume the captions, while gpt-4v consumes raw images instead.
        if "captions" not in records and self.model_type != "gpt-4v":
            captions = [self.caption(s["img"]) for s in records["steps"][-2:]]
            records["captions"] = captions
        records["actions"] = self._get_actions(records)
        records["traj_name"] = f"eval_{records['uid']}_{records['trail_idx']}"
        records["image_paths"] = [step["img"] for step in records["steps"]]
        if self.model_type == "gpt-4v":
            records["images"] = []
            for img_path in records["image_paths"]:
                with Image.open(img_path) as img:
                    records["images"].append(np.array(img))
        out, _ = self.evaluator(records, self.model_type, self.prompt_version)
        print(out)
        if "success" in out["status"]:
            return 1, out["thoughts"]
        else:
            return 0, out["thoughts"]

    def _get_actions(self, dp):
        """Convert each step's raw action string into a readable phrase.

        Steps whose raw action cannot be parsed contribute ``None``.
        """
        actions = []
        for step in dp["steps"]:
            action = None
            try:
                raw_action = step["other"]["raw_action"]
                splits = raw_action.split(" ")
                if not splits:
                    # NOTE(review): str.split always returns a non-empty
                    # list, so this branch is unreachable as written.
                    action = raw_action.replace("_", " ")
                elif splits[0] == "click":
                    element_str = " ".join(splits[6:])
                    action = f"click at [{element_str}]"
                elif splits[0] in ["scroll", "stop"]:
                    action = raw_action
                elif splits[0] == "type":
                    matches = re.findall(r"\[(.*?)\]", raw_action)
                    typed = matches[1].strip()
                    last_bracket_pos = raw_action.rfind("]")
                    element_str = raw_action[last_bracket_pos + 1 :].strip()
                    action = f"type [{typed}] at [{element_str}]"
                else:
                    action = raw_action
            except Exception as e:
                # Fixed typo in the original message ("acrtion").
                print("Error in extracting action", e, step)
            actions.append(action)
        return actions
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/agent/__init__.py | exps/webarena_exp/agent/__init__.py | from .agent import (
Agent,
PromptAgent,
TeacherForcingAgent,
construct_agent,
)
__all__ = ["Agent", "TeacherForcingAgent", "PromptAgent", "construct_agent"]
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/agent/agent.py | exps/webarena_exp/agent/agent.py | import argparse
import json
from typing import Any
import tiktoken
from beartype import beartype
from agent.prompts import *
from browser_env import Trajectory
from browser_env.actions import (
Action,
ActionParsingError,
create_id_based_action,
create_none_action,
create_playwright_action,
)
from browser_env.utils import Observation, StateInfo
from llms import (
call_llm,
generate_from_huggingface_completion,
generate_from_openai_chat_completion,
generate_from_openai_completion,
lm_config,
)
from llms.tokenizers import Tokenizer
from agent.evaluator import GUIAgentEvaluator
from pprint import pprint
class Agent:
    """Base class for the agent"""

    def __init__(self, *args: Any) -> None:
        pass

    def next_action(
        self, trajectory: Trajectory, intent: str, meta_data: Any
    ) -> Action:
        """Predict the next action given the observation"""
        raise NotImplementedError

    def reset(
        self,
        test_config_file: str,
    ) -> None:
        """Reset internal state for a new task described by the config file."""
        raise NotImplementedError
class TeacherForcingAgent(Agent):
    """Agent that replays a fixed, pre-specified action sequence."""

    def __init__(self) -> None:
        super().__init__()

    def set_action_set_tag(self, tag: str) -> None:
        self.action_set_tag = tag

    def set_actions(self, action_seq: str | list[str]) -> None:
        """Parse the scripted actions (newline-separated string or list)."""
        if isinstance(action_seq, str):
            raw_actions = action_seq.strip().split("\n")
        else:
            raw_actions = action_seq

        parsed: list[Action] = []
        for raw in raw_actions:
            raw = raw.strip()
            try:
                if self.action_set_tag == "playwright":
                    parsed.append(create_playwright_action(raw))
                    continue
                if self.action_set_tag == "id_accessibility_tree":
                    parsed.append(create_id_based_action(raw))
                    continue
                raise ValueError(
                    f"Unknown action type {self.action_set_tag}"
                )
            except ActionParsingError:
                # Unparseable entries become no-ops carrying the raw text.
                fallback = create_none_action()
                fallback["raw_prediction"] = raw
                parsed.append(fallback)
        self.actions: list[Action] = parsed

    def next_action(
        self, trajectory: Trajectory, intent: str, meta_data: Any
    ) -> Action:
        """Pop and return the next scripted action."""
        return self.actions.pop(0)

    def reset(
        self,
        test_config_file: str,
    ) -> None:
        """Load the reference action sequence from the task config file."""
        with open(test_config_file) as f:
            ref_actions = json.load(f)["reference_action_sequence"]
        self.set_action_set_tag(ref_actions["action_set_tag"])
        self.set_actions(ref_actions["action_sequence"])
class PromptAgent(Agent):
    """prompt-based agent that emits action given the history"""

    @beartype
    def __init__(
        self,
        action_set_tag: str,
        lm_config: lm_config.LMConfig,
        prompt_constructor: PromptConstructor,
    ) -> None:
        super().__init__()
        self.lm_config = lm_config
        self.prompt_constructor = prompt_constructor
        self.action_set_tag = action_set_tag

    def set_action_set_tag(self, tag: str) -> None:
        self.action_set_tag = tag

    @beartype
    def next_action(
        self, trajectory: Trajectory, intent: str, meta_data: dict[str, Any]
    ) -> Action:
        """Query the LM and parse its reply into an executable action.

        Retries the LM call until a parseable action is produced or
        ``max_retry`` is exhausted, in which case a no-op action carrying
        the raw reply is returned.
        """
        prompt = self.prompt_constructor.construct(
            trajectory, intent, meta_data
        )
        lm_config = self.lm_config
        n = 0
        while True:
            response = call_llm(lm_config, prompt)
            # Some prompt formats force the reply to start with a fixed prefix.
            force_prefix = self.prompt_constructor.instruction[
                "meta_data"
            ].get("force_prefix", "")
            response = f"{force_prefix}{response}"
            n += 1
            try:
                parsed_response = self.prompt_constructor.extract_action(
                    response
                )
                if self.action_set_tag == "id_accessibility_tree":
                    action = create_id_based_action(parsed_response)
                elif self.action_set_tag == "playwright":
                    action = create_playwright_action(parsed_response)
                else:
                    raise ValueError(
                        f"Unknown action type {self.action_set_tag}"
                    )
                action["raw_prediction"] = response
                break
            except ActionParsingError as e:
                # Give the LM another chance until the retry budget runs out.
                if n >= lm_config.gen_config["max_retry"]:
                    action = create_none_action()
                    action["raw_prediction"] = response
                    break
        return action

    def reset(self, test_config_file: str) -> None:
        pass
class ReflexionAgent(Agent):
    """Prompt-based agent that also generates self-reflections across trials."""

    @beartype
    def __init__(
        self,
        action_set_tag: str,
        lm_config: lm_config.LMConfig,
        action_prompt_constructor: PromptConstructor,
        reflexion_prompt_constructor: PromptConstructor,
        evaluator_type: str,
        result_path: str | None = None,
        eval_lm_model: str | None = None,
        eval_prompt_version: str | None = None,
    ) -> None:
        """
        Args:
            action_set_tag: Which action grammar to parse replies with.
            lm_config: LM configuration used for acting and reflecting.
            action_prompt_constructor: Builds the action-prediction prompt.
            reflexion_prompt_constructor: Builds the reflection prompt.
            evaluator_type: "model" enables the learned GUIAgentEvaluator
                judge; any other value leaves ``self.evaluator`` as None.
            result_path: Judge log directory (model mode only).
            eval_lm_model: Judge model name (model mode only).
            eval_prompt_version: Judge prompt version (model mode only).
        """
        super().__init__()
        self.lm_config = lm_config
        self.action_prompt_constructor = action_prompt_constructor
        self.reflexion_prompt_constructor = reflexion_prompt_constructor
        self.action_set_tag = action_set_tag
        self.evaluator_type = evaluator_type
        if self.evaluator_type == "model":
            self.evaluator = GUIAgentEvaluator(result_path, eval_lm_model, eval_prompt_version)
        else:
            self.evaluator = None

    def set_action_set_tag(self, tag: str) -> None:
        self.action_set_tag = tag

    @beartype
    def next_action(
        self, trajectory: Trajectory, intent: str, meta_data: dict[str, Any]
    ) -> Action:
        """Query the LM for the next action, retrying until parseable or
        the retry budget is exhausted (then a no-op action is returned)."""
        prompt = self.action_prompt_constructor.construct(
            trajectory, intent, meta_data
        )
        lm_config = self.lm_config
        n = 0
        while True:
            response = call_llm(lm_config, prompt)
            # Some prompt formats force the reply to start with a fixed prefix.
            force_prefix = self.action_prompt_constructor.instruction[
                "meta_data"
            ].get("force_prefix", "")
            response = f"{force_prefix}{response}"
            n += 1
            try:
                parsed_response = self.action_prompt_constructor.extract_action(
                    response
                )
                if self.action_set_tag == "id_accessibility_tree":
                    action = create_id_based_action(parsed_response)
                elif self.action_set_tag == "playwright":
                    action = create_playwright_action(parsed_response)
                else:
                    raise ValueError(
                        f"Unknown action type {self.action_set_tag}"
                    )
                action["raw_prediction"] = response
                break
            except ActionParsingError as e:
                if n >= lm_config.gen_config["max_retry"]:
                    action = create_none_action()
                    action["raw_prediction"] = response
                    break
        return action

    def generate_reflection(self, records: dict) -> str:
        """Ask the LM for a reflection on the finished trial.

        Retries until a non-empty response is produced or ``max_retry``
        is exhausted; may return an empty string in the latter case.
        """
        prompt = self.reflexion_prompt_constructor.construct(records)
        lm_config = self.lm_config
        response = ""
        n = 0
        while True:
            response = call_llm(lm_config, prompt)
            force_prefix = self.reflexion_prompt_constructor.instruction[
                "meta_data"
            ].get("force_prefix", "")
            response = f"{force_prefix}{response}"
            n += 1
            if response:
                break
            if n >= lm_config.gen_config["max_retry"]:
                break
        return response

    def reset(self, test_config_file: str) -> None:
        pass
def construct_agent(args: argparse.Namespace) -> Agent:
    """Build an agent of the type named by ``args.agent_type``.

    Supported types: "teacher_forcing", "prompt", "reflexion".

    Raises:
        NotImplementedError: If ``args.agent_type`` is not recognized.
    """
    llm_config = lm_config.construct_llm_config(args)

    agent: Agent
    if args.agent_type == "teacher_forcing":
        agent = TeacherForcingAgent()
    elif args.agent_type == "prompt":
        with open(args.instruction_path) as f:
            constructor_type = json.load(f)["meta_data"]["prompt_constructor"]
        tokenizer = Tokenizer(args.provider, args.model)
        # NOTE(review): eval() instantiates whatever class name the
        # instruction file specifies — fine for trusted configs, but a
        # code-injection vector if instruction files come from untrusted
        # sources.
        prompt_constructor = eval(constructor_type)(
            args.instruction_path, lm_config=llm_config, tokenizer=tokenizer
        )
        agent = PromptAgent(
            action_set_tag=args.action_set_tag,
            lm_config=llm_config,
            prompt_constructor=prompt_constructor,
        )
    elif args.agent_type == "reflexion":
        # The reflexion agent always uses its dedicated constructor rather
        # than the one named in the instruction file.
        constructor_type = "ReflexionPromptConstructor"
        tokenizer = Tokenizer(args.provider, args.model)
        action_prompt_constructor = eval(constructor_type)(
            args.instruction_path, lm_config=llm_config, tokenizer=tokenizer
        )
        reflection_gen_prompt_constructor = ReflectionGenerationPromptConstructor(
            "agent/prompts/jsons/reflexion_generation.json", lm_config=llm_config, tokenizer=tokenizer
        )
        # NOTE(review): `Path` is not imported in this module directly;
        # presumably it arrives via `from agent.prompts import *` — verify.
        eval_save_path = Path(args.result_dir) / "eval"
        eval_save_path.mkdir(parents=True, exist_ok=True)
        agent = ReflexionAgent(
            action_set_tag=args.action_set_tag,
            lm_config=llm_config,
            action_prompt_constructor=action_prompt_constructor,
            reflexion_prompt_constructor=reflection_gen_prompt_constructor,
            evaluator_type=args.reflexion_evaluator,
            result_path=str(eval_save_path),
            eval_lm_model=args.eval_lm_model,
            eval_prompt_version=args.eval_prompt_version,
        )
    else:
        raise NotImplementedError(
            f"agent type {args.agent_type} not implemented"
        )
    return agent
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/agent/prompts/prompt_constructor.py | exps/webarena_exp/agent/prompts/prompt_constructor.py | import json
import re
from pathlib import Path
from typing import Any, TypedDict
from browser_env import Action, ActionParsingError, Trajectory
from browser_env.env_config import URL_MAPPINGS
from browser_env.utils import StateInfo
from llms import lm_config
from llms.tokenizers import Tokenizer
from llms.utils import APIInput
class Instruction(TypedDict):
    """Instruction for constructing prompt"""

    # Introductory system text shown before the examples.
    intro: str
    # (input, output) demonstration pairs.
    examples: list[tuple[str, str]]
    # Template string with {placeholders} filled in per step.
    template: str
    # Extra settings (keywords, action_splitter, force_prefix, ...).
    meta_data: dict[str, Any]
class PromptConstructor(object):
    """Base class that turns (intent, observation) into an LM API payload.

    Subclasses implement ``construct`` to render the prompt and
    ``_extract_action`` to pull the predicted action out of the model reply.
    """

    def __init__(
        self,
        instruction_path: str | Path,
        lm_config: lm_config.LMConfig,
        tokenizer: Tokenizer,
    ):
        self.instruction_path = Path(instruction_path)
        self.obs_modality = "text"
        self.lm_config = lm_config
        # Use a context manager so the instruction file handle is closed
        # deterministically (the original `json.load(open(...))` leaked it).
        with open(self.instruction_path) as f:
            instruction = json.load(f)
        instruction["examples"] = [tuple(e) for e in instruction["examples"]]
        self.instruction: Instruction = instruction
        self.tokenizer = tokenizer

    def get_lm_api_input(
        self, intro: str, examples: list[tuple[str, str]], current: str
    ) -> APIInput:
        """Return the prompt in the format required by the configured API.

        OpenAI chat mode yields a message list; OpenAI completion mode and
        Llama-2 chat mode yield a single formatted string.
        """
        message: list[dict[str, str]] | str
        if "openai" in self.lm_config.provider:
            if self.lm_config.mode == "chat":
                message = [{"role": "system", "content": intro}]
                for (x, y) in examples:
                    message.append(
                        {
                            "role": "user",
                            "name": "example_user",
                            "content": x,
                        }
                    )
                    message.append(
                        {
                            "role": "assistant",
                            "name": "example_assistant",
                            "content": y,
                        }
                    )
                message.append({"role": "user", "content": current})
                return message
            elif self.lm_config.mode == "completion":
                message = f"{intro}\n\n"
                message += "Here are a few examples:\n"
                for example in examples:
                    message += f"Observation\n:{example[0]}\n\n"
                    message += f"Action: {example[1]}\n\n"
                message += "Now make prediction given the observation\n\n"
                message += f"Observation\n:{current}\n\n"
                message += "Action:"
                return message
            else:
                raise ValueError(
                    f"OpenAI models do not support mode {self.lm_config.mode}"
                )
        elif "huggingface" in self.lm_config.provider:
            # https://huggingface.co/blog/llama2#how-to-prompt-llama-2
            # https://github.com/facebookresearch/llama/blob/main/llama/generation.py#L320
            if "Llama-2" in self.lm_config.model:
                if self.lm_config.mode == "chat":
                    B_INST, E_INST = "[INST]", "[/INST]"
                    B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
                    BOS, EOS = "<s>", "</s>"
                    # adding the system message to be the starting of the first example
                    examples = [
                        (
                            B_SYS + intro + E_SYS + examples[0][0],
                            examples[0][1],
                        )
                    ] + examples[1:]
                    message = "".join(
                        [
                            f"{BOS}{B_INST} {x.strip()} {E_INST} {y.strip()} {EOS}"
                            for (x, y) in examples
                        ]
                    )
                    # add the current observation
                    message += f"{BOS}{B_INST} {current.strip()} {E_INST} {self.instruction['meta_data'].get('force_prefix', '')}"
                    return message
                else:
                    raise ValueError("Only chat mode is supported for Llama-2")
            else:
                raise ValueError(
                    f"Huggingface models do not support model_tag {self.lm_config.gen_config['model_tag']}"
                )
        else:
            raise NotImplementedError(
                f"Provider {self.lm_config.provider} not implemented"
            )

    def construct(
        self,
        trajectory: Trajectory,
        intent: str,
        meta_data: dict[str, Any] = {},
    ) -> APIInput:
        """Render the prompt for the current step; implemented by subclasses."""
        raise NotImplementedError

    def map_url_to_real(self, url: str) -> str:
        """Map the urls to their real world counterparts"""
        for i, j in URL_MAPPINGS.items():
            if i in url:
                url = url.replace(i, j)
        return url

    def map_url_to_local(self, url: str) -> str:
        """Map the urls to their local counterparts"""
        for i, j in URL_MAPPINGS.items():
            if j in url:
                url = url.replace(j, i)
            # also map the https variant of the real-world url
            if j.replace("http", "https") in url:
                url = url.replace(j.replace("http", "https"), i)
        return url

    def _extract_action(self, response: str) -> str:
        """Pull the raw action string out of the reply; subclass hook."""
        raise NotImplementedError

    def extract_action(self, response: str) -> str:
        """Extract the action and rewrite real-world urls back to local ones."""
        response = self._extract_action(response)
        response = self.map_url_to_local(response)
        return response
class DirectPromptConstructor(PromptConstructor):
    """The agent will direct predict the action"""

    def __init__(
        self,
        instruction_path: str | Path,
        lm_config: lm_config.LMConfig,
        tokenizer: Tokenizer,
    ):
        super().__init__(instruction_path, lm_config, tokenizer)

    def construct(
        self,
        trajectory: Trajectory,
        intent: str,
        meta_data: dict[str, Any] = {},
    ) -> APIInput:
        """Construct prompt given the trajectory"""
        intro = self.instruction["intro"]
        examples = self.instruction["examples"]
        template = self.instruction["template"]
        keywords = self.instruction["meta_data"]["keywords"]
        state_info: StateInfo = trajectory[-1]  # type: ignore[assignment]

        obs = state_info["observation"][self.obs_modality]
        max_obs_length = self.lm_config.gen_config["max_obs_length"]
        if max_obs_length:
            # Truncate the observation to the configured token budget.
            obs = self.tokenizer.decode(self.tokenizer.encode(obs)[:max_obs_length])  # type: ignore[arg-type]

        page = state_info["info"]["page"]
        url = page.url
        previous_action_str = meta_data["action_history"][-1]

        # input x
        current = template.format(
            objective=intent,
            url=self.map_url_to_real(url),
            observation=obs,
            previous_action=previous_action_str,
        )

        # Sanity check: every "{keyword}" placeholder must have been
        # substituted.  The braces must be tripled -- f"{{{k}}}" renders as
        # "{keyword}"; the original f"{{k}}" compared against the constant
        # literal "{k}", so the assertion never checked anything.
        assert all([f"{{{k}}}" not in current for k in keywords])

        prompt = self.get_lm_api_input(intro, examples, current)
        return prompt

    def _extract_action(self, response: str) -> str:
        """Return the text between the first pair of action splitters."""
        action_splitter = self.instruction["meta_data"]["action_splitter"]
        pattern = rf"{action_splitter}((.|\n)*?){action_splitter}"
        match = re.search(pattern, response)
        if match:
            return match.group(1).strip()
        else:
            raise ActionParsingError(
                f"Cannot parse action from response {response}"
            )
class CoTPromptConstructor(PromptConstructor):
    """The agent will perform step-by-step reasoning before the answer"""

    def __init__(
        self,
        instruction_path: str | Path,
        lm_config: lm_config.LMConfig,
        tokenizer: Tokenizer,
    ):
        super().__init__(instruction_path, lm_config, tokenizer)
        self.answer_phrase = self.instruction["meta_data"]["answer_phrase"]

    def construct(
        self,
        trajectory: Trajectory,
        intent: str,
        meta_data: dict[str, Any] = {},
    ) -> APIInput:
        """Render the CoT prompt from the latest observation and history."""
        intro = self.instruction["intro"]
        examples = self.instruction["examples"]
        template = self.instruction["template"]
        keywords = self.instruction["meta_data"]["keywords"]
        state_info: StateInfo = trajectory[-1]  # type: ignore[assignment]

        obs = state_info["observation"][self.obs_modality]
        max_obs_length = self.lm_config.gen_config["max_obs_length"]
        if max_obs_length:
            # Truncate the observation to the configured token budget.
            obs = self.tokenizer.decode(self.tokenizer.encode(obs)[:max_obs_length])  # type: ignore[arg-type]

        page = state_info["info"]["page"]
        url = page.url
        previous_action_str = meta_data["action_history"][-1]
        current = template.format(
            objective=intent,
            url=self.map_url_to_real(url),
            observation=obs,
            previous_action=previous_action_str,
        )

        # Every "{keyword}" placeholder must have been substituted.  Braces
        # are tripled so f"{{{k}}}" renders as "{keyword}"; the original
        # f"{{k}}" compared against the constant "{k}" and was a no-op.
        assert all([f"{{{k}}}" not in current for k in keywords])

        prompt = self.get_lm_api_input(intro, examples, current)
        return prompt

    def _extract_action(self, response: str) -> str:
        # find the first occurence of action
        action_splitter = self.instruction["meta_data"]["action_splitter"]
        pattern = rf"{action_splitter}((.|\n)*?){action_splitter}"
        match = re.search(pattern, response)
        if match:
            return match.group(1).strip()
        else:
            # NOTE(review): the message mentions answer_phrase, but the
            # pattern actually matched on action_splitter — confirm which
            # the message should report.
            raise ActionParsingError(
                f'Cannot find the answer phrase "{self.answer_phrase}" in "{response}"'
            )
class ReflexionPromptConstructor(CoTPromptConstructor):
    """The agent will perform step-by-step reasoning before the answer, with reflexion"""

    def __init__(
        self,
        instruction_path: str | Path,
        lm_config: lm_config.LMConfig,
        tokenizer: Tokenizer,
    ):
        super().__init__(instruction_path, lm_config, tokenizer)
        self.answer_phrase = self.instruction["meta_data"]["answer_phrase"]

    def construct(
        self,
        trajectory: Trajectory,
        intent: str,
        meta_data: dict[str, Any] = {},
    ) -> APIInput:
        """Render the CoT prompt, additionally injecting prior reflections."""
        intro = self.instruction["intro"]
        examples = self.instruction["examples"]
        template = self.instruction["template"]
        keywords = self.instruction["meta_data"]["keywords"]
        state_info: StateInfo = trajectory[-1]  # type: ignore[assignment]

        obs = state_info["observation"][self.obs_modality]
        max_obs_length = self.lm_config.gen_config["max_obs_length"]
        if max_obs_length:
            # Truncate the observation to the configured token budget.
            obs = self.tokenizer.decode(self.tokenizer.encode(obs)[:max_obs_length])  # type: ignore[arg-type]

        page = state_info["info"]["page"]
        url = page.url
        previous_action_str = meta_data["action_history"][-1]

        # Reflections accumulated over earlier trials, one per line.
        memory = ""
        if "memory" in meta_data:
            memory = "\n" + "\n".join(meta_data["memory"])
        current = template.format(
            objective=intent,
            url=self.map_url_to_real(url),
            observation=obs,
            previous_action=previous_action_str,
            memory=memory,
        )

        # Every "{keyword}" placeholder must have been substituted.  Braces
        # are tripled so f"{{{k}}}" renders as "{keyword}"; the original
        # f"{{k}}" compared against the constant "{k}" and was a no-op.
        assert all([f"{{{k}}}" not in current for k in keywords])

        prompt = self.get_lm_api_input(intro, examples, current)
        return prompt

    def _extract_action(self, response: str) -> str:
        # find the first occurence of action
        action_splitter = self.instruction["meta_data"]["action_splitter"]
        pattern = rf"{action_splitter}((.|\n)*?){action_splitter}"
        match = re.search(pattern, response)
        if match:
            return match.group(1).strip()
        else:
            # NOTE(review): duplicated from CoTPromptConstructor — could be
            # removed to inherit; kept to avoid behavior-surface changes.
            raise ActionParsingError(
                f'Cannot find the answer phrase "{self.answer_phrase}" in "{response}"'
            )
class ReflectionGenerationPromptConstructor(CoTPromptConstructor):
    """Conditioned on the trajectory, the agent will generate the reflection"""

    def __init__(
        self,
        instruction_path: str | Path,
        lm_config: lm_config.LMConfig,
        tokenizer: Tokenizer,
    ):
        super().__init__(instruction_path, lm_config, tokenizer)

    def construct(self, records: dict[str, Any]) -> APIInput:
        """Render the reflection-generation prompt from a full trial record."""
        intro = self.instruction["intro"]
        examples = self.instruction["examples"]
        template = self.instruction["template"]
        keywords = self.instruction["meta_data"]["keywords"]

        # Interleave each step's observation (URL + accessibility tree)
        # with the raw action that followed it.
        obs_and_action = ""
        for idx, step in enumerate(records["steps"]):
            obs = step["accessibility_tree"]  # TODO: support other obs_modality
            max_obs_length = self.lm_config.gen_config["max_obs_length"]
            if max_obs_length:
                obs = self.tokenizer.decode(self.tokenizer.encode(obs)[:max_obs_length])  # type: ignore[arg-type]
            url = step["url"]
            action_str = step["other"]["raw_action"]
            obs_and_action += f"OBSERVATION {idx}:\nURL: {url}\n{obs}\n\n"
            obs_and_action += f"ACTION {idx}:\n{action_str}\n\n"

        # Reflections from earlier trials, or the literal "none".
        if not records.get("memory", []):
            memory = "none"
        else:
            memory = "\n"
            for trail_idx, reflection in enumerate(records["memory"]):
                memory += f"TRIAL {trail_idx}:\n{reflection}\n"

        status = records["status"]
        current = template.format(
            objective=records["intent"],
            trajectory=obs_and_action,
            status=status,
            memory=memory,
        )

        # Every "{keyword}" placeholder must have been substituted.  Braces
        # are tripled so f"{{{k}}}" renders as "{keyword}"; the original
        # f"{{k}}" compared against the constant "{k}" and was a no-op.
        assert all([f"{{{k}}}" not in current for k in keywords])

        prompt = self.get_lm_api_input(intro, examples, current)
        return prompt
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/agent/prompts/to_json.py | exps/webarena_exp/agent/prompts/to_json.py | import glob
import importlib
import json
import os
# use the current directory as the root
def run() -> None:
    """Convert all python files in agent/prompts/raw to json files in agent/prompts/jsons.

    Python files are easier to edit; the json files are what the agent loads.
    """
    for p_file in glob.glob("agent/prompts/raw/*.py"):
        # Import each raw prompt file as a module and pull out its `prompt`.
        base_name = os.path.basename(p_file).replace(".py", "")
        module = importlib.import_module(f"agent.prompts.raw.{base_name}")
        prompt = module.prompt
        # Save the prompt dict as pretty-printed json.
        os.makedirs("agent/prompts/jsons", exist_ok=True)
        with open(f"agent/prompts/jsons/{base_name}.json", "w+") as f:
            json.dump(prompt, f, indent=2)
    # (dropped the pointless f-string prefixes on literals with no fields)
    print("Done convert python files to json")


if __name__ == "__main__":
    run()
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/agent/prompts/__init__.py | exps/webarena_exp/agent/prompts/__init__.py | from .prompt_constructor import *
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/agent/prompts/raw/p_cot_id_actree_2s_no_na.py | exps/webarena_exp/agent/prompts/raw/p_cot_id_actree_2s_no_na.py | prompt = {
"intro": """You are an autonomous intelligent agent tasked with navigating a web browser. You will be given web-based tasks. These tasks will be accomplished through the use of specific actions you can issue.
Here's the information you'll have:
The user's objective: This is the task you're trying to complete.
The current web page's accessibility tree: This is a simplified representation of the webpage, providing key information.
The current web page's URL: This is the page you're currently navigating.
The open tabs: These are the tabs you have open.
The previous action: This is the action you just performed. It may be helpful to track your progress.
The actions you can perform fall into several categories:
Page Operation Actions:
`click [id]`: This action clicks on an element with a specific id on the webpage.
`type [id] [content] [press_enter_after=0|1]`: Use this to type the content into the field with id. By default, the "Enter" key is pressed after typing unless press_enter_after is set to 0.
`hover [id]`: Hover over an element with id.
`press [key_comb]`: Simulates the pressing of a key combination on the keyboard (e.g., Ctrl+v).
`scroll [direction=down|up]`: Scroll the page up or down.
Tab Management Actions:
`new_tab`: Open a new, empty browser tab.
`tab_focus [tab_index]`: Switch the browser's focus to a specific tab using its index.
`close_tab`: Close the currently active tab.
URL Navigation Actions:
`goto [url]`: Navigate to a specific URL.
`go_back`: Navigate to the previously viewed page.
`go_forward`: Navigate to the next page (if a previous 'go_back' action was performed).
Completion Action:
`stop [answer]`: Issue this action when you believe the task is complete. If the objective is to find a text-based answer, provide the answer in the bracket.
Homepage:
If you want to visit other websites, check out the homepage at http://homepage.com. It has a list of websites you can visit.
http://homepage.com/password.html lists all the account name and password for the websites. You can use them to log in to the websites.
To be successful, it is very important to follow the following rules:
1. You should only issue an action that is valid given the current observation
2. You should only issue one action at a time.
3. You should follow the examples to reason step by step and then issue the next action.
4. Generate the action in the correct format. Start with a "In summary, the next action I will perform is" phrase, followed by action inside ``````. For example, "In summary, the next action I will perform is ```click [1234]```".
5. Issue stop action when you think you have achieved the objective. Don't generate anything after stop.""",
"examples": [
(
"""OBSERVATION:
[1744] link 'HP CB782A#ABA 640 Inkjet Fax Machine (Renewed)'
[1749] StaticText '$279.49'
[1757] button 'Add to Cart'
[1760] button 'Add to Wish List'
[1761] button 'Add to Compare'
URL: http://onestopmarket.com/office-products/office-electronics.html
OBJECTIVE: What is the price of HP Inkjet Fax Machine
PREVIOUS ACTION: None""",
"Let's think step-by-step. This page list the information of HP Inkjet Fax Machine, which is the product identified in the objective. Its price is $279.49. I think I have achieved the objective. I will issue the stop action with the answer. In summary, the next action I will perform is ```stop [$279.49]```",
),
(
"""OBSERVATION:
[164] textbox 'Search' focused: True required: False
[171] button 'Go'
[174] link 'Find directions between two points'
[212] heading 'Search Results'
[216] button 'Close'
URL: http://openstreetmap.org
OBJECTIVE: Show me the restaurants near CMU
PREVIOUS ACTION: None""",
"Let's think step-by-step. This page has a search box whose ID is [164]. According to the nominatim rule of openstreetmap, I can search for the restaurants near a location by \"restaurants near\". I can submit my typing by pressing the Enter afterwards. In summary, the next action I will perform is ```type [164] [restaurants near CMU] [1]```",
),
],
"template": """OBSERVATION:
{observation}
URL: {url}
OBJECTIVE: {objective}
PREVIOUS ACTION: {previous_action}""",
"meta_data": {
"observation": "accessibility_tree",
"action_type": "id_accessibility_tree",
"keywords": ["url", "objective", "observation", "previous_action"],
"prompt_constructor": "CoTPromptConstructor",
"answer_phrase": "In summary, the next action I will perform is",
"action_splitter": "```"
},
}
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/agent/prompts/raw/reflexion_generation.py | exps/webarena_exp/agent/prompts/raw/reflexion_generation.py | prompt = {
"intro": """You are an autonomous intelligent agent tasked with navigating a web browser. You will be given web-based tasks. These tasks will be accomplished through the use of specific actions you can issue.
Here's the information you'll have:
The user's objective: This is the task you're trying to complete.
The web page's accessibility tree: This is a simplified representation of the webpage, providing key information.
The web page's URL: This is the page you're currently navigating.
The open tabs: These are the tabs you have open.
The actions you can perform fall into several categories:
Page Operation Actions:
`click [id]`: This action clicks on an element with a specific id on the webpage.
`type [id] [content] [press_enter_after=0|1]`: Use this to type the content into the field with id. By default, the "Enter" key is pressed after typing unless press_enter_after is set to 0.
`hover [id]`: Hover over an element with id.
`press [key_comb]`: Simulates the pressing of a key combination on the keyboard (e.g., Ctrl+v).
`scroll [direction=down|up]`: Scroll the page up or down.
Tab Management Actions:
`new_tab`: Open a new, empty browser tab.
`tab_focus [tab_index]`: Switch the browser's focus to a specific tab using its index.
`close_tab`: Close the currently active tab.
URL Navigation Actions:
`goto [url]`: Navigate to a specific URL.
`go_back`: Navigate to the previously viewed page.
`go_forward`: Navigate to the next page (if a previous 'go_back' action was performed).
Completion Action:
`stop [answer]`: Issue this action when you believe the task is complete. If the objective is to find a text-based answer, provide the answer in the bracket.
Now you are trying to evaluate your performance on a past task. You will be given the objective of the task, the history of interaction including the observations you had and the actions you issued, and the status of the task. You will also be given the memory of your previous attempts. Your goal is to think about the strategy and path you took to attempt to complete the task. Try to summarize the reason why you failed to complete the task, and devise a concise, new plan that accounts for your mistake and can be helpful when you are solving the same task. Try to think differently from the previous attempts. Try to focus on the key aspect and make the plan concise.
""",
"examples": [
(
"""OBJECTIVE: Compare the time for walking and driving route from AMC Waterfront to Carnegie Mellon University
OBSERVATION AND ACTION HISTORY:
OBSERVATION 0:
Tab 0 (current): Dashboard / Magento Admin
[1] RootWebArea 'Dashboard / Magento Admin' focused: True
[178] link 'Magento Admin Panel'
[201] img 'Magento Admin Panel'
[85] menubar '' orientation: horizontal
[87] link '\ue604 DASHBOARD'
[90] link '\ue60b SALES'
[96] link '\ue608 CATALOG'
[102] link '\ue603 CUSTOMERS'
[108] link '\ue609 MARKETING'
[114] link '\ue602 CONTENT'
[120] link '\ue60a REPORTS'
[138] link '\ue60d STORES'
[144] link '\ue610 SYSTEM'
[150] link '\ue612 FIND PARTNERS & EXTENSIONS'
[821] button 'System Messages: 1'
[902] StaticText 'One or more '
[903] link 'indexers are invalid'
[904] StaticText '. Make sure your '
[905] link 'Magento cron job'
[906] StaticText ' is running.'
[240] heading 'Dashboard'
[242] link '\ue600 admin'
[244] link '\ue607'
[913] textbox '\ue60c' required: False
[48] main ''
[219] StaticText 'Scope:'
[250] button 'All Store Views' hasPopup: menu
[253] link '\ue633 What is this?'
[226] button 'Reload Data'
[917] HeaderAsNonLandmark ''
[919] StaticText 'Advanced Reporting'
[920] StaticText "Gain new insights and take command of your business' performance, using our dynamic product, order, and customer reports tailored to your customer data."
[921] link 'Go to Advanced Reporting \ue644'
[924] StaticText 'Chart is disabled. To enable the chart, click '
[925] link 'here'
[1154] StaticText 'Revenue'
[1054] StaticText '$0.00'
[1155] StaticText 'Tax'
[1156] StaticText 'Shipping'
[1157] StaticText 'Quantity'
[1068] StaticText '0'
[57] tablist '' multiselectable: False orientation: horizontal
[59] tab 'The information in this tab has been changed. This tab contains invalid data. Please resolve this before saving. Loading... Bestsellers' expanded: True selected: True controls: grid_tab_ordered_products_content
[67] link 'The information in this tab has been changed. This tab contains invalid data. Please resolve this before saving. Loading... Bestsellers'
[61] tab 'The information in this tab has been changed. This tab contains invalid data. Please resolve this before saving. Loading... Most Viewed Products' expanded: False selected: False controls: ui-id-1
[69] link 'The information in this tab has been changed. This tab contains invalid data. Please resolve this before saving. Loading... Most Viewed Products'
[63] tab 'The information in this tab has been changed. This tab contains invalid data. Please resolve this before saving. Loading... New Customers' expanded: False selected: False controls: ui-id-2
[71] link 'The information in this tab has been changed. This tab contains invalid data. Please resolve this before saving. Loading... New Customers'
[65] tab 'The information in this tab has been changed. This tab contains invalid data. Please resolve this before saving. Loading... Customers' expanded: False selected: False controls: ui-id-3
[73] link 'The information in this tab has been changed. This tab contains invalid data. Please resolve this before saving. Loading... Customers'
[79] tabpanel 'The information in this tab has been changed. This tab contains invalid data. Please resolve this before saving. Loading... Bestsellers'
[1088] table ''
[1158] row ''
[1159] columnheader 'Product' required: False
[1160] columnheader 'Price' required: False
[1161] columnheader 'Quantity' required: False
[1162] row 'http://localhost:7780/admin/catalog/product/edit/id/29/'
[1167] gridcell 'Sprite Stasis Ball 65 cm' required: False
[1168] gridcell '$27.00' required: False
[1169] gridcell '6' required: False
[930] StaticText 'Lifetime Sales'
[933] StaticText '$0.00'
[937] StaticText 'Average Order'
[944] StaticText 'Last Orders'
[945] table ''
[979] row ''
[980] columnheader 'Customer' required: False
[981] columnheader 'Items' required: False
[982] columnheader 'Total' required: False
[983] row 'http://localhost:7780/admin/sales/order/view/order_id/299/'
[988] gridcell 'Sarah Miller' required: False
[989] gridcell '5' required: False
[990] gridcell '$194.40' required: False
[984] row 'http://localhost:7780/admin/sales/order/view/order_id/65/'
[991] gridcell 'Grace Nguyen' required: False
[992] gridcell '4' required: False
[993] gridcell '$190.00' required: False
ACTION 0: stop [N/A]
STATUS: FAILED
REFLECTIONS FROM PREVIOUS ATTEMPTS: none""",
"I think the task is impossible to complete, thus I issue the stop action. However, the task is not completed successfully, which means I am wrong. I think I should go to the \"REPORT\" tab and do a search there for the best-selling products next time."
),
(
"""OBJECTIVE: List out reviewers, if exist, who mention about good fingerprint resistant
OBSERVATION AND ACTION HISTORY:
OBSERVATION 0:
URL: http://localhost:7770/3-pack-samsung-galaxy-s6-screen-protector-nearpow-tempered-glass-screen-protector-with-9h-hardness-crystal-clear-easy-bubble-free-installation-scratch-resist.html
Tab 0 (current): [3 Pack] Samsung Galaxy S6 Screen Protector, Nearpow [Tempered Glass] Screen Protector with [9H Hardness] [Crystal Clear] [Easy Bubble-Free Installation] [Scratch Resist]
[1] RootWebArea '[3 Pack] Samsung Galaxy S6 Screen Protector, Nearpow [Tempered Glass] Screen Protector with [9H Hardness] [Crystal Clear] [Easy Bubble-Free Installation] [Scratch Resist]' focused: True
[1314] link 'My Account'
[1312] link 'My Wish List'
[1316] link 'Sign Out'
[1319] StaticText 'Welcome, Emma Lopez!'
[1220] link 'Skip to Content'
[1229] link 'store logo'
[1322] img 'one_stop_market_logo'
[1323] link '\ue611 My Cart'
[2246] StaticText 'Search'
[1508] combobox '\ue615 Search' autocomplete: both hasPopup: listbox required: False expanded: False
[2249] link 'Advanced Search'
[1511] button 'Search' disabled: True
[1096] tablist '' multiselectable: False orientation: horizontal
[1098] tabpanel ''
[40] menu '' orientation: vertical
[791] menuitem '\ue622 Beauty & Personal Care' hasPopup: menu
[856] menuitem '\ue622 Sports & Outdoors' hasPopup: menu
[866] menuitem '\ue622 Clothing, Shoes & Jewelry' hasPopup: menu
[880] menuitem '\ue622 Home & Kitchen' hasPopup: menu
[917] menuitem '\ue622 Office Products' hasPopup: menu
[925] menuitem '\ue622 Tools & Home Improvement' hasPopup: menu
[930] menuitem '\ue622 Health & Household' hasPopup: menu
[936] menuitem '\ue622 Patio, Lawn & Garden' hasPopup: menu
[941] menuitem '\ue622 Electronics' hasPopup: menu
[1002] menuitem '\ue622 Cell Phones & Accessories' hasPopup: menu
[1017] menuitem '\ue622 Video Games' hasPopup: menu
[1030] menuitem '\ue622 Grocery & Gourmet Food' hasPopup: menu
[1253] link 'Home'
[1256] StaticText '[3 Pack] Samsung Galaxy S6 Screen Protector, Nearpow [Tempered Glass] Screen Protector with [9H Hardness] [Crystal Clear] [Easy Bubble-Free Installation] [Scratch Resist]'
[5] main ''
[1257] heading '[3 Pack] Samsung Galaxy S6 Screen Protector, Nearpow [Tempered Glass] Screen Protector with [9H Hardness] [Crystal Clear] [Easy Bubble-Free Installation] [Scratch Resist]'
[11] generic 'Availability'
[13] StaticText 'IN STOCK'
[1331] StaticText 'SKU'
[1467] StaticText 'B01G31IYM0'
[1264] LayoutTable ''
[1469] StaticText 'Rating:'
[1334] generic '78%'
[2221] StaticText '% of'
[2224] StaticText '100'
[1335] link '12\xa0 Reviews '
[1336] link 'Add Your Review'
[1338] StaticText '$7.99'
[1279] LayoutTable ''
[1483] StaticText 'Qty'
[1484] spinbutton 'Qty' required: False valuemin: 0 valuemax: 0 valuetext:
[1485] button 'Add to Cart'
[1281] link 'Add to Wish List'
[1282] link 'Add to Compare'
[1287] link 'Skip to the end of the images gallery'
[1117] button 'Previous'
[1119] generic 'Image'
[2252] img 'Image'
[1118] button 'Next'
ACTION 0:
click [1335] where [1335] is [1335] link '12\xa0 Reviews '
OBSERVATION 1:
URL: http://localhost:7770/3-pack-samsung-galaxy-s6-screen-protector-nearpow-tempered-glass-screen-protector-with-9h-hardness-crystal-clear-easy-bubble-free-installation-scratch-resist.html
Tab 0 (current): [3 Pack] Samsung Galaxy S6 Screen Protector, Nearpow [Tempered Glass] Screen Protector with [9H Hardness] [Crystal Clear] [Easy Bubble-Free Installation] [Scratch Resist]
[1] RootWebArea '[3 Pack] Samsung Galaxy S6 Screen Protector, Nearpow [Tempered Glass] Screen Protector with [9H Hardness] [Crystal Clear] [Easy Bubble-Free Installation] [Scratch Resist]' focused: True
[5] main ''
[1349] StaticText 'Skip to the beginning of the images gallery'
[1106] tablist '' multiselectable: False orientation: horizontal
[1107] tab 'Details' expanded: False selected: False controls: description
[1350] link 'Details'
[1110] tab 'Reviews (12)' expanded: True selected: True controls: reviews
[1352] link 'Reviews (12)'
[2365] tabpanel 'Reviews (12)'
[2460] StaticText 'Customer Reviews'
[2555] StaticText "Best screen protectors I've used!"
[2519] LayoutTable ''
[2556] LayoutTableRow ''
[2699] LayoutTableCell 'Rating'
[2700] generic '100%'
[2559] StaticText 'It is super clear and fingerprint resistant. It was kind of hard trying to get it on, and I did get some hairs on the sticky side, but all in all it was great! Bubbles went away around the small hairs so you can barely tell they are there. They also give you tons of extra tools to help you clean the screen and get dust particles off of the screen before you put it on. I think it was just me being clumsy with all of the dust particles getting inside the screen.'
[2562] StaticText 'Review by '
[2564] StaticText 'Rachel'
[2567] StaticText 'Posted on '
[2568] time ''
[2701] StaticText '4/18/23'
[2569] StaticText 'Good screen protector for the money and good customer service'
[2522] LayoutTable ''
[2570] LayoutTableRow ''
[2702] LayoutTableCell 'Rating'
[2703] generic '80%'
[2573] StaticText 'This is the second time I have used this product. It is a little tricky to apply. I had it on my phone for about 10 months and had dropped my phone a few times without incident. The last drop shattered the protector but thankfully did what it was supposed to do and protected my phone screen. The second one in the package had a small chip in it, which caused it to have a hairline crack all the way through. I emailed the company and they were very quick to respond and sent a new one free of charge. I am very satisfied with the product and only give it a four star rating because it is sometimes very difficult to get out the bubbles. I have 2 very tiny specks that would just not come out.'
[2576] StaticText 'Review by '
[2578] StaticText 'chris'
[2581] StaticText 'Posted on '
[2582] time ''
[2704] StaticText '4/18/23'
[2583] StaticText 'Bubbles still there after a few days'
[2525] LayoutTable ''
[2584] LayoutTableRow ''
[2705] LayoutTableCell 'Rating'
[2706] generic '80%'
[2587] StaticText "OK, so my first impression was, wow it worked with only 1 small bubble. I was like OK, it's normal to have a few small bubbles. The description says that the small bubbles will disappear after a couple days. Well it's been over a week and the one small tiny bubble is still there. It never went away. Ugh I need to add this to my review. The glue does not last forever. It started to come off about a month after I put it on. The bad thing when it does start to come off, it's easy to take off the screen protectant."
ACTION 1:
stop [Rachel]
STATUS: FAILED
REFLECTIONS FROM PREVIOUS ATTEMPTS: none""",
"I find the review from Rachel, which is the answer to the objective. I issue the stop action with the answer. However, the task is not completed successfully. This might be because I missed other reviews that also mention about good fingerprint resistant. I think I should read all the reviews next time."
),
],
"template": """OBJECTIVE: {objective}
OBSERVATION AND ACTION HISTORY:
{trajectory}
STATUS: {status}
REFLECTIONS FROM PREVIOUS ATTEMPTS: {memory}""",
"meta_data": {
"observation": "accessibility_tree",
"action_type": "id_accessibility_tree",
"keywords": ["objective", "trajectory", "status", "memory"],
"prompt_constructor": "ReflectionGenerationPromptConstructor",
"answer_phrase": "",
"action_splitter": "```"
},
}
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/agent/prompts/raw/p_direct_id_actree_3s_llama.py | exps/webarena_exp/agent/prompts/raw/p_direct_id_actree_3s_llama.py | prompt = {
"intro": """You are an autonomous intelligent agent tasked with navigating a web browser. The actions you can perform fall into several categories:
Page Operation Actions:
`click [id]`: This action clicks on an element with a specific id on the webpage.
`type [id] [content] [press_enter_after=0|1]`: Use this to type the content into the field with id. By default, the "Enter" key is pressed after typing unless press_enter_after is set to 0.
`hover [id]`: Hover over an element with id.
`press [key_comb]`: Simulates the pressing of a key combination on the keyboard (e.g., Ctrl+v).
`scroll [direction=down|up]`: Scroll the page up or down.
Tab Management Actions:
`new_tab`: Open a new, empty browser tab.
`tab_focus [tab_index]`: Switch the browser's focus to a specific tab using its index.
`close_tab`: Close the currently active tab.
URL Navigation Actions:
`goto [url]`: Navigate to a specific URL.
`go_back`: Navigate to the previously viewed page.
`go_forward`: Navigate to the next page (if a previous 'go_back' action was performed).
Completion Action:
`stop [answer]`: Issue this action when you believe the task is complete. If the objective is to find a text-based answer, provide the answer in the bracket.
Homepage:
If you want to visit other websites, check out the homepage at http://homepage.com. It has a list of websites you can visit.
You can only issue one action at a time""",
"examples": [
(
"""Observation:
[1744] link 'HP CB782A#ABA 640 Inkjet Fax Machine (Renewed)'
[1749] StaticText '$279.49'
[1757] button 'Add to Cart'
[1760] button 'Add to Wish List'
[1761] button 'Add to Compare'
URL: http://onestopmarket.com/office-products/office-electronics.html
Objective: What is the price of HP Inkjet Fax Machine
Previous action: None""",
"```stop [$279.49]```",
),
(
"""Observation:
[164] textbox 'Search' focused: True required: False
[171] button 'Go'
[174] link 'Find directions between two points'
[212] heading 'Search Results'
[216] button 'Close'
URL: http://openstreetmap.org
Objective: Show me the restaurants near CMU
Previous action: None""",
"```type [164] [restaurants near CMU] [1]```",
),
(
"""Observation:
[2036] button 'Sort by: New' hasPopup: menu expanded: False
[587] link 'US Marine’s adoption of Afghan war orphan voided'
[989] time 'March 30, 2023 at 15:03:48 AM UTC'
[602] link 'York student uses AI chatbot to get parking fine revoked'
[1025] time 'March 15, 2023 at 7:48:34 AM UTC'
[617] link 'Loveland parents furious after teachers leave, communication lagged during school threat investigation'
[1025] time 'March 2, 2023 at 3:46:01 AM UTC'
URL: http://reddit.com/f/news/new
Objective: Open the most recent post that was published prior to March 1st.
Previous action: None""",
"```scroll [down]```",
)
],
"template": """Observation:
{observation}
URL: {url}
Objective: {objective}
Previous action: {previous_action}""",
"meta_data": {
"observation": "accessibility_tree",
"action_type": "id_accessibility_tree",
"keywords": ["url", "objective", "observation", "previous_action"],
"prompt_constructor": "DirectPromptConstructor",
"answer_phrase": "In summary, the next action I will perform is",
"action_splitter": "```",
"force_prefix": "```"
},
}
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/agent/prompts/raw/p_cot_id_actree_2s.py | exps/webarena_exp/agent/prompts/raw/p_cot_id_actree_2s.py | prompt = {
"intro": """You are an autonomous intelligent agent tasked with navigating a web browser. You will be given web-based tasks. These tasks will be accomplished through the use of specific actions you can issue.
Here's the information you'll have:
The user's objective: This is the task you're trying to complete.
The current web page's accessibility tree: This is a simplified representation of the webpage, providing key information.
The current web page's URL: This is the page you're currently navigating.
The open tabs: These are the tabs you have open.
The previous action: This is the action you just performed. It may be helpful to track your progress.
The actions you can perform fall into several categories:
Page Operation Actions:
`click [id]`: This action clicks on an element with a specific id on the webpage.
`type [id] [content] [press_enter_after=0|1]`: Use this to type the content into the field with id. By default, the "Enter" key is pressed after typing unless press_enter_after is set to 0.
`hover [id]`: Hover over an element with id.
`press [key_comb]`: Simulates the pressing of a key combination on the keyboard (e.g., Ctrl+v).
`scroll [direction=down|up]`: Scroll the page up or down.
Tab Management Actions:
`new_tab`: Open a new, empty browser tab.
`tab_focus [tab_index]`: Switch the browser's focus to a specific tab using its index.
`close_tab`: Close the currently active tab.
URL Navigation Actions:
`goto [url]`: Navigate to a specific URL.
`go_back`: Navigate to the previously viewed page.
`go_forward`: Navigate to the next page (if a previous 'go_back' action was performed).
Completion Action:
`stop [answer]`: Issue this action when you believe the task is complete. If the objective is to find a text-based answer, provide the answer in the bracket. If you believe the task is impossible to complete, provide the answer as "N/A" in the bracket.
Homepage:
If you want to visit other websites, check out the homepage at http://homepage.com. It has a list of websites you can visit.
http://homepage.com/password.html lists all the account name and password for the websites. You can use them to log in to the websites.
To be successful, it is very important to follow the following rules:
1. You should only issue an action that is valid given the current observation
2. You should only issue one action at a time.
3. You should follow the examples to reason step by step and then issue the next action.
4. Generate the action in the correct format. Start with a "In summary, the next action I will perform is" phrase, followed by action inside ``````. For example, "In summary, the next action I will perform is ```click [1234]```".
5. Issue stop action when you think you have achieved the objective. Don't generate anything after stop.""",
"examples": [
(
"""OBSERVATION:
[1744] link 'HP CB782A#ABA 640 Inkjet Fax Machine (Renewed)'
[1749] StaticText '$279.49'
[1757] button 'Add to Cart'
[1760] button 'Add to Wish List'
[1761] button 'Add to Compare'
URL: http://onestopmarket.com/office-products/office-electronics.html
OBJECTIVE: What is the price of HP Inkjet Fax Machine
PREVIOUS ACTION: None""",
"Let's think step-by-step. This page list the information of HP Inkjet Fax Machine, which is the product identified in the objective. Its price is $279.49. I think I have achieved the objective. I will issue the stop action with the answer. In summary, the next action I will perform is ```stop [$279.49]```",
),
(
"""OBSERVATION:
[164] textbox 'Search' focused: True required: False
[171] button 'Go'
[174] link 'Find directions between two points'
[212] heading 'Search Results'
[216] button 'Close'
URL: http://openstreetmap.org
OBJECTIVE: Show me the restaurants near CMU
PREVIOUS ACTION: None""",
"Let's think step-by-step. This page has a search box whose ID is [164]. According to the nominatim rule of openstreetmap, I can search for the restaurants near a location by \"restaurants near\". I can submit my typing by pressing the Enter afterwards. In summary, the next action I will perform is ```type [164] [restaurants near CMU] [1]```",
),
],
"template": """OBSERVATION:
{observation}
URL: {url}
OBJECTIVE: {objective}
PREVIOUS ACTION: {previous_action}""",
"meta_data": {
"observation": "accessibility_tree",
"action_type": "id_accessibility_tree",
"keywords": ["url", "objective", "observation", "previous_action"],
"prompt_constructor": "CoTPromptConstructor",
"answer_phrase": "In summary, the next action I will perform is",
"action_splitter": "```"
},
}
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/agent/prompts/raw/p_direct_id_actree_2s_no_na.py | exps/webarena_exp/agent/prompts/raw/p_direct_id_actree_2s_no_na.py | prompt = {
"intro": """You are an autonomous intelligent agent tasked with navigating a web browser. You will be given web-based tasks. These tasks will be accomplished through the use of specific actions you can issue.
Here's the information you'll have:
The user's objective: This is the task you're trying to complete.
The current web page's accessibility tree: This is a simplified representation of the webpage, providing key information.
The current web page's URL: This is the page you're currently navigating.
The open tabs: These are the tabs you have open.
The previous action: This is the action you just performed. It may be helpful to track your progress.
The actions you can perform fall into several categories:
Page Operation Actions:
`click [id]`: This action clicks on an element with a specific id on the webpage.
`type [id] [content] [press_enter_after=0|1]`: Use this to type the content into the field with id. By default, the "Enter" key is pressed after typing unless press_enter_after is set to 0.
`hover [id]`: Hover over an element with id.
`press [key_comb]`: Simulates the pressing of a key combination on the keyboard (e.g., Ctrl+v).
`scroll [direction=down|up]`: Scroll the page up or down.
Tab Management Actions:
`new_tab`: Open a new, empty browser tab.
`tab_focus [tab_index]`: Switch the browser's focus to a specific tab using its index.
`close_tab`: Close the currently active tab.
URL Navigation Actions:
`goto [url]`: Navigate to a specific URL.
`go_back`: Navigate to the previously viewed page.
`go_forward`: Navigate to the next page (if a previous 'go_back' action was performed).
Completion Action:
`stop [answer]`: Issue this action when you believe the task is complete. If the objective is to find a text-based answer, provide the answer in the bracket.
Homepage:
If you want to visit other websites, check out the homepage at http://homepage.com. It has a list of websites you can visit.
http://homepage.com/password.html lists all the account name and password for the websites. You can use them to log in to the websites.
To be successful, it is very important to follow the following rules:
1. You should only issue an action that is valid given the current observation
2. You should only issue one action at a time.
4. Generate the action in the correct format, wrap the action inside ``````. For example, ```click [1234]```".
5. Issue stop action when you think you have achieved the objective.""",
"examples": [
(
"""OBSERVATION:
[1744] link 'HP CB782A#ABA 640 Inkjet Fax Machine (Renewed)'
[1749] StaticText '$279.49'
[1757] button 'Add to Cart'
[1760] button 'Add to Wish List'
[1761] button 'Add to Compare'
URL: http://onestopmarket.com/office-products/office-electronics.html
OBJECTIVE: What is the price of HP Inkjet Fax Machine
PREVIOUS ACTION: None""",
"```stop [$279.49]```",
),
(
"""OBSERVATION:
[164] textbox 'Search' focused: True required: False
[171] button 'Go'
[174] link 'Find directions between two points'
[212] heading 'Search Results'
[216] button 'Close'
URL: http://openstreetmap.org
OBJECTIVE: Show me the restaurants near CMU
PREVIOUS ACTION: None""",
"```type [164] [restaurants near CMU] [1]```",
),
],
"template": """OBSERVATION:
{observation}
URL: {url}
OBJECTIVE: {objective}
PREVIOUS ACTION: {previous_action}""",
"meta_data": {
"observation": "accessibility_tree",
"action_type": "id_accessibility_tree",
"keywords": ["url", "objective", "observation", "previous_action"],
"prompt_constructor": "CoTPromptConstructor",
"answer_phrase": "In summary, the next action I will perform is",
"action_splitter": "```"
},
}
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/agent/prompts/raw/p_cot_id_actree_2s_reflexion.py | exps/webarena_exp/agent/prompts/raw/p_cot_id_actree_2s_reflexion.py | prompt = {
"intro": """You are an autonomous intelligent agent tasked with navigating a web browser. You will be given web-based tasks. These tasks will be accomplished through the use of specific actions you can issue.
Here's the information you'll have:
The user's objective: This is the task you're trying to complete.
The current web page's accessibility tree: This is a simplified representation of the webpage, providing key information.
The current web page's URL: This is the page you're currently navigating.
The open tabs: These are the tabs you have open.
The previous action: This is the action you just performed. It may be helpful to track your progress.
The actions you can perform fall into several categories:
Page Operation Actions:
`click [id]`: This action clicks on an element with a specific id on the webpage.
`type [id] [content] [press_enter_after=0|1]`: Use this to type the content into the field with id. By default, the "Enter" key is pressed after typing unless press_enter_after is set to 0.
`hover [id]`: Hover over an element with id.
`press [key_comb]`: Simulates the pressing of a key combination on the keyboard (e.g., Ctrl+v).
`scroll [direction=down|up]`: Scroll the page up or down.
Tab Management Actions:
`new_tab`: Open a new, empty browser tab.
`tab_focus [tab_index]`: Switch the browser's focus to a specific tab using its index.
`close_tab`: Close the currently active tab.
URL Navigation Actions:
`goto [url]`: Navigate to a specific URL.
`go_back`: Navigate to the previously viewed page.
`go_forward`: Navigate to the next page (if a previous 'go_back' action was performed).
Completion Action:
`stop [answer]`: Issue this action when you believe the task is complete.
Homepage:
If you want to visit other websites, check out the homepage at http://homepage.com. It has a list of websites you can visit.
http://homepage.com/password.html lists all the account name and password for the websites. You can use them to log in to the websites.
To be successful, it is very important to follow the following rules:
1. You should only issue an action that is valid given the current observation
2. You should only issue one action at a time.
3. You should follow the examples to reason step by step and then issue the next action.
4. Generate the action in the correct format. Start with a "In summary, the next action I will perform is" phrase, followed by action inside ``````. For example, "In summary, the next action I will perform is ```click [1234]```". Including your thinking process before giving the action is encouraged.
5. Issue stop action when you think you have achieved the objective. Don't generate anything after stop.""",
"examples": [
(
"""OBSERVATION:
[1744] link 'HP CB782A#ABA 640 Inkjet Fax Machine (Renewed)'
[1749] StaticText '$279.49'
[1757] button 'Add to Cart'
[1760] button 'Add to Wish List'
[1761] button 'Add to Compare'
URL: http://onestopmarket.com/office-products/office-electronics.html
OBJECTIVE: What is the price of HP Inkjet Fax Machine
MEMORY: None
PREVIOUS ACTION: None""",
"Let's think step-by-step. This page list the information of HP Inkjet Fax Machine, which is the product identified in the objective. Its price is $279.49. I think I have achieved the objective. I will issue the stop action with the answer. In summary, the next action I will perform is ```stop [$279.49]```",
),
(
"""OBSERVATION:
[164] textbox 'Search' focused: True required: False
[171] button 'Go'
[174] link 'Find directions between two points'
[212] heading 'Search Results'
[216] button 'Close'
URL: http://openstreetmap.org
OBJECTIVE: Show me the restaurants near CMU
MEMORY: I first searched "CMU" and then searched for "restaurants". It looks like the search results are different from what I expected, as the search results are about the restaurants in another city. I think I should go back to the previous page and search for "restaurants near CMU".
PREVIOUS ACTION: None""",
"Let's think step-by-step. This page has a search box whose ID is [164]. According to the nominatim rule of openstreetmap, I can search for the restaurants near a location by \"restaurants near\". I can submit my typing by pressing the Enter afterwards. In summary, the next action I will perform is ```type [164] [restaurants near CMU] [1]```",
),
],
"template": """OBSERVATION:
{observation}
URL: {url}
OBJECTIVE: {objective}
MEMORY: {memory}
PREVIOUS ACTION: {previous_action}""",
"meta_data": {
"observation": "accessibility_tree",
"action_type": "id_accessibility_tree",
"keywords": ["url", "objective", "observation", "previous_action", "memory"],
"prompt_constructor": "ReflexionPromptConstructor",
"answer_phrase": "In summary, the next action I will perform is",
"action_splitter": "```"
},
}
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/exps/webarena_exp/agent/prompts/raw/p_direct_id_actree_2s.py | exps/webarena_exp/agent/prompts/raw/p_direct_id_actree_2s.py | prompt = {
"intro": """You are an autonomous intelligent agent tasked with navigating a web browser. You will be given web-based tasks. These tasks will be accomplished through the use of specific actions you can issue.
Here's the information you'll have:
The user's objective: This is the task you're trying to complete.
The current web page's accessibility tree: This is a simplified representation of the webpage, providing key information.
The current web page's URL: This is the page you're currently navigating.
The open tabs: These are the tabs you have open.
The previous action: This is the action you just performed. It may be helpful to track your progress.
The actions you can perform fall into several categories:
Page Operation Actions:
`click [id]`: This action clicks on an element with a specific id on the webpage.
`type [id] [content] [press_enter_after=0|1]`: Use this to type the content into the field with id. By default, the "Enter" key is pressed after typing unless press_enter_after is set to 0.
`hover [id]`: Hover over an element with id.
`press [key_comb]`: Simulates the pressing of a key combination on the keyboard (e.g., Ctrl+v).
`scroll [direction=down|up]`: Scroll the page up or down.
Tab Management Actions:
`new_tab`: Open a new, empty browser tab.
`tab_focus [tab_index]`: Switch the browser's focus to a specific tab using its index.
`close_tab`: Close the currently active tab.
URL Navigation Actions:
`goto [url]`: Navigate to a specific URL.
`go_back`: Navigate to the previously viewed page.
`go_forward`: Navigate to the next page (if a previous 'go_back' action was performed).
Completion Action:
`stop [answer]`: Issue this action when you believe the task is complete. If the objective is to find a text-based answer, provide the answer in the bracket. If you believe the task is impossible to complete, provide the answer as "N/A" in the bracket.
Homepage:
If you want to visit other websites, check out the homepage at http://homepage.com. It has a list of websites you can visit.
http://homepage.com/password.html lists all the account name and password for the websites. You can use them to log in to the websites.
To be successful, it is very important to follow the following rules:
1. You should only issue an action that is valid given the current observation
2. You should only issue one action at a time.
3. Generate the action in the correct format. Always put the action inside a pair of ```. For example, ```click [1234]```.
5. Issue stop action when you think you have achieved the objective. Don't generate anything after stop.""",
"examples": [
(
"""OBSERVATION:
[1744] link 'HP CB782A#ABA 640 Inkjet Fax Machine (Renewed)'
[1749] StaticText '$279.49'
[1757] button 'Add to Cart'
[1760] button 'Add to Wish List'
[1761] button 'Add to Compare'
URL: http://onestopmarket.com/office-products/office-electronics.html
OBJECTIVE: What is the price of HP Inkjet Fax Machine
PREVIOUS ACTION: None""",
"```stop [$279.49]```",
),
(
"""OBSERVATION:
[164] textbox 'Search' focused: True required: False
[171] button 'Go'
[174] link 'Find directions between two points'
[212] heading 'Search Results'
[216] button 'Close'
URL: http://openstreetmap.org
OBJECTIVE: Show me the restaurants near CMU
PREVIOUS ACTION: None""",
"```type [164] [restaurants near CMU] [1]```",
),
],
"template": """OBSERVATION:
{observation}
URL: {url}
OBJECTIVE: {objective}
PREVIOUS ACTION: {previous_action}""",
"meta_data": {
"observation": "accessibility_tree",
"action_type": "id_accessibility_tree",
"keywords": ["url", "objective", "observation", "previous_action"],
"prompt_constructor": "DirectPromptConstructor",
"action_splitter": "```"
},
}
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/agent_eval/setup.py | agent_eval/setup.py | from setuptools import setup
# Packaging config for the agent_eval library.
setup(
    name="agent_eval",
    version="0.0.1",
    packages=["agent_eval"],
    install_requires=[
        "openai",
        "requests",
        "pillow",
        "bs4",
        "matplotlib",
        "termcolor",
        "human_id",
        "pandas",
        # Fixed: the OCR package is published on PyPI as "easyocr"
        # (JaidedAI/EasyOCR); the previous "easy_ocr" name does not resolve
        # at install time.
        "easyocr",
        "einops",
        "transformers_stream_generator",
        "tiktoken",
    ],
)
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/agent_eval/scripts/run_eval_android.py | agent_eval/scripts/run_eval_android.py | from tqdm import tqdm
import json
from human_id import generate_id
import os
import argparse
import multiprocessing as mp
from agent_eval.clients import LM_Client, GPT4V_Client
from agent_eval.domains.unified import UniTrajectoryDataset
from agent_eval.eval.evaluator import Evaluator
from agent_eval.eval.metrics import get_metrics_from_result_json
from termcolor import cprint
import random
# Root of the project tree; override via the PROJECT_ROOT environment
# variable. The default is a developer-specific absolute path (sanitized
# in this dump), so set the env var on other machines.
PROJECT_ROOT = os.environ.get(
    "PROJECT_ROOT", "/home/<user>/code/<removed>_GUI/agent_eval"
)
def process_sample(
    idx: str,
    traj_info: dict,
    log_save_path,
    model: str,
    eval_version: str,
) -> list[dict]:
    """Evaluate one trajectory with the reward model and return its verdict.

    Args:
        idx: Dataset index of the trajectory (kept in the result for bookkeeping).
        traj_info: Trajectory record from UniTrajectoryDataset (intent, captions,
            ground-truth eval annotations, ...).
        log_save_path: Run output directory; per-trajectory logs go to "<path>/trajs".
        model: Client to use ("gpt-3.5", "gpt-4", "mixtral", "gpt-4v").
        eval_version: Evaluator prompt version (e.g. "android").

    Returns:
        A single-element list holding the index, the ground-truth annotations,
        the reward-model verdict (bool), and the trajectory uid.

    NOTE(review): unlike run_eval_web.py, exceptions here propagate to the
    multiprocessing pool instead of being swallowed — confirm that is intended.
    """
    # Clients are built inside the worker: they are not picklable across
    # multiprocessing boundaries.
    oai_key = "<removed>"
    clients = {
        "gpt-3.5": LM_Client(api_key=oai_key, model_name="gpt-3.5"),
        "gpt-4": LM_Client(api_key=oai_key, model_name="gpt-4"),
        "mixtral": LM_Client(api_key="<removed>", model_name="mixtral"),
        "gpt-4v": GPT4V_Client(api_key=oai_key),
    }
    evaluator = Evaluator(clients, log_save_path=log_save_path + "/trajs")
    out, _ = evaluator(traj_info, model, eval_version)
    # The evaluator is inconsistent about capitalization of the verdict.
    eval_result = out["status"] in ("success", "Success")
    return [
        {
            "idx": idx,
            "gt": traj_info["eval"],
            "rm": eval_result,
            "uid": traj_info["traj_name"],
        }
    ]
def main(args):
    """Run reward-model evaluation over all annotated Android trajectories.

    Creates a fresh run directory, evaluates each annotated sample in a
    process pool, and writes per-trajectory verdicts (rm_results.json) plus
    aggregate agreement metrics (stats.json).

    NOTE(review): `args.model` and `args.prompt` are parsed by the CLI but
    ignored here — model and prompt version are hard-coded in `main_config`
    below. Only `args.num_proc` is actually used.
    """
    main_config = {
        "caption_data": "ocr-sft-qwenvl-v2",
        # "model": args.model,
        # "model": "gpt-4v",
        # "model": "gpt-3.5",
        # "model": "mixtral",
        "model": "gpt-4",
        # "eval_version": args.prompt,
        # "eval_version": "android-gpt4v",
        "eval_version": "android",
    }
    data_config = {
        # "dataset_path": "/home/<user>/data/GUI_Proj/unified_datasets/webarena-gpt4cot-release2/",
        # "dataset_path": "/home/<user>/data/GUI_Proj/unified_datasets/ios80-cogagent-v0/",
        # "dataset_path": "/home/<user>/data/GUI_Proj/unified_datasets/android-cogagent-v0/",
        # "dataset_path": "/home/<user>/data/GUI_Proj/unified_datasets/android-cogagent-v0/",
        # "dataset_path": "/home/<user>/data/android_offline/output_autoui_base_filteredbc_test/",
        "dataset_path": "/home/<user>/data/GUI_Proj/android_result/output_autoui_base/",
        # "dataset_path": "/home/<user>/data/GUI_Proj/android_result/output_autoui_large/",
        # "dataset_path": "/home/<user>/data/GUI_Proj/android_result/output_cogagent/",
        # "dataset_path": "/home/<user>/data/GUI_Proj/android_result/android-gt/",
        "eval_log_names": ["filered_v0"],
    }
    # Fresh, human-readable run directory; refuse to clobber an existing one.
    log_save_path = os.path.join(PROJECT_ROOT, "outputs", generate_id(word_count=3))
    assert not os.path.exists(log_save_path)
    os.makedirs(log_save_path)
    os.makedirs(log_save_path + "/trajs")
    cprint(f"Saving logs to {log_save_path}", "cyan")
    # Screenshots are only loaded when the evaluator is multimodal (gpt-4v).
    dev_dataset = UniTrajectoryDataset(
        **data_config,
        captioner_name=main_config["caption_data"],
        load_image=True if main_config["model"] == "gpt-4v" else False,
    )
    samples_to_eval = dev_dataset.get_idx_list_with_annotations()
    # if "webarena" in data_config["dataset_path"]:
    # samples_to_eval = [dev_dataset.uid_to_idx(uid) for uid in webarena_dev_uid_list]
    # samples_to_eval = [dev_dataset.uid_to_idx(uid) for uid in webarena_val_uid_list]
    # random.seed(20)
    # samples_to_eval = random.sample(samples_to_eval, 50)
    # Fan the samples out across worker processes; each worker builds its own
    # API clients (see process_sample).
    pool = mp.Pool(args.num_proc)
    jobs = [
        pool.apply_async(
            process_sample,
            args=(
                idx,
                dev_dataset[idx],
                log_save_path,
                main_config["model"],
                main_config["eval_version"],
            ),
        )
        for idx in samples_to_eval
    ]
    pool.close()

    def get_gt_label(result):
        # A trajectory counts as ground-truth success if its (first/only)
        # human annotation says "Success".
        for user_uid, ann in result["gt"].items():
            return ann["annotation"] == "Success"

    results = {}
    for job in tqdm(jobs):
        for res in job.get():
            results[res["uid"]] = {"gt": get_gt_label(res), "rm": res["rm"]}
    with open(log_save_path + "/rm_results.json", "w") as f:
        json.dump(results, f, indent=4)
    # Aggregate agreement metrics between ground truth and the reward model.
    metrics, _ = get_metrics_from_result_json(log_save_path + "/rm_results.json")
    metrics["config"] = {
        "model": main_config["model"],
        "eval_version": main_config["eval_version"],
        "caption_data": main_config["caption_data"],
        "samples_to_eval": samples_to_eval,
        "dataset_path": data_config["dataset_path"],
    }
    with open(log_save_path + "/stats.json", "w") as f:
        json.dump(metrics, f, indent=4)
    print(metrics)
if __name__ == "__main__":
random.seed(42)
parser = argparse.ArgumentParser()
parser.add_argument(
"--model",
type=str,
choices=["gpt-3.5", "gpt-4", "mixtral", "gpt-4v"],
default="gpt-3.5",
)
parser.add_argument(
"--prompt",
type=str,
choices=[
"naive-last-frame",
"naive-last-frame-v2",
"naive-last-frame-4v",
"naive-multi-frame",
"naive-multi-frame-v2",
"simplify-last-frame",
"simplify-last-frame-v2",
"simplify-last-3-frame",
"simplify-last-3-frame-v2",
"final-v2",
"final-v3",
"final-v3-gpt4v",
],
default="final-v3",
)
parser.add_argument("--num_proc", type=int, default=10)
args = parser.parse_args()
main(args)
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/agent_eval/scripts/run_eval_web.py | agent_eval/scripts/run_eval_web.py | from tqdm import tqdm
import json
from human_id import generate_id
import os
import argparse
import multiprocessing as mp
import traceback
from agent_eval.clients import LM_Client, GPT4V_Client
from agent_eval.domains.unified import UniTrajectoryDataset
from agent_eval.eval.evaluator import Evaluator
from agent_eval.eval.metrics import get_metrics_from_result_json
from termcolor import cprint
import random
# Root of the project tree; override via the PROJECT_ROOT environment
# variable. The default is a developer-specific absolute path (sanitized
# in this dump), so set the env var on other machines.
PROJECT_ROOT = os.environ.get(
    "PROJECT_ROOT", "/home/<user>/code/<removed>_GUI/agent_eval"
)
def process_sample(
    idx: str,
    traj_info: dict,
    log_save_path,
    model: str,
    eval_version: str,
) -> list[dict]:
    """Evaluate one WebArena trajectory with the reward model.

    Args:
        idx: Dataset index of the trajectory (kept in the result for bookkeeping).
        traj_info: Trajectory record from UniTrajectoryDataset (intent, captions,
            ground-truth eval annotations, ...).
        log_save_path: Run output directory; per-trajectory logs go to "<path>/trajs".
        model: Client to use ("gpt-3.5", "gpt-4", "mixtral", "gpt-4v").
        eval_version: Evaluator prompt version.

    Returns:
        A single-element list with the index, ground-truth annotations, the
        reward-model verdict (bool), and the trajectory uid — or an empty
        list when evaluation fails.
    """
    try:
        # Clients are built inside the worker: they are not picklable across
        # multiprocessing boundaries.
        oai_key = "<removed>"
        clients = {
            "gpt-3.5": LM_Client(api_key=oai_key, model_name="gpt-3.5"),
            "gpt-4": LM_Client(api_key=oai_key, model_name="gpt-4"),
            "mixtral": LM_Client(api_key="<removed>", model_name="mixtral"),
            "gpt-4v": GPT4V_Client(api_key=oai_key),
        }
        evaluator = Evaluator(clients, log_save_path=log_save_path + "/trajs")
        out, _ = evaluator(traj_info, model, eval_version)
        # The evaluator is inconsistent about capitalization of the verdict.
        eval_result = out["status"] in ("success", "Success")
        return [
            {
                "idx": idx,
                "gt": traj_info["eval"],
                "rm": eval_result,
                "uid": traj_info["traj_name"],
            }
        ]
    except Exception as e:
        # Best-effort: a failed sample is dropped (empty list) rather than
        # killing the whole multiprocessing run; keep the traceback for triage.
        print(f"Error on {idx}, {e}")
        print(traceback.format_exc())
        return []
def main(args):
    """Run reward-model evaluation over all annotated WebArena trajectories.

    Creates a fresh run directory, evaluates each annotated sample in a
    process pool, and writes per-trajectory verdicts (rm_results.json) plus
    aggregate agreement metrics (stats.json). Unlike the Android script,
    this one honors the --model and --prompt CLI options.
    """
    main_config = {
        "caption_data": "ocr-sft-qwenvl-v2",
        "model": args.model,
        # "model": "gpt-3.5",
        # "model": "mixtral",
        # "model": "gpt-4",
        "eval_version": args.prompt,
        # "eval_version": "final-v2",
    }
    data_config = {
        "dataset_path": "/home/<user>/data/GUI_Proj/unified_datasets/webarena-gpt4cot-release2/",
        "eval_log_names": ["v0"],
    }
    # Fresh, human-readable run directory; refuse to clobber an existing one.
    log_save_path = os.path.join(PROJECT_ROOT, "outputs", generate_id(word_count=3))
    assert not os.path.exists(log_save_path)
    os.makedirs(log_save_path)
    os.makedirs(log_save_path + "/trajs")
    cprint(f"Saving logs to {log_save_path}", "cyan")
    # Screenshots are only loaded when the evaluator is multimodal (gpt-4v).
    dev_dataset = UniTrajectoryDataset(
        **data_config,
        captioner_name=main_config["caption_data"],
        load_image=True if args.model == "gpt-4v" else False,
    )
    samples_to_eval = dev_dataset.get_idx_list_with_annotations()
    # Fan the samples out across worker processes; each worker builds its own
    # API clients (see process_sample).
    pool = mp.Pool(args.num_proc)
    jobs = [
        pool.apply_async(
            process_sample,
            args=(
                idx,
                dev_dataset[idx],
                log_save_path,
                main_config["model"],
                main_config["eval_version"],
            ),
        )
        for idx in samples_to_eval
    ]
    pool.close()

    def get_gt_label(result):
        # A trajectory counts as ground-truth success if its (first/only)
        # human annotation says "Success".
        for user_uid, ann in result["gt"].items():
            return ann["annotation"] == "Success"

    results = {}
    for job in tqdm(jobs):
        for res in job.get():
            results[res["uid"]] = {"gt": get_gt_label(res), "rm": res["rm"]}
    with open(log_save_path + "/rm_results.json", "w") as f:
        json.dump(results, f, indent=4)
    # Aggregate agreement metrics between ground truth and the reward model.
    metrics, _ = get_metrics_from_result_json(log_save_path + "/rm_results.json")
    metrics["config"] = {
        "model": main_config["model"],
        "eval_version": main_config["eval_version"],
        "caption_data": main_config["caption_data"],
        "samples_to_eval": samples_to_eval,
        "dataset_path": data_config["dataset_path"],
    }
    with open(log_save_path + "/stats.json", "w") as f:
        json.dump(metrics, f, indent=4)
    print(metrics)
if __name__ == "__main__":
random.seed(42)
parser = argparse.ArgumentParser()
parser.add_argument(
"--model",
type=str,
choices=["gpt-3.5", "gpt-4", "mixtral", "gpt-4v"],
default="gpt-3.5",
)
parser.add_argument(
"--prompt",
type=str,
choices=[
"naive-last-frame",
"naive-last-frame-v2",
"naive-last-frame-4v",
"naive-multi-frame",
"naive-multi-frame-v2",
"simplify-last-frame",
"simplify-last-frame-v2",
"simplify-last-3-frame",
"simplify-last-3-frame-v2",
"final-v2",
"final-v3",
"final-v3-gpt4v",
],
default="final-v3",
)
parser.add_argument("--num_proc", type=int, default=10)
args = parser.parse_args()
main(args)
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/agent_eval/scripts/annotate_ios_dense.py | agent_eval/scripts/annotate_ios_dense.py | from agent_eval.eval.annotator import Annotator
from agent_eval.domains.unified import UniTrajectoryDataset
from agent_eval.clients import LM_Client, GPT4V_Client
import multiprocessing as mp
from tqdm import tqdm
import json
def process_sample(
    traj_info: dict,
    model: str,
):
    """Produce per-step annotations for a single trajectory.

    Returns a dict with the trajectory uid and the annotator output, or
    None when annotation fails for any reason (the error is printed).
    """
    try:
        uid = traj_info["traj_name"]
        print("processing: ", uid)
        # Each worker builds its own client set; they are not picklable
        # across multiprocessing boundaries.
        openai_key = "<removed>"
        clients = {
            "gpt-3.5": LM_Client(api_key=openai_key, model_name="gpt-3.5"),
            "gpt-4": LM_Client(api_key=openai_key, model_name="gpt-4"),
            "mixtral": LM_Client(api_key="<removed>", model_name="mixtral"),
            "gpt-4v": GPT4V_Client(api_key=openai_key),
        }
        step_evals = Annotator(clients)(traj_info, model, "v1")
        return {"uid": uid, "per_step_eval": step_evals}
    except Exception as e:
        print(f"Error on {traj_info['traj_name']}, {e}")
        return None
def main():
    """Densely annotate every iOS trajectory with per-step mixtral evaluations.

    Fans trajectories out over a 20-worker process pool and collects the
    per-step annotations into one JSON file keyed by trajectory uid.

    NOTE(review): the input is the ios80-cogagent dataset but the output is
    written under an android_offline/output_autoui_base_300 path — confirm
    this destination is intended.
    """
    data_config = {
        "dataset_path": "/home/<user>/data/GUI_Proj/unified_datasets/ios80-cogagent-v0/",
        "eval_log_names": ["v0"],
    }
    # Text-only annotation: captions are used, screenshots are not loaded.
    dev_dataset = UniTrajectoryDataset(
        **data_config, captioner_name="ocr-sft-qwenvl-v2", load_image=False
    )
    pool = mp.Pool(20)
    jobs = [
        pool.apply_async(
            process_sample,
            args=(dev_dataset[idx], "mixtral"),
        )
        for idx in range(len(dev_dataset))
        # for idx in range(20)
    ]
    pool.close()
    results = {}
    for job in tqdm(jobs):
        res = job.get()
        # process_sample returns None on failure; such trajectories are
        # silently skipped.
        if res is None:
            continue
        results[res["uid"]] = res["per_step_eval"]
    with open(
        "/home/<user>/data/android_offline/output_autoui_base_300/traj_annotations-mixtral.json",
        "w",
    ) as f:
        json.dump(results, f)


if __name__ == "__main__":
    main()
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/agent_eval/agent_eval/utils.py | agent_eval/agent_eval/utils.py | from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
def display_images(images):
    """Show a sequence of PIL images in an (at most) two-column grid."""
    n = len(images)
    # A single image gets a 1x1 grid; otherwise two columns and as many
    # rows as needed.
    if n == 1:
        rows, cols = 1, 1
    else:
        rows, cols = int(np.ceil(n / 2.0)), 2

    fig, axs = plt.subplots(rows, cols, figsize=(20, 10))

    # Normalize axs to a 2D layout so indexing below is uniform.
    if n == 1:
        axs = np.array([[axs]])
    elif n <= cols:
        axs = np.array([axs])

    # Row-major fill: image i goes to cell (i // cols, i % cols).
    cells = [axs[r][c] for r in range(rows) for c in range(cols)]
    for cell, img in zip(cells, images):
        cell.imshow(img)
        cell.axis('off')

    # An odd count leaves the bottom-right cell empty — hide its axes too.
    if n % 2 == 1:
        axs[-1, -1].axis('off')

    plt.tight_layout()
    plt.show()
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/agent_eval/agent_eval/__init__.py | agent_eval/agent_eval/__init__.py | python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false | |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/agent_eval/agent_eval/clients.py | agent_eval/agent_eval/clients.py | import openai
from openai.api_resources import ChatCompletion
# from openai import OpenAI, AsyncOpenAI
import requests
from typing import List, Union, Dict, Optional, Tuple
from PIL import Image
from io import BytesIO
import base64
# from openai.types.chat.chat_completion import ChatCompletion
import os
import requests
import json
import numpy as np
def query_anyscale_api(messages, model, temperature=0):
    """Send a chat-completion request to the Anyscale Endpoints API.

    Args:
        messages: OpenAI-style chat messages (list of {"role", "content"} dicts).
        model: Model identifier, e.g. "mistralai/Mixtral-8x7B-Instruct-v0.1".
        temperature: Sampling temperature (0 for deterministic-ish output).

    Returns:
        (response_text, raw_response_json) on success, or
        ("API_ERROR: <message>", None) on any failure.
    """
    # Set the base URL and API key for the OpenAI API
    try:
        base_url = "https://api.endpoints.anyscale.com/v1"
        # NOTE(review): the API key is an empty placeholder — requests will be
        # rejected until a real key is supplied.
        api_key = ""
        # Endpoint for chat completions
        url = f"{base_url}/chat/completions"
        # Headers for the request
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {api_key}",
        }
        # Data payload for the request
        data = {"model": model, "messages": messages, "temperature": temperature}
        # Make the POST request and return the response
        # NOTE(review): no timeout is set, so a stalled connection blocks forever.
        response = requests.post(url, headers=headers, data=json.dumps(data)).json()
        return response["choices"][0]["message"]["content"].lstrip(), response
    except Exception as e:
        print(f"An error occurred: {e}")
        import traceback

        print(traceback.format_exc())
        return f"API_ERROR: {e}", None
def query_openai_api(messages, model, temperature=0, api_key=None):
    """Send a chat-completion request directly to the OpenAI REST API.

    Args:
        messages: OpenAI-style chat messages (list of {"role", "content"} dicts;
            content may be a multimodal part list for vision models).
        model: Model name, e.g. "gpt-4-vision-preview".
        temperature: Sampling temperature.
        api_key: Bearer token for the Authorization header.

    Returns:
        (response_text, raw_response_json) on success, or
        ("API_ERROR: <message>", None) on any failure (network error,
        rejected key, malformed response, ...).
    """
    try:
        url = "https://api.openai.com/v1/chat/completions"
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {api_key}",
        }
        data = {
            "model": model,
            "messages": messages,
            "temperature": temperature,
            "max_tokens": 4096,
        }
        # Make the POST request and return the response
        response = requests.post(url, headers=headers, data=json.dumps(data)).json()
        return response["choices"][0]["message"]["content"].lstrip(), response
    except Exception as e:
        print(f"An error occurred: {e}")
        return f"API_ERROR: {e}", None
# curl https://api.openai.com/v1/chat/completions \
# -H "Content-Type: application/json" \
# -H "Authorization: Bearer $OPENAI_API_KEY" \
# -d '{
# "model": "gpt-4-vision-preview",
# "messages": [
# {
# "role": "user",
# "content": [
# {
# "type": "text",
# "text": "What’s in this image?"
# },
# {
# "type": "image_url",
# "image_url": {
# "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
# }
# }
# ]
# }
# ],
# "max_tokens": 300
# }'
class LM_Client:
    """Chat wrapper over several text-only LLM backends.

    Supported model_name values: "local" (OpenAI-compatible local server),
    "gpt-3.5", "gpt-4" (OpenAI), and "mixtral" (routed through the Anyscale
    Endpoints API via query_anyscale_api).
    """

    def __init__(self, api_key, model_name="local"):
        # self.client = OpenAI(api_key=api_key)
        if model_name == "local":
            # TODO: The 'openai.api_base' option isn't read in the client API. You will need to pass it when you instantiate the client, e.g. 'OpenAI(api_base="http://localhost:8082/v1")'
            # openai.api_base = "http://localhost:8082/v1"
            # NOTE(review): self.client is never assigned (its construction is
            # commented out above), so this branch raises AttributeError.
            models = self.client.models.list()
            model = models["data"][0]["id"]
            self.model = model
        elif model_name == "gpt-3.5":
            # self.model = "gpt-3.5-turbo-1106"
            self.model = "gpt-3.5-turbo"
            # Credentials come from the environment, not the api_key argument.
            openai.api_key = os.environ["OPENAI_API_KEY"]
            openai.organization = os.environ.get("OPENAI_ORGANIZATION", "")
        elif model_name == "gpt-4":
            self.model = "gpt-4-1106-preview"
            openai.api_key = os.environ["OPENAI_API_KEY"]
            openai.organization = os.environ.get("OPENAI_ORGANIZATION", "")
        elif model_name == "mixtral":
            # Mixtral requests are dispatched to Anyscale in chat() below.
            self.model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
            # openai.api_key = "esecret_xv1c6k71xizxxpxed457wgwlib"
            # openai.base_url = "https://api.endpoints.anyscale.com/v1"
            # self.client = OpenAI(
            #     base_url = "https://api.endpoints.anyscale.com/v1",
            #     api_key="esecret_xv1c6k71xizxxpxed457wgwlib",
            # )
        else:
            raise ValueError(f"Invalid model name: {model_name}")

    def chat(self, messages, json_mode=False) -> Tuple[str, ChatCompletion]:
        """Send a full message list and return (text, raw completion).

        Expected message shape:
        messages=[{
            "role": "system",
            "content": "You are a helpful assistant."
        }, {
            "role": "user",
            "content": "hi"}
        ])
        """
        if "mistral" in self.model:
            # NOTE(review): this local `model` variable is never used — the
            # call below passes self.model directly.
            model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
            response_str, chat_completion = query_anyscale_api(messages, self.model)
        else:
            # json_mode switches on OpenAI's JSON-object response format.
            chat_completion = openai.ChatCompletion.create(
                model=self.model,
                messages=messages,
                response_format={"type": "json_object"} if json_mode else None,
                temperature=0,
            )
            response_str = chat_completion["choices"][0]["message"]["content"]
        return response_str, chat_completion

    def one_step_chat(
        self, text, system_msg: Optional[str] = None, json_mode=False
    ) -> Tuple[str, ChatCompletion]:
        """Single-turn convenience wrapper: optional system message + one user message."""
        messages = []
        if system_msg is not None:
            messages.append({"role": "system", "content": system_msg})
        messages.append({"role": "user", "content": text})
        return self.chat(messages, json_mode=json_mode)
class VLM_Client:
    """Client for a locally hosted vision-language model FastAPI service."""

    def __init__(self, port=8083) -> None:
        # The companion server is assumed to run on localhost.
        self.url = f"http://localhost:{port}"

    def chat(self, data: List[Dict[str, Union[str, None]]]) -> str:
        """Send a multimodal message list to the service and return its reply.

        Example payload:
        data = [
            [
                {'image': 'https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg'},
                {'text': "What is it?"},
            ],
        ]

        Raises:
            Exception: when the service responds with a non-200 status.
        """
        resp = requests.post(f"{self.url}/get_response/", json=data)
        if resp.status_code != 200:
            raise Exception(
                f"Error with status code {resp.status_code}: {resp.text}"
            )
        return resp.json()["response"]

    def one_step_chat(self, img, text) -> str:
        """Convenience wrapper for a single image + text turn."""
        return self.chat([[{"image": img}, {"text": text}]])
class GPT4V_Client:
    """Chat client for OpenAI's GPT-4 with vision via the REST API.

    Images are JPEG-encoded and inlined into the message as base64 data URLs.
    The previously duplicated encoding logic in the two one_step_* methods is
    factored into a single helper.
    """

    def __init__(self, api_key, model_name="gpt-4-vision-preview", max_tokens=512):
        self.api_key = api_key
        # self.client = OpenAI(api_key=api_key)
        self.model_name = model_name
        self.max_tokens = max_tokens

    @staticmethod
    def _to_jpeg_base64(image: Union[Image.Image, np.ndarray]) -> str:
        """Encode a PIL image or numpy array as a base64 JPEG string."""
        if isinstance(image, np.ndarray):
            image = Image.fromarray(image)
        # JPEG cannot store an alpha channel, so force RGB.
        image = image.convert("RGB")
        jpeg_buffer = BytesIO()
        image.save(jpeg_buffer, format="JPEG")
        return base64.b64encode(jpeg_buffer.getvalue()).decode("utf-8")

    def chat(self, messages, json_mode=False) -> Tuple[str, ChatCompletion]:
        """Send a prepared message list; returns (text, raw response)."""
        return query_openai_api(messages, self.model_name, api_key=self.api_key)

    def one_step_chat(
        self, text, image: Union[Image.Image, np.ndarray], system_msg: Optional[str] = None, json_mode=False
    ) -> Tuple[str, ChatCompletion]:
        """Single-turn chat with one text prompt and one inlined image."""
        jpg_base64_str = self._to_jpeg_base64(image)
        messages = []
        if system_msg is not None:
            messages.append({"role": "system", "content": system_msg})
        messages += [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": text},
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/jpeg;base64,{jpg_base64_str}"
                        },
                    },
                ],
            }
        ]
        return self.chat(messages, json_mode=json_mode)

    def one_step_multi_image_chat(
        self, text, images: list[Union[Image.Image, np.ndarray]], system_msg: Optional[str] = None, json_mode=False
    ) -> Tuple[str, ChatCompletion]:
        """Single-turn chat with one text prompt and several inlined images.

        images: [{"image": PIL.Image or np.ndarray, "detail": "high" or "low"}]
        For low res mode, we expect a 512px x 512px image. For high res mode,
        the short side of the image should be less than 768px and the long
        side should be less than 2,000px.
        """
        img_sub_msg = [
            {
                "type": "image_url",
                "image_url": {
                    "url": f"data:image/jpeg;base64,{self._to_jpeg_base64(img_info['image'])}",
                    "detail": img_info["detail"],
                },
            }
            for img_info in images
        ]
        messages = []
        if system_msg is not None:
            messages.append({"role": "system", "content": system_msg})
        messages += [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": text},
                ]
                + img_sub_msg,
            }
        ]
        return self.chat(messages, json_mode=json_mode)
# class BatchGPT4V:
# def __init__(self, api_key, model_name="gpt-4-vision-preview", max_tokens=512):
# self.client = AsyncOpenAI(api_key=api_key)
# self.model_name = model_name
# self.max_tokens = max_tokens
# async def one_step_chat(
# self, text, image: Image, system_msg: Optional[str] = None, json_mode=False
# ):
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/agent_eval/agent_eval/domains/unified.py | agent_eval/agent_eval/domains/unified.py | from PIL import Image
import os
import json
from typing import List, Dict, Any, Tuple
from collections import defaultdict
from termcolor import cprint
import re
import numpy as np
class UniTrajectoryDataset:
    """Unified reader for agent trajectory dumps (web / iOS / Android).

    Expects ``dataset_path`` (with a trailing slash) to contain:
      - ``trajectory_log.json``: list of trajectories, each with
        ``uid``, ``intent``, ``response`` and ``steps`` (each step has an
        ``img`` file name and domain-specific ``other`` metadata).
      - ``images/``: one screenshot per step.
      - ``captions/<captioner_name>.json``: optional per-image captions.
      - ``evals/<name>.jsonl``: annotation records keyed by
        ``task_uid`` + ``user_uid``.
    """
    def __init__(
        self,
        dataset_path: str,
        eval_log_names: List[str],
        captioner_name: str = "ocr-sft-qwenvl-v1",
        load_image=True,
    ) -> None:
        """Load the trajectory log, optional captions, and eval annotations.

        Args:
            dataset_path: Root of the unified dataset; must end with "/".
            eval_log_names: Names (without extension) of jsonl files under
                ``evals/`` to merge; later files overwrite earlier entries
                for the same (task_uid, user_uid) pair.
            captioner_name: Which caption file under ``captions/`` to use;
                missing file leaves ``self.captions`` as None.
            load_image: When True, ``__getitem__`` eagerly loads PIL images.
        """
        assert dataset_path[-1] == "/", "dataset_path must end with a /"
        self.dataset_path = dataset_path
        self.data_log = json.load(open(dataset_path + "trajectory_log.json", "r"))
        cprint(f"Using dataset: {dataset_path}", "green")
        # Captions are optional; downstream consumers must tolerate None.
        if os.path.exists(dataset_path + "captions/" + captioner_name + ".json"):
            self.captions = json.load(
                open(dataset_path + "captions/" + captioner_name + ".json", "r")
            )
        else:
            self.captions = None
        cprint(f"Using eval logs from: {eval_log_names}", "green")
        # evals[task_uid][user_uid] -> annotation record.
        self.evals = defaultdict(lambda: defaultdict())
        for eval_log_name in eval_log_names:
            eval_log_path = dataset_path + "evals/" + eval_log_name + ".jsonl"
            with open(eval_log_path, "r") as f:
                for line in f:
                    ann = json.loads(line)
                    # Skip placeholder rows without a real task reference.
                    if ann["task_uid"] != "" and ann["task_idx"] != -1:
                        self.evals[ann["task_uid"]][ann["user_uid"]] = ann
        self.uid_to_idx_map = {dp["uid"]: idx for idx, dp in enumerate(self.data_log)}
        self.idx_to_uid_map = {idx: dp["uid"] for idx, dp in enumerate(self.data_log)}
        self.load_image = load_image
    def uid_to_idx(self, uid):
        """Map a trajectory uid to its index in the trajectory log."""
        return self.uid_to_idx_map[uid]
    def idx_to_uid(self, idx):
        """Map a trajectory-log index back to its uid."""
        return self.idx_to_uid_map[idx]
    def get_idx_list_with_annotations(self):
        """Return indices of trajectories that have at least one annotation."""
        return [self.uid_to_idx(uid) for uid in self.evals.keys()]
    def __len__(self):
        return len(self.data_log)
    def __getitem__(self, idx):
        """Return a dict view of trajectory *idx*.

        Images are loaded eagerly only when ``self.load_image`` is True;
        otherwise ``info["images"]`` is None and only paths are provided.
        """
        dp = self.data_log[idx]
        imgs = None
        if self.load_image:
            imgs = []
            for step in dp["steps"]:
                img = Image.open(self.dataset_path + f"images/{step['img']}")
                # Force the pixel data to be read now, not lazily.
                img.load()
                imgs.append(img)
        info = {
            "intent": dp["intent"],
            "images": imgs,
            "image_paths": [
                self.dataset_path + f"images/{step['img']}" for step in dp["steps"]
            ],
            "response": dp["response"],
            # Caption keys are image file names minus the 4-char extension.
            "captions": [self.captions[f"{step['img'][:-4]}"] for step in dp["steps"]]
            if self.captions
            else None,
            "traj_name": dp["uid"],
            "eval": self.evals[dp["uid"]] if dp["uid"] in self.evals else None,
            "actions": self._get_actions(dp),
        }
        # assert len(info['captions']) == len(info['images']), f"traj {idx} has {len(info['captions'])} captions and {len(info['images'])} images"
        return info
    def _get_actions(self, dp):
        """Best-effort human-readable action strings, one per step.

        The raw action schema differs per domain; the domain is inferred
        from substrings of ``self.dataset_path`` ("web" / "ios" /
        "android"). A step whose action cannot be parsed yields None.
        """
        actions = []
        for step in dp["steps"]:
            action = None
            try:
                if "web" in self.dataset_path:
                    raw_action = step["other"]["raw_action"]
                    splits = raw_action.split(" ")
                    # NOTE(review): str.split always returns a non-empty
                    # list, so this branch is unreachable as written.
                    if not splits:
                        action = raw_action.replace("_", " ")
                    elif splits[0] == "click":
                        # Tokens after position 6 describe the clicked element.
                        element_str = " ".join(splits[6:])
                        action = f"click at [{element_str}]"
                    elif splits[0] in ["scroll", "stop"]:
                        action = raw_action
                    elif splits[0] == "type":
                        # The second bracketed group is the typed text; the
                        # element description follows the last closing bracket.
                        matches = re.findall(r"\[(.*?)\]", raw_action)
                        typed = matches[1].strip()
                        last_bracket_pos = raw_action.rfind("]")
                        element_str = raw_action[last_bracket_pos + 1 :].strip()
                        action = f"type [{typed}] at [{element_str}]"
                    else:
                        action = raw_action
                elif "ios" in self.dataset_path:
                    action = step["other"]["translated_action"]
                    # Collapse any tap variant to a plain "tap".
                    if "tap" in action:
                        action = "tap"
                elif "android" in self.dataset_path:
                    action = step["other"]["action"]
                    if "DualPoint" in action:
                        # Regex to find all numbers, including decimals
                        coordinates = re.findall(r"(\d+\.\d+)", action)
                        # A DualPoint action encodes start and end touch
                        # points; parse all four coordinates.
                        x1, y1, x2, y2 = map(float, coordinates)
                        # Euclidean distance between start and end points.
                        distance = np.sqrt((x2 - x1)**2 + (y2 - y1)**2)
                        # Movements longer than 0.05 (normalized screen
                        # units) are treated as swipes, otherwise taps.
                        is_distance_larger_than_0_05 = distance > 0.05
                        if is_distance_larger_than_0_05:
                            action = "swipe on screen"
                        else:
                            action = "tap"
            except Exception as e:
                # NOTE(review): "acrtion" typo left as-is (runtime string).
                print("Error in extracting acrtion", e, step)
            actions.append(action)
        return actions
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/agent_eval/agent_eval/domains/unify_webarena.py | agent_eval/agent_eval/domains/unify_webarena.py | # %%
import agent_eval
from agent_eval.domains.webarena import extract_trajectory_info, extract_eval_results
import json
import os
# %%
# raw_dataset_path = "/home/<user>/code/WebArena/webarena_traj/102023_release_v2/919_gpt4_8k_cot"
# output_dataset_path = "/home/<user>/data/GUI_Proj/unified_datasets/webarena-gpt4cot-release2"
# raw_dataset_path ="/home/<user>/code/WebArena/webarena_traj/102023_release_v2/919_gpt35_16k_cot"
# output_dataset_path = "/home/<user>/data/GUI_Proj/unified_datasets/webarena-gpt35cot-release2"
# Convert one raw WebArena trajectory dump into the unified dataset layout
# (trajectory_log.json + images/ + evals/ + captions/). Paths are machine-
# specific; edit the two below before running.
raw_dataset_path = "/home/<user>/code/WebArena/webarena_traj/gpt4_v1"
output_dataset_path = (
    "/home/<user>/data/GUI_Proj/unified_datasets/webarena-gpt4cot-release1"
)
# Refuse to overwrite an existing unified dataset.
assert not os.path.exists(output_dataset_path)
os.makedirs(output_dataset_path)
os.makedirs(os.path.join(output_dataset_path, "images"))
os.makedirs(os.path.join(output_dataset_path, "evals"))
os.makedirs(os.path.join(output_dataset_path, "captions"))
# %% [markdown]
# ### Record WebArena's Eval Results
# %%
# Parse PASS/FAIL per task id from the merged evaluation log and write them
# as annotation records attributed to the "WebArena" evaluator.
log_str = open(os.path.join(raw_dataset_path, "merged_log.txt")).read()
eval_results = extract_eval_results(log_str)
formated_eval_results = []
for uid, eval_result in eval_results.items():
    formated_eval_results.append(
        {
            "dataset_path": output_dataset_path.split("/")[-1],
            "task_idx": uid,
            "task_uid": uid,
            "user_uid": "WebArena",
            "annotation": "Success" if eval_result else "Failure",
            "comment": "",
        }
    )
with open(os.path.join(output_dataset_path, "evals", "gt.jsonl"), "w") as file:
    for item in formated_eval_results:
        # Convert each dictionary to a JSON string and write it to a file
        json_string = json.dumps(item)
        file.write(json_string + "\n")
# %% [markdown]
# ### Get the Trajectory Log and Images
# %%
all_files = os.listdir(raw_dataset_path)
trajs = [f for f in all_files if f.endswith(".html")]
# %%
# Interactive sanity checks on one rendered trajectory (notebook cells).
html_content_str = open(os.path.join(raw_dataset_path, trajs[0])).read()
# %%
extract_trajectory_info(html_content_str)
# %%
trajs[0]
# %%
html_content_str = open(os.path.join(raw_dataset_path, "render_40.html")).read()
info = extract_trajectory_info(html_content_str)
# %%
info
# %%
from tqdm import tqdm
import re
# Build the unified trajectory log, saving each step's screenshot as
# "<traj_id>_<step_idx>.png" under images/.
traj_log = []
for traj_name in tqdm(trajs):
    html_content_str = open(os.path.join(raw_dataset_path, traj_name)).read()
    traj_id = traj_name.replace(".html", "").replace("render_", "")
    info = extract_trajectory_info(html_content_str)
    # save the image
    images = info["images"]
    # Screenshots and parsed actions must line up one-to-one; otherwise the
    # trajectory is malformed and is dropped.
    if len(images) != len(info["actions"]):
        print(
            f"{traj_id} has {len(images)} images but {len(info['actions'])} actions | skip"
        )
        continue
    for img_idx, img in enumerate(images):
        img_name = f"{traj_id}_{img_idx}.png"
        img.save(os.path.join(output_dataset_path, "images", img_name))
    # The first <pre> block of the page holds the task config dump.
    match = re.search(r"<pre>(.*?)</pre>", html_content_str, re.DOTALL)
    config = match.group(1) if match else None
    this_log = {
        "uid": traj_id,
        "intent": info["intent"],
        "response": info["response"],  # TODO
        "other": {"config": config},
        "steps": [],
    }
    for step_idx, (img, action) in enumerate(zip(images, info["actions"])):
        img_name = f"{traj_id}_{step_idx}.png"
        this_log["steps"].append({"img": img_name, "other": {"raw_action": action}})
    traj_log.append(this_log)
with open(os.path.join(output_dataset_path, "trajectory_log.json"), "w") as file:
    json.dump(traj_log, file, indent=2)
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/agent_eval/agent_eval/domains/__init__.py | agent_eval/agent_eval/domains/__init__.py | python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false | |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/agent_eval/agent_eval/domains/webarena.py | agent_eval/agent_eval/domains/webarena.py | import base64
from bs4 import BeautifulSoup
from io import BytesIO
from PIL import Image
import os
import json
import re
import random
def extract_eval_results(merged_log: str):
    """Parse WebArena per-task pass/fail results out of a merged eval log.

    Each relevant line contains the marker ``[Result]``, a ``(PASS)`` or
    ``(FAIL)`` tag, and the task config path ending in ``/<idx>.json``.

    Args:
        merged_log: Full text of the merged log file.

    Returns:
        dict mapping task index (string, e.g. ``"42"``) to True when the
        task passed. Result lines without a ``/<idx>.json`` path are skipped.
    """
    results = {}
    for line in merged_log.splitlines():
        if '[Result]' not in line:
            continue
        # The task index is embedded in the config file name: ".../<idx>.json".
        match = re.search(r'/(\d+)\.json', line)
        if match:
            # No intermediate status string needed: pass/fail is simply the
            # presence of the "(PASS)" tag on the line.
            results[match.group(1)] = '(PASS)' in line
    return results
def extract_intent_from_text(html_content: str):
    """Return the user intent embedded in the rendered HTML text.

    Scans line by line for the first line that, after trimming whitespace,
    starts with "intent:"; the remainder of that line (stripped) is the
    intent. Returns "" when no such line exists.
    """
    for raw_line in html_content.splitlines():
        candidate = raw_line.strip()
        if candidate.startswith("intent:"):
            return candidate.replace("intent:", "").strip()
    return ""
def extract_trajectory_info(html_content: str):
    """Parse one rendered WebArena trajectory HTML page.

    Returns a dict with:
        intent: the "intent:" line from the raw text ("" if absent).
        images: PIL images decoded from inline base64 ``<img>`` tags.
        response: the argument of the final ``stop [...]`` action, or None.
        actions: every parsed action string, in document order.
    """
    # The bot's final answer is rendered as a `stop [...]` parsed action.
    parsed_action_search = re.search(r"<div class='parsed_action'.*?><pre>stop \[(.*?)\]</pre></div>", html_content, re.DOTALL)
    # Extract the content inside the brackets
    response = parsed_action_search.group(1) if parsed_action_search else None
    soup = BeautifulSoup(html_content, "html.parser")
    # The intent is stored as a plain "intent:" text line, not in the DOM.
    intent = extract_intent_from_text(html_content)
    # Find all image tags with src attributes
    image_tags = soup.find_all("img", src=True)
    # Keep only inline data-URI images; decode their base64 payloads.
    images = []
    for img in image_tags:
        if img["src"].startswith("data:image/"):
            # Payload follows the comma in "data:image/...;base64,<data>".
            img_data = img["src"].split(",")[1]
            byte_data = base64.b64decode(img_data)
            image = Image.open(BytesIO(byte_data))
            images.append(image)
    # Extract parsed actions
    actions = re.findall(r"<div class='parsed_action'.*?<pre>(.*?)</pre>", html_content, re.DOTALL)
    return {"intent": intent, "images": images, "response": response, "actions": actions}
class WebArenaData:
    """Accessor for rendered WebArena trajectories plus captions and labels.

    Reads captions, ground-truth results and dev-split ids (JSON files with
    "success"/"fail" id lists) at construction; trajectory HTML pages are
    parsed lazily per request.
    """
    def __init__(self, trajectory_root_path: str, caption_data_path: str, eval_dps_path: str, gt_results_path: str, configs_path: str = None) -> None:
        self.trajectory_root_path = trajectory_root_path
        with open(caption_data_path, 'r') as f:
            self.captions = json.load(f)
        with open(gt_results_path, 'r') as f:
            self.gt_results = json.load(f)
        # dev_ids holds the evaluation split: {"success": [...], "fail": [...]}.
        with open(eval_dps_path, 'r') as f:
            self.dev_ids = json.load(f)
        self.config_path = configs_path
    def get_traj_info(self, idx):
        """Parse trajectory ``render_<idx>.html`` and attach captions/config.

        Raises AssertionError when caption and image counts disagree.
        """
        html_file = f"{self.trajectory_root_path}/render_{idx}.html"
        with open(html_file, 'r') as f:
            html_content = f.read()
        info = extract_trajectory_info(html_content)
        # Captions are keyed by the rendered file name.
        info['captions'] = self.captions[f'render_{idx}.html']
        assert len(info['captions']) == len(info['images'])
        info['traj_name'] = f'render_{idx}.html'
        if self.config_path:
            with open(self.config_path + f"/{idx}.json", 'r') as f:
                config = json.load(f)
            info['config'] = config
        return info
    def sample_traj_id(self, success_only=False, fail_only=False):
        """Sample one trajectory id from the dev split.

        With neither flag set, the success/fail pool is chosen at random.
        NOTE(review): when both flags are set, success wins; fail_only is
        only honored implicitly via the default random choice being skipped.
        """
        if not success_only and not fail_only:
            success_only = random.choice([True, False])
        if success_only:
            id = random.choice(self.dev_ids['success'])
        else:
            id = random.choice(self.dev_ids['fail'])
        return id
    def get_all_samples(self):
        """Return all dev (id, is_success) pairs, successes first."""
        samples = []
        for id in self.dev_ids['success']:
            samples.append((id, True))
        for id in self.dev_ids['fail']:
            samples.append((id, False))
        return samples | python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/agent_eval/agent_eval/captioner/annotate_screenshots.py | agent_eval/agent_eval/captioner/annotate_screenshots.py | # %%
from agent_eval.clients import GPT4V_Client
from PIL import Image
import os, time
import random
from tqdm import tqdm
import json
from langdetect import detect as lang_detect
import pytesseract
random.seed(42)
# %%
def get_cap(img):
    """Caption a single screenshot with the module-level GPT-4V client.

    Args:
        img: Either a filesystem path to an image or an already-loaded
            PIL image.

    Returns:
        The client's chat output on success, or one of the sentinel strings
        "LANG-ERROR" (screenshot text detected as non-English) or "ERROR"
        (API call failed).
    """
    # Bug fix: img_path was previously unbound when a PIL image was passed
    # in directly, raising NameError at the "Website" check below.
    img_path = ""
    if isinstance(img, str):
        img_path = img
        img = Image.open(img)
    ocr_text = pytesseract.image_to_string(img)
    # Only "Website" screenshots get a language check; everything else is
    # assumed to be English.
    if "Website" in img_path:
        lang_code = lang_detect(ocr_text)
    else:
        lang_code = "en"
    prompt = "You are an advanced GUI captioner. Please describe this GUI interface in details and don't miss anything. Your response should be hierarchical and in Markdown format. Don't do paraphrase. Don't wrap your response in a code block."
    if lang_code != "en":
        return "LANG-ERROR"
    try:
        out = client.one_step_chat(prompt, img)
    except Exception as e:
        print(e)
        return "ERROR"
    return out
# GPT-4V client used by get_cap(); the key is scrubbed from this dump.
client = GPT4V_Client(api_key="<removed>", max_tokens=2048)
# (directory, sample_count) pairs; -1 means "take every file".
screenshot_source = [("/home/<user>/data/GUI_Proj/gui_cap_dataset/images", -1)]
images_to_cap = []
for path, num in screenshot_source:
    if num == -1:
        images_to_cap += [os.path.join(path, x) for x in os.listdir(path)]
    else:
        images_to_cap += random.sample(
            [os.path.join(path, x) for x in os.listdir(path)], num
        )
# NOTE(review): this list is re-initialized again below before the
# thread-pool run; this first assignment is effectively dead.
caps = []
def process_image(img):
    """Caption one image, mapping failures to sentinel captions.

    Returns a dict {"img": <input>, "cap": <caption or sentinel>} where the
    sentinel is "ERROR" (API failure, after a 10s back-off), "LANG-ERROR"
    (non-English screenshot) or "EXCEPTION" (unexpected error).
    """
    try:
        result = get_cap(img)
        # Bug fix: get_cap returns the sentinel *strings* "ERROR" /
        # "LANG-ERROR" directly; the old `get_cap(img)[0]` indexed them into
        # "E"/"L" first, so the sentinel checks below could never match and
        # bogus one-letter captions leaked through.
        if result == "ERROR":
            time.sleep(10)  # back off before the pool moves on
            return {"img": img, "cap": "ERROR"}
        if result == "LANG-ERROR":
            return {"img": img, "cap": "LANG-ERROR"}
        # Successful chat output: the caption text is the first element.
        return {"img": img, "cap": result[0]}
    except Exception:
        return {"img": img, "cap": "EXCEPTION"}
def save_caps(caps):
    """Persist the collected caption records as pretty-printed JSON."""
    out_path = "/home/<user>/data/GUI_Proj/gui_cap_dataset/final.json"
    with open(out_path, "w") as handle:
        handle.write(json.dumps(caps, indent=2))
caps = []
from concurrent.futures import ThreadPoolExecutor, as_completed
# Adjust the max_workers based on your needs and system capabilities
from time import sleep
# Fan the captioning calls out over a thread pool (the work is I/O-bound
# API requests) and keep only successful captions.
with ThreadPoolExecutor(max_workers=30) as executor:
    future_to_img = {executor.submit(process_image, img): img for img in images_to_cap}
    for future in tqdm(as_completed(future_to_img), total=len(images_to_cap)):
        img = future_to_img[future]
        try:
            result = future.result()
            # Drop sentinel results; only real captions are saved.
            if result["cap"] not in ["ERROR", "LANG-ERROR", "EXCEPTION"]:
                caps.append(result)
        except Exception as exc:
            print(f"{img} generated an exception: {exc}")
save_caps(caps)
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/agent_eval/agent_eval/captioner/prepare_caps_sub.py | agent_eval/agent_eval/captioner/prepare_caps_sub.py | import os
import json
from tqdm import tqdm
from gradio_client import Client
from collections import defaultdict
def save(this_obj, file_path):
    """Serialize *this_obj* to *file_path* as compact JSON."""
    with open(file_path, "w") as handle:
        handle.write(json.dumps(this_obj))
def predict(img_path, client):
    """Run the captioner Gradio endpoint on one image path.

    *img_path* feeds the 'text' Textbox component of the Gradio app.
    """
    return client.predict(img_path, api_name="/predict")
def is_last_two(file_path, total_images_per_traj):
    """Return True when *file_path* is one of the final two frames of its trajectory.

    File paths look like "<traj>_<idx>.<ext>": the trajectory key is
    everything before the last underscore, the frame index follows it, and
    *total_images_per_traj* maps trajectory key -> frame count.
    """
    traj_key, _, tail = file_path.rpartition("_")
    frame_idx = int(tail.split(".")[0])
    return frame_idx >= total_images_per_traj[traj_key] - 2
def caption_with_ocr_sft_model(directory, save_path, client, total, worker_idx, only_last_two):
    """Caption this worker's shard of the screenshots in *directory*.

    Work is sharded round-robin: this worker handles every file whose list
    position is congruent to *worker_idx* modulo *total*. Results are keyed
    by file name (minus the 4-char extension) and checkpointed to
    *save_path* every 5 files, plus once at the end.

    Args:
        directory: Folder of screenshots named "<traj>_<idx>.<ext>".
        save_path: JSON file receiving the {file_name: caption} mapping.
        client: Gradio client wrapping the captioner server.
        total: Total number of workers.
        worker_idx: This worker's shard index in [0, total).
        only_last_two: When True, only the last two frames of each
            trajectory are captioned; earlier frames get the marker
            "SKIP, NOT LAST TWO".
    """
    files = [os.path.join(directory, f) for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))]
    # Per-trajectory frame counts, needed by is_last_two().
    total_images_per_traj = defaultdict(int)
    for file in files:
        traj_name = "_".join(file.split("_")[:-1])
        idx = int(file.split("_")[-1].split(".")[0])
        total_images_per_traj[traj_name] = max(idx+1, total_images_per_traj[traj_name])
    # Keep only this worker's shard.
    files = [files[i] for i in range(len(files)) if i % total == worker_idx]
    outs = {}
    for idx, file_path in enumerate(tqdm(files)):
        try:
            if only_last_two and not is_last_two(file_path, total_images_per_traj):
                o = "SKIP, NOT LAST TWO"
            else:
                o = predict(file_path, client)
            file_name = os.path.basename(file_path)[:-4]
            outs[file_name] = o
            # Periodic checkpoint so a crash loses at most a few captions.
            if idx % 5 == 0:
                save(outs, save_path)
        except Exception as exc:
            # Bug fix: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit and hid the actual error.
            print(f"Error processing {file_path}: {exc}")
    save(outs, save_path)
def main(args):
    """Connect to the captioner server and caption this worker's shard.

    Blocks until the Gradio server at localhost:<args.port> accepts a
    connection, then delegates to caption_with_ocr_sft_model(). The output
    path is suffixed with the worker index so shards don't overwrite each
    other.
    """
    from time import sleep
    while True:
        try:
            client = Client(f"http://localhost:{args.port}")
            break
        except Exception:
            # Bug fix: a bare `except:` also caught KeyboardInterrupt,
            # making this wait loop impossible to break with Ctrl-C.
            print("Waiting for server to start...")
            sleep(5)
    images_path = args.images_path
    # Each worker writes its own shard file: foo.json -> foo_<idx>.json.
    save_path = args.output_path.replace(".json", f"_{args.idx}.json")
    total = args.total
    idx = args.idx
    caption_with_ocr_sft_model(images_path, save_path, client, total, idx, args.only_last_two)
import argparse
if __name__ == "__main__":
    # CLI for one sharded captioning worker; --only-last-two becomes
    # args.only_last_two (argparse converts dashes to underscores).
    parser = argparse.ArgumentParser()
    parser.add_argument("--images_path", type=str)
    parser.add_argument("--output_path", type=str)
    parser.add_argument("--port", type=int)
    parser.add_argument("--idx", type=int)
    parser.add_argument("--total", type=int)
    parser.add_argument("--only-last-two", action="store_true")
    args = parser.parse_args()
main(args) | python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/agent_eval/agent_eval/captioner/utils.py | agent_eval/agent_eval/captioner/utils.py | def detect_repetition() | python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/agent_eval/agent_eval/captioner/__init__.py | agent_eval/agent_eval/captioner/__init__.py | python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false | |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/agent_eval/agent_eval/captioner/captioner_server.py | agent_eval/agent_eval/captioner/captioner_server.py | from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
__version__,
GenerationConfig,
)
from PIL import Image
import gradio as gr
import argparse
import tempfile
from PIL import Image
import easyocr
import torch
# The Qwen-VL custom model code is pinned to this transformers release.
assert (
    __version__ == "4.32.0"
), "Please use transformers version 4.32.0, pip install transformers==4.32.0"
# Module-level OCR reader shared by all requests.
reader = easyocr.Reader(
    ["en"]
)  # this needs to run only once to load the model into memory
def get_easy_text(img_file):
    """OCR *img_file* with the module-level EasyOCR reader.

    Paragraph-mode results (a list of strings) are joined with newlines;
    any non-list result is returned unchanged.
    """
    result = reader.readtext(img_file, detail=0, paragraph=True)
    return "\n".join(result) if isinstance(result, list) else result
model_name = "DigitalAgent/Captioner"
# Prefer GPU when available; the model runs in fp16 either way.
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")
model = (
    AutoModelForCausalLM.from_pretrained(
        model_name, trust_remote_code=True
    ).to(device)
    .eval()
    .half()
)
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
# Fixed sampling configuration used for every caption request.
generation_config = GenerationConfig.from_dict(
    {
        "chat_format": "chatml",
        "do_sample": True,
        "eos_token_id": 151643,
        "max_new_tokens": 2048,
        "max_window_size": 6144,
        "pad_token_id": 151643,
        "repetition_penalty": 1.2,
        "top_k": 0,
        "top_p": 0.3,
        "transformers_version": "4.31.0",
    }
)
def generate(image: Image):
    """Caption one screenshot with the Qwen-VL captioner.

    The PIL image is written to a temporary JPEG so that both EasyOCR and
    the model's list-format input can consume it by path; the OCR output is
    embedded in the text prompt.
    """
    with tempfile.NamedTemporaryFile(suffix=".jpg", delete=True) as tmp:
        image.save(tmp.name)
        ocr_result = get_easy_text(tmp.name)
        caption_request = f"Please describe the screenshot above in details.\nOCR Result:\n{ocr_result}"
        query = tokenizer.from_list_format(
            [{"image": tmp.name}, {"text": caption_request}]
        )
        response, _ = model.chat(
            tokenizer, query=query, history=[], generation_config=generation_config
        )
    return response
def main(port, share):
    """Serve the captioner as a Gradio app on *port* (optionally shared)."""
    interface = gr.Interface(
        fn=generate,
        inputs=[gr.Image(type="pil")],
        outputs="text",
        concurrency_limit=1,
    )
    interface.queue().launch(server_port=port, share=share)
if __name__ == "__main__":
    # CLI: --port for the Gradio server, --share for a public link.
    parser = argparse.ArgumentParser()
    parser.add_argument("--port", type=int)
    parser.add_argument("--share", action="store_true", default=False)
    args = parser.parse_args()
    main(args.port, args.share)
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/agent_eval/agent_eval/eval/prompts.py | agent_eval/agent_eval/eval/prompts.py | def build_obs_simplifier_prompt(cap, intent, response) -> str:
prompt = f"""Given the following user question and context, extract part of the context that is unbiased, so that using that text alone would be good context for providing an unbiased answer to the user query.
**User Query**: The bot responded with "{response}", does it execute this task "{intent}" successfully?
**Full Context**:
```md
{cap}
```
Start your answer with “Unbiased text context (includes all relevant content):"
"""
return prompt
def build_naive_last_frame_eval_prompt(cap, intent, response) -> str:
    """Build the single-frame judge prompt (no system message variant).

    Args:
        cap: Caption of the bot's final screenshot.
        intent: The user's task intent.
        response: The bot's final response; falsy values render as "None".

    Returns:
        A prompt asking the judge for a JSON verdict with keys "thoughts"
        and "status" (one of task-possible-bot-success /
        task-possible-bot-fail / task-impossible).
    """
    prompt = f"""**User Intent**: {intent}
**Bot's Final Observation**:
```md
{cap}
```
**Bot's response to the user**: {response if response else "None"}.
---
Based on the provided user intent, the caption of bot's final observation and its response, did the bot successfully execute the task? Please reason step by step.
Note:
- The trajectory descriptions are essentially noisy captions of the screenshots captured during bot's execution. And you should infer what actions the bot took yourself.
- You should categorize the execution into one of the three status:
    - task-possible-bot-success: The bot successfully executed the task.
    - task-possible-bot-fail: The bot failed to execute the task.
    - task-impossible: The task is impossible to execute in nature given the user intent and the environment. For example, if the user wants to buy a product that does not exist in the environment. You should carefully distinguish this from bot-fail.
Format your response as a valid json:
{{
    "thoughts": "{{Your thoughts here, discuss if and how the trajectory progress towards the task and then reason about the final status. You should provide an explicit reason when determining the final status.}}",
    "status": "task-possible-bot-success" or "task-possible-bot-fail" or "task-impossible"
}}"""
    return prompt
def build_naive_multi_frame_eval_prompt(caps, intent, response, num_frames=3) -> str:
    """Build the multi-frame judge prompt from the trailing captions.

    Generalized: the frame window, previously hard-coded to the last 3
    captions, is now the keyword ``num_frames`` (default 3, preserving the
    old behavior), mirroring build_naive_multi_frame_eval_prompt_v2. The
    window is clamped to len(caps).

    Args:
        caps: Per-step screenshot captions in execution order.
        intent: The user's task intent.
        response: The bot's final response; falsy values render as "None".
        num_frames: How many trailing captions to include (default 3).

    Returns:
        A prompt asking the judge for a JSON verdict with keys "thoughts"
        and "status".
    """
    num_frames = min(num_frames, len(caps))
    captions_str = "\n".join(
        [f"{idx+1}:\n```md\n{caption}\n```\n" for idx, caption in enumerate(caps[-num_frames:])]
    )
    prompt = f"""**User Intent**: {intent}
**Bot's observation through execution**:
{captions_str}
**Bot's response to the user**: {response if response else "None"}.
---
Based on the provided user intent, bot's observation in captions and its response, did the bot successfully execute the task? Please reason step by step.
Note:
- You should categorize the execution into one of the three status:
    - task-possible-bot-success: The bot successfully executed the task.
    - task-possible-bot-fail: The bot failed to execute the task.
    - task-impossible: The task is impossible to execute in nature given the user intent and the environment. For example, if the user wants to buy a product that does not exist in the environment. You should carefully distinguish this from bot-fail.
Format your response as a valid json:
{{
    "thoughts": "{{Your thoughts here, discuss if and how the trajectory progress towards the task and then reason about the final status. You should provide an explicit reason when determining the final status.}}",
    "status": "task-possible-bot-success" or "task-possible-bot-fail" or "task-impossible"
}}"""
    return prompt
def build_naive_last_frame_eval_prompt_v2(cap, intent, response) -> tuple[str, str]:
    """Build the v2 single-frame judge prompt plus its system message.

    Unlike v1, the judging instructions live in a separate system message.
    Returns (prompt, system_msg).

    NOTE(review): the literals below contain typos ("classfiy",
    "The user:'s intent", "in natural") — they are runtime prompt strings
    and are deliberately left unchanged here.
    """
    system_msg = (
        "You are an expert in assessing the performance of a web navigation bot, "
        "whose role is to help a human user navigate a website to complete a task. "
        "You are given the user's intent, the web page snapshots "
        "captions during the bot's execution and the bot's response to the user. "
        "Your goal is to classfiy the bot's execution in the following three cases: "
        "\n1. `task-possible-bot-success`: The bot successfully executed an achievable "
        " task. "
        "\n2. `task-possible-bot-fail`: The bot failed to execute an achievable task. "
        "\n3. `task-impossible`: The task is unachievable in natural under the "
        "condition. For example, the user wants to buy a product that does not exist "
        "in a shopping website. "
    )
    prompt = f"""The user:'s intent {intent}
The last snapshot of the web page:
```md
{cap}
```
Bot response to the user: {response if response else "None"}.
Please reason step by step on what actions the bot may have taken, whether the final web page meets the user's requirement, etc. Note that the bot response may not be necessary for intents other than information seeking. Always try to provide an explicit reason to justify your prediction about the bot's execution status.
Format your thoughts and the final judgement in a valid json format:
{{
    "thoughts": "YOUR THOUGHTS AND REASONING PROCESS",
    "status": "task-possible-bot-success" or "task-possible-bot-fail" or "task-impossible"
}}"""
    return prompt, system_msg
def build_naive_multi_frame_eval_prompt_v2(
    caps, intent, response, num_frames=3
) -> tuple[str, str]:
    """Build the v2 multi-frame judge prompt plus its system message.

    Includes the last ``num_frames`` captions (clamped to len(caps)) and
    adds judging hints to the system message. Returns (prompt, system_msg).

    NOTE(review): the literals below contain typos ("classfiy",
    "The user:'s intent", "in natural") — runtime prompt strings, left
    unchanged.
    """
    num_frames = min(num_frames, len(caps))
    # Grammar of the header line depends on how many frames are shown.
    frames_mention = (
        "last screen snapshot"
        if num_frames == 1
        else f"last {num_frames} screen snapshots (from earlier to later)"
    )
    captions_str = "\n".join(
        [
            f"{idx+1}:\n```md\n{caption}\n```\n"
            for idx, caption in enumerate(caps[-num_frames:])
        ]
    )
    system_msg = (
        "You are an expert in assessing the performance of a web navigation bot, "
        "whose role is to help a human user navigate a website to complete a task. "
        "You are given the user's intent, the web page snapshots "
        "captions during the bot's execution and the bot's response to the user. "
        "Your goal is to classfiy the bot's execution in the following three cases: "
        "\n1. `task-possible-bot-success`: The bot successfully executed an achievable "
        " task. "
        "\n2. `task-possible-bot-fail`: The bot failed to execute an achievable task. "
        "\n3. `task-impossible`: The task is unachievable in natural under the "
        "condition. For example, the user wants to buy a product that does not exist "
        "in a shopping website.\n\n"
        "Please reason step by step before giving the final judgement. Hints:\n"
        "(1) Infer what actions the bot may have taken by inspecting the difference between successive snapshots. "
        "(2) Take close look at the final snapshot in whether it meets the user's requirement or not. "
        "(3) The bot response may not be necessary for intents other than information seeking. "
        "(4) Always try to provide an explicit reason to justify your prediction about the bot's execution status. "
        "\n\n"
        "Format your thoughts and the final judgement in a valid json format: "
        '{"thoughts": "YOUR THOUGHTS AND REASONING PROCESS", '
        '"status": "task-possible-bot-success" or "task-possible-bot-fail" or "task-impossible"}'
    )
    prompt = f"""The user:'s intent: {intent}
The {frames_mention}:
```
{captions_str}
```
Bot response to the user: {response if response else "None"}.
Remember to follow all the hints and format your thoughts and the final judgement in a valid json format:
{{
    "thoughts": "YOUR THOUGHTS AND REASONING PROCESS",
    "status": "task-possible-bot-success" or "task-possible-bot-fail" or "task-impossible"
}}
"""
    return prompt, system_msg
def build_obs_simplifier_prompt_v2(cap, intent, response) -> tuple[str, str]:
    """Build the v2 caption-summarization prompt plus its system message.

    Asks the model to extract the parts of a snapshot caption most relevant
    to the user intent. Returns (prompt, system_msg).

    Bug fix: the return annotation previously said ``-> str`` although the
    function returns a 2-tuple, matching its `_v2` siblings.

    Args:
        cap: Caption of the webpage snapshot.
        intent: The user's task intent.
        response: Unused; kept for signature parity with the other builders.

    NOTE(review): the stray trailing quote in `above."` is part of the live
    prompt string and is left unchanged.
    """
    system_msg = (
        "You are an expert in extracting and summarizing key information from a "
        "web page. You are given the user's intent, and a web page snapshot "
        "caption, try to extract the most relevant information with the user intent "
        "from the caption."
    )
    prompt = f"""User intent: {intent}
Caption of the webpage snapshot:
```md
{cap}
```
Give the summarization in Markdown format similar to the caption above."
"""
    return prompt, system_msg
def build_naive_last_frame_4v_eval_prompt(intent, response) -> tuple[str, str]:
    """Build the GPT-4V judge prompt (screenshot attached as an image).

    Unlike the caption-based builders, this takes no caption: the final
    snapshot is supplied to the model as an image alongside the prompt.
    Returns (prompt, system_msg).

    NOTE(review): typos in the runtime strings ("claasify",
    "The user:'s intent", "in natural") are left unchanged.
    """
    system_msg = (
        "You are an expert in assessing the performance of a web navigation bot, "
        "whose role is to help a human user navigate a website to complete a task. "
        "You are given the user's intent, the web page snapshot at the end of bot's execution and the bot's response to the user. "
        "Your goal is to claasify the bot's execution in the following three cases: "
        "\n1. `task-possible-bot-success`: The bot successfully executed an achievable "
        " task. "
        "\n2. `task-possible-bot-fail`: The bot failed to execute an achievable task. "
        "\n3. `task-impossible`: The task is unachievable in natural under the "
        "condition. For example, the user wants to buy a product that does not exist "
        "in a shopping website. \n"
    )
    prompt = f"""The user:'s intent {intent}
The last snapshot of the web page is shown in the image.
Bot response to the user: {response if response else "None"}.
Please reason step by step on what actions the bot may have taken, whether the final web page meets the user's requirement, etc. Note that the bot response may not be necessary for intents other than information seeking. Always try to provide an explicit reason to justify your prediction about the bot's execution status.
Format your thoughts and the final judgement in a valid json format:
{{
    "thoughts": "YOUR THOUGHTS AND REASONING PROCESS",
    "status": "task-possible-bot-success" or "task-possible-bot-fail" or "task-impossible"
}}
Do not use code blocks like ``` in your response but instead start your response with {{
"""
    return prompt, system_msg
def build_final_eval_v1_prompt(cap, intent, response) -> tuple[str, str]:
    """Build the few-shot final-state judge prompt plus its system message.

    This version only takes the final-state caption and the bot's response
    (no intermediate frames); the system message carries the task
    definition plus worked response examples. Returns (prompt, system_msg).

    NOTE(review): system_msg is a plain (non-f) string, so the ``{{ }}``
    sequences below are emitted literally as double braces — left unchanged
    since that is what the deployed prompt contains.
    """
    system_msg = """You are a GUI trajectory evaluator. Given an user instruction and a description of the final state of the GUI, your goal is to classify the bot's execution into the following cases:
1. "task-impossible": The task is unachievable in nature. For example, if the goal is to buy a product that does not exist in a shopping website.
2. "task-possible-bot-success": The bot successfully executed an achievable task.
3. "task-possible-bot-fail": The bot failed to execute an achievable task.
Format your thoughts and the final judgement in json format:
{{
    "thoughts": "<your thoughts and reasoning process>",
    "status": "task-impossible", "task-possible-bot-success", or "task-possible-bot-fail"
}}
Following are some response examples:
{{
    "thoughts": "The goal is "Buy a sandwich at Burger King's website". The last snapshot shows that the bot is at the checkout page of Burger King with a sandwich in the cart, which matches the user's goal. So the bot successfully executed the task.",
    "status": "task-possible-bot-success",
}}
{{
    "thoughts": "The goal is "Cancel the 8:00am clock". The last snapshot shows that the bot is at the alarm setting page. Since there's no alarm set at 8:00am from the snapshot, the bot has already finished the task. So the bot successfully executed the task.",
    "status": "task-possible-bot-success",
}}
{{
    "thoughts": "The goal is "What's the weather today in New York?". The last snapshot shows that the weather in New York in 70F. The bot's answer is also "70F". So the bot successfully executed the task.",
    "status": "task-possible-bot-success",
}}
{{
    "thoughts": "The goal is "Find John Doe's phone number. The last snapshot shows that the bot is at John Doe's contact page. However, since John's phone number is not included in his contact page, this task is impossible to execute in nature."
    "status": "task-impossible",
}}
{{
    "thoughts": "The goal is to send a post titled \"Sharing some photo's about my cat\" to the subreddit \"cat\". The last snapshot shows the bot at the dashboard of Reddit. Since there's no evidence that the bot completed the task, the bot failed to execute the task.",
    "status": "task-possible-bot-fail",
}}
{{
    "thoughts": "The goal is to send a WhatsApp message to Sam saying "Do you want to hang out?". The last snapshot shows the bot at the message page of Sam and sent the message "Do you want to stick around? around?". Since the bot did not send the correct message, the bot failed to execute the task.",
    "status": "task-possible-bot-fail",
}}
"""
    prompt = f"""Goal: {intent}
The final state of the GUI:
```md
{cap}
```
Bot response to the user: {response if response else "None"}.
"""
    return prompt, system_msg
def build_final_eval_v2_per_state_info_extraction_prompt(
    history, cap, prev_action, intent
) -> tuple[str, str]:
    """Build the (user, system) prompt pair for per-state info extraction.

    The summarizer LM is asked to fold the most relevant information from the
    latest action/state pair into a running trajectory description.

    :param history: Running trajectory summary so far, or None on the first step.
    :param cap: Markdown caption of the current GUI state (post-action).
    :param prev_action: The last action the agent took.
    :param intent: The user's task instruction.
    :return: (user_prompt, system_prompt) strings.
    """
    # NOTE: the original template repeated the "Set an alarm at 4am" in-context
    # example twice verbatim (copy-paste accident); the duplicate is removed.
    sys_prompt = """You are a GUI agent trajectory summarizer. Given an user instruction, descriptions of bot's trajectory, its last action, the current state of the GUI post-action, your goal is to update the trajectory description with the most relevant information in this action-state pair.
Format your response into two lines as shown below
Thoughts: <your thoughts and reasoning process>
Info: <the information you want to add>
Following are some examples:
---
Instruction: Buy a sandwich at Burger King's website.
History: None
Last Action: <Omitted>
Current State: <Omitted>
Response:
Thoughts: The current state shows that it has successfully made an order at Burger King.
Info: The bot successfully made an order.
---
Instruction: Set an alarm at 4am.
History:
1. The bot is at the homepage of Android.
2. The bot is at the clock app.
3. The bot is at the homepage.
Last Action: <Omitted>
Current State: <Omitted>
Response:
Thoughts: The current state shows the bot at the Contact app, which is not relevant to the instruction.
Info: Irrelevant.
---
Instruction: Write a post titled "Sharing some photo's about my cat" to the subreddit "cat".
History:
1. The bot is at subreddit "cat".
2. The bot opens a page about creating a post.
Last Action: <Omitted>
Current State: <Omitted>
Response:
Thoughts: From the current state, the bot has typed "Sharing some photo's about my cat" in the body part of the post.
Info: The bot types "Sharing some photo's about my cat" in the body part of the post.
"""
    # A leading newline keeps a multi-line history visually separated from
    # the "History:" tag.
    history_str = f"\n{history}" if history is not None else "None"
    prompt = f"""Instruction: {intent}
History: {history_str}
Last Action: {prev_action}
Current State:
```md
{cap}
```"""
    return prompt, sys_prompt
def extract_content(text, start_tag):
    """Return the content following *start_tag* on the first matching line.

    Scans *text* line by line and, for the first line that begins with
    *start_tag* (e.g. "Info:", "Thoughts:" or "Status:"), returns the rest
    of that line with surrounding whitespace stripped.

    :param text: Multi-line string to search.
    :param start_tag: Prefix identifying the line of interest.
    :return: The stripped content after the tag, or "" if no line matches.
    """
    for line in text.split("\n"):
        if line.startswith(start_tag):
            return line[len(start_tag):].strip()
    # No matching line: callers treat "" as "absent".
    return ""
def build_final_eval_v2_final_prompt(
    cap, intent, response, history, last_actions
) -> tuple[str, str]:
    """Build the (user, system) prompt pair for the v2 final success/failure judge.

    :param cap: Markdown caption of the final GUI state.
    :param intent: The user's task instruction.
    :param response: The bot's final response to the user (falsy -> "None").
    :param history: Unused here; kept for interface compatibility with callers.
    :param last_actions: Pre-formatted, numbered action-history string.
    :return: (user_prompt, system_prompt) strings.
    """
    # NOTE: the original format spec had a stray trailing '"' after the
    # Thoughts placeholder; removed so it matches the other judge prompts.
    system_msg = """You are a GUI trajectory evaluator. Given a user instruction, descriptions of bot's history trajectory and the final state of the GUI, your goal is to classify the bot's execution into the following cases:
1. "success": The bot successfully completed the task.
2. "failure": The bot hasn't completed the task.
Format your response into two lines as shown below
Thoughts: <your thoughts and reasoning process>
Status: "success" or "failure"
"""
    prompt = f"""User instruction: {intent}
Action History:
{last_actions}
The detailed final state of the GUI:
```md
{cap}
```
Bot response to the user: {response if response else "None"}."""
    return prompt, system_msg
def build_final_eval_v3_final_prompt(
    cap, intent, response, last_actions
) -> tuple[str, str]:
    """Build the (user, system) prompt pair for the v3 caption-based web judge.

    :param cap: Markdown caption of the final webpage state.
    :param intent: The user's task instruction.
    :param response: The bot's final response to the user (falsy -> "N/A").
    :param last_actions: Pre-formatted, numbered action-history string.
    :return: (user_prompt, system_prompt) strings.
    """
    # NOTE: the original format spec had a stray trailing '"' after the
    # Thoughts placeholder; removed for consistency with the gpt4v variant.
    system_msg = """You are an expert in evaluating the performance of a web navigation agent. The agent is designed to help a human user navigate a website to complete a task. Given the user's intent, the agent's action history, the final state of the webpage, and the agent's response to the user, your goal is to decide whether the agent's execution is successful or not.
There are three types of tasks:
1. Information seeking: The user wants to obtain certain information from the webpage, such as the information of a product, reviews, map info, comparison of map routes, etc. The bot's response must contain the information the user wants, or explicitly state that the information is not available. Otherwise, e.g. the bot encounters an exception and respond with the error content, the task is considered a failure. Besides, be careful about the sufficiency of the agent's actions. For example, when asked to list the top-searched items in a shop, the agent should order the items by the number of searches, and then return the top items. If the ordering action is missing, the task is likely to fail.
2. Site navigation: The user wants to navigate to a specific page. Carefully examine the bot's action history and the final state of the webpage to determine whether the bot successfully completes the task. No need to consider the bot's response.
3. Content modification: The user wants to modify the content of a webpage or configuration. Carefully examine the bot's action history and the final state of the webpage to determine whether the bot successfully completes the task. No need to consider the bot's response.
*IMPORTANT*
Format your response into two lines as shown below:
Thoughts: <your thoughts and reasoning process>
Status: "success" or "failure"
"""
    prompt = f"""User Intent: {intent}
Action History:
{last_actions}
The detailed final state of the webpage:
```md
{cap}
```
Bot response to the user: {response if response else "N/A"}."""
    return prompt, system_msg
def build_final_eval_v3_final_prompt_gpt4v(
    intent, response, last_actions
) -> tuple[str, str]:
    """Build the (user, system) prompt pair for the v3 screenshot-based web judge.

    The final webpage state is supplied as an image by the caller, so the user
    prompt only carries the intent and the action history.

    NOTE(review): *response* is accepted but never placed into either prompt
    — confirm whether this omission is intentional for the image variant.
    """
    judge_sys_msg = """You are an expert in evaluating the performance of a web navigation agent. The agent is designed to help a human user navigate a website to complete a task. Given the user's intent, the agent's action history, the final state of the webpage, and the agent's response to the user, your goal is to decide whether the agent's execution is successful or not.
There are three types of tasks:
1. Information seeking: The user wants to obtain certain information from the webpage, such as the information of a product, reviews, map info, comparison of map routes, etc. The bot's response must contain the information the user wants, or explicitly state that the information is not available. Otherwise, e.g. the bot encounters an exception and respond with the error content, the task is considered a failure. Besides, be careful about the sufficiency of the agent's actions. For example, when asked to list the top-searched items in a shop, the agent should order the items by the number of searches, and then return the top items. If the ordering action is missing, the task is likely to fail.
2. Site navigation: The user wants to navigate to a specific page. Carefully examine the bot's action history and the final state of the webpage to determine whether the bot successfully completes the task. No need to consider the bot's response.
3. Content modification: The user wants to modify the content of a webpage or configuration. Carefully examine the bot's action history and the final state of the webpage to determine whether the bot successfully completes the task. No need to consider the bot's response.
*IMPORTANT*
Format your response into two lines as shown below:
Thoughts: <your thoughts and reasoning process>
Status: "success" or "failure"
"""
    user_prompt = (
        f"User Intent: {intent}\n"
        "Action History:\n"
        f"{last_actions}\n"
        "The last snapshot of the web page is shown in the image."
    )
    return user_prompt, judge_sys_msg
def build_android_prompt_gpt4v(intent, response, last_actions) -> tuple[str, str]:
    """Build the (user, system) prompt pair for the screenshot-based Android judge.

    The final screen state is supplied as an image by the caller.

    :param intent: The user's task instruction.
    :param response: The bot's final response to the user (falsy -> "N/A").
    :param last_actions: Pre-formatted, numbered action-history string.
    :return: (user_prompt, system_prompt) strings.
    """
    # NOTE: the original format spec had a stray trailing '"' after the
    # Thoughts placeholder; removed for consistency with the other prompts.
    system_msg = """You are an expert in evaluating the performance of an android navigation agent. The agent is designed to help a human user navigate the device to complete a task. Given the user's intent, and the final state of the screen, your goal is to decide whether the agent has successfully completed the task or not.
*IMPORTANT*
Format your response into two lines as shown below:
Thoughts: <your thoughts and reasoning process>
Status: "success" or "failure"
"""
    prompt = f"""User Intent: {intent}
Action History:
{last_actions}
The last snapshot of the screen is shown in the image.
Bot response to the user: {response if response else "N/A"}.
"""
    return prompt, system_msg
def A_build_android_prompt(cap, sec_cap, intent, response, last_actions) -> tuple[str, str]:
    """Build the (user, system) prompt pair for the two-state caption Android judge.

    Includes both the second-to-last and the final screen captions.

    NOTE(review): *response* is accepted but never placed into either prompt
    — confirm whether that is intentional.

    :param cap: Markdown caption of the final screen state.
    :param sec_cap: Markdown caption of the second-to-last screen state.
    :param intent: The user's task instruction.
    :param response: Unused; kept for interface compatibility with callers.
    :param last_actions: Pre-formatted, numbered action-history string.
    :return: (user_prompt, system_prompt) strings.
    """
    # NOTE: the original format spec had a stray trailing '"' after the
    # Thoughts placeholder; removed for consistency with the other prompts.
    system_msg = """You are an expert in evaluating the performance of an android navigation agent. The agent is designed to help a human user navigate the device to complete a task. Given the user's intent, and the last two states of the screen, your goal is to decide whether the agent has successfully completed the task or not.
*IMPORTANT*
Format your response into two lines as shown below:
Thoughts: <your thoughts and reasoning process>
Status: "success" or "failure"
"""
    prompt = f"""User Intent: {intent}
Action History:
{last_actions}
The detailed second last state of the screen:
```md
{sec_cap}
```
The detailed final state of the screen:
```md
{cap}
```"""
    return prompt, system_msg
def build_android_prompt(cap, sec_cap, intent, response, last_actions) -> tuple[str, str]:
    """Build the (user, system) prompt pair for the single-state caption Android judge.

    NOTE(review): *sec_cap* and *response* are accepted but never placed into
    either prompt (callers such as Evaluator.eval_android still pass them) —
    confirm whether those omissions are intentional.

    :param cap: Markdown caption of the final screen state.
    :param sec_cap: Unused; kept for interface compatibility with callers.
    :param intent: The user's task instruction.
    :param response: Unused; kept for interface compatibility with callers.
    :param last_actions: Pre-formatted, numbered action-history string.
    :return: (user_prompt, system_prompt) strings.
    """
    # NOTE: the original format spec had a stray trailing '"' after the
    # Thoughts placeholder; removed for consistency with the other prompts.
    system_msg = """You are an expert in evaluating the performance of an android navigation agent. The agent is designed to help a human user navigate the device to complete a task. Given the user's intent, and the state of the screen, your goal is to decide whether the agent has successfully completed the task or not.
*IMPORTANT*
Format your response into two lines as shown below:
Thoughts: <your thoughts and reasoning process>
Status: "success" or "failure"
"""
    prompt = f"""User Intent: {intent}
Action History:
{last_actions}
The detailed final state of the screen:
```md
{cap}
```"""
    return prompt, system_msg
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/agent_eval/agent_eval/eval/metrics.py | agent_eval/agent_eval/eval/metrics.py | import os
import json
import pandas as pd
# Manual ground-truth corrections: trajectory IDs whose stored "gt" label was
# found to be wrong on review.  get_metrics_from_result_json() overrides the
# JSON's "gt" with these values (all corrected to False, i.e. failure).
LABEL_CORRECTION = {
    "24": False,
    "201": False,
    "225": False,
    "247": False,
    "390": False,
    "435": False,
    "466": False,
    "677": False,
    "678": False,
    "679": False,
    "680": False,
    "752": False,
    "792": False,
    "793": False,
}
def calculate_performance(dataframe):
# Initialize the counts
TP = FP = TN = FN = 0
# Iterate through each column (each category)
for column in dataframe.columns:
gt, rm = dataframe[column]
if gt and rm:
TP += 1
elif not gt and not rm:
TN += 1
elif not gt and rm:
FP += 1
elif gt and not rm:
FN += 1
return TP, FP, TN, FN
def get_metrics_from_result_json(result_json_path):
    """Compute confusion-matrix metrics from an eval-result JSON file.

    The file maps trajectory id -> {"gt": bool, "rm": bool}, where "gt" is the
    human ground-truth label and "rm" the reward model's verdict.  Ground
    truth is overridden by LABEL_CORRECTION where applicable.

    :param result_json_path: Path to the result JSON file.
    :return: (performance_metrics dict, classification dict mapping
        "TP"/"TN"/"FP"/"FN" to lists of trajectory ids).
    """
    # The original implementation also parsed the same file with
    # pd.read_json() into an unused variable (its only consumer was
    # commented-out code); that redundant read is removed.
    classification_dict = {"TP": [], "TN": [], "FP": [], "FN": []}
    with open(result_json_path, "r") as json_file:
        json_data = json.load(json_file)
    # Classify each entry into the confusion matrix.
    for key, values in json_data.items():
        gt = values["gt"]
        if key in LABEL_CORRECTION:
            gt = LABEL_CORRECTION[key]
        if gt and values["rm"]:
            classification_dict["TP"].append(key)
        elif not gt and not values["rm"]:
            classification_dict["TN"].append(key)
        elif not gt and values["rm"]:
            classification_dict["FP"].append(key)
        elif gt and not values["rm"]:
            classification_dict["FN"].append(key)
    TP = len(classification_dict["TP"])
    FP = len(classification_dict["FP"])
    TN = len(classification_dict["TN"])
    FN = len(classification_dict["FN"])
    # Accuracy, precision, recall and F1, guarding the zero denominators.
    accuracy = (TP + TN) / (TP + FP + TN + FN)
    precision = TP / (TP + FP) if (TP + FP) > 0 else 0
    recall = TP / (TP + FN) if (TP + FN) > 0 else 0
    f1_score = (
        2 * (precision * recall) / (precision + recall)
        if (precision + recall) > 0
        else 0
    )
    performance_metrics = {
        "True Positives": TP,
        "False Positives": FP,
        "True Negatives": TN,
        "False Negatives": FN,
        "Accuracy": accuracy,
        "Precision": precision,
        "Recall": recall,
        "F1 Score": f1_score,
    }
    return performance_metrics, classification_dict
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/agent_eval/agent_eval/eval/annotate_app.py | agent_eval/agent_eval/eval/annotate_app.py | import gradio as gr
from matplotlib import pyplot as plt
import math
import io
from PIL import Image
from numpy import asarray
from agent_eval.domains.unified import UniTrajectoryDataset
import time
import json
from collections import defaultdict
import os
def main(dataset_abs_path, log_name):
    """Launch a Gradio app for human annotation of agent trajectories.

    :param dataset_abs_path: Path to a unified trajectory dataset directory
        (assumed to end with "/" given the string concatenation below —
        TODO confirm).
    :param log_name: Stem of the JSONL annotation log under <dataset>/evals/.
    """
    annotation_log_path = dataset_abs_path + "evals/" + log_name + ".jsonl"
    Dataset = UniTrajectoryDataset(dataset_abs_path, eval_log_names=[])
    if not os.path.exists(annotation_log_path):
        with open(annotation_log_path, "w") as f:
            pass  # Create an empty file if it doesn't exist
    # Replay the append-only log into memory: task_uid -> user_uid -> record.
    all_added_annotations = defaultdict(lambda: defaultdict())
    with open(annotation_log_path, "r") as f:
        for line in f:
            ann = json.loads(line)
            all_added_annotations[ann["task_uid"]][ann["user_uid"]] = ann
def downsample_img(img, scale=4):
    """Return *img* shrunk by the given linear *scale* factor."""
    target_size = (int(img.width / scale), int(img.height / scale))
    return img.resize(target_size)
def plot_images_in_grid(image_list, max_columns=3, down_scale=4):
    """Compose downsampled copies of *image_list* into one grid image.

    Images are shrunk by *down_scale*, laid out row-major with at most
    *max_columns* per row, and pasted onto a single RGB canvas whose cell
    size is the maximum downsampled width/height.
    """
    shrunk = [downsample_img(img, down_scale) for img in image_list]
    n_rows = math.ceil(len(shrunk) / max_columns)
    cell_w = max(img.width for img in shrunk)
    cell_h = max(img.height for img in shrunk)
    canvas = Image.new("RGB", (max_columns * cell_w, n_rows * cell_h))
    for idx, tile in enumerate(shrunk):
        row, col = divmod(idx, max_columns)
        canvas.paste(tile, (col * cell_w, row * cell_h))
    return canvas
def _is_un_annotated(task_idx):
    # True iff no annotator has labeled this task yet (closure over
    # `Dataset` and `all_added_annotations` defined in main()).
    task_uid = Dataset.idx_to_uid(task_idx)
    return len(all_added_annotations[task_uid]) == 0
def submit_annotation_and_next(
    task_idx,
    task_uid,
    user_uid,
    annotation,
    comment_box,
    show_exisiting_annotations,
    only_unannotated,
):
    # Persist the current annotation to the JSONL log and in-memory cache,
    # then render the next task (optionally skipping already-annotated ones).
    human_readable_time = time.strftime("%m-%d %H:%M:%S", time.localtime())
    print(user_uid, task_idx, human_readable_time)
    if user_uid == "" or user_uid == None:
        # NOTE(review): gr.Error is instantiated but never raised, so no
        # error popup appears in the UI — likely needs `raise gr.Error(...)`.
        gr.Error("Please enter your annotator ID")
        return render_task(task_idx, user_uid, show_exisiting_annotations)
    this_ann = {
        # Dataset directory name (assumes dataset_abs_path ends with "/").
        "dataset_path": dataset_abs_path[:-1].split("/")[-1],
        "task_idx": task_idx,
        "task_uid": task_uid,
        "user_uid": user_uid,
        "annotation": annotation,
        "comment": comment_box,
    }
    all_added_annotations[task_uid][user_uid] = this_ann
    # Append-only log: earlier records are never rewritten on disk.
    with open(annotation_log_path, "a") as f:
        f.write(json.dumps(this_ann) + "\n")
    for i in range(task_idx + 1, len(Dataset)):
        if only_unannotated and not _is_un_annotated(i):
            continue
        return render_task(i, user_uid, show_exisiting_annotations)
    # NOTE(review): returns None when no further task qualifies, which leaves
    # the Gradio outputs in an undefined state — confirm intended.
def next_task(task_idx, user_uid, show_exisiting_annotations, only_unannotated):
    """Render the first qualifying task after *task_idx* (None if none left)."""
    candidate = task_idx + 1
    while candidate < len(Dataset):
        if not only_unannotated or _is_un_annotated(candidate):
            return render_task(candidate, user_uid, show_exisiting_annotations)
        candidate += 1
def prev_task(task_idx, user_uid, show_exisiting_annotations, only_unannotated):
    """Render the nearest qualifying task before *task_idx* (None if none)."""
    candidate = task_idx - 1
    while candidate >= 0:
        if not only_unannotated or _is_un_annotated(candidate):
            return render_task(candidate, user_uid, show_exisiting_annotations)
        candidate -= 1
def render_task(task_idx, user_uid, show_exisiting_annotations):
    # Build the full tuple of Gradio output values for the task at task_idx.
    # task_idx = Dataset.uid_to_idx(task_uid)
    task = Dataset[task_idx]
    task_uid = task["traj_name"]
    task_goal = task["intent"]
    # Grid layout is tuned per domain: web screenshots in a single column,
    # android screenshots three per row; anything else uses the defaults.
    if "web" in Dataset.dataset_path:
        task_img = plot_images_in_grid(task["images"], max_columns=1, down_scale=2)
    elif "android" in Dataset.dataset_path:
        task_img = plot_images_in_grid(task["images"], max_columns=3, down_scale=2)
    else:
        task_img = plot_images_in_grid(task["images"])
    # Pre-fill this user's earlier comment/annotation, if any.
    comment = (
        all_added_annotations[task_uid][user_uid]["comment"]
        if user_uid in all_added_annotations[task_uid]
        else ""
    )
    annotation = (
        all_added_annotations[task_uid][user_uid]["annotation"]
        if user_uid in all_added_annotations[task_uid]
        else ""
    )
    # NOTE(review): the value computed just above is immediately discarded —
    # the dropdown is always reset to "Failure". Confirm this is the intended
    # default rather than a bug.
    annotation = "Failure"
    existing_annootations = ""
    if show_exisiting_annotations:
        for uid, ann in all_added_annotations[task_uid].items():
            existing_annootations += (
                f"{uid}: {ann['annotation']} | {ann['comment']}\n"
            )
    else:
        existing_annootations = "Not Visible"
    agent_response = task["response"]
    # Number the action list for display (1-based).
    act_str = ""
    for idx, act in enumerate(task["actions"]):
        act_str += f"{idx+1}: {act}\n"
    # Order must match the `outputs=[...]` lists wired to the UI callbacks.
    return (
        task_idx,
        task_uid,
        task_goal,
        task_img,
        comment,
        annotation,
        existing_annootations,
        agent_response,
        act_str,
    )
# ---- Gradio UI: widget layout, then event wiring for each control. ----
with gr.Blocks(title=dataset_abs_path) as demo:
    with gr.Row():
        with gr.Column(scale=1):
            task_img = gr.Image(
                label="Goal Image", interactive=False
            )  # managed by the app
        with gr.Column(scale=1):
            with gr.Group():
                task_idx = gr.Number(
                    value=-1, label="Task Index", precision=0
                )  # managed by the app
                total_task = gr.Number(
                    value=len(Dataset), label="Total task number", precision=0
                )  # managed by the app
                task_uid = gr.Textbox(
                    lines=1, label="Task UID", interactive=False
                )  # managed by the app
                task_goal = gr.Textbox(
                    lines=1, label="Goal", interactive=False
                )  # managed by the app
                actions_box = gr.Textbox(
                    lines=10, label="Actions", interactive=False
                )
                agent_response_box = gr.Textbox(
                    lines=1, label="Agent Response", interactive=False
                )
                existing_annootations = gr.Textbox(
                    label="Existing Annotations", interactive=False
                )
            # Annotator-editable inputs.
            with gr.Group():
                user_uid = gr.Textbox(
                    lines=1,
                    label="Annotator ID",
                    placeholder="Enter your annotator ID",
                )
                comment_box = gr.Textbox(lines=1, label="Optional Comments")
                annotation = gr.Dropdown(
                    value="Failure",
                    choices=["Success", "Failure", "Unsure", "Emulator Error"],
                    label="Annotation",
                )
                show_exisiting_annotations = gr.Checkbox(
                    value=True, label="Show Existing Annotations"
                )
                only_unannotated = gr.Checkbox(
                    value=True, label="Only Show Unannotated Task"
                )
            # Navigation buttons.  NOTE(review): `next` shadows the builtin.
            with gr.Group():
                submit_and_next = gr.Button(value="Submit and Next Task")
                # submit = gr.Button(value="Submit Annotation")
                next = gr.Button(value="Skip to Next Task")
                prev = gr.Button(value="Previous Task")
                goto = gr.Button(value="Go to Task Index")
    # Re-render the current task whenever the visibility toggle changes.
    show_exisiting_annotations.change(
        render_task,
        inputs=[task_idx, user_uid, show_exisiting_annotations],
        outputs=[
            task_idx,
            task_uid,
            task_goal,
            task_img,
            comment_box,
            annotation,
            existing_annootations,
            agent_response_box,
            actions_box,
        ],
    )
    submit_and_next.click(
        submit_annotation_and_next,
        inputs=[
            task_idx,
            task_uid,
            user_uid,
            annotation,
            comment_box,
            show_exisiting_annotations,
            only_unannotated,
        ],
        outputs=[
            task_idx,
            task_uid,
            task_goal,
            task_img,
            comment_box,
            annotation,
            existing_annootations,
            agent_response_box,
            actions_box,
        ],
    )
    next.click(
        next_task,
        inputs=[
            task_idx,
            user_uid,
            show_exisiting_annotations,
            only_unannotated,
        ],
        outputs=[
            task_idx,
            task_uid,
            task_goal,
            task_img,
            comment_box,
            annotation,
            existing_annootations,
            agent_response_box,
            actions_box,
        ],
    )
    prev.click(
        prev_task,
        inputs=[
            task_idx,
            user_uid,
            show_exisiting_annotations,
            only_unannotated,
        ],
        outputs=[
            task_idx,
            task_uid,
            task_goal,
            task_img,
            comment_box,
            annotation,
            existing_annootations,
            agent_response_box,
            actions_box,
        ],
    )
    # "Go to Task Index" re-renders whatever index the user typed.
    goto.click(
        render_task,
        inputs=[task_idx, user_uid, show_exisiting_annotations],
        outputs=[
            task_idx,
            task_uid,
            task_goal,
            task_img,
            comment_box,
            annotation,
            existing_annootations,
            agent_response_box,
            actions_box,
        ],
    )
demo.launch(share=True)
"""
python annotate_app.py --dataset /home/<user>/data/GUI_Proj/ios_traj/unified/android-cogagent-v0/ --log_name v0
python annotate_app.py --dataset /home/<user>/data/GUI_Proj/unified_datasets/ios80-cogagent-v0/ --log_name v0
python annotate_app.py --dataset /home/<user>/data/GUI_Proj/ios_traj/unified/ios20-zeroshot-v0/ --log_name v0
python annotate_app.py --dataset /home/<user>/data/GUI_Proj/unified_datasets/ios20-selftrain-v0/ --log_name v0
python annotate_app.py --dataset /home/<user>/data/GUI_Proj/filteredbc_jan24/ --log_name v0
python annotate_app.py --dataset /home/<user>/data/GUI_Proj/filteredbc_jan24/ --log_name v0
python annotate_app.py --dataset /home/<user>/data/GUI_Proj/zeroshot_deterministic_jan28/ --log_name v0
python annotate_app.py --dataset /home/<user>/data/GUI_Proj/gpt4_deterministic_jan28/ --log_name v0
python annotate_app.py --dataset /home/<user>/data/GUI_Proj/output_autoui_large/ --log_name v0
# iOS
python annotate_app.py --dataset /home/<user>/data/GUI_Proj/ios_exps/zeroshot_deterministic_jan28/ --log_name v0
python annotate_app.py --dataset /home/<user>/data/GUI_Proj/ios_exps/selftrain_jan28/ --log_name v0
python annotate_app.py --dataset /home/<user>/data/GUI_Proj/ios_exps/mistral_jan29/ --log_name v0
"""
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    # Dataset directory (see example invocations above; expected to end with "/").
    parser.add_argument("--dataset", type=str, required=True)
    # Stem of the annotation log file under <dataset>/evals/.
    parser.add_argument("--log_name", type=str, default="v0")
    args = parser.parse_args()
    main(args.dataset, args.log_name)
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/agent_eval/agent_eval/eval/annotator.py | agent_eval/agent_eval/eval/annotator.py | import json
import os
from typing import Any, List, Tuple
from termcolor import cprint
# In-context-learning system prompt for per-step trajectory annotation (v1):
# the LM classifies each transition into one of four progress labels.
sys_prompt_v1_icl = """You are a GUI Trajectory Evaluator. Your task is to observe a bot's action within a graphical user interface (GUI) and classify its behavior into one of four categories based on its progress towards a specified goal. The categories are:
1. "towards-the-goal" - The bot is moving closer to achieving the goal.
2. "not-sure" - It's unclear if the bot's actions are helping reach the goal.
3. "goal-reached" - The bot has successfully completed the goal.
4. "away-from-the-goal" - The bot's actions are diverting it from the goal.
Please format your response as follows:
Thoughts: [Explain your reasoning here]
Response: "towards-the-goal", "not-sure", "goal-reached", or "away-from-the-goal"
Here are some example responses:
---
Example 1:
Thoughts: The goal is to 'set an alarm at 8:00 am.' Initially, the bot is on the home screen. After a tap action, it navigates to the alarm app, indicating progress towards the goal.
Response: "towards-the-goal"
Example 2:
Thoughts: The goal is to 'buy the latest iPhone on Amazon.' The bot starts at the checkout page on Amazon. After a tap action, the screen shows a successful purchase, signifying that the goal has been reached.
Response: "goal-reached"
Example 3:
Thoughts: The goal is to 'show me the weather in New York.' The bot begins on London's weather page. After pressing 'home', it returns to the home screen, moving away from the goal.
Response: "away-from-the-goal"
Example 4:
Thoughts: The goal is to 'buy some coffee on the Starbucks app.' The bot begins on the Amazon app. After pressing 'back,' it moves to the home screen, which is a prerequisite for opening the Starbucks app.
Response: "towards-the-goal"
Example 5:
Thoughts: The goal is to 'open YouTube.' The bot begins on the home screen. After a swipe, it appears to remain on the same page, suggesting no progress towards the goal.
Response: "not-sure"
Note:
You should be extra-careful when assigning "goal-reached" or "towards-the-goal" labels. If you are unsure, please select "not-sure" instead.
---
"""
def built_user_prompt_v1(intent, current_state, action, next_state):
    """Format one (state, action, next_state) step into the v1 user prompt."""
    return (
        f"Goal: {intent}\n"
        "Original State:\n"
        "```md\n"
        f"{current_state}\n"
        "```\n"
        f'State after action: "{action}":\n'
        "```md\n"
        f"{next_state}\n"
        "```"
    )
class Annotator:
    """Per-step trajectory annotator backed by an LM client.

    Labels every (state, action, next_state) transition of a trajectory with
    respect to the goal, using the in-context-learning prompt above.
    """

    def __init__(self, lm_clients, log_save_path=None):
        # lm_clients: mapping of client name -> LM client exposing one_step_chat().
        self.lm_clients = lm_clients
        self.log_save_path = log_save_path

    def __call__(self, info, client, version):
        assert (
            client in self.lm_clients
        ), f"Client {client} not found in {self.lm_clients.keys()}"
        client = self.lm_clients[client]
        if version == "v1":
            return self.annotate_v1(info, client)
        # Fail loudly on unknown versions instead of silently returning None
        # (consistent with Evaluator.__call__).
        raise NotImplementedError(f"Version {version} not implemented")

    def annotate_v1(self, info, client):
        """Classify each state -> action -> next_state transition of a trajectory.

        For every step, the LM labels the transition as one of
        "towards-the-goal", "not-sure", "goal-reached" or "away-from-the-goal".
        The final step's successor state is reported as "Terminated".

        :param info: Trajectory dict with "intent", "captions" and "actions".
        :param client: LM client exposing one_step_chat(text=...).
        :return: List of raw LM responses, one per step.
        """
        intent = info["intent"]
        results = []
        sys_prompt = sys_prompt_v1_icl
        for idx in range(len(info["captions"])):
            current_state = info["captions"][idx]
            if idx == len(info["captions"]) - 1:
                # The last observed state has no successor snapshot.
                next_state = "Terminated"
            else:
                next_state = info["captions"][idx + 1]
            action = info["actions"][idx]
            user_prompt = built_user_prompt_v1(
                intent, current_state, action, next_state
            )
            # The system prompt is folded into the user message — presumably
            # because some clients lack a system role (TODO confirm).
            extended_user_prompt = f"{sys_prompt}\n\n{user_prompt}"
            out, _ = client.one_step_chat(text=extended_user_prompt)
            results.append(out)
        return results
| python | BSD-3-Clause | 0eef6eba80d91d99d99133c319f464d7adf47abe | 2026-01-05T07:13:34.281805Z | false |
Berkeley-NLP/Agent-Eval-Refine | https://github.com/Berkeley-NLP/Agent-Eval-Refine/blob/0eef6eba80d91d99d99133c319f464d7adf47abe/agent_eval/agent_eval/eval/evaluator.py | agent_eval/agent_eval/eval/evaluator.py | import json
import os
from typing import Any, List, Tuple
from termcolor import cprint
from agent_eval.eval.prompts import *
class Evaluator:
    """Trajectory-level success/failure judge dispatching to per-domain prompts.

    Each eval method returns (parsed_eval_dict, raw_lm_string, user_prompt);
    __call__ dispatches on `version`, optionally logs results, and returns
    (parsed_eval_dict, user_prompt).
    """

    def __init__(self, lm_clients, log_save_path=None):
        # lm_clients: mapping of client name -> LM client.
        self.lm_clients = lm_clients
        # Directory for JSONL + per-trajectory Markdown logs (None disables).
        self.log_save_path = log_save_path

    def __call__(self, info, client="gpt-3.5", version="naive"):
        assert (
            client in self.lm_clients
        ), f"Client {client} not found in {self.lm_clients.keys()}"
        if version == "final-v3":
            eval_info, eval_str, prompt = self.final_eval_v3(info, client)
        elif version == "final-v3-gpt4v":
            eval_info, eval_str, prompt = self.final_eval_v3_gpt4v(info, client)
        elif version == "android":
            eval_info, eval_str, prompt = self.eval_android(info, client)
        elif version == "android-gpt4v":
            eval_info, eval_str, prompt = self.eval_android_gpt4v(info, client)
        elif version == "naive-last-frame-4v":
            eval_info, eval_str, prompt = self.naive_last_frame_eval_4v(info, client)
        else:
            raise NotImplementedError(f"Version {version} not implemented")
        if self.log_save_path:
            # Append a machine-readable record, then write a human-readable
            # Markdown report for this trajectory.
            with open(self.log_save_path + "/outputs.jsons", "a") as f:
                f.write(
                    json.dumps(
                        {
                            "id": info["traj_name"],
                            "eval_info": eval_info,
                        }
                    )
                    + "\n"
                )
            with open(f"{self.log_save_path}/{info['traj_name']}.md", "w") as md_file:
                md_file.write(f"## Intent\n\n{info['intent']}\n\n")
                md_file.write(f"## RM\n\n{eval_str}\n\n")
                md_file.write(f"## Final Response {info['response']}\n\n")
                if "captions" in info and info['captions'] is not None:
                    md_file.write("## Captions\n\n")
                    for idx, cap in enumerate(info["captions"]):
                        md_file.write(f"===============")
                        md_file.write(f"{cap}\n")
                md_file.write("\n## Images\n\n")
                for idx, img in enumerate(info["image_paths"]):
                    # NOTE(review): rel_img_path is computed but never used and
                    # only a bare newline is written — the Markdown image link
                    # appears to have been lost; confirm against upstream.
                    rel_img_path = os.path.relpath(img, self.log_save_path)
                    md_file.write(f"\n")
                if "config" in info:
                    md_file.write("## Config\n\n")
                    cofig_str = json.dumps(info["config"], indent=4)
                    md_file.write(f"```json\n{cofig_str}\n```\n")
                if prompt:
                    md_file.write("## Main Prompt\n\n")
                    md_file.write(f"```md\n{prompt}\n```\n")
        return eval_info, prompt

    def naive_last_frame_eval_4v(self, info, client):
        # GPT-4V-only baseline: judge from the last screenshot alone; the LM
        # is expected to reply with a JSON object (parsed via json.loads).
        assert client == "gpt-4v"
        prompt, sys_msg = build_naive_last_frame_4v_eval_prompt(
            info["intent"],
            info["response"] if info["response"] else "None",
        )
        img = info["images"][-1]
        lm_client = self.lm_clients[client]
        msg_str, _ = lm_client.one_step_chat(
            prompt, img, system_msg=sys_msg, json_mode=False
        )
        msg_dict = json.loads(msg_str)
        return msg_dict, msg_str, prompt

    def final_eval_v3(self, info, client):
        # Text-only v3 judge: final-state caption + numbered action history.
        response = info["response"] if info["response"] else "None"
        lm_client = self.lm_clients[client]
        action_history = ""
        for idx, act in enumerate(info["actions"]):
            action_history += f"{idx+1}: {act}\n"
        prompt, sys_msg = build_final_eval_v3_final_prompt(
            info["captions"][-1], info["intent"], response, action_history
        )
        # lm_client = self.lm_clients["gpt-4"]
        msg_str, _ = lm_client.one_step_chat(prompt, system_msg=sys_msg)
        # Parse the two-line "Thoughts:/Status:" reply format.
        msg_dict = {
            "thoughts": extract_content(msg_str, "Thoughts:"),
            "status": extract_content(msg_str, "Status:").replace('"', ""),
        }
        return msg_dict, msg_str, prompt

    def final_eval_v3_gpt4v(self, info, client):
        # Screenshot-based v3 judge (web): last frame is sent as an image.
        assert client == "gpt-4v"
        action_history = ""
        for idx, act in enumerate(info["actions"]):
            action_history += f"{idx+1}: {act}\n"
        prompt, sys_msg = build_final_eval_v3_final_prompt_gpt4v(
            info["intent"], info["response"], action_history
        )
        img = info["images"][-1]
        lm_client = self.lm_clients[client]
        msg_str, _ = lm_client.one_step_chat(
            prompt, img, system_msg=sys_msg, json_mode=False
        )
        # Drop the (potentially large) image objects before `info` is logged.
        del info["images"]
        # msg_dict = json.loads(msg_str)
        msg_dict = {
            "thoughts": extract_content(msg_str, "Thoughts:"),
            "status": extract_content(msg_str, "Status:").replace('"', ""),
        }
        return msg_dict, msg_str, prompt

    def eval_android_gpt4v(self, info, client):
        # Screenshot-based Android judge: only the last frame is sent
        # (the two-frame variant is kept commented out below).
        assert client == "gpt-4v"
        action_history = ""
        for idx, act in enumerate(info["actions"]):
            action_history += f"{idx+1}: {act}\n"
        prompt, sys_msg = build_android_prompt_gpt4v(
            info["intent"], info["response"], action_history
        )
        img = {"image":info["images"][-1], "detail": "high"}
        # if len(info["images"]) >= 2:
        #     sec_img = {"image":info["images"][-2], "detail": "high"}
        #     image_input = [sec_img, img]
        # else:
        #     image_input = [img]
        image_input = [img]
        lm_client = self.lm_clients[client]
        # msg_str, _ = lm_client.one_step_chat(
        #     prompt, img, system_msg=sys_msg, json_mode=False
        # )
        msg_str, _ = lm_client.one_step_multi_image_chat(
            prompt, image_input, system_msg=sys_msg, json_mode=False
        )
        # msg_dict = json.loads(msg_str)
        msg_dict = {
            "thoughts": extract_content(msg_str, "Thoughts:"),
            "status": extract_content(msg_str, "Status:").replace('"', ""),
        }
        return msg_dict, msg_str, prompt

    def eval_android(self, info, client):
        # Caption-based Android judge.  Passes the last and (if present)
        # second-to-last captions to build_android_prompt.
        response = info["response"] if info["response"] else "None"
        lm_client = self.lm_clients[client]
        action_history = ""
        for idx, act in enumerate(info["actions"]):
            action_history += f"{idx+1}: {act}\n"
        prompt, sys_msg = build_android_prompt(
            info["captions"][-1], info['captions'][-2] if len(info['captions']) > 1 else None,
            info["intent"], response, action_history
        )
        msg_str, _ = lm_client.one_step_chat(prompt, system_msg=sys_msg)
        msg_dict = {
            "thoughts": extract_content(msg_str, "Thoughts:"),
            "status": extract_content(msg_str, "Status:").replace('"', ""),
        }
        return msg_dict, msg_str, prompt
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.