repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/config/__init__.py | pyasic/config/__init__.py | # ------------------------------------------------------------------------------
# Copyright 2022 Upstream Data Inc -
# -
# Licensed under the Apache License, Version 2.0 (the "License"); -
# you may not use this file except in compliance with the License. -
# You may obtain a copy of the License at -
# -
# http://www.apache.org/licenses/LICENSE-2.0 -
# -
# Unless required by applicable law or agreed to in writing, software -
# distributed under the License is distributed on an "AS IS" BASIS, -
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -
# See the License for the specific language governing permissions and -
# limitations under the License. -
# ------------------------------------------------------------------------------
from typing import Any
from pydantic import BaseModel, Field
from pyasic.config.fans import (
FanModeConfig,
FanModeImmersion,
FanModeManual,
FanModeNormal,
)
from pyasic.config.mining import (
MiningModeConfig,
MiningModeHashrateTune,
MiningModeHPM,
MiningModeLPM,
MiningModeManual,
MiningModeNormal,
MiningModePowerTune,
MiningModePreset,
MiningModeSleep,
)
# Type aliases for config field types
# Union of every fan-mode implementation accepted by MinerConfig.fan_mode.
FanModeType = FanModeNormal | FanModeManual | FanModeImmersion | FanModeConfig
# Union of every mining-mode implementation accepted by MinerConfig.mining_mode.
MiningModeType = (
    MiningModeNormal
    | MiningModeHPM
    | MiningModeLPM
    | MiningModeSleep
    | MiningModeManual
    | MiningModePowerTune
    | MiningModeHashrateTune
    | MiningModePreset
    | MiningModeConfig
)
from pyasic.config.mining.scaling import ScalingConfig
from pyasic.config.pools import PoolConfig
from pyasic.config.temperature import TemperatureConfig
from pyasic.misc import merge_dicts
class MinerConfig(BaseModel):
    """Represents the configuration for a miner including pool configuration,
    fan mode, temperature settings, mining mode, and power scaling."""

    class Config:
        # Allow non-pydantic field types in this model (e.g. the mode/config
        # option classes unioned in FanModeType / MiningModeType).
        arbitrary_types_allowed = True

    # Pool/worker settings; defaults to an empty pool configuration.
    pools: PoolConfig = Field(default_factory=PoolConfig.default)
    # Fan control mode (normal / manual / immersion).
    fan_mode: FanModeType = Field(default_factory=FanModeConfig.default)
    # Temperature targets and limits.
    temperature: TemperatureConfig = Field(default_factory=TemperatureConfig.default)
    # Mining/power mode (normal, sleep, power/hashrate tune, preset, ...).
    mining_mode: MiningModeType = Field(default_factory=MiningModeConfig.default)
def __getitem__(self, item: str) -> Any:
try:
return getattr(self, item)
except AttributeError:
raise KeyError
    def as_dict(self) -> dict:
        """Converts the MinerConfig object to a dictionary via pydantic's model_dump."""
        return self.model_dump()
    def as_am_modern(self, user_suffix: str | None = None) -> dict:
        """Generates the configuration in the format suitable for modern Antminers.

        Later unpacked dicts override earlier keys, so a mining-mode key can
        override the default "freq-level" of "100".
        """
        return {
            **self.fan_mode.as_am_modern(),
            "freq-level": "100",
            **self.mining_mode.as_am_modern(),
            **self.pools.as_am_modern(user_suffix=user_suffix),
            **self.temperature.as_am_modern(),
        }

    def as_hiveon_modern(self, user_suffix: str | None = None) -> dict:
        """Generates the configuration in the format suitable for modern Hiveon.

        Same merge order as as_am_modern; later sections win on key clashes.
        """
        return {
            **self.fan_mode.as_hiveon_modern(),
            "freq-level": "100",
            **self.mining_mode.as_hiveon_modern(),
            **self.pools.as_hiveon_modern(user_suffix=user_suffix),
            **self.temperature.as_hiveon_modern(),
        }

    def as_elphapex(self, user_suffix: str | None = None) -> dict:
        """Generates the configuration in the format suitable for modern Elphapex.

        Elphapex uses the "fc-freq-level" key instead of "freq-level".
        """
        return {
            **self.fan_mode.as_elphapex(),
            "fc-freq-level": "100",
            **self.mining_mode.as_elphapex(),
            **self.pools.as_elphapex(user_suffix=user_suffix),
            **self.temperature.as_elphapex(),
        }
def as_wm(self, user_suffix: str | None = None) -> dict:
"""Generates the configuration in the format suitable for Whatsminers."""
return {
**self.fan_mode.as_wm(),
**self.mining_mode.as_wm(),
**self.pools.as_wm(user_suffix=user_suffix),
**self.temperature.as_wm(),
}
def as_btminer_v3(self, user_suffix: str | None = None) -> dict:
"""Generates the configuration in the format suitable for Whatsminers running BTMiner V3."""
return {
"set.miner.pools": self.pools.as_btminer_v3(),
**self.mining_mode.as_btminer_v3(),
}
    def as_am_old(self, user_suffix: str | None = None) -> dict:
        """Generates the configuration in the format suitable for old versions of Antminers."""
        return {
            **self.fan_mode.as_am_old(),
            **self.mining_mode.as_am_old(),
            **self.pools.as_am_old(user_suffix=user_suffix),
            **self.temperature.as_am_old(),
        }

    def as_goldshell(self, user_suffix: str | None = None) -> dict:
        """Generates the configuration in the format suitable for Goldshell miners."""
        return {
            **self.fan_mode.as_goldshell(),
            **self.mining_mode.as_goldshell(),
            **self.pools.as_goldshell(user_suffix=user_suffix),
            **self.temperature.as_goldshell(),
        }

    def as_avalon(self, user_suffix: str | None = None) -> dict:
        """Generates the configuration in the format suitable for Avalonminers."""
        return {
            **self.fan_mode.as_avalon(),
            **self.mining_mode.as_avalon(),
            **self.pools.as_avalon(user_suffix=user_suffix),
            **self.temperature.as_avalon(),
        }

    def as_inno(self, user_suffix: str | None = None) -> dict:
        """Generates the configuration in the format suitable for Innosilicon miners."""
        return {
            **self.fan_mode.as_inno(),
            **self.mining_mode.as_inno(),
            **self.pools.as_inno(user_suffix=user_suffix),
            **self.temperature.as_inno(),
        }

    def as_bosminer(self, user_suffix: str | None = None) -> dict:
        """Generates the configuration in the bosminer.toml format."""
        return {
            # NOTE(review): fan and temperature output presumably share toml
            # sections, hence merge_dicts rather than a shallow unpack --
            # confirm merge semantics in pyasic.misc.merge_dicts.
            **merge_dicts(self.fan_mode.as_bosminer(), self.temperature.as_bosminer()),
            **self.mining_mode.as_bosminer(),
            **self.pools.as_bosminer(user_suffix=user_suffix),
        }

    def as_boser(self, user_suffix: str | None = None) -> dict:
        """Generates the configuration in the format suitable for BOSer."""
        return {
            **self.fan_mode.as_boser(),
            **self.temperature.as_boser(),
            **self.mining_mode.as_boser(),
            **self.pools.as_boser(user_suffix=user_suffix),
        }

    def as_epic(self, user_suffix: str | None = None) -> dict:
        """Generates the configuration in the format suitable for ePIC miners."""
        return {
            **merge_dicts(self.fan_mode.as_epic(), self.temperature.as_epic()),
            **self.mining_mode.as_epic(),
            **self.pools.as_epic(user_suffix=user_suffix),
        }

    def as_auradine(self, user_suffix: str | None = None) -> dict:
        """Generates the configuration in the format suitable for Auradine miners."""
        return {
            **self.fan_mode.as_auradine(),
            **self.temperature.as_auradine(),
            **self.mining_mode.as_auradine(),
            **self.pools.as_auradine(user_suffix=user_suffix),
        }

    def as_mara(self, user_suffix: str | None = None) -> dict:
        """Generates the configuration in the format suitable for MARA firmware."""
        return {
            **self.fan_mode.as_mara(),
            **self.temperature.as_mara(),
            **self.mining_mode.as_mara(),
            **self.pools.as_mara(user_suffix=user_suffix),
        }

    def as_espminer(self, user_suffix: str | None = None) -> dict:
        """Generates the configuration in the format suitable for ESPMiner devices."""
        return {
            **self.fan_mode.as_espminer(),
            **self.temperature.as_espminer(),
            **self.mining_mode.as_espminer(),
            **self.pools.as_espminer(user_suffix=user_suffix),
        }

    def as_luxos(self, user_suffix: str | None = None) -> dict:
        """Generates the configuration in the format suitable for LuxOS."""
        return {
            **self.fan_mode.as_luxos(),
            **self.temperature.as_luxos(),
            **self.mining_mode.as_luxos(),
            **self.pools.as_luxos(user_suffix=user_suffix),
        }
    def as_vnish(self, user_suffix: str | None = None) -> dict:
        """Generates the configuration in the format suitable for VNish firmware.

        Everything is nested under a top-level "miner" key; for normal fan
        mode the cooling target is set from the temperature config.
        """
        main_cfg = {
            "miner": {
                **self.fan_mode.as_vnish(),
                **self.temperature.as_vnish(),
                **self.mining_mode.as_vnish(),
                **self.pools.as_vnish(user_suffix=user_suffix),
            }
        }
        if isinstance(self.fan_mode, FanModeNormal):
            # NOTE(review): assumes FanModeNormal.as_vnish() produced a
            # "cooling" -> "mode" nested dict -- a KeyError would be raised
            # otherwise; confirm against the fan config implementation.
            main_cfg["miner"]["cooling"]["mode"]["param"] = self.temperature.target
        return main_cfg
    def as_hammer(self, *args, **kwargs) -> dict:
        """Generates the configuration for Hammer miners (same format as modern Antminers)."""
        return self.as_am_modern(*args, **kwargs)

    @classmethod
    def from_dict(cls, dict_conf: dict) -> "MinerConfig":
        """Constructs a MinerConfig object from a dictionary.

        Missing sections are passed as None; each section's from_dict is
        responsible for supplying its default in that case.
        """
        return cls(
            pools=PoolConfig.from_dict(dict_conf.get("pools")),
            mining_mode=MiningModeConfig.from_dict(dict_conf.get("mining_mode")),
            fan_mode=FanModeConfig.from_dict(dict_conf.get("fan_mode")),
            temperature=TemperatureConfig.from_dict(dict_conf.get("temperature")),
        )

    @classmethod
    def from_api(cls, api_pools: dict) -> "MinerConfig":
        """Constructs a MinerConfig object from API pool data; other sections keep defaults."""
        return cls(pools=PoolConfig.from_api(api_pools))
    @classmethod
    def from_am_modern(cls, web_conf: dict) -> "MinerConfig":
        """Constructs a MinerConfig object from web configuration for modern Antminers."""
        return cls(
            pools=PoolConfig.from_am_modern(web_conf),
            mining_mode=MiningModeConfig.from_am_modern(web_conf),
            fan_mode=FanModeConfig.from_am_modern(web_conf),
        )

    @classmethod
    def from_hiveon_modern(cls, web_conf: dict) -> "MinerConfig":
        """Constructs a MinerConfig object from web configuration for Hiveon."""
        return cls(
            pools=PoolConfig.from_hiveon_modern(web_conf),
            mining_mode=MiningModeConfig.from_hiveon_modern(web_conf),
            fan_mode=FanModeConfig.from_hiveon_modern(web_conf),
        )

    @classmethod
    def from_elphapex(cls, web_conf: dict) -> "MinerConfig":
        """Constructs a MinerConfig object from web configuration for Elphapex miners."""
        return cls(
            pools=PoolConfig.from_elphapex(web_conf),
            mining_mode=MiningModeConfig.from_elphapex(web_conf),
            fan_mode=FanModeConfig.from_elphapex(web_conf),
        )

    @classmethod
    def from_am_old(cls, web_conf: dict) -> "MinerConfig":
        """Constructs a MinerConfig object from web configuration for old versions of Antminers.

        Old Antminer web configs are parsed with the modern parser.
        """
        return cls.from_am_modern(web_conf)

    @classmethod
    def from_goldshell(cls, web_conf: dict) -> "MinerConfig":
        """Constructs a MinerConfig object from web configuration for Goldshell miners."""
        # NOTE(review): pools are parsed with the Antminer-modern parser --
        # presumably the dict layouts match; confirm against the Goldshell web API.
        return cls(pools=PoolConfig.from_am_modern(web_conf))

    @classmethod
    def from_goldshell_list(cls, web_conf: list) -> "MinerConfig":
        """Constructs a MinerConfig object from a web pool list for Goldshell miners."""
        return cls(pools=PoolConfig.from_goldshell(web_conf))

    @classmethod
    def from_goldshell_byte(cls, web_conf: list) -> "MinerConfig":
        """Constructs a MinerConfig object from web configuration for Goldshell Byte miners."""
        return cls(pools=PoolConfig.from_goldshell_byte(web_conf))

    @classmethod
    def from_inno(cls, web_pools: list) -> "MinerConfig":
        """Constructs a MinerConfig object from web configuration for Innosilicon miners."""
        return cls(pools=PoolConfig.from_inno(web_pools))
    @classmethod
    def from_bosminer(cls, toml_conf: dict) -> "MinerConfig":
        """Constructs a MinerConfig object from the parsed bosminer.toml file,
        the inverse of `as_bosminer` (which dumps a dict for writing as toml)."""
        return cls(
            pools=PoolConfig.from_bosminer(toml_conf),
            mining_mode=MiningModeConfig.from_bosminer(toml_conf),
            fan_mode=FanModeConfig.from_bosminer(toml_conf),
            temperature=TemperatureConfig.from_bosminer(toml_conf),
        )

    @classmethod
    def from_boser(cls, grpc_miner_conf: dict) -> "MinerConfig":
        """Constructs a MinerConfig object from gRPC configuration for BOSer."""
        return cls(
            pools=PoolConfig.from_boser(grpc_miner_conf),
            mining_mode=MiningModeConfig.from_boser(grpc_miner_conf),
            fan_mode=FanModeConfig.from_boser(grpc_miner_conf),
            temperature=TemperatureConfig.from_boser(grpc_miner_conf),
        )

    @classmethod
    def from_epic(cls, web_conf: dict) -> "MinerConfig":
        """Constructs a MinerConfig object from web configuration for ePIC miners."""
        return cls(
            pools=PoolConfig.from_epic(web_conf),
            fan_mode=FanModeConfig.from_epic(web_conf),
            temperature=TemperatureConfig.from_epic(web_conf),
            mining_mode=MiningModeConfig.from_epic(web_conf),
        )

    @classmethod
    def from_vnish(
        cls, web_settings: dict, web_presets: list[dict], web_perf_summary: dict
    ) -> "MinerConfig":
        """Constructs a MinerConfig object from web settings for VNish miners.

        Args:
            web_settings: Settings payload from the VNish web API.
            web_presets: Available performance presets.
            web_perf_summary: Current performance summary.
        """
        return cls(
            pools=PoolConfig.from_vnish(web_settings),
            fan_mode=FanModeConfig.from_vnish(web_settings),
            temperature=TemperatureConfig.from_vnish(web_settings),
            mining_mode=MiningModeConfig.from_vnish(
                web_settings, web_presets, web_perf_summary
            ),
        )

    @classmethod
    def from_auradine(cls, web_conf: dict) -> "MinerConfig":
        """Constructs a MinerConfig object from web configuration for Auradine miners.

        Expects "pools", "fan", and "mode" keys in the web config; missing
        keys raise KeyError.
        """
        return cls(
            pools=PoolConfig.from_api(web_conf["pools"]),
            fan_mode=FanModeConfig.from_auradine(web_conf["fan"]),
            mining_mode=MiningModeConfig.from_auradine(web_conf["mode"]),
        )
    @classmethod
    def from_mara(cls, web_miner_config: dict) -> "MinerConfig":
        """Constructs a MinerConfig object from web configuration for MARA firmware."""
        return cls(
            pools=PoolConfig.from_mara(web_miner_config),
            fan_mode=FanModeConfig.from_mara(web_miner_config),
            mining_mode=MiningModeConfig.from_mara(web_miner_config),
        )

    @classmethod
    def from_espminer(cls, web_system_info: dict) -> "MinerConfig":
        """Constructs a MinerConfig object from ESPMiner system info."""
        return cls(
            pools=PoolConfig.from_espminer(web_system_info),
            fan_mode=FanModeConfig.from_espminer(web_system_info),
        )

    @classmethod
    def from_iceriver(cls, web_userpanel: dict) -> "MinerConfig":
        """Constructs a MinerConfig object from IceRiver user panel data (pools only)."""
        return cls(
            pools=PoolConfig.from_iceriver(web_userpanel),
        )

    @classmethod
    def from_luxos(
        cls,
        rpc_tempctrl: dict,
        rpc_fans: dict,
        rpc_pools: dict,
        rpc_groups: dict,
        rpc_config: dict,
        rpc_profiles: dict,
    ) -> "MinerConfig":
        """Constructs a MinerConfig object from the LuxOS RPC responses
        (tempctrl, fans, pools, groups, config, and profiles)."""
        return cls(
            temperature=TemperatureConfig.from_luxos(rpc_tempctrl=rpc_tempctrl),
            fan_mode=FanModeConfig.from_luxos(
                rpc_tempctrl=rpc_tempctrl, rpc_fans=rpc_fans
            ),
            pools=PoolConfig.from_luxos(rpc_pools=rpc_pools, rpc_groups=rpc_groups),
            mining_mode=MiningModeConfig.from_luxos(
                rpc_config=rpc_config, rpc_profiles=rpc_profiles
            ),
        )

    @classmethod
    def from_hammer(cls, *args, **kwargs) -> "MinerConfig":
        """Constructs a MinerConfig for Hammer miners (same format as modern Antminers)."""
        return cls.from_am_modern(*args, **kwargs)

    @classmethod
    def from_btminer_v3(
        cls, rpc_pools: dict, rpc_settings: dict, rpc_device_info: dict
    ) -> "MinerConfig":
        """Constructs a MinerConfig object from BTMiner V3 RPC responses.

        The pool list is nested under the "msg" key of the pools response.
        """
        return cls(
            pools=PoolConfig.from_btminer_v3(rpc_pools=rpc_pools["msg"]),
            mining_mode=MiningModeConfig.from_btminer_v3(
                rpc_device_info=rpc_device_info, rpc_settings=rpc_settings
            ),
        )
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/config/base.py | pyasic/config/base.py | # ------------------------------------------------------------------------------
# Copyright 2022 Upstream Data Inc -
# -
# Licensed under the Apache License, Version 2.0 (the "License"); -
# you may not use this file except in compliance with the License. -
# You may obtain a copy of the License at -
# -
# http://www.apache.org/licenses/LICENSE-2.0 -
# -
# Unless required by applicable law or agreed to in writing, software -
# distributed under the License is distributed on an "AS IS" BASIS, -
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -
# See the License for the specific language governing permissions and -
# limitations under the License. -
# ------------------------------------------------------------------------------
from __future__ import annotations
from enum import Enum
from typing import Any
from pydantic import BaseModel
class MinerConfigOption(Enum):
@classmethod
def from_dict(cls, dict_conf: dict | None):
return cls.default()
def as_am_modern(self) -> dict:
return self.value.as_am_modern()
def as_hiveon_modern(self) -> dict:
return self.value.as_hiveon_modern()
def as_am_old(self) -> dict:
return self.value.as_am_old()
def as_wm(self) -> dict:
return self.value.as_wm()
def as_inno(self) -> dict:
return self.value.as_inno()
def as_goldshell(self) -> dict:
return self.value.as_goldshell()
def as_avalon(self) -> dict:
return self.value.as_avalon()
def as_bosminer(self) -> dict:
return self.value.as_bosminer()
def as_boser(self) -> dict:
return self.value.as_boser
def as_epic(self) -> dict:
return self.value.as_epic()
def as_vnish(self) -> dict:
return self.value.as_vnish()
def as_auradine(self) -> dict:
return self.value.as_auradine()
def as_mara(self) -> dict:
return self.value.as_mara()
def as_espminer(self) -> dict:
return self.value.as_espminer()
def as_luxos(self) -> dict:
return self.value.as_luxos()
def as_elphapex(self) -> dict:
return self.value.as_elphapex()
def __call__(self, *args, **kwargs):
return self.value(*args, **kwargs)
@classmethod
def default(cls):
pass
def __getitem__(self, item):
try:
return getattr(self, item)
except AttributeError:
raise KeyError
class MinerConfigValue(BaseModel):
    """Base class for concrete config values.

    Subclasses override the ``as_<firmware>`` serializers they support; each
    base implementation returns an empty dict so unsupported formats merge to
    a no-op in the calling config.
    """

    @classmethod
    def from_dict(cls, dict_conf: dict):
        """Construct from a dict config; the base implementation ignores it."""
        return cls()

    def as_dict(self) -> dict:
        """Serialize to a plain dict via pydantic's model_dump."""
        return self.model_dump()

    def as_am_modern(self, *args: Any, **kwargs: Any) -> Any:
        return {}

    def as_hiveon_modern(self, *args: Any, **kwargs: Any) -> Any:
        return {}

    def as_am_old(self, *args: Any, **kwargs: Any) -> Any:
        return {}

    def as_wm(self, *args: Any, **kwargs: Any) -> Any:
        return {}

    def as_btminer_v3(self, *args: Any, **kwargs: Any) -> Any:
        return {}

    def as_inno(self, *args: Any, **kwargs: Any) -> Any:
        return {}

    def as_goldshell(self, *args: Any, **kwargs: Any) -> Any:
        return {}

    def as_avalon(self, *args: Any, **kwargs: Any) -> Any:
        return {}

    def as_bosminer(self, *args: Any, **kwargs: Any) -> Any:
        return {}

    def as_boser(self, *args: Any, **kwargs: Any) -> Any:
        return {}

    def as_epic(self, *args: Any, **kwargs: Any) -> Any:
        return {}

    def as_vnish(self, *args: Any, **kwargs: Any) -> Any:
        return {}

    def as_auradine(self, *args: Any, **kwargs: Any) -> Any:
        return {}

    def as_mara(self, *args: Any, **kwargs: Any) -> Any:
        return {}

    def as_espminer(self, *args: Any, **kwargs: Any) -> Any:
        return {}

    def as_luxos(self, *args: Any, **kwargs: Any) -> Any:
        return {}

    def as_elphapex(self, *args: Any, **kwargs: Any) -> Any:
        return {}

    def __getitem__(self, item):
        """Dict-style attribute access; raises KeyError when missing."""
        try:
            return getattr(self, item)
        except AttributeError:
            raise KeyError
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/config/pools.py | pyasic/config/pools.py | # ------------------------------------------------------------------------------
# Copyright 2022 Upstream Data Inc -
# -
# Licensed under the Apache License, Version 2.0 (the "License"); -
# you may not use this file except in compliance with the License. -
# You may obtain a copy of the License at -
# -
# http://www.apache.org/licenses/LICENSE-2.0 -
# -
# Unless required by applicable law or agreed to in writing, software -
# distributed under the License is distributed on an "AS IS" BASIS, -
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -
# See the License for the specific language governing permissions and -
# limitations under the License. -
# ------------------------------------------------------------------------------
from __future__ import annotations
import random
import string
from typing import Any
from pydantic import Field
from pyasic.config.base import MinerConfigValue
from pyasic.web.braiins_os.proto.braiins.bos.v1 import (
PoolConfiguration,
PoolGroupConfiguration,
Quota,
SaveAction,
SetPoolGroupsRequest,
)
class Pool(MinerConfigValue):
    """A single stratum pool endpoint with per-firmware serializers.

    Each ``as_<firmware>`` method emits the key names that firmware expects;
    when given, ``user_suffix`` is appended to the worker/user string.
    """

    # Stratum URL of the pool.
    url: str
    # Worker/user string.
    user: str
    # Pool password.
    password: str

    def as_am_modern(self, user_suffix: str | None = None) -> dict:
        return {
            "url": self.url,
            "user": f"{self.user}{user_suffix or ''}",
            "pass": self.password,
        }

    def as_hiveon_modern(self, user_suffix: str | None = None) -> dict:
        return {
            "url": self.url,
            "user": f"{self.user}{user_suffix or ''}",
            "pass": self.password,
        }

    def as_elphapex(self, user_suffix: str | None = None) -> dict:
        return {
            "url": self.url,
            "user": f"{self.user}{user_suffix or ''}",
            "pass": self.password,
        }

    def as_wm(self, idx: int = 1, user_suffix: str | None = None) -> dict:
        # Whatsminer keys carry a 1-based pool slot index (pool_1, worker_1, ...).
        return {
            f"pool_{idx}": self.url,
            f"worker_{idx}": f"{self.user}{user_suffix or ''}",
            f"passwd_{idx}": self.password,
        }

    def as_btminer_v3(self, user_suffix: str | None = None) -> dict:
        return {
            "pool": self.url,
            "worker": f"{self.user}{user_suffix or ''}",
            "passwd": self.password,
        }
    def as_am_old(
        self, *args: Any, user_suffix: str | None = None, **kwargs: Any
    ) -> dict:
        # The pool slot index may arrive positionally or as the "idx" kwarg.
        idx = args[0] if args else kwargs.get("idx", 1)
        return {
            f"_ant_pool{idx}url": self.url,
            f"_ant_pool{idx}user": f"{self.user}{user_suffix or ''}",
            f"_ant_pool{idx}pw": self.password,
        }

    def as_goldshell(self, user_suffix: str | None = None) -> dict:
        return {
            "url": self.url,
            "user": f"{self.user}{user_suffix or ''}",
            "pass": self.password,
        }

    def as_avalon(self, user_suffix: str | None = None) -> str:
        # Avalon takes a single comma-joined "url,user,password" string.
        return ",".join([self.url, f"{self.user}{user_suffix or ''}", self.password])

    def as_inno(
        self, *args: Any, user_suffix: str | None = None, **kwargs: Any
    ) -> dict:
        # The pool slot index may arrive positionally or as the "idx" kwarg.
        idx = args[0] if args else kwargs.get("idx", 1)
        return {
            f"Pool{idx}": self.url,
            f"UserName{idx}": f"{self.user}{user_suffix or ''}",
            f"Password{idx}": self.password,
        }

    def as_bosminer(self, user_suffix: str | None = None) -> dict:
        return {
            "url": self.url,
            "user": f"{self.user}{user_suffix or ''}",
            "password": self.password,
        }

    def as_auradine(self, user_suffix: str | None = None) -> dict:
        return {
            "url": self.url,
            "user": f"{self.user}{user_suffix or ''}",
            "pass": self.password,
        }

    def as_epic(self, user_suffix: str | None = None) -> dict:
        return {
            "pool": self.url,
            "login": f"{self.user}{user_suffix or ''}",
            "password": self.password,
        }

    def as_mara(self, user_suffix: str | None = None) -> dict:
        return {
            "url": self.url,
            "user": f"{self.user}{user_suffix or ''}",
            "pass": self.password,
        }

    def as_espminer(self, user_suffix: str | None = None) -> dict:
        return {
            "stratumURL": self.url,
            "stratumUser": f"{self.user}{user_suffix or ''}",
            "stratumPassword": self.password,
        }

    def as_boser(self, user_suffix: str | None = None) -> PoolConfiguration:
        # BOSer takes a PoolConfiguration message rather than a plain dict.
        return PoolConfiguration(
            url=self.url,
            user=f"{self.user}{user_suffix or ''}",
            password=self.password,
            enabled=True,
        )

    def as_vnish(self, user_suffix: str | None = None) -> dict:
        return {
            "url": self.url,
            "user": f"{self.user}{user_suffix or ''}",
            "pass": self.password,
        }
    @classmethod
    def from_dict(cls, dict_conf: dict | None) -> Pool:
        """Construct from a {"url", "user", "password"} dict; raises ValueError on None."""
        if dict_conf is None:
            raise ValueError("dict_conf cannot be None")
        return cls(
            url=dict_conf["url"], user=dict_conf["user"], password=dict_conf["password"]
        )

    @classmethod
    def from_api(cls, api_pool: dict) -> Pool:
        # The API pool listing does not expose the password; "x" is used as a
        # stand-in.
        return cls(url=api_pool["URL"], user=api_pool["User"], password="x")

    @classmethod
    def from_btminer_v3(cls, api_pool: dict) -> Pool:
        # Password is not exposed by this API either; "x" is a stand-in.
        return cls(url=api_pool["url"], user=api_pool["account"], password="x")

    @classmethod
    def from_epic(cls, api_pool: dict) -> Pool:
        return cls(
            url=api_pool["pool"], user=api_pool["login"], password=api_pool["password"]
        )

    @classmethod
    def from_am_modern(cls, web_pool: dict) -> Pool:
        return cls(
            url=web_pool["url"], user=web_pool["user"], password=web_pool["pass"]
        )

    @classmethod
    def from_hiveon_modern(cls, web_pool: dict) -> Pool:
        return cls(
            url=web_pool["url"], user=web_pool["user"], password=web_pool["pass"]
        )

    @classmethod
    def from_elphapex(cls, web_pool: dict) -> Pool:
        return cls(
            url=web_pool["url"], user=web_pool["user"], password=web_pool["pass"]
        )

    # TODO: check if this is accurate, user/username, pass/password
    @classmethod
    def from_goldshell(cls, web_pool: dict) -> Pool:
        return cls(
            url=web_pool["url"], user=web_pool["user"], password=web_pool["pass"]
        )

    @classmethod
    def from_inno(cls, web_pool: dict) -> Pool:
        return cls(
            url=web_pool["url"], user=web_pool["user"], password=web_pool["pass"]
        )

    @classmethod
    def from_bosminer(cls, toml_pool_conf: dict) -> Pool:
        # The password entry is optional in bosminer.toml.
        return cls(
            url=toml_pool_conf["url"],
            user=toml_pool_conf["user"],
            password=toml_pool_conf.get("password", ""),
        )

    @classmethod
    def from_vnish(cls, web_pool: dict) -> Pool:
        # VNish stores the URL without a scheme; re-add the stratum prefix.
        return cls(
            url="stratum+tcp://" + web_pool["url"],
            user=web_pool["user"],
            password=web_pool["pass"],
        )

    @classmethod
    def from_boser(cls, grpc_pool: dict) -> Pool:
        return cls(
            url=grpc_pool["url"],
            user=grpc_pool["user"],
            password=grpc_pool["password"],
        )

    @classmethod
    def from_mara(cls, web_pool: dict) -> Pool:
        return cls(
            url=web_pool["url"],
            user=web_pool["user"],
            password=web_pool["pass"],
        )

    @classmethod
    def from_espminer(cls, web_system_info: dict) -> Pool:
        # ESPMiner reports host and port separately; rebuild the stratum URL.
        url = f"stratum+tcp://{web_system_info['stratumURL']}:{web_system_info['stratumPort']}"
        return cls(
            url=url,
            user=web_system_info["stratumUser"],
            password=web_system_info.get("stratumPassword", ""),
        )

    @classmethod
    def from_luxos(cls, rpc_pools: dict) -> Pool:
        # LuxOS pool records use the same field names as the stock API.
        return cls.from_api(rpc_pools)

    @classmethod
    def from_iceriver(cls, web_pool: dict) -> Pool:
        return cls(
            url=web_pool["addr"],
            user=web_pool["user"],
            password=web_pool["pass"],
        )
class PoolGroup(MinerConfigValue):
    """A quota-weighted, named group of pools.

    Attributes:
        pools: Pools in priority order.
        quota: Relative share of work for this group.
        name: Group name; a random 6-character name is generated when unset.
    """

    pools: list[Pool] = Field(default_factory=list)
    quota: int = 1
    name: str | None = None

    def model_post_init(self, __context) -> None:
        # Bug fix: this was previously named ``__post_init__``, which is a
        # dataclasses hook that pydantic never calls, so the fallback name
        # was never generated. ``model_post_init`` is the pydantic v2 hook
        # (the file uses v2's model_dump elsewhere).
        if self.name is None:
            self.name = "".join(
                random.choice(string.ascii_uppercase + string.digits) for _ in range(6)
            )  # generate random pool group name in case it isn't set
    def as_am_modern(self, user_suffix: str | None = None) -> list:
        """Serialize as exactly three Antminer pool entries, padding with blank pools."""
        pools = []
        idx = 0
        while idx < 3:
            if len(self.pools) > idx:
                pools.append(self.pools[idx].as_am_modern(user_suffix=user_suffix))
            else:
                pools.append(Pool(url="", user="", password="").as_am_modern())
            idx += 1
        return pools

    def as_hiveon_modern(self, user_suffix: str | None = None) -> list:
        """Serialize as exactly three Hiveon pool entries, padding with blank pools."""
        pools = []
        idx = 0
        while idx < 3:
            if len(self.pools) > idx:
                pools.append(self.pools[idx].as_hiveon_modern(user_suffix=user_suffix))
            else:
                pools.append(Pool(url="", user="", password="").as_hiveon_modern())
            idx += 1
        return pools

    def as_elphapex(self, user_suffix: str | None = None) -> list:
        """Serialize as exactly three Elphapex pool entries, padding with blank pools."""
        pools = []
        idx = 0
        while idx < 3:
            if len(self.pools) > idx:
                pools.append(self.pools[idx].as_elphapex(user_suffix=user_suffix))
            else:
                pools.append(Pool(url="", user="", password="").as_elphapex())
            idx += 1
        return pools

    def as_wm(self, *args: Any, user_suffix: str | None = None, **kwargs: Any) -> dict:
        """Serialize as flat pool_N/worker_N/passwd_N keys for slots 1-3."""
        pools: dict[str, str] = {}
        idx = 0
        while idx < 3:
            if len(self.pools) > idx:
                pools.update(**self.pools[idx].as_wm(idx + 1, user_suffix=user_suffix))
            else:
                pools.update(**Pool(url="", user="", password="").as_wm(idx + 1))
            idx += 1
        return pools
    def as_btminer_v3(self, user_suffix: str | None = None) -> list:
        """Serialize at most the first three pools for BTMiner V3 (no blank padding)."""
        return [pool.as_btminer_v3(user_suffix) for pool in self.pools[:3]]

    def as_am_old(
        self, *args: Any, user_suffix: str | None = None, **kwargs: Any
    ) -> dict:
        """Serialize as flat _ant_poolN* keys for slots 1-3, padding with blank pools."""
        pools: dict[str, str] = {}
        idx = 0
        while idx < 3:
            if len(self.pools) > idx:
                pools.update(
                    **self.pools[idx].as_am_old(idx + 1, user_suffix=user_suffix)
                )
            else:
                pools.update(**Pool(url="", user="", password="").as_am_old(idx + 1))
            idx += 1
        return pools

    def as_goldshell(self, user_suffix: str | None = None) -> list:
        return [pool.as_goldshell(user_suffix) for pool in self.pools]

    def as_avalon(self, user_suffix: str | None = None) -> str:
        # Only the first pool is serialized (or a blank one if none exist).
        if len(self.pools) > 0:
            return self.pools[0].as_avalon(user_suffix=user_suffix)
        return Pool(url="", user="", password="").as_avalon()

    def as_inno(
        self, *args: Any, user_suffix: str | None = None, **kwargs: Any
    ) -> dict:
        """Serialize as flat PoolN/UserNameN/PasswordN keys for slots 1-3."""
        pools: dict[str, str] = {}
        idx = 0
        while idx < 3:
            if len(self.pools) > idx:
                pools.update(
                    **self.pools[idx].as_inno(idx + 1, user_suffix=user_suffix)
                )
            else:
                pools.update(**Pool(url="", user="", password="").as_inno(idx + 1))
            idx += 1
        return pools

    def as_bosminer(self, user_suffix: str | None = None) -> dict:
        """Serialize as a bosminer group table with name, pool list, and optional quota."""
        if len(self.pools) > 0:
            conf: dict[str, Any] = {
                "name": self.name,
                "pool": [
                    pool.as_bosminer(user_suffix=user_suffix) for pool in self.pools
                ],
            }
            if self.quota is not None:
                conf["quota"] = self.quota
            return conf
        return {"name": "Group", "pool": []}

    def as_auradine(self, user_suffix: str | None = None) -> list:
        return [p.as_auradine(user_suffix=user_suffix) for p in self.pools]

    def as_epic(self, user_suffix: str | None = None) -> list:
        return [p.as_epic(user_suffix=user_suffix) for p in self.pools]

    def as_mara(self, user_suffix: str | None = None) -> list:
        return [p.as_mara(user_suffix=user_suffix) for p in self.pools]

    def as_espminer(self, user_suffix: str | None = None) -> dict:
        # Only the first pool is serialized; assumes at least one pool is
        # configured (IndexError otherwise).
        return self.pools[0].as_espminer(user_suffix=user_suffix)
def as_boser(self, user_suffix: str | None = None) -> PoolGroupConfiguration:
return PoolGroupConfiguration(
name=self.name or "",
quota=Quota(value=self.quota),
pools=[p.as_boser() for p in self.pools],
)
    def as_vnish(self, user_suffix: str | None = None) -> dict:
        """Serialize all pools under a single "pools" key for VNish."""
        return {"pools": [p.as_vnish(user_suffix=user_suffix) for p in self.pools]}
    @classmethod
    def from_dict(cls, dict_conf: dict | None) -> PoolGroup:
        """Construct from a dict with "pools" (required) and optional
        "quota"/"name"; None yields an empty group."""
        if dict_conf is None:
            return cls()
        cls_conf = {}
        if dict_conf.get("quota") is not None:
            cls_conf["quota"] = dict_conf["quota"]
        if dict_conf.get("name") is not None:
            cls_conf["name"] = dict_conf["name"]
        cls_conf["pools"] = [Pool.from_dict(p) for p in dict_conf["pools"]]
        return cls(**cls_conf)

    @classmethod
    def from_api(cls, api_pool_list: list) -> PoolGroup:
        pools = []
        for pool in api_pool_list:
            pools.append(Pool.from_api(pool))
        return cls(pools=pools)

    @classmethod
    def from_btminer_v3(cls, api_pool_list: list) -> PoolGroup:
        pools = []
        for pool in api_pool_list:
            pools.append(Pool.from_btminer_v3(pool))
        return cls(pools=pools)

    @classmethod
    def from_epic(cls, api_pool_list: list) -> PoolGroup:
        pools = []
        for pool in api_pool_list:
            pools.append(Pool.from_epic(pool))
        return cls(pools=pools)

    @classmethod
    def from_am_modern(cls, web_pool_list: list) -> PoolGroup:
        pools = []
        for pool in web_pool_list:
            pools.append(Pool.from_am_modern(pool))
        return cls(pools=pools)

    @classmethod
    def from_hiveon_modern(cls, web_pool_list: list) -> PoolGroup:
        pools = []
        for pool in web_pool_list:
            pools.append(Pool.from_hiveon_modern(pool))
        return cls(pools=pools)

    @classmethod
    def from_elphapex(cls, web_pool_list: list) -> PoolGroup:
        pools = []
        for pool in web_pool_list:
            pools.append(Pool.from_elphapex(pool))
        return cls(pools=pools)

    @classmethod
    def from_goldshell(cls, web_pools: list) -> PoolGroup:
        return cls(pools=[Pool.from_goldshell(p) for p in web_pools])

    @classmethod
    def from_inno(cls, web_pools: list) -> PoolGroup:
        return cls(pools=[Pool.from_inno(p) for p in web_pools])

    @classmethod
    def from_bosminer(cls, toml_group_conf: dict) -> PoolGroup:
        """Construct from a bosminer.toml group table; a group with no "pool"
        list yields an empty group."""
        if toml_group_conf.get("pool") is not None:
            return cls(
                name=toml_group_conf["name"],
                quota=toml_group_conf.get("quota", 1),
                pools=[Pool.from_bosminer(p) for p in toml_group_conf["pool"]],
            )
        return cls()

    @classmethod
    def from_vnish(cls, web_settings_pools: dict) -> PoolGroup:
        # Skip placeholder entries that have an empty URL.
        return cls(
            pools=[Pool.from_vnish(p) for p in web_settings_pools if p["url"] != ""]
        )

    @classmethod
    def from_boser(cls, grpc_pool_group: dict) -> PoolGroup:
        """Construct from a BOSer gRPC group dict; malformed input (missing
        keys/indices) yields an empty group."""
        try:
            return cls(
                pools=[Pool.from_boser(p) for p in grpc_pool_group["pools"]],
                name=grpc_pool_group["name"],
                quota=(
                    grpc_pool_group["quota"]["value"]
                    if grpc_pool_group.get("quota") is not None
                    else 1
                ),
            )
        except LookupError:
            return cls()

    @classmethod
    def from_mara(cls, web_config_pools: dict) -> PoolGroup:
        return cls(pools=[Pool.from_mara(pool_conf) for pool_conf in web_config_pools])

    @classmethod
    def from_espminer(cls, web_system_info: dict) -> PoolGroup:
        # ESPMiner exposes a single pool in its system info payload.
        return cls(pools=[Pool.from_espminer(web_system_info)])

    @classmethod
    def from_iceriver(cls, web_userpanel: dict) -> PoolGroup:
        return cls(
            pools=[
                Pool.from_iceriver(web_pool)
                for web_pool in web_userpanel["data"]["pools"]
            ]
        )
class PoolConfig(MinerConfigValue):
    """An ordered collection of pool groups for a miner."""

    # Pool groups in priority order.
    groups: list[PoolGroup] = Field(default_factory=list)

    @classmethod
    def default(cls) -> PoolConfig:
        """Return an empty pool configuration (no groups)."""
        return cls()

    @classmethod
    def from_dict(cls, dict_conf: dict | None) -> PoolConfig:
        """Construct from a dict with a "groups" list; None yields the default."""
        if dict_conf is None:
            return cls.default()
        return cls(groups=[PoolGroup.from_dict(g) for g in dict_conf["groups"]])

    @classmethod
    def simple(cls, pools: list[Pool | dict[str, str]]) -> PoolConfig:
        """Build a single-group config from Pool objects and/or pool dicts."""
        group_pools = []
        for pool in pools:
            if isinstance(pool, dict):
                pool = Pool(**pool)
            group_pools.append(pool)
        return cls(groups=[PoolGroup(pools=group_pools)])
    def as_am_modern(self, user_suffix: str | None = None) -> dict:
        # Only the first group is serialized for this format; an empty config
        # falls back to a blank group.
        if len(self.groups) > 0:
            return {"pools": self.groups[0].as_am_modern(user_suffix=user_suffix)}
        return {"pools": PoolGroup().as_am_modern()}

    def as_hiveon_modern(self, user_suffix: str | None = None) -> dict:
        if len(self.groups) > 0:
            return {"pools": self.groups[0].as_hiveon_modern(user_suffix=user_suffix)}
        return {"pools": PoolGroup().as_hiveon_modern()}

    def as_elphapex(self, user_suffix: str | None = None) -> dict:
        if len(self.groups) > 0:
            return {"pools": self.groups[0].as_elphapex(user_suffix=user_suffix)}
        return {"pools": PoolGroup().as_elphapex()}

    def as_wm(self, *args: Any, user_suffix: str | None = None, **kwargs: Any) -> dict:
        if len(self.groups) > 0:
            return {"pools": self.groups[0].as_wm(user_suffix=user_suffix)}
        return {"pools": PoolGroup().as_wm()}

    def as_btminer_v3(self, user_suffix: str | None = None) -> dict:
        if len(self.groups) > 0:
            return {"pools": self.groups[0].as_btminer_v3(user_suffix=user_suffix)}
        return {"pools": PoolGroup().as_btminer_v3()}

    def as_am_old(
        self, *args: Any, user_suffix: str | None = None, **kwargs: Any
    ) -> dict:
        # Old Antminer output is flat (no "pools" wrapper key).
        if len(self.groups) > 0:
            return self.groups[0].as_am_old(user_suffix=user_suffix)
        return PoolGroup().as_am_old()

    def as_goldshell(self, user_suffix: str | None = None) -> dict:
        if len(self.groups) > 0:
            return {"pools": self.groups[0].as_goldshell(user_suffix=user_suffix)}
        return {"pools": PoolGroup().as_goldshell()}

    def as_avalon(self, user_suffix: str | None = None) -> dict:
        if len(self.groups) > 0:
            return {"pools": self.groups[0].as_avalon(user_suffix=user_suffix)}
        return {"pools": PoolGroup().as_avalon()}

    def as_inno(
        self, *args: Any, user_suffix: str | None = None, **kwargs: Any
    ) -> dict:
        # Innosilicon output is flat (no "pools" wrapper key).
        if len(self.groups) > 0:
            return self.groups[0].as_inno(user_suffix=user_suffix)
        return PoolGroup().as_inno()

    def as_bosminer(self, user_suffix: str | None = None) -> dict:
        # bosminer supports multiple groups; all of them are serialized.
        if len(self.groups) > 0:
            return {
                "group": [g.as_bosminer(user_suffix=user_suffix) for g in self.groups]
            }
        return {"group": [PoolGroup().as_bosminer()]}

    def as_boser(self, user_suffix: str | None = None) -> dict:
        # Emits a save-and-apply SetPoolGroupsRequest message for gRPC.
        return {
            "set_pool_groups": SetPoolGroupsRequest(
                save_action=SaveAction(SaveAction.SAVE_AND_APPLY),
                pool_groups=[g.as_boser(user_suffix=user_suffix) for g in self.groups],
            )
        }
def as_auradine(self, user_suffix: str | None = None) -> dict:
if len(self.groups) > 0:
return {
"updatepools": {
"pools": self.groups[0].as_auradine(user_suffix=user_suffix)
}
}
return {"updatepools": {"pools": PoolGroup().as_auradine()}}
def as_epic(self, user_suffix: str | None = None) -> dict:
if len(self.groups) > 0:
return {
"pools": {
"coin": "Btc",
"stratum_configs": self.groups[0].as_epic(user_suffix=user_suffix),
"unique_id": False,
}
}
return {
"pools": {
"coin": "Btc",
"stratum_configs": [PoolGroup().as_epic()],
"unique_id": False,
}
}
def as_mara(self, user_suffix: str | None = None) -> dict:
if len(self.groups) > 0:
return {"pools": self.groups[0].as_mara(user_suffix=user_suffix)}
return {"pools": []}
def as_espminer(self, user_suffix: str | None = None) -> dict:
return self.groups[0].as_espminer(user_suffix=user_suffix)
def as_luxos(self, user_suffix: str | None = None) -> dict:
return {}
def as_vnish(self, user_suffix: str | None = None) -> dict:
return self.groups[0].as_vnish(user_suffix=user_suffix)
@classmethod
def from_api(cls, api_pools: dict) -> PoolConfig:
try:
pool_data = api_pools["POOLS"]
except KeyError:
return PoolConfig.default()
pool_data = sorted(pool_data, key=lambda x: int(x["POOL"]))
return cls(groups=[PoolGroup.from_api(pool_data)])
@classmethod
def from_btminer_v3(cls, rpc_pools: dict) -> PoolConfig:
try:
pool_data = rpc_pools["pools"]
except KeyError:
return PoolConfig.default()
pool_data = sorted(pool_data, key=lambda x: int(x["id"]))
return cls(groups=[PoolGroup.from_btminer_v3(pool_data)])
@classmethod
def from_epic(cls, web_conf: dict) -> PoolConfig:
pool_data = web_conf["StratumConfigs"]
return cls(groups=[PoolGroup.from_epic(pool_data)])
@classmethod
def from_am_modern(cls, web_conf: dict) -> PoolConfig:
try:
pool_data = web_conf["pools"]
except KeyError:
return cls(groups=[])
return cls(groups=[PoolGroup.from_am_modern(pool_data)])
@classmethod
def from_hiveon_modern(cls, web_conf: dict) -> PoolConfig:
try:
pool_data = web_conf["pools"]
except KeyError:
return cls(groups=[])
return cls(groups=[PoolGroup.from_hiveon_modern(pool_data)])
@classmethod
def from_elphapex(cls, web_conf: dict) -> PoolConfig:
pool_data = web_conf["pools"]
return cls(groups=[PoolGroup.from_elphapex(pool_data)])
@classmethod
def from_goldshell(cls, web_pools: list) -> PoolConfig:
return cls(groups=[PoolGroup.from_goldshell(web_pools)])
@classmethod
def from_goldshell_byte(cls, web_pools: list) -> PoolConfig:
return cls(
groups=[
PoolGroup.from_goldshell(g["pools"])
for g in web_pools
if len(g["pools"]) > 0
]
)
@classmethod
def from_inno(cls, web_pools: list) -> PoolConfig:
return cls(groups=[PoolGroup.from_inno(web_pools)])
@classmethod
def from_bosminer(cls, toml_conf: dict) -> PoolConfig:
if toml_conf.get("group") is None:
return cls()
return cls(groups=[PoolGroup.from_bosminer(g) for g in toml_conf["group"]])
@classmethod
def from_vnish(cls, web_settings: dict) -> PoolConfig:
try:
return cls(groups=[PoolGroup.from_vnish(web_settings["miner"]["pools"])])
except LookupError:
return cls()
@classmethod
def from_boser(cls, grpc_miner_conf: dict) -> PoolConfig:
try:
return cls(
groups=[
PoolGroup.from_boser(group)
for group in grpc_miner_conf["poolGroups"]
]
)
except LookupError:
return cls()
@classmethod
def from_mara(cls, web_config: dict) -> PoolConfig:
return cls(groups=[PoolGroup.from_mara(web_config["pools"])])
@classmethod
def from_espminer(cls, web_system_info: dict) -> PoolConfig:
return cls(groups=[PoolGroup.from_espminer(web_system_info)])
@classmethod
def from_iceriver(cls, web_userpanel: dict) -> PoolConfig:
return cls(groups=[PoolGroup.from_iceriver(web_userpanel)])
@classmethod
def from_luxos(cls, rpc_groups: dict, rpc_pools: dict) -> PoolConfig:
return cls(
groups=[
PoolGroup(
pools=[
Pool.from_luxos(pool)
for pool in rpc_pools["POOLS"]
if pool["GROUP"] == group["GROUP"]
],
name=group["Name"],
quota=group["Quota"],
)
for group in rpc_groups["GROUPS"]
]
)
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/config/mining/presets.py | pyasic/config/mining/presets.py | from pyasic.config.base import MinerConfigValue
class MiningPreset(MinerConfigValue):
    """A named firmware tuning preset with optional power/hashrate metadata."""

    name: str | None = None
    power: int | None = None
    hashrate: int | None = None
    tuned: bool | None = None
    modded_psu: bool | None = None
    frequency: int | None = None
    voltage: float | None = None

    def as_vnish(self) -> dict:
        """Serialize for vnish: only the preset name is sent."""
        return {} if self.name is None else {"preset": self.name}

    @classmethod
    def from_vnish(cls, web_preset: dict):
        """Parse a vnish web API preset entry.

        The ``pretty`` field packs power and hashrate as ``"<N>watt~<M><unit>"``;
        when there is no ``~`` separator, both values are left unset.
        """
        preset_name = web_preset["name"]
        pretty_parts = web_preset["pretty"].split("~")
        if len(pretty_parts) == 1:
            watts = None
            rate = None
        else:
            watts = pretty_parts[0].replace("watt", "").strip()
            rate = pretty_parts[1]
            for unit_token in ("TH", "GH", "MH", " LC"):
                rate = rate.replace(unit_token, "")
            rate = rate.strip()
        return cls(
            name=preset_name,
            power=watts,
            hashrate=rate,
            tuned=web_preset["status"] == "tuned",
            modded_psu=web_preset["modded_psu_required"],
        )

    @classmethod
    def from_luxos(cls, profile: dict):
        """Parse a LuxOS tuning profile entry."""
        return cls(
            name=profile["Profile Name"],
            power=profile["Watts"],
            hashrate=round(profile["Hashrate"]),
            tuned=profile["IsTuned"],
            frequency=profile["Frequency"],
            voltage=profile["Voltage"],
        )
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/config/mining/algo.py | pyasic/config/mining/algo.py | from __future__ import annotations
from dataclasses import field
from typing import Any, TypeVar
from pyasic.config.base import MinerConfigOption, MinerConfigValue
class StandardTuneAlgo(MinerConfigValue):
    """Default tuner algorithm selection."""

    mode: str = field(init=False, default="standard")

    def as_epic(self) -> str:
        # ePIC has no distinct "standard" algorithm; delegate to the
        # voltage optimizer's name.
        return VOptAlgo().as_epic()
class VOptAlgo(MinerConfigValue):
    """Voltage-optimizer tuner algorithm."""

    mode: str = field(init=False, default="voltage_optimizer")

    def as_epic(self) -> str:
        # Name of this algorithm in the ePIC firmware API.
        return "VoltageOptimizer"
class BoardTuneAlgo(MinerConfigValue):
    """Per-board tuner algorithm."""

    mode: str = field(init=False, default="board_tune")

    def as_epic(self) -> str:
        # Name of this algorithm in the ePIC firmware API.
        return "BoardTune"
class ChipTuneAlgo(MinerConfigValue):
    """Per-chip tuner algorithm."""

    mode: str = field(init=False, default="chip_tune")

    def as_epic(self) -> str:
        # Name of this algorithm in the ePIC firmware API.
        return "ChipTune"
class TunerAlgo(MinerConfigOption):
    """Selector over the available tuner algorithm variants."""

    standard = StandardTuneAlgo
    voltage_optimizer = VOptAlgo
    board_tune = BoardTuneAlgo
    chip_tune = ChipTuneAlgo

    @classmethod
    def default(cls) -> StandardTuneAlgo:
        """The standard algorithm is the fallback choice."""
        return cls.standard()

    @classmethod
    def from_dict(
        cls, dict_conf: dict[Any, Any] | None
    ) -> StandardTuneAlgo | VOptAlgo | BoardTuneAlgo | ChipTuneAlgo:
        """Pick the variant named by ``dict_conf["mode"]``.

        Falls back to the default for a missing dict, a missing mode key,
        or an unrecognized mode name.
        """
        selected_mode = (dict_conf or {}).get("mode")
        if selected_mode is not None:
            variant = getattr(cls, selected_mode, None)
            if variant is not None:
                return variant().from_dict(dict_conf)
        return cls.default()
# TypeVar covering every concrete tuner algorithm class, for annotating
# functions that accept or return any one TunerAlgo variant.
TunerAlgoType = TypeVar(
    "TunerAlgoType",
    bound=StandardTuneAlgo | VOptAlgo | BoardTuneAlgo | ChipTuneAlgo,
)
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/config/mining/scaling.py | pyasic/config/mining/scaling.py | # ------------------------------------------------------------------------------
# Copyright 2022 Upstream Data Inc -
# -
# Licensed under the Apache License, Version 2.0 (the "License"); -
# you may not use this file except in compliance with the License. -
# You may obtain a copy of the License at -
# -
# http://www.apache.org/licenses/LICENSE-2.0 -
# -
# Unless required by applicable law or agreed to in writing, software -
# distributed under the License is distributed on an "AS IS" BASIS, -
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -
# See the License for the specific language governing permissions and -
# limitations under the License. -
# ------------------------------------------------------------------------------
from __future__ import annotations
from pyasic.config.base import MinerConfigValue
class ScalingShutdown(MinerConfigValue):
    """Shutdown behavior for power/hashrate scaling (BOS DPS)."""

    enabled: bool = False
    duration: int | None = None  # hours before shutdown, when supported

    @classmethod
    def from_dict(cls, dict_conf: dict | None) -> ScalingShutdown:
        """Build from a generic dict; missing keys fall back to defaults."""
        if dict_conf is None:
            return cls()
        return cls(
            enabled=dict_conf.get("enabled", False),
            duration=dict_conf.get("duration"),
        )

    @classmethod
    def from_bosminer(cls, power_scaling_conf: dict):
        """Parse BOSMiner TOML scaling keys.

        Returns None when the shutdown_enabled key is absent entirely.
        """
        if power_scaling_conf.get("shutdown_enabled") is None:
            return None
        return cls(
            enabled=power_scaling_conf["shutdown_enabled"],
            duration=power_scaling_conf.get("shutdown_duration"),
        )

    @classmethod
    def from_boser(cls, power_scaling_conf: dict):
        """Parse BOSer gRPC scaling keys.

        Returns None when shutdownEnabled is absent; duration is omitted
        when the nested shutdownDuration.hours path is missing.
        """
        sd_flag = power_scaling_conf.get("shutdownEnabled")
        if sd_flag is None:
            return None
        try:
            sd_hours = power_scaling_conf["shutdownDuration"]["hours"]
        except KeyError:
            return cls(enabled=sd_flag)
        return cls(enabled=sd_flag, duration=sd_hours)

    def as_bosminer(self) -> dict:
        """Serialize to BOSMiner TOML keys; duration only when set."""
        serialized: dict[str, bool | int] = {"shutdown_enabled": self.enabled}
        if self.duration is not None:
            serialized["shutdown_duration"] = self.duration
        return serialized

    def as_boser(self) -> dict:
        """Serialize to BOSer gRPC kwargs (duration may be None)."""
        return {"enable_shutdown": self.enabled, "shutdown_duration": self.duration}
class ScalingConfig(MinerConfigValue):
    """Power/hashrate scaling settings (step size, floor, shutdown policy)."""

    step: int | None = None  # scaling step size (watts or TH/s, per context)
    minimum: int | None = None  # lowest target scaling may reach
    shutdown: ScalingShutdown | None = None

    @classmethod
    def from_dict(cls, dict_conf: dict | None) -> ScalingConfig:
        """Build from a generic dict; missing keys become None."""
        if dict_conf is None:
            return cls()
        cls_conf = {
            "step": dict_conf.get("step"),
            "minimum": dict_conf.get("minimum"),
        }
        shutdown = dict_conf.get("shutdown")
        if shutdown is not None:
            cls_conf["shutdown"] = ScalingShutdown.from_dict(shutdown)
        return cls(**cls_conf)

    @classmethod
    def from_bosminer(cls, toml_conf: dict, mode: str | None = None):
        """Parse scaling from a BOSMiner TOML config.

        Only ``mode="power"`` is implemented; any other mode (including
        ``"hashrate"``) returns None implicitly.
        """
        if mode == "power":
            return cls._from_bosminer_power(toml_conf)
        if mode == "hashrate":
            # not implemented yet
            pass

    @classmethod
    def _from_bosminer_power(cls, toml_conf: dict):
        """Parse power-based scaling; returns None when absent or disabled."""
        # Newer configs use [performance_scaling]; fall back from the
        # older [power_scaling] table name.
        power_scaling = toml_conf.get("power_scaling")
        if power_scaling is None:
            power_scaling = toml_conf.get("performance_scaling")
        if power_scaling is not None:
            enabled = power_scaling.get("enabled")
            if not enabled:
                return None
            power_step = power_scaling.get("power_step")
            # Key renamed across BOSMiner versions; try old name first.
            min_power = power_scaling.get("min_psu_power_limit")
            if min_power is None:
                min_power = power_scaling.get("min_power_target")
            sd_mode = ScalingShutdown.from_bosminer(power_scaling)
            return cls(step=power_step, minimum=min_power, shutdown=sd_mode)
        # No scaling table at all: returns None implicitly.

    @classmethod
    def from_boser(cls, grpc_miner_conf: dict, mode: str | None = None):
        """Parse scaling from a BOSer gRPC config.

        Only ``mode="power"`` is implemented; any other mode (including
        ``"hashrate"``) returns None implicitly.
        """
        if mode == "power":
            return cls._from_boser_power(grpc_miner_conf)
        if mode == "hashrate":
            # not implemented yet
            pass

    @classmethod
    def _from_boser_power(cls, grpc_miner_conf: dict):
        """Parse the gRPC ``dps`` section; returns None when absent or disabled."""
        try:
            dps_conf = grpc_miner_conf["dps"]
            if not dps_conf.get("enabled", False):
                return None
        except LookupError:
            return None
        conf = {"shutdown": ScalingShutdown.from_boser(dps_conf)}
        if dps_conf.get("minPowerTarget") is not None:
            conf["minimum"] = dps_conf["minPowerTarget"]["watt"]
        if dps_conf.get("powerStep") is not None:
            conf["step"] = dps_conf["powerStep"]["watt"]
        return cls(**conf)
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/config/mining/__init__.py | pyasic/config/mining/__init__.py | # ------------------------------------------------------------------------------
# Copyright 2022 Upstream Data Inc -
# -
# Licensed under the Apache License, Version 2.0 (the "License"); -
# you may not use this file except in compliance with the License. -
# You may obtain a copy of the License at -
# -
# http://www.apache.org/licenses/LICENSE-2.0 -
# -
# Unless required by applicable law or agreed to in writing, software -
# distributed under the License is distributed on an "AS IS" BASIS, -
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -
# See the License for the specific language governing permissions and -
# limitations under the License. -
# ------------------------------------------------------------------------------
from __future__ import annotations
from dataclasses import field
from typing import Any, TypeVar
from pyasic import settings
from pyasic.config.base import MinerConfigOption, MinerConfigValue
from pyasic.web.braiins_os.proto.braiins.bos.v1 import (
DpsHashrateTarget,
DpsPowerTarget,
DpsTarget,
HashrateTargetMode,
PerformanceMode,
Power,
PowerTargetMode,
SaveAction,
SetDpsRequest,
SetPerformanceModeRequest,
TeraHashrate,
TunerPerformanceMode,
)
from .algo import (
BoardTuneAlgo,
ChipTuneAlgo,
StandardTuneAlgo,
TunerAlgo,
TunerAlgoType,
VOptAlgo,
)
from .presets import MiningPreset
from .scaling import ScalingConfig
class MiningModeNormal(MinerConfigValue):
    """Stock/normal mining mode."""

    mode: str = field(init=False, default="normal")

    @classmethod
    def from_dict(cls, dict_conf: dict | None) -> MiningModeNormal:
        """Normal mode carries no extra settings; the dict is ignored."""
        return cls()

    def as_am_modern(self) -> dict:
        # Some Antminer firmwares expect the mode as a string, others as an int.
        as_str = settings.get("antminer_mining_mode_as_str", False)
        return {"miner-mode": "0" if as_str else 0}

    def as_hiveon_modern(self) -> dict:
        as_str = settings.get("antminer_mining_mode_as_str", False)
        return {"miner-mode": "0" if as_str else 0}

    def as_elphapex(self) -> dict:
        return {"miner-mode": 0}

    def as_wm(self) -> dict:
        return {"mode": self.mode}

    def as_btminer_v3(self) -> dict:
        return {"set.miner.service": "start", "set.miner.power_mode": self.mode}

    def as_auradine(self) -> dict:
        return {"mode": {"mode": self.mode}}

    def as_epic(self) -> dict:
        return {"ptune": {"enabled": False}}

    def as_goldshell(self) -> dict:
        return {"settings": {"level": 0}}

    def as_mara(self) -> dict:
        return {
            "mode": {
                "work-mode-selector": "Stock",
            }
        }

    def as_luxos(self) -> dict:
        return {"autotunerset": {"enabled": False}}

    def as_bosminer(self) -> dict:
        return {"autotuning": {"enabled": True}}
class MiningModeSleep(MinerConfigValue):
    """Sleep mode: hashing stopped, miner idle."""

    mode: str = field(init=False, default="sleep")

    @classmethod
    def from_dict(cls, dict_conf: dict | None) -> MiningModeSleep:
        """Sleep mode carries no extra settings; the dict is ignored."""
        return cls()

    def as_am_modern(self) -> dict:
        # Some Antminer firmwares expect the mode as a string, others as an int.
        as_str = settings.get("antminer_mining_mode_as_str", False)
        return {"miner-mode": "1" if as_str else 1}

    def as_hiveon_modern(self) -> dict:
        as_str = settings.get("antminer_mining_mode_as_str", False)
        return {"miner-mode": "1" if as_str else 1}

    def as_elphapex(self) -> dict:
        return {"miner-mode": 1}

    def as_wm(self) -> dict:
        return {"mode": self.mode}

    def as_btminer_v3(self) -> dict:
        return {"set.miner.service": "stop"}

    def as_auradine(self) -> dict:
        return {"mode": {"sleep": "on"}}

    def as_epic(self) -> dict:
        return {"ptune": {"algo": "Sleep", "target": 0}}

    def as_goldshell(self) -> dict:
        return {"settings": {"level": 3}}

    def as_mara(self) -> dict:
        return {
            "mode": {
                "work-mode-selector": "Sleep",
            }
        }
class MiningModeLPM(MinerConfigValue):
    """Low power mode."""

    mode: str = field(init=False, default="low")

    @classmethod
    def from_dict(cls, dict_conf: dict | None) -> MiningModeLPM:
        """Low power mode carries no extra settings; the dict is ignored."""
        return cls()

    def as_am_modern(self) -> dict:
        # Some Antminer firmwares expect the mode as a string, others as an int.
        as_str = settings.get("antminer_mining_mode_as_str", False)
        return {"miner-mode": "3" if as_str else 3}

    def as_hiveon_modern(self) -> dict:
        as_str = settings.get("antminer_mining_mode_as_str", False)
        return {"miner-mode": "3" if as_str else 3}

    def as_elphapex(self) -> dict:
        return {"miner-mode": 3}

    def as_wm(self) -> dict:
        return {"mode": self.mode}

    def as_btminer_v3(self) -> dict:
        return {"set.miner.service": "start", "set.miner.power_mode": self.mode}

    def as_auradine(self) -> dict:
        return {"mode": {"mode": "eco"}}

    def as_goldshell(self) -> dict:
        return {"settings": {"level": 1}}
class MiningModeHPM(MinerConfigValue):
    """High performance mode."""

    mode: str = field(init=False, default="high")

    @classmethod
    def from_dict(cls, dict_conf: dict | None) -> MiningModeHPM:
        """High performance mode carries no extra settings; the dict is ignored."""
        return cls()

    def as_am_modern(self) -> dict:
        # Some Antminer firmwares expect the mode as a string, others as an int.
        as_str = settings.get("antminer_mining_mode_as_str", False)
        return {"miner-mode": "0" if as_str else 0}

    def as_hiveon_modern(self) -> dict:
        as_str = settings.get("antminer_mining_mode_as_str", False)
        return {"miner-mode": "0" if as_str else 0}

    def as_elphapex(self) -> dict:
        return {"miner-mode": 0}

    def as_wm(self) -> dict:
        return {"mode": self.mode}

    def as_btminer_v3(self) -> dict:
        return {"set.miner.service": "start", "set.miner.power_mode": self.mode}

    def as_auradine(self) -> dict:
        return {"mode": {"mode": "turbo"}}
class MiningModePowerTune(MinerConfigValue):
    """Tuner-driven mode targeting a power draw (watts)."""

    class Config:
        # Allow the non-pydantic ScalingConfig/algo types as field values.
        arbitrary_types_allowed = True

    mode: str = field(init=False, default="power_tuning")
    power: int | None = None  # target power draw in watts
    algo: StandardTuneAlgo | VOptAlgo | BoardTuneAlgo | ChipTuneAlgo = field(
        default_factory=TunerAlgo.default
    )
    scaling: ScalingConfig | None = None

    @classmethod
    def from_dict(cls, dict_conf: dict | None) -> MiningModePowerTune:
        """Build from a generic dict; falsy values are treated as absent."""
        if dict_conf is None:
            return cls()
        cls_conf = {}
        if dict_conf.get("power"):
            cls_conf["power"] = dict_conf["power"]
        if dict_conf.get("algo"):
            cls_conf["algo"] = TunerAlgo.from_dict(dict_conf["algo"])
        if dict_conf.get("scaling"):
            cls_conf["scaling"] = ScalingConfig.from_dict(dict_conf["scaling"])
        return cls(**cls_conf)

    def as_am_modern(self) -> dict:
        # Antminer stock firmware has no power tuning; fall back to normal mode.
        if settings.get("antminer_mining_mode_as_str", False):
            return {"miner-mode": "0"}
        return {"miner-mode": 0}

    def as_hiveon_modern(self) -> dict:
        if settings.get("antminer_mining_mode_as_str", False):
            return {"miner-mode": "0"}
        return {"miner-mode": 0}

    def as_elphapex(self) -> dict:
        return {"miner-mode": 0}

    def as_wm(self) -> dict:
        """Whatsminer: empty payload when no power target is set."""
        if self.power is not None:
            return {"mode": self.mode, self.mode: {"wattage": self.power}}
        return {}

    def as_btminer_v3(self) -> dict:
        return {"set.miner.service": "start", "set.miner.power_limit": self.power}

    def as_bosminer(self) -> dict:
        """Serialize to BOSMiner TOML: autotuning plus optional performance scaling."""
        tuning_cfg = {"enabled": True, "mode": "power_target"}
        if self.power is not None:
            tuning_cfg["power_target"] = self.power
        cfg = {"autotuning": tuning_cfg}
        if self.scaling is not None:
            scaling_cfg: dict[str, Any] = {"enabled": True}
            if self.scaling.step is not None:
                scaling_cfg["power_step"] = self.scaling.step
            if self.scaling.minimum is not None:
                scaling_cfg["min_power_target"] = self.scaling.minimum
            if self.scaling.shutdown is not None:
                scaling_cfg.update(self.scaling.shutdown.as_bosminer())
            cfg["performance_scaling"] = scaling_cfg
        return cfg

    def as_boser(self) -> dict:
        """Serialize to BOSer gRPC requests: performance mode, plus DPS when scaling is set."""
        cfg: dict[str, Any] = {
            "set_performance_mode": SetPerformanceModeRequest(
                save_action=SaveAction(SaveAction.SAVE_AND_APPLY),
                mode=PerformanceMode(
                    tuner_mode=TunerPerformanceMode(
                        power_target=PowerTargetMode(
                            power_target=Power(watt=self.power)
                            if self.power is not None
                            else None  # type: ignore[arg-type]
                        )
                    )
                ),
            ),
        }
        if self.scaling is not None:
            sd_cfg = {}
            if self.scaling.shutdown is not None:
                sd_cfg = self.scaling.shutdown.as_boser()
            # Only include step/minimum kwargs that are actually set, so the
            # proto message omits the fields rather than sending zeros.
            power_target_kwargs: dict[str, Any] = {}
            if self.scaling.step is not None:
                power_target_kwargs["power_step"] = Power(watt=self.scaling.step)
            if self.scaling.minimum is not None:
                power_target_kwargs["min_power_target"] = Power(
                    watt=self.scaling.minimum
                )
            cfg["set_dps"] = SetDpsRequest(
                save_action=SaveAction(SaveAction.SAVE_AND_APPLY),
                enable=True,
                **sd_cfg,
                target=DpsTarget(power_target=DpsPowerTarget(**power_target_kwargs)),
            )
        return cfg

    def as_auradine(self) -> dict:
        return {"mode": {"mode": "custom", "tune": "power", "power": self.power}}

    def as_mara(self) -> dict:
        return {
            "mode": {
                "work-mode-selector": "Auto",
                "concorde": {
                    "mode-select": "PowerTarget",
                    "power-target": self.power,
                },
            }
        }

    def as_luxos(self) -> dict:
        # LuxOS only exposes an on/off switch here; the target itself is not sent.
        return {"autotunerset": {"enabled": True}}
class MiningModeHashrateTune(MinerConfigValue):
    """Tuner-driven mode targeting a hashrate."""

    class Config:
        # Allow the non-pydantic ScalingConfig/algo types as field values.
        arbitrary_types_allowed = True

    mode: str = field(init=False, default="hashrate_tuning")
    hashrate: int | None = None  # target hashrate; unit depends on backend (TH/s for BOSer)
    algo: StandardTuneAlgo | VOptAlgo | BoardTuneAlgo | ChipTuneAlgo = field(
        default_factory=TunerAlgo.default
    )
    scaling: ScalingConfig | None = None

    @classmethod
    def from_dict(cls, dict_conf: dict | None) -> MiningModeHashrateTune:
        """Build from a generic dict; falsy values are treated as absent."""
        if dict_conf is None:
            return cls()
        cls_conf = {}
        if dict_conf.get("hashrate"):
            cls_conf["hashrate"] = dict_conf["hashrate"]
        if dict_conf.get("algo"):
            cls_conf["algo"] = TunerAlgo.from_dict(dict_conf["algo"])
        if dict_conf.get("scaling"):
            cls_conf["scaling"] = ScalingConfig.from_dict(dict_conf["scaling"])
        return cls(**cls_conf)

    def as_am_modern(self) -> dict:
        # Antminer stock firmware has no hashrate tuning; fall back to normal mode.
        if settings.get("antminer_mining_mode_as_str", False):
            return {"miner-mode": "0"}
        return {"miner-mode": 0}

    def as_hiveon_modern(self) -> dict:
        if settings.get("antminer_mining_mode_as_str", False):
            return {"miner-mode": "0"}
        return {"miner-mode": 0}

    def as_elphapex(self) -> dict:
        return {"miner-mode": 0}

    def as_bosminer(self) -> dict:
        """Serialize to BOSMiner TOML autotuning with a hashrate target."""
        conf = {"enabled": True, "mode": "hashrate_target"}
        if self.hashrate is not None:
            conf["hashrate_target"] = self.hashrate
        return {"autotuning": conf}

    def as_boser(self) -> dict:
        """Serialize to BOSer gRPC requests: performance mode, plus DPS when scaling is set."""
        cfg: dict[str, Any] = {
            "set_performance_mode": SetPerformanceModeRequest(
                save_action=SaveAction(SaveAction.SAVE_AND_APPLY),
                mode=PerformanceMode(
                    tuner_mode=TunerPerformanceMode(
                        hashrate_target=HashrateTargetMode(
                            hashrate_target=TeraHashrate(
                                terahash_per_second=float(self.hashrate)
                                if self.hashrate is not None
                                else None  # type: ignore[arg-type]
                            )
                        )
                    )
                ),
            )
        }
        if self.scaling is not None:
            sd_cfg = {}
            if self.scaling.shutdown is not None:
                sd_cfg = self.scaling.shutdown.as_boser()
            # Only include step/minimum kwargs that are actually set, so the
            # proto message omits the fields rather than sending zeros.
            hashrate_target_kwargs: dict[str, Any] = {}
            if self.scaling.step is not None:
                hashrate_target_kwargs["hashrate_step"] = TeraHashrate(
                    terahash_per_second=float(self.scaling.step)
                )
            if self.scaling.minimum is not None:
                hashrate_target_kwargs["min_hashrate_target"] = TeraHashrate(
                    terahash_per_second=float(self.scaling.minimum)
                )
            cfg["set_dps"] = SetDpsRequest(
                save_action=SaveAction(SaveAction.SAVE_AND_APPLY),
                enable=True,
                **sd_cfg,
                target=DpsTarget(
                    hashrate_target=DpsHashrateTarget(**hashrate_target_kwargs)
                ),
            )
        return cfg

    def as_auradine(self) -> dict:
        return {"mode": {"mode": "custom", "tune": "ths", "ths": self.hashrate}}

    def as_epic(self) -> dict:
        """Serialize to ePIC ptune, including scaling bounds when configured."""
        mode = {
            "ptune": {
                # Fall back to the default algorithm name if the configured
                # algo object cannot serialize itself for ePIC.
                "algo": (
                    self.algo.as_epic()
                    if hasattr(self.algo, "as_epic")
                    else TunerAlgo.default().as_epic()
                ),
                "target": self.hashrate,
            }
        }
        if self.scaling is not None:
            if self.scaling.minimum is not None:
                mode["ptune"]["min_throttle"] = self.scaling.minimum
            if self.scaling.step is not None:
                mode["ptune"]["throttle_step"] = self.scaling.step
        return mode

    def as_mara(self) -> dict:
        return {
            "mode": {
                "work-mode-selector": "Auto",
                "concorde": {
                    "mode-select": "Hashrate",
                    "hash-target": self.hashrate,
                },
            }
        }

    def as_luxos(self) -> dict:
        # LuxOS only exposes an on/off switch here; the target itself is not sent.
        return {"autotunerset": {"enabled": True}}
class MiningModePreset(MinerConfigValue):
    """Mode driven by a firmware-defined tuning preset."""

    mode: str = field(init=False, default="preset")
    active_preset: MiningPreset  # the currently selected preset (required)
    available_presets: list[MiningPreset] = field(default_factory=list)

    def as_vnish(self) -> dict:
        """Serialize for vnish: only the active preset's name is sent."""
        return {"overclock": {**self.active_preset.as_vnish()}}

    @classmethod
    def from_vnish(
        cls,
        web_overclock_settings: dict,
        web_presets: list[dict],
        web_perf_summary: dict,
    ) -> MiningModePreset:
        """Parse vnish settings, preferring the perf summary's current preset.

        Falls back to matching the overclock settings' preset name against
        the preset list.
        """
        active_preset = web_perf_summary.get("current_preset")
        if active_preset is None:
            # NOTE(review): loop has no break, so the LAST matching preset
            # wins; and if nothing matches, MiningPreset.from_vnish({}) will
            # raise KeyError on "name" — confirm whether that is intended.
            for preset in web_presets:
                if preset["name"] == web_overclock_settings["preset"]:
                    active_preset = preset
        return cls(
            active_preset=MiningPreset.from_vnish(active_preset or {}),
            available_presets=[MiningPreset.from_vnish(p) for p in web_presets],
        )

    @classmethod
    def from_luxos(cls, rpc_config: dict, rpc_profiles: dict) -> MiningModePreset:
        """Parse LuxOS config + profiles RPC responses."""
        active_preset = cls.get_active_preset_from_luxos(rpc_config, rpc_profiles)
        return cls(
            active_preset=active_preset,
            available_presets=[
                MiningPreset.from_luxos(p) for p in rpc_profiles["PROFILES"]
            ],
        )

    @classmethod
    def get_active_preset_from_luxos(
        cls, rpc_config: dict, rpc_profiles: dict
    ) -> MiningPreset:
        """Find the profile named by the active LuxOS config entry.

        With no match, falls through to parsing an empty dict (which will
        raise KeyError inside MiningPreset.from_luxos).
        """
        active_preset = None
        active_profile = rpc_config["CONFIG"][0]["Profile"]
        for profile in rpc_profiles["PROFILES"]:
            if profile["Profile Name"] == active_profile:
                active_preset = profile
        return MiningPreset.from_luxos(active_preset or {})
class ManualBoardSettings(MinerConfigValue):
    """Per-board frequency/voltage values for manual tuning."""

    freq: float
    volt: float

    @classmethod
    def from_dict(cls, dict_conf: dict) -> ManualBoardSettings:
        """Build from a dict with ``freq`` and ``volt`` keys (both required)."""
        return cls(freq=dict_conf["freq"], volt=dict_conf["volt"])

    def as_am_modern(self) -> dict:
        # Some Antminer firmwares expect the mode as a string, others as an int.
        as_str = settings.get("antminer_mining_mode_as_str", False)
        return {"miner-mode": "0" if as_str else 0}

    def as_hiveon_modern(self) -> dict:
        as_str = settings.get("antminer_mining_mode_as_str", False)
        return {"miner-mode": "0" if as_str else 0}

    def as_elphapex(self) -> dict:
        return {"miner-mode": 0}

    def as_vnish(self) -> dict:
        """vnish per-chain payload carries only the frequency."""
        return {"freq": self.freq}
class MiningModeManual(MinerConfigValue):
    """Fully manual tuning: global frequency/voltage plus per-board overrides."""

    mode: str = field(init=False, default="manual")
    global_freq: float
    global_volt: float
    boards: dict[int, ManualBoardSettings] = field(default_factory=dict)

    @classmethod
    def from_dict(cls, dict_conf: dict) -> MiningModeManual:
        """Build from a dict; integer keys are treated as per-board settings."""
        return cls(
            global_freq=dict_conf["global_freq"],
            global_volt=dict_conf["global_volt"],
            boards={
                i: ManualBoardSettings.from_dict(dict_conf[i])
                for i in dict_conf
                if isinstance(i, int)
            },
        )

    def as_am_modern(self) -> dict:
        # Antminer stock firmware has no manual tuning; fall back to normal mode.
        if settings.get("antminer_mining_mode_as_str", False):
            return {"miner-mode": "0"}
        return {"miner-mode": 0}

    def as_elphapex(self) -> dict:
        return {"miner-mode": 0}

    def as_vnish(self) -> dict:
        """Serialize for vnish: per-chain freqs (boards with freq 0 are disabled
        and omitted; an empty list is sent as None) plus global freq/volt."""
        chains = [b.as_vnish() for b in self.boards.values() if b.freq != 0]
        return {
            "overclock": {
                "chains": chains if chains != [] else None,
                "globals": {
                    "freq": int(self.global_freq),
                    "volt": int(self.global_volt),
                },
            }
        }

    @classmethod
    def from_vnish(cls, web_overclock_settings: dict) -> MiningModeManual:
        # will raise KeyError if it cant find the settings, values cannot be empty
        voltage = web_overclock_settings["globals"]["volt"]
        freq = web_overclock_settings["globals"]["freq"]
        # A chain with freq 0 is disabled, so its voltage is reported as 0 too.
        boards = {
            idx: ManualBoardSettings(
                freq=board["freq"],
                volt=voltage if not board["freq"] == 0 else 0,
            )
            for idx, board in enumerate(web_overclock_settings["chains"])
        }
        return cls(global_freq=freq, global_volt=voltage, boards=boards)

    @classmethod
    def from_epic(cls, epic_conf: dict) -> MiningModeManual:
        """Parse ePIC config/summary data; missing sections default to 0 / no boards."""
        voltage = 0
        freq = 0
        if epic_conf.get("HwConfig") is not None:
            # Global target clock is taken from the first board's entry.
            freq = epic_conf["HwConfig"]["Boards Target Clock"][0]["Data"]
        if epic_conf.get("Power Supply Stats") is not None:
            voltage = epic_conf["Power Supply Stats"]["Target Voltage"]
        boards = {}
        if epic_conf.get("HBs") is not None:
            boards = {
                board["Index"]: ManualBoardSettings(
                    freq=board["Core Clock Avg"], volt=board["Input Voltage"]
                )
                for board in epic_conf["HBs"]
            }
        return cls(global_freq=freq, global_volt=voltage, boards=boards)

    def as_mara(self) -> dict:
        """Serialize for MaraFW fixed mode; frequency is sent as a string."""
        return {
            "mode": {
                "work-mode-selector": "Fixed",
                "fixed": {
                    "frequency": str(self.global_freq),
                    "voltage": self.global_volt,
                },
            }
        }
class MiningModeConfig(MinerConfigOption):
normal = MiningModeNormal
low = MiningModeLPM
high = MiningModeHPM
sleep = MiningModeSleep
power_tuning = MiningModePowerTune
hashrate_tuning = MiningModeHashrateTune
preset = MiningModePreset
manual = MiningModeManual
@classmethod
def default(cls) -> MiningModeConfig:
return cls.normal()
@classmethod
def from_dict(cls, dict_conf: dict | None) -> MiningModeConfig:
if dict_conf is None:
return cls.default()
mode = dict_conf.get("mode")
if mode is None:
return cls.default()
cls_attr = getattr(cls, mode, None)
if cls_attr is not None:
return cls_attr().from_dict(dict_conf)
return cls.default()
@classmethod
def from_am_modern(cls, web_conf: dict) -> MiningModeConfig:
if web_conf.get("bitmain-work-mode") is not None:
work_mode = web_conf["bitmain-work-mode"]
if work_mode == "":
return cls.default()
if int(work_mode) == 0:
return cls.normal()
elif int(work_mode) == 1:
return cls.sleep()
elif int(work_mode) == 3:
return cls.low()
return cls.default()
@classmethod
def from_hiveon_modern(cls, web_conf: dict) -> MiningModeConfig:
if web_conf.get("bitmain-work-mode") is not None:
work_mode = web_conf["bitmain-work-mode"]
if work_mode == "":
return cls.default()
if int(work_mode) == 0:
return cls.normal()
elif int(work_mode) == 1:
return cls.sleep()
elif int(work_mode) == 3:
return cls.low()
return cls.default()
@classmethod
def from_elphapex(cls, web_conf: dict) -> MiningModeConfig:
if web_conf.get("fc-work-mode") is not None:
work_mode = web_conf["fc-work-mode"]
if work_mode == "":
return cls.default()
if int(work_mode) == 0:
return cls.normal()
elif int(work_mode) == 1:
return cls.sleep()
elif int(work_mode) == 3:
return cls.low()
return cls.default()
@classmethod
def from_epic(cls, web_conf: dict) -> MiningModeConfig:
try:
tuner_running = web_conf["PerpetualTune"]["Running"]
if tuner_running:
algo_info = web_conf["PerpetualTune"]["Algorithm"]
if algo_info.get("VoltageOptimizer") is not None:
scaling_cfg = None
if "Throttle Step" in algo_info["VoltageOptimizer"]:
scaling_cfg = ScalingConfig(
minimum=algo_info["VoltageOptimizer"].get(
"Min Throttle Target"
),
step=algo_info["VoltageOptimizer"].get("Throttle Step"),
)
return cls.hashrate_tuning(
hashrate=algo_info["VoltageOptimizer"].get("Target"),
algo=TunerAlgo.voltage_optimizer(),
scaling=scaling_cfg,
)
elif algo_info.get("BoardTune") is not None:
scaling_cfg = None
if "Throttle Step" in algo_info["BoardTune"]:
scaling_cfg = ScalingConfig(
minimum=algo_info["BoardTune"].get("Min Throttle Target"),
step=algo_info["BoardTune"].get("Throttle Step"),
)
return cls.hashrate_tuning(
hashrate=algo_info["BoardTune"].get("Target"),
algo=TunerAlgo.board_tune(),
scaling=scaling_cfg,
)
else:
return cls.hashrate_tuning(
hashrate=algo_info["ChipTune"].get("Target"),
algo=TunerAlgo.chip_tune(),
)
else:
return cls.manual.from_epic(web_conf)
except KeyError:
return cls.default()
@classmethod
def from_bosminer(cls, toml_conf: dict) -> MiningModeConfig:
if toml_conf.get("autotuning") is None:
return cls.default()
autotuning_conf = toml_conf["autotuning"]
if autotuning_conf.get("enabled") is None:
return cls.default()
if not autotuning_conf["enabled"]:
return cls.default()
if autotuning_conf.get("psu_power_limit") is not None:
# old autotuning conf
return cls.power_tuning(
power=autotuning_conf["psu_power_limit"],
scaling=ScalingConfig.from_bosminer(toml_conf, mode="power"),
)
if autotuning_conf.get("mode") is not None:
# new autotuning conf
mode = autotuning_conf["mode"]
if mode == "power_target":
if autotuning_conf.get("power_target") is not None:
return cls.power_tuning(
power=autotuning_conf["power_target"],
scaling=ScalingConfig.from_bosminer(toml_conf, mode="power"),
)
return cls.power_tuning(
scaling=ScalingConfig.from_bosminer(toml_conf, mode="power"),
)
if mode == "hashrate_target":
if autotuning_conf.get("hashrate_target") is not None:
return cls.hashrate_tuning(
hashrate=autotuning_conf["hashrate_target"],
scaling=ScalingConfig.from_bosminer(toml_conf, mode="hashrate"),
)
return cls.hashrate_tuning(
scaling=ScalingConfig.from_bosminer(toml_conf, mode="hashrate"),
)
return cls.default()
@classmethod
def from_vnish(
cls, web_settings: dict, web_presets: list[dict], web_perf_summary: dict
) -> MiningModeConfig:
try:
mode_settings = web_settings["miner"]["overclock"]
except KeyError:
return cls.default()
if mode_settings["preset"] == "disabled":
return cls.manual.from_vnish(mode_settings, web_presets, web_perf_summary)
else:
return cls.preset.from_vnish(mode_settings, web_presets, web_perf_summary)
    @classmethod
    def from_boser(cls, grpc_miner_conf: dict) -> MiningModeConfig:
        """Parse the mining mode from a BOSer gRPC miner configuration.

        ``tunerMode`` 1 selects power tuning and 2 selects hashrate tuning;
        when ``tunerMode`` is absent, the presence of ``powerTarget`` or
        ``hashrateTarget`` decides the mode.  A disabled or missing tuner
        section yields the default mode.
        """
        try:
            tuner_conf = grpc_miner_conf["tuner"]
            if not tuner_conf.get("enabled", False):
                return cls.default()
        except LookupError:
            return cls.default()
        if tuner_conf.get("tunerMode") is not None:
            if tuner_conf["tunerMode"] == 1:
                # Power tuning; the explicit target is optional.
                if tuner_conf.get("powerTarget") is not None:
                    return cls.power_tuning(
                        power=tuner_conf["powerTarget"]["watt"],
                        scaling=ScalingConfig.from_boser(grpc_miner_conf, mode="power"),
                    )
                return cls.power_tuning(
                    scaling=ScalingConfig.from_boser(grpc_miner_conf, mode="power")
                )
            if tuner_conf["tunerMode"] == 2:
                # Hashrate tuning; the explicit target is optional.
                if tuner_conf.get("hashrateTarget") is not None:
                    return cls.hashrate_tuning(
                        hashrate=int(tuner_conf["hashrateTarget"]["terahashPerSecond"]),
                        scaling=ScalingConfig.from_boser(
                            grpc_miner_conf, mode="hashrate"
                        ),
                    )
                return cls.hashrate_tuning(
                    scaling=ScalingConfig.from_boser(grpc_miner_conf, mode="hashrate"),
                )
        # No tunerMode given: infer the mode from whichever target exists.
        if tuner_conf.get("powerTarget") is not None:
            return cls.power_tuning(
                power=tuner_conf["powerTarget"]["watt"],
                scaling=ScalingConfig.from_boser(grpc_miner_conf, mode="power"),
            )
        if tuner_conf.get("hashrateTarget") is not None:
            return cls.hashrate_tuning(
                hashrate=int(tuner_conf["hashrateTarget"]["terahashPerSecond"]),
                scaling=ScalingConfig.from_boser(grpc_miner_conf, mode="hashrate"),
            )
        return cls.default()
@classmethod
def from_auradine(cls, web_mode: dict) -> MiningModeConfig:
try:
mode_data = web_mode["Mode"][0]
if mode_data.get("Sleep") == "on":
return cls.sleep()
if mode_data.get("Mode") == "normal":
return cls.normal()
if mode_data.get("Mode") == "eco":
return cls.low()
if mode_data.get("Mode") == "turbo":
return cls.high()
if mode_data.get("Ths") is not None:
return cls.hashrate_tuning(hashrate=mode_data["Ths"])
if mode_data.get("Power") is not None:
return cls.power_tuning(power=mode_data["Power"])
except LookupError:
return cls.default()
return cls.default()
@classmethod
def from_btminer_v3(
cls, rpc_device_info: dict, rpc_settings: dict
) -> MiningModeConfig:
try:
is_mining = rpc_device_info["msg"]["miner"]["working"] == "true"
if not is_mining:
return cls.sleep()
power_limit = rpc_settings["msg"]["power-limit"]
if not power_limit == 0:
return cls.power_tuning(power=power_limit)
power_mode = rpc_settings["msg"]["power-mode"]
if power_mode == "normal":
return cls.normal()
if power_mode == "high":
return cls.high()
if power_mode == "low":
return cls.low()
except LookupError:
return cls.default()
return cls.default()
@classmethod
def from_mara(cls, web_config: dict) -> MiningModeConfig:
try:
mode = web_config["mode"]["work-mode-selector"]
if mode == "Fixed":
fixed_conf = web_config["mode"]["fixed"]
return cls.manual(
global_freq=int(fixed_conf["frequency"]),
global_volt=fixed_conf["voltage"],
)
elif mode == "Stock":
return cls.normal()
elif mode == "Sleep":
return cls.sleep()
elif mode == "Auto":
auto_conf = web_config["mode"]["concorde"]
auto_mode = auto_conf["mode-select"]
if auto_mode == "Hashrate":
return cls.hashrate_tuning(hashrate=auto_conf["hash-target"])
elif auto_mode == "PowerTarget":
return cls.power_tuning(power=auto_conf["power-target"])
except LookupError:
pass
return cls.default()
@classmethod
def from_luxos(cls, rpc_config: dict, rpc_profiles: dict) -> MiningModeConfig:
preset_info = MiningModePreset.from_luxos(rpc_config, rpc_profiles)
return cls.preset(
active_preset=preset_info.active_preset,
available_presets=preset_info.available_presets,
)
    def as_btminer_v3(self) -> dict:
        """Delegate to the default instance for btminer v3 configuration.

        NOTE(review): assumes the class returned by ``default()`` provides
        its own ``as_btminer_v3`` implementation; otherwise this delegation
        would recurse — confirm against the default mode class.
        """
        return self.default().as_btminer_v3()
# Constrained type variable covering every concrete mining mode class;
# used to annotate code that is generic over a single specific mode type.
MiningMode = TypeVar(
    "MiningMode",
    bound=MiningModeNormal
    | MiningModeHPM
    | MiningModeLPM
    | MiningModeSleep
    | MiningModeManual
    | MiningModePowerTune
    | MiningModeHashrateTune
    | MiningModePreset,
)
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/network/__init__.py | pyasic/network/__init__.py | # ------------------------------------------------------------------------------
# Copyright 2022 Upstream Data Inc -
# -
# Licensed under the Apache License, Version 2.0 (the "License"); -
# you may not use this file except in compliance with the License. -
# You may obtain a copy of the License at -
# -
# http://www.apache.org/licenses/LICENSE-2.0 -
# -
# Unless required by applicable law or agreed to in writing, software -
# distributed under the License is distributed on an "AS IS" BASIS, -
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -
# See the License for the specific language governing permissions and -
# limitations under the License. -
# ------------------------------------------------------------------------------
import asyncio
import ipaddress
import logging
from collections.abc import AsyncIterator
from typing import cast
from pyasic import settings
from pyasic.miners.factory import AnyMiner, miner_factory
class MinerNetwork:
    """A class to handle a network containing miners. Handles scanning and gets miners via [`MinerFactory`][pyasic.miners.factory.MinerFactory].
    Parameters:
        hosts: A list of `ipaddress.IPv4Address` to be used when scanning.
    """
    def __init__(self, hosts: list[ipaddress.IPv4Address]):
        self.hosts = hosts
        semaphore_limit = settings.get("network_scan_semaphore", 255)
        # The setting may be explicitly None; fall back to the default cap.
        if semaphore_limit is None:
            semaphore_limit = 255
        # Caps the number of concurrent ping/identify attempts during a scan.
        self.semaphore = asyncio.Semaphore(semaphore_limit)
    def __len__(self) -> int:
        """Return the number of candidate hosts in this network."""
        return len(self.hosts)
    @classmethod
    def from_list(cls, addresses: list[str]) -> "MinerNetwork":
        """Parse a list of address constructors into a MinerNetwork.
        Parameters:
            addresses: A list of address constructors, such as `["10.1-2.1.1-50", "10.4.1-2.1-50"]`.
        """
        hosts: list[ipaddress.IPv4Address] = []
        for address in addresses:
            hosts = [*hosts, *cls.from_address(address).hosts]
        # De-duplicate and sort the combined host list.
        return cls(sorted(list(set(hosts))))
    @classmethod
    def from_address(cls, address: str) -> "MinerNetwork":
        """Parse an address constructor into a MinerNetwork.
        Parameters:
            address: An address constructor, such as `"10.1-2.1.1-50"`.
        Raises:
            ValueError: If the constructor does not have exactly 4 octets.
        """
        octets = address.split(".")
        if len(octets) > 4:
            raise ValueError("Too many octets in IP constructor.")
        if len(octets) < 4:
            raise ValueError("Too few octets in IP constructor.")
        return cls.from_octets(*octets)
    @classmethod
    def from_octets(
        cls, oct_1: str, oct_2: str, oct_3: str, oct_4: str
    ) -> "MinerNetwork":
        """Parse 4 octet constructors into a MinerNetwork.
        Parameters:
            oct_1: An octet constructor, such as `"10"`.
            oct_2: An octet constructor, such as `"1-2"`.
            oct_3: An octet constructor, such as `"1"`.
            oct_4: An octet constructor, such as `"1-50"`.
        """
        hosts: list[ipaddress.IPv4Address] = []
        # Expand every octet range and enumerate the full cartesian product.
        oct_1_start, oct_1_end = compute_oct_range(oct_1)
        for oct_1_idx in range((abs(oct_1_end - oct_1_start)) + 1):
            oct_1_val = str(oct_1_idx + oct_1_start)
            oct_2_start, oct_2_end = compute_oct_range(oct_2)
            for oct_2_idx in range((abs(oct_2_end - oct_2_start)) + 1):
                oct_2_val = str(oct_2_idx + oct_2_start)
                oct_3_start, oct_3_end = compute_oct_range(oct_3)
                for oct_3_idx in range((abs(oct_3_end - oct_3_start)) + 1):
                    oct_3_val = str(oct_3_idx + oct_3_start)
                    oct_4_start, oct_4_end = compute_oct_range(oct_4)
                    for oct_4_idx in range((abs(oct_4_end - oct_4_start)) + 1):
                        oct_4_val = str(oct_4_idx + oct_4_start)
                        ip_addr = ipaddress.ip_address(
                            ".".join([oct_1_val, oct_2_val, oct_3_val, oct_4_val])
                        )
                        # ip_address may return IPv6; keep IPv4 only.
                        if isinstance(ip_addr, ipaddress.IPv4Address):
                            hosts.append(ip_addr)
        return cls(sorted(hosts))
    @classmethod
    def from_subnet(cls, subnet: str) -> "MinerNetwork":
        """Parse a subnet into a MinerNetwork.
        Parameters:
            subnet: A subnet string, such as `"10.0.0.1/24"`.
        """
        network = ipaddress.ip_network(subnet, strict=False)
        hosts = [
            host for host in network.hosts() if isinstance(host, ipaddress.IPv4Address)
        ]
        return cls(hosts)
    async def scan(self) -> list[AnyMiner]:
        """Scan the network for miners.
        Returns:
            A list of found miners.
        """
        return await self.scan_network_for_miners()
    async def scan_network_for_miners(self) -> list[AnyMiner]:
        """Ping every host concurrently and return the identified miners."""
        logging.debug(f"{self} - (Scan Network For Miners) - Scanning")
        raw_miners: list[AnyMiner | None] = await asyncio.gather(
            *[self.ping_and_get_miner(host) for host in self.hosts]
        )
        # remove all None from the miner list
        miners: list[AnyMiner] = cast(
            list[AnyMiner], [miner for miner in raw_miners if miner is not None]
        )
        logging.debug(
            f"{self} - (Scan Network For Miners) - Found {len(miners)} miners"
        )
        # return the miner objects
        return miners
    async def scan_network_generator(self) -> AsyncIterator[AnyMiner | None]:
        """
        Scan the network for miners using an async generator.
        Returns:
            An asynchronous generator containing found miners.
        """
        # create a list of scan tasks
        tasks: list[asyncio.Task[AnyMiner | None]] = [
            asyncio.create_task(self.ping_and_get_miner(host)) for host in self.hosts
        ]
        for miner in asyncio.as_completed(tasks):
            try:
                result = await miner
                yield result
            except TimeoutError:
                # NOTE(review): a single timeout ends the generator early and
                # leaves the remaining tasks unawaited — confirm this early
                # return is intentional rather than a per-host `continue`.
                yield None
                return
    async def ping_and_get_miner(
        self, ip: ipaddress.IPv4Address | ipaddress.IPv6Address
    ) -> AnyMiner | None:
        """Ping a single host, honoring the scan semaphore when configured."""
        if settings.get("network_scan_semaphore") is None:
            return await self._ping_and_get_miner(ip) # type: ignore[func-returns-value]
        async with self.semaphore:
            return await self._ping_and_get_miner(ip) # type: ignore[func-returns-value]
    @staticmethod
    async def _ping_and_get_miner(
        ip: ipaddress.IPv4Address | ipaddress.IPv6Address,
    ) -> AnyMiner | None:
        """Try the default port first, then fall back to common miner ports."""
        try:
            return await ping_and_get_miner(ip) # type: ignore[func-returns-value]
        except ConnectionRefusedError:
            # Default port refused; race the well-known miner API ports.
            tasks: list[asyncio.Task[AnyMiner | None]] = [
                asyncio.create_task(ping_and_get_miner(ip, port=port))
                for port in [4028, 4029, 8889]
            ]
            for miner in asyncio.as_completed(tasks):
                try:
                    return await miner
                except ConnectionRefusedError:
                    pass
            return None
async def ping_and_get_miner(
    ip: ipaddress.IPv4Address | ipaddress.IPv6Address, port: int = 80
) -> AnyMiner | None:
    """Check that *ip* accepts a TCP connection on *port*, then identify the miner.

    Retries up to the ``network_ping_retries`` setting; each attempt is
    bounded by the ``network_ping_timeout`` setting.

    Returns:
        The miner from the factory, or None if the host never answered.
    Raises:
        ConnectionRefusedError: Re-raised from any OSError so callers can
            retry on alternate ports.
    """
    for _ in range(settings.get("network_ping_retries", 1)):
        try:
            connection_fut = asyncio.open_connection(str(ip), port)
            # get the read and write streams from the connection
            _, writer = await asyncio.wait_for(
                connection_fut, timeout=settings.get("network_ping_timeout", 3)
            )
            # immediately close connection, we know connection happened
            writer.close()
            # make sure the writer is closed
            await writer.wait_closed()
            # ping was successful
            return await miner_factory.get_miner(ip) # type: ignore[func-returns-value]
        except asyncio.exceptions.TimeoutError:
            # ping failed if we time out
            continue
        except OSError as e:
            # NOTE(review): every OSError (not only refusals) is mapped to
            # ConnectionRefusedError here — confirm this is intended.
            raise ConnectionRefusedError from e
        except Exception as e:
            logging.warning(f"{str(ip)}: Unhandled ping exception: {e}")
            return None
    return None
def compute_oct_range(octet: str) -> tuple[int, int]:
    """Parse an octet constructor into an inclusive ``(start, end)`` range.

    A plain octet such as ``"10"`` yields ``(10, 10)``; a ranged octet such
    as ``"1-50"`` yields ``(1, 50)``.  Segments past the second (e.g. in
    ``"1-2-3"``) are ignored, preserving the historical behavior.

    Raises:
        ValueError: If a segment is not a valid integer.
    """
    parts = octet.split("-")
    start = int(parts[0])
    # A bare octet has no end segment; treat it as a single-value range.
    end = int(parts[1]) if len(parts) > 1 else start
    return start, end
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/data/boards.py | pyasic/data/boards.py | # ------------------------------------------------------------------------------
# Copyright 2022 Upstream Data Inc -
# -
# Licensed under the Apache License, Version 2.0 (the "License"); -
# you may not use this file except in compliance with the License. -
# You may obtain a copy of the License at -
# -
# http://www.apache.org/licenses/LICENSE-2.0 -
# -
# Unless required by applicable law or agreed to in writing, software -
# distributed under the License is distributed on an "AS IS" BASIS, -
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -
# See the License for the specific language governing permissions and -
# limitations under the License. -
# ------------------------------------------------------------------------------
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Optional
from pydantic import BaseModel
from pyasic.device.algorithm.hashrate import AlgoHashRateType
class HashBoard(BaseModel):
    """A Dataclass to standardize hashboard data.
    Attributes:
        slot: The slot of the board as an int.
        hashrate: The hashrate of the board in TH/s as a float.
        inlet_temp: Inlet temperature for hydro asics as an int
        outlet_temp: Outlet temperature for hydro asics as an int
        temp: The temperature of the PCB as an int.
        chip_temp: The temperature of the chips as an int.
        chips: The chip count of the board as an int.
        expected_chips: The expected chip count of the board as an int.
        serial_number: The serial number of the board.
        missing: Whether the board is returned from the miners data as a bool.
        tuned: Whether the board is tuned as a bool.
        active: Whether the board is currently tuning as a bool.
        voltage: Current input voltage of the board as a float.
    """
    slot: int = 0
    hashrate: AlgoHashRateType | None = None
    inlet_temp: float | None = None
    outlet_temp: float | None = None
    temp: float | None = None
    chip_temp: float | None = None
    chips: int | None = None
    expected_chips: int | None = None
    serial_number: str | None = None
    missing: bool = True
    tuned: bool | None = None
    active: bool | None = None
    voltage: float | None = None
    @classmethod
    def fields(cls) -> set:
        """Return the names of all declared and computed model fields."""
        all_fields = set(cls.model_fields.keys())
        all_fields.update(set(cls.model_computed_fields.keys()))
        return all_fields
    def get(self, __key: str, default: Any | None = None):
        """Dict-style get: return the attribute, or *default* when the
        attribute is missing or None."""
        try:
            val = self.__getitem__(__key)
            if val is None:
                return default
            return val
        except KeyError:
            return default
    def __getitem__(self, item: str):
        """Dict-style access to attributes; raises KeyError when absent."""
        try:
            return getattr(self, item)
        except AttributeError:
            raise KeyError(f"{item}")
    def as_influxdb(self, key_root: str, level_delimiter: str = ".") -> str:
        """Serialize selected fields as influxdb line-protocol field pairs.

        Each included, serializable field becomes ``<key_root><delim><name>=<value>``;
        the pairs are joined with commas.  Fields whose type has no
        serializer (including None values) are skipped.
        """
        def serialize_int(key: str, value: int) -> str:
            return f"{key}={value}"
        def serialize_float(key: str, value: float) -> str:
            return f"{key}={value}"
        def serialize_str(key: str, value: str) -> str:
            return f'{key}="{value}"'
        def serialize_algo_hash_rate(key: str, value: AlgoHashRateType) -> str:
            return f"{key}={round(float(value), 2)}"
        def serialize_bool(key: str, value: bool) -> str:
            return f"{key}={str(value).lower()}"
        # Fallback map checked with isinstance() for subclass-typed values.
        serialization_map_instance = {
            AlgoHashRateType: serialize_algo_hash_rate,
        }
        # Primary map keyed on the exact runtime type of the value.
        serialization_map = {
            int: serialize_int,
            float: serialize_float,
            str: serialize_str,
            bool: serialize_bool,
        }
        include = [
            "hashrate",
            "temp",
            "chip_temp",
            "chips",
            "expected_chips",
            "tuned",
            "active",
            "voltage",
        ]
        field_data = []
        for field in include:
            field_val = getattr(self, field)
            serialization_func: Callable[[str, Any], str | None] = (
                serialization_map.get(
                    type(field_val),
                    lambda _k, _v: None, # type: ignore
                )
            )
            serialized = serialization_func(
                f"{key_root}{level_delimiter}{field}", field_val
            )
            if serialized is not None:
                field_data.append(serialized)
                continue
            # Exact-type lookup failed; try the isinstance-based fallbacks.
            for datatype in serialization_map_instance:
                if serialized is None:
                    if isinstance(field_val, datatype):
                        serialized = serialization_map_instance[datatype](
                            f"{key_root}{level_delimiter}{field}", field_val
                        )
            if serialized is not None:
                field_data.append(serialized)
        return ",".join(field_data)
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/data/fans.py | pyasic/data/fans.py | # ------------------------------------------------------------------------------
# Copyright 2022 Upstream Data Inc -
# -
# Licensed under the Apache License, Version 2.0 (the "License"); -
# you may not use this file except in compliance with the License. -
# You may obtain a copy of the License at -
# -
# http://www.apache.org/licenses/LICENSE-2.0 -
# -
# Unless required by applicable law or agreed to in writing, software -
# distributed under the License is distributed on an "AS IS" BASIS, -
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -
# See the License for the specific language governing permissions and -
# limitations under the License. -
# ------------------------------------------------------------------------------
from typing import Any, Optional
from pydantic import BaseModel
class Fan(BaseModel):
    """A Dataclass to standardize fan data.

    Attributes:
        speed: The speed of the fan, or None when unknown.
    """

    # Fan speed as reported by the miner; None when not available.
    speed: int | None = None

    def __getitem__(self, item: str):
        """Dict-style access to model attributes; raises KeyError if absent."""
        try:
            return getattr(self, item)
        except AttributeError:
            raise KeyError(f"{item}")

    def get(self, __key: str, default: Any | None = None):
        """Dict-style get: return the attribute value, or *default* when the
        attribute is missing or None."""
        try:
            value = self[__key]
        except KeyError:
            return default
        return default if value is None else value
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/data/__init__.py | pyasic/data/__init__.py | # ------------------------------------------------------------------------------
# Copyright 2022 Upstream Data Inc -
# -
# Licensed under the Apache License, Version 2.0 (the "License"); -
# you may not use this file except in compliance with the License. -
# You may obtain a copy of the License at -
# -
# http://www.apache.org/licenses/LICENSE-2.0 -
# -
# Unless required by applicable law or agreed to in writing, software -
# distributed under the License is distributed on an "AS IS" BASIS, -
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -
# See the License for the specific language governing permissions and -
# limitations under the License. -
# ------------------------------------------------------------------------------
import copy
import time
from collections.abc import Callable
from datetime import datetime, timezone
from typing import Any, Optional
from pydantic import BaseModel, Field, computed_field
from pyasic.config import MinerConfig
from pyasic.config.mining import MiningModePowerTune
from pyasic.data.pools import PoolMetrics, Scheme
from pyasic.device.algorithm.hashrate import AlgoHashRateType
from pyasic.device.algorithm.hashrate.base import GenericHashrate
from .boards import HashBoard
from .device import DeviceInfo
from .error_codes import BraiinsOSError, InnosiliconError, WhatsminerError, X19Error
from .error_codes.base import BaseMinerError
from .fans import Fan
class MinerData(BaseModel):
    """A Dataclass to standardize data returned from miners (specifically `AnyMiner().get_data()`)
    Attributes:
        ip: The IP of the miner as a str.
        datetime: The time and date this data was generated.
        uptime: The uptime of the miner in seconds.
        mac: The MAC address of the miner as a str.
        device_info: Info about the device, such as model, make, and firmware.
        model: The model of the miner as a str.
        make: The make of the miner as a str.
        firmware: The firmware on the miner as a str.
        algo: The mining algorithm of the miner as a str.
        api_ver: The current api version on the miner as a str.
        fw_ver: The current firmware version on the miner as a str.
        hostname: The network hostname of the miner as a str.
        hashrate: The hashrate of the miner in TH/s as a float. Calculated automatically.
        expected_hashrate: The factory nominal hashrate of the miner in TH/s as a float.
        sticker_hashrate: The factory sticker hashrate of the miner as a float.
        hashboards: A list of [`HashBoard`][pyasic.data.HashBoard]s on the miner with their statistics.
        temperature_avg: The average temperature across the boards. Calculated automatically.
        env_temp: The environment temps as a float.
        wattage: Current power draw of the miner as an int.
        voltage: Current output voltage of the PSU as an float.
        wattage_limit: Power limit of the miner as an int.
        fans: A list of fans on the miner with their speeds.
        expected_fans: The number of fans expected on a miner.
        fan_psu: The speed of the PSU on the fan if the miner collects it.
        total_chips: The total number of chips on all boards. Calculated automatically.
        expected_chips: The expected number of chips in the miner as an int.
        percent_expected_chips: The percent of total chips out of the expected count. Calculated automatically.
        percent_expected_hashrate: The percent of total hashrate out of the expected hashrate. Calculated automatically.
        percent_expected_wattage: The percent of total wattage out of the expected wattage. Calculated automatically.
        nominal: Whether the number of chips in the miner is nominal. Calculated automatically.
        config: The parsed config of the miner, using [`MinerConfig`][pyasic.config.MinerConfig].
        errors: A list of errors on the miner.
        fault_light: Whether the fault light is on as a boolean.
        efficiency: Efficiency of the miner in J/TH (Watts per TH/s). Calculated automatically.
        efficiency_fract: Same as efficiency, but is not rounded to integer. Calculated automatically.
        is_mining: Whether the miner is mining.
        pools: A list of PoolMetrics instances, each representing metrics for a pool.
    """
    # general
    ip: str
    # BUGFIX: the factory must be a callable evaluated per instance.  The
    # previous form `datetime.now(timezone.utc).astimezone` evaluated
    # `datetime.now` once at class-definition time and reused that frozen
    # moment for every instance; the lambda captures the current time.
    raw_datetime: datetime = Field(
        exclude=True,
        default_factory=lambda: datetime.now(timezone.utc).astimezone(),
        repr=False,
    )
    # about
    device_info: DeviceInfo | None = None
    serial_number: str | None = None
    mac: str | None = None
    api_ver: str | None = None
    fw_ver: str | None = None
    hostname: str | None = None
    # hashrate
    raw_hashrate: AlgoHashRateType | None = Field(
        exclude=True, default=None, repr=False
    )
    # sticker
    sticker_hashrate: AlgoHashRateType | None = None
    # expected
    expected_hashrate: AlgoHashRateType | None = None
    expected_hashboards: int | None = None
    expected_chips: int | None = None
    expected_fans: int | None = None
    # temperature
    env_temp: float | None = None
    # power
    wattage: int | None = None
    voltage: float | None = None
    raw_wattage_limit: int | None = Field(exclude=True, default=None, repr=False)
    # fans
    fans: list[Fan] = Field(default_factory=list)
    fan_psu: int | None = None
    # boards
    hashboards: list[HashBoard] = Field(default_factory=list)
    # config
    config: MinerConfig | None = None
    fault_light: bool | None = None
    # errors
    errors: list[BaseMinerError] = Field(default_factory=list)
    # mining state
    is_mining: bool = True
    uptime: int | None = None
    # pools
    pools: list[PoolMetrics] = Field(default_factory=list)
    @classmethod
    def fields(cls) -> set:
        """Return the names of all declared and computed model fields."""
        all_fields = set(cls.model_fields.keys())
        all_fields.update(set(cls.model_computed_fields.keys()))
        return all_fields
    def get(self, __key: str, default: Any | None = None):
        """Dict-style get: return the attribute, or *default* when the
        attribute is missing or None."""
        try:
            val = self.__getitem__(__key)
            if val is None:
                return default
            return val
        except KeyError:
            return default
    def __getitem__(self, item: str):
        """Dict-style access to attributes; raises KeyError when absent."""
        try:
            return getattr(self, item)
        except AttributeError:
            raise KeyError(f"{item}")
    def __setitem__(self, key, value):
        return setattr(self, key, value)
    def __iter__(self):
        # Iterate over the serialized field names.
        return iter([item for item in self.asdict()])
    def __truediv__(self, other):
        return self // other
    def __floordiv__(self, other):
        """Divide all numeric fields by *other*, returning a new MinerData.

        NOTE(review): int fields use floor division while float fields use
        true division — confirm this asymmetry is intentional.
        """
        cp = copy.deepcopy(self)
        for key in self.fields():
            item = getattr(self, key)
            if isinstance(item, int):
                setattr(cp, key, item // other)
            if isinstance(item, float):
                setattr(cp, key, item / other)
        return cp
    def __add__(self, other):
        """Field-wise sum of two MinerData objects (strings blank out,
        lists concatenate, bools combine with AND)."""
        if not isinstance(other, MinerData):
            raise TypeError("Cannot add MinerData to non MinerData type.")
        cp = copy.deepcopy(self)
        for key in self.fields():
            item = getattr(self, key)
            other_item = getattr(other, key)
            if item is None:
                item = 0
            if other_item is None:
                other_item = 0
            if isinstance(item, int):
                setattr(cp, key, item + other_item)
            if isinstance(item, float):
                setattr(cp, key, item + other_item)
            if isinstance(item, str):
                setattr(cp, key, "")
            if isinstance(item, list):
                setattr(cp, key, item + other_item)
            # bool is a subclass of int: this branch runs after the int
            # branch and overrides it with a logical AND.
            if isinstance(item, bool):
                setattr(cp, key, item & other_item)
        return cp
    @computed_field # type: ignore[prop-decorator]
    @property
    def hashrate(self) -> AlgoHashRateType | None:
        """Total hashrate summed over hashboards, or the raw reported
        hashrate when no per-board data is available."""
        if len(self.hashboards) > 0:
            hr_data = []
            for item in self.hashboards:
                if item.hashrate is not None:
                    hr_data.append(item.hashrate)
            if len(hr_data) > 0:
                if self.device_info is not None and self.device_info.algo is not None:
                    from pyasic.device.algorithm.hashrate.unit.base import GenericUnit
                    return sum(
                        hr_data,
                        start=self.device_info.algo.hashrate(
                            rate=0, unit=GenericUnit.H
                        ),
                    )
                else:
                    return sum(hr_data, start=GenericHashrate(rate=0))
        return self.raw_hashrate
    @hashrate.setter
    def hashrate(self, val):
        self.raw_hashrate = val
    @computed_field # type: ignore[prop-decorator]
    @property
    def wattage_limit(self) -> int | None:
        """Power limit from the parsed config when power-tuning, otherwise
        the raw reported limit."""
        if self.config is not None:
            if isinstance(self.config.mining_mode, MiningModePowerTune):
                return self.config.mining_mode.power
        return self.raw_wattage_limit
    @wattage_limit.setter
    def wattage_limit(self, val: int):
        self.raw_wattage_limit = val
    @computed_field # type: ignore[prop-decorator]
    @property
    def total_chips(self) -> int | None:
        """Sum of chip counts over boards; None when boards exist but
        report no chip counts, 0 when there are no boards."""
        if len(self.hashboards) > 0:
            chip_data = []
            for item in self.hashboards:
                if item.chips is not None:
                    chip_data.append(item.chips)
            if len(chip_data) > 0:
                return sum(chip_data)
            return None
        return 0
    @computed_field # type: ignore[prop-decorator]
    @property
    def nominal(self) -> bool | None:
        """Whether the chip count matches the expected count."""
        if self.total_chips is None or self.expected_chips is None:
            return None
        return self.expected_chips == self.total_chips
    @computed_field # type: ignore[prop-decorator]
    @property
    def percent_expected_chips(self) -> int | None:
        """Chip count as a rounded percentage of the expected count."""
        if self.total_chips is None or self.expected_chips is None:
            return None
        if self.total_chips == 0 or self.expected_chips == 0:
            return 0
        return round((self.total_chips / self.expected_chips) * 100)
    @computed_field # type: ignore[prop-decorator]
    @property
    def percent_expected_hashrate(self) -> int | None:
        """Hashrate as a rounded percentage of the expected hashrate."""
        if self.hashrate is None or self.expected_hashrate is None:
            return None
        try:
            return round((self.hashrate / self.expected_hashrate) * 100)
        except ZeroDivisionError:
            return 0
    @computed_field # type: ignore[prop-decorator]
    @property
    def percent_expected_wattage(self) -> int | None:
        """Wattage as a rounded percentage of the wattage limit."""
        if self.wattage_limit is None or self.wattage is None:
            return None
        try:
            return round((self.wattage / self.wattage_limit) * 100)
        except ZeroDivisionError:
            return 0
    @computed_field # type: ignore[prop-decorator]
    @property
    def temperature_avg(self) -> int | None:
        """Mean board temperature across boards that report one."""
        total_temp: float = 0
        temp_count = 0
        for hb in self.hashboards:
            if hb.temp is not None:
                total_temp += hb.temp
                temp_count += 1
        if not temp_count > 0:
            return None
        return round(total_temp / temp_count)
    @computed_field # type: ignore[prop-decorator]
    @property
    def efficiency(self) -> int | None:
        """Efficiency in J/TH, truncated to an int."""
        efficiency = self._efficiency(0)
        if efficiency is None:
            return None
        else:
            return int(efficiency)
    @computed_field # type: ignore[prop-decorator]
    @property
    def efficiency_fract(self) -> float | None:
        """Efficiency in J/TH, rounded to 2 decimal places."""
        return self._efficiency(2)
    def _efficiency(self, ndigits: int) -> float | None:
        # Shared efficiency computation: wattage per unit of hashrate.
        if self.hashrate is None or self.wattage is None:
            return None
        try:
            return round(self.wattage / float(self.hashrate), ndigits)
        except ZeroDivisionError:
            return 0.0
    @computed_field # type: ignore[prop-decorator]
    @property
    def datetime(self) -> str:
        """ISO-8601 string form of the data-collection timestamp."""
        return self.raw_datetime.isoformat()
    @computed_field # type: ignore[prop-decorator]
    @property
    def timestamp(self) -> int:
        """Unix timestamp of the data-collection time (seconds)."""
        return int(time.mktime(self.raw_datetime.timetuple()))
    @computed_field # type: ignore[prop-decorator]
    @property
    def make(self) -> str | None:
        if self.device_info is not None and self.device_info.make is not None:
            return str(self.device_info.make)
        return ""
    @computed_field # type: ignore[prop-decorator]
    @property
    def model(self) -> str | None:
        if self.device_info is not None and self.device_info.model is not None:
            return str(self.device_info.model)
        return ""
    @computed_field # type: ignore[prop-decorator]
    @property
    def firmware(self) -> str | None:
        if self.device_info is not None and self.device_info.firmware is not None:
            return str(self.device_info.firmware)
        return ""
    @computed_field # type: ignore[prop-decorator]
    @property
    def algo(self) -> str | None:
        if self.device_info is not None and self.device_info.algo is not None:
            return str(self.device_info.algo)
        return ""
    def keys(self) -> list:
        """Return the declared (non-computed) field names."""
        return list(self.model_fields.keys())
    def asdict(self) -> dict:
        """Serialize this model to a plain dictionary."""
        return self.model_dump()
    def as_dict(self) -> dict:
        """Get this dataclass as a dictionary.
        Returns:
            A dictionary version of this class.
        """
        return self.asdict()
    def as_json(self) -> str:
        """Get this dataclass as JSON.
        Returns:
            A JSON version of this class.
        """
        return self.model_dump_json()
    def as_csv(self) -> str:
        """Get this dataclass as CSV.
        Returns:
            A CSV version of this class with no headers.
        """
        data = self.asdict()
        errs = []
        for error in data["errors"]:
            errs.append(error["error_message"])
        # Flatten the error list into one semicolon-joined CSV cell.
        data["errors"] = "; ".join(errs)
        data_list = [str(data[item]) for item in data]
        return ",".join(data_list)
    def as_influxdb(
        self, measurement_name: str = "miner_data", level_delimiter: str = "."
    ) -> str:
        """Get this dataclass as [influxdb line protocol](https://docs.influxdata.com/influxdb/v2.4/reference/syntax/line-protocol/).
        Parameters:
            measurement_name: The name of the measurement to insert into in influxdb.
        Returns:
            A influxdb line protocol version of this class.
        """
        def serialize_int(key: str, value: int) -> str:
            return f"{key}={value}"
        def serialize_float(key: str, value: float) -> str:
            return f"{key}={value}"
        def serialize_str(key: str, value: str) -> str:
            return f'{key}="{value}"'
        def serialize_algo_hash_rate(key: str, value: AlgoHashRateType) -> str:
            return f"{key}={round(float(value), 2)}"
        def serialize_list(key: str, value: list[Any]) -> str | None:
            # Serialize each element with an index-suffixed key; fall back
            # to the isinstance-based map for subclass-typed elements.
            if len(value) == 0:
                return None
            list_field_data = []
            for idx, list_field_val in enumerate(value):
                item_serialization_func = serialization_map.get(
                    type(list_field_val), lambda _k, _v: None
                )
                item_serialized = item_serialization_func(
                    f"{key}{level_delimiter}{idx}", list_field_val
                )
                if item_serialized is not None:
                    list_field_data.append(item_serialized)
                    continue
                for dt in serialization_map_instance:
                    if item_serialized is None:
                        if isinstance(list_field_val, dt):
                            func = serialization_map_instance[dt]
                            item_serialized = func(
                                f"{key}{level_delimiter}{idx}", list_field_val
                            )
                if item_serialized is not None:
                    list_field_data.append(item_serialized)
            return ",".join(list_field_data)
        def serialize_miner_error(key: str, value: BaseMinerError):
            return value.as_influxdb(key, level_delimiter=level_delimiter)
        def serialize_fan(key: str, value: Fan) -> str:
            return f"{key}{level_delimiter}speed={value.speed}"
        def serialize_hashboard(key: str, value: HashBoard) -> str:
            return value.as_influxdb(key, level_delimiter=level_delimiter)
        def serialize_bool(key: str, value: bool):
            return f"{key}={str(value).lower()}"
        def serialize_pool_metrics(key: str, value: PoolMetrics):
            return value.as_influxdb(key, level_delimiter=level_delimiter)
        include = [
            "uptime",
            "expected_hashrate",
            "hashrate",
            "hashboards",
            "temperature_avg",
            "env_temp",
            "wattage",
            "wattage_limit",
            "voltage",
            "fans",
            "expected_fans",
            "fan_psu",
            "total_chips",
            "expected_chips",
            "efficiency",
            "fault_light",
            "is_mining",
            "errors",
            "pools",
        ]
        # Fallback map matched with isinstance() for subclass-typed values.
        serialization_map_instance: dict[type, Callable[[str, Any], str | None]] = {
            AlgoHashRateType: serialize_algo_hash_rate,
            BaseMinerError: serialize_miner_error,
        }
        # Primary map keyed on the exact runtime type of the value.
        serialization_map: dict[type, Callable[[str, Any], str | None]] = {
            int: serialize_int,
            float: serialize_float,
            str: serialize_str,
            bool: serialize_bool,
            list: serialize_list,
            Fan: serialize_fan,
            HashBoard: serialize_hashboard,
            PoolMetrics: serialize_pool_metrics,
        }
        tag_data = [
            measurement_name,
            f"ip={str(self.ip)}",
            f"mac={str(self.mac)}",
            f"make={str(self.make)}",
            f"model={str(self.model)}",
            f"firmware={str(self.firmware)}",
            f"algo={str(self.algo)}",
        ]
        field_data = []
        for field in include:
            field_val = getattr(self, field)
            serialization_func = serialization_map.get(
                type(field_val), lambda _k, _v: None
            )
            serialized = serialization_func(field, field_val)
            if serialized is not None:
                field_data.append(serialized)
                continue
            for datatype in serialization_map_instance:
                if serialized is None:
                    if isinstance(field_val, datatype):
                        func = serialization_map_instance[datatype]
                        serialized = func(field, field_val)
            if serialized is not None:
                field_data.append(serialized)
        # Line protocol forbids unescaped spaces in tags and fields.
        tags_str = ",".join(tag_data).replace(" ", "\\ ")
        field_str = ",".join(field_data).replace(" ", "\\ ")
        timestamp = str(self.timestamp * 10**9)
        return " ".join([tags_str, field_str, timestamp])
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/data/device.py | pyasic/data/device.py | from pydantic import BaseModel, ConfigDict, field_serializer
from pyasic.device.algorithm import MinerAlgoType
from pyasic.device.firmware import MinerFirmware
from pyasic.device.makes import MinerMake
from pyasic.device.models import MinerModelType
class DeviceInfo(BaseModel):
    """Identifying information for a miner device.

    All attributes are optional and default to ``None`` when unknown.  Each
    field is serialized as its plain string form rather than the underlying
    object/enum representation.
    """

    # Project types (MinerMake etc.) are not pydantic models, so arbitrary
    # types must be allowed on this model.
    model_config = ConfigDict(arbitrary_types_allowed=True)

    make: MinerMake | None = None
    model: MinerModelType | None = None
    firmware: MinerFirmware | None = None
    algo: type[MinerAlgoType] | None = None

    @field_serializer("make")
    def serialize_make(self, value: MinerMake, _info):
        """Serialize the make as a plain string."""
        return str(value)

    @field_serializer("model")
    def serialize_model(self, value: MinerModelType, _info):
        """Serialize the model as a plain string."""
        return str(value)

    @field_serializer("firmware")
    def serialize_firmware(self, value: MinerFirmware, _info):
        """Serialize the firmware as a plain string."""
        return str(value)

    @field_serializer("algo")
    def serialize_algo(self, value: MinerAlgoType, _info):
        """Serialize the algorithm as a plain string."""
        return str(value)
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/data/pools.py | pyasic/data/pools.py | from collections.abc import Callable
from enum import Enum
from typing import Any
from urllib.parse import urlparse
from pydantic import BaseModel, computed_field, model_serializer
from typing_extensions import Self
class Scheme(Enum):
    """Supported stratum URL schemes for pool connections."""

    STRATUM_V1 = "stratum+tcp"
    STRATUM_V2 = "stratum2+tcp"
    STRATUM_V1_SSL = "stratum+ssl"
class PoolUrl(BaseModel):
    """Structured pool URL: ``scheme://host:port`` with an optional stratum
    v2 public key appended as the path component."""

    scheme: Scheme
    host: str
    port: int
    pubkey: str | None = None

    @model_serializer
    def serialize(self):
        # The whole model serializes to its canonical URL string.
        return str(self)

    def __str__(self) -> str:
        base = f"{self.scheme.value}://{self.host}:{self.port}"
        # Only stratum v2 URLs carry a pubkey path segment.
        if self.scheme == Scheme.STRATUM_V2 and self.pubkey:
            return f"{base}/{self.pubkey}"
        return base

    @classmethod
    def from_str(cls, url: str) -> Self | None:
        """Parse a URL string into a PoolUrl.

        Returns None when the hostname or port cannot be determined.  A
        missing scheme defaults to stratum v1.
        """
        parsed = urlparse(url)
        if not parsed.hostname:
            return None
        if parsed.scheme.strip():
            scheme = Scheme(parsed.scheme)
        else:
            scheme = Scheme.STRATUM_V1
        port = parsed.port
        if port is None:
            return None
        pubkey = None
        if scheme == Scheme.STRATUM_V2:
            pubkey = parsed.path.lstrip("/")
        return cls(scheme=scheme, host=parsed.hostname, port=port, pubkey=pubkey)
class PoolMetrics(BaseModel):
    """A dataclass to standardize pool metrics returned from miners.

    Attributes:
        accepted: Number of accepted shares.
        rejected: Number of rejected shares.
        get_failures: Number of failures in obtaining work from the pool.
        remote_failures: Number of failures communicating with the pool server.
        active: Indicates if the miner is connected to the stratum server.
        alive: Indicates if a pool is alive.
        url: URL of the pool.
        index: Index of the pool.
        user: Username for the pool.
        pool_rejected_percent: Percentage of rejected shares by the pool.
        pool_stale_percent: Percentage of stale shares by the pool.
    """

    url: PoolUrl | None
    accepted: int | None = None
    rejected: int | None = None
    get_failures: int | None = None
    remote_failures: int | None = None
    active: bool | None = None
    alive: bool | None = None
    index: int | None = None
    user: str | None = None

    @computed_field  # type: ignore[prop-decorator]
    @property
    def pool_rejected_percent(self) -> float:  # noqa - Skip PyCharm inspection
        """Calculate and return the percentage of rejected shares."""
        # Without both counters the ratio is undefined; report 0.0.
        if self.rejected is None or self.accepted is None:
            return 0.0
        return self._calculate_percentage(self.rejected, self.accepted + self.rejected)

    @computed_field  # type: ignore[prop-decorator]
    @property
    def pool_stale_percent(self) -> float:  # noqa - Skip PyCharm inspection
        """Calculate and return the percentage of stale shares."""
        if self.get_failures is None or self.accepted is None or self.rejected is None:
            return 0.0
        return self._calculate_percentage(
            self.get_failures, self.accepted + self.rejected
        )

    @staticmethod
    def _calculate_percentage(value: int, total: int) -> float:
        """Calculate the percentage of ``value`` in ``total``, guarding /0."""
        if total == 0:
            return 0.0
        return (value / total) * 100

    def as_influxdb(self, key_root: str, level_delimiter: str = ".") -> str:
        """Serialize the non-None included fields as influxdb line-protocol
        "key=value" fragments joined by commas.

        Parameters:
            key_root: Prefix for every emitted field key (e.g. "pools.0").
            level_delimiter: Separator between key levels.
        """

        # Per-type value formatters; strings and URLs must be double-quoted.
        def serialize_int(key: str, value: int) -> str:
            return f"{key}={value}"

        def serialize_float(key: str, value: float) -> str:
            return f"{key}={value}"

        def serialize_str(key: str, value: str) -> str:
            return f'{key}="{value}"'

        def serialize_pool_url(key: str, value: PoolUrl) -> str:
            return f'{key}="{str(value)}"'

        def serialize_bool(key: str, value: bool) -> str:
            # Line protocol booleans are lowercase.
            return f"{key}={str(value).lower()}"

        serialization_map: dict[type, Callable[[str, Any], str]] = {
            int: serialize_int,
            float: serialize_float,
            str: serialize_str,
            bool: serialize_bool,
            PoolUrl: serialize_pool_url,
        }
        # Only these attributes are exported; fields with no serializer for
        # their exact type (or a None value) are silently skipped.
        include = [
            "url",
            "accepted",
            "rejected",
            "active",
            "alive",
            "user",
        ]
        field_data = []
        for field in include:
            field_val = getattr(self, field)
            if field_val is None:
                continue
            serialization_func = serialization_map.get(type(field_val))
            if serialization_func is not None:
                serialized = serialization_func(
                    f"{key_root}{level_delimiter}{field}", field_val
                )
                if serialized is not None:
                    field_data.append(serialized)
        return ",".join(field_data)
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/data/error_codes/whatsminer.py | pyasic/data/error_codes/whatsminer.py | # ------------------------------------------------------------------------------
# Copyright 2022 Upstream Data Inc -
# -
# Licensed under the Apache License, Version 2.0 (the "License"); -
# you may not use this file except in compliance with the License. -
# You may obtain a copy of the License at -
# -
# http://www.apache.org/licenses/LICENSE-2.0 -
# -
# Unless required by applicable law or agreed to in writing, software -
# distributed under the License is distributed on an "AS IS" BASIS, -
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -
# See the License for the specific language governing permissions and -
# limitations under the License. -
# ------------------------------------------------------------------------------
from pydantic import computed_field
from pyasic.data.error_codes.base import BaseMinerError
class WhatsminerError(BaseMinerError):
    """A Dataclass to handle error codes of Whatsminers.

    Attributes:
        error_code: The error code as an int.
        error_message: The error message as a string. Automatically found from the error code.
    """

    error_code: int

    @computed_field  # type: ignore[prop-decorator]
    @property
    def error_message(self) -> str:  # noqa - Skip PyCharm inspection
        # Decompose the numeric code into (type, subtype, value) digits and
        # walk the nested ERROR_CODES table.  The sentinel key "n" matches
        # any digit and its message template substitutes {n} (and {c} one
        # level deeper).
        error_str = str(self.error_code)
        # Handle edge cases for short error codes
        if len(error_str) < 3:
            return "Unknown error type."
        if len(error_str) == 6 and not error_str[:1] == "1":
            # 6-digit codes not starting with "1": the first two digits are
            # the type (e.g. 23xxxx), then one subtype digit, then the rest.
            err_type = int(error_str[:2])
            err_subtype = int(error_str[2:3])
            err_value = int(error_str[3:])
        else:
            # Otherwise: last digit is the value, second-to-last the subtype,
            # and everything before that the type (covers 3-7 digit codes).
            err_type = int(error_str[:-2])
            err_subtype = int(error_str[-2:-1])
            err_value = int(error_str[-1:])
        try:
            select_err_type = ERROR_CODES.get(err_type)
            if select_err_type is None:
                return "Unknown error type."
            if err_subtype in select_err_type:
                # Exact subtype match; value may be exact or templated on "n".
                select_err_subtype = select_err_type[err_subtype]
                if isinstance(select_err_subtype, dict):
                    if err_value in select_err_subtype:
                        result = select_err_subtype[err_value]
                        return str(result) if not isinstance(result, str) else result
                    elif "n" in select_err_subtype:
                        template = select_err_subtype["n"]
                        if isinstance(template, str):
                            return template.replace("{n}", str(err_value))
                        else:
                            return "Unknown error type."
                    else:
                        return "Unknown error type."
                else:
                    return "Unknown error type."
            elif "n" in select_err_type:
                # Templated subtype: "{n}" is the subtype digit, and the value
                # may match exactly or via the "c" template ("{c}").
                select_err_subtype = select_err_type["n"]
                if isinstance(select_err_subtype, dict):
                    if err_value in select_err_subtype:
                        result = select_err_subtype[err_value]
                        return str(result) if not isinstance(result, str) else result
                    elif "c" in select_err_subtype:
                        template = select_err_subtype["c"]
                        if isinstance(template, str):
                            return template.replace("{n}", str(err_subtype)).replace(
                                "{c}", str(err_value)
                            )
                        else:
                            return "Unknown error type."
                    else:
                        return "Unknown error type."
                else:
                    return "Unknown error type."
            else:
                return "Unknown error type."
        except (KeyError, TypeError):
            # Defensive: malformed table entries fall back to the generic message.
            return "Unknown error type."
# Whatsminer error-code lookup table.
#
# Keyed by error "type" (leading digits of the code), then "subtype" (next
# digit), then "value" (final digit(s)).  The sentinel key "n" matches any
# digit at that level and its message template substitutes {n}; one level
# deeper, the sentinel "c" substitutes {c}.  See WhatsminerError.error_message
# for how a raw code is decomposed into these three parts.
ERROR_CODES: dict[int, dict[int | str, str | dict[int | str, str]]] = {
    1: {  # Fan error
        0: {
            0: "Fan unknown.",
        },
        1: {  # Fan speed error of 1000+
            0: "Intake fan speed error.",
            1: "Exhaust fan speed error.",
        },
        2: {  # Fan speed error of 2000+
            0: "Intake fan speed error. Fan speed deviates by more than 2000.",
            1: "Exhaust fan speed error. Fan speed deviates by more than 2000.",
        },
        3: {  # Fan speed error of 3000+
            0: "Intake fan speed error. Fan speed deviates by more than 3000.",
            1: "Exhaust fan speed error. Fan speed deviates by more than 3000.",
        },
        4: {
            0: "Fan speed too high.",
        },  # High speed
    },
    2: {  # Power error
        0: {
            0: "Power probing error. No power found.",
            1: "Power supply and configuration file don't match.",
            2: "Power output voltage error.",
            3: "Power protecting due to high environment temperature.",
            4: "Power current protecting due to high environment temperature.",
            5: "Power current error.",
            6: "Power input low voltage error.",
            7: "Power input current protecting due to bad power input.",
            8: "Power power error.",
            9: "Power voltage offset error.",
        },
        1: {
            0: "Power error.",
            1: "Power iout error, please reboot.",
            2: "Power vout error, reach vout border. Border: [1150, 1500]",
            3: "Power input voltage and current do not match power output.",
            4: "Power pin did not change.",
            5: "Power vout set error.",
            6: "Power remained unchanged for a long time.",
            7: "Power set enable error.",
            8: "Power input voltage is lower than 230V for high power mode.",
            9: "Power input current is incorrect.",
        },
        3: {
            3: "Power output high temperature protection error.",
            4: "Power output high temperature protection error.",
            5: "Power output high temperature protection error.",
            6: "Power output high current protection error.",
            7: "Power output high current protection error.",
            8: "Power output high current protection error.",
            9: "Power output high voltage protection error.",
        },
        4: {
            0: "Power output low voltage protection error.",
            1: "Power output current imbalance error.",
            3: "Power input high temperature protection error.",
            4: "Power input high temperature protection error.",
            5: "Power input high temperature protection error.",
            6: "Power input high voltage protection error.",
            7: "Power input high voltage protection error.",
            8: "Power input high current protection error.",
            9: "Power input high current protection error.",
        },
        5: {
            0: "Power input low voltage protection error.",
            1: "Power input low voltage protection error.",
            3: "Power supply fan error.",
            4: "Power supply fan error.",
            5: "Power output high power protection error.",
            6: "Power output high power protection error.",
            7: "Input over current protection of power supply on primary side.",
        },
        6: {
            3: "Power communication warning.",
            4: "Power communication error.",
            5: "Power unknown error.",
            6: "Power unknown error.",
            7: "Power watchdog protection.",
            8: "Power output high current protection.",
            9: "Power input high current protection.",
        },
        7: {
            0: "Power input high voltage protection.",
            1: "Power input low voltage protection.",
            2: "Excessive power supply output warning.",
            3: "Power input too high warning.",
            4: "Power fan warning.",
            5: "Power high temperature warning.",
            6: "Power unknown error.",
            7: "Power unknown error.",
            8: "Power unknown error.",
            9: "Power unknown error.",
        },
        8: {
            0: "Power unknown error.",
            1: "Power vendor status 1 bit 0 error.",
            2: "Power vendor status 1 bit 1 error.",
            3: "Power vendor status 1 bit 2 error.",
            4: "Power vendor status 1 bit 3 error.",
            5: "Power vendor status 1 bit 4 error.",
            6: "Power vendor status 1 bit 5 error.",
            7: "Power vendor status 1 bit 6 error.",
            8: "Power vendor status 1 bit 7 error.",
            9: "Power vendor status 2 bit 0 error.",
        },
        9: {
            0: "Power vendor status 2 bit 1 error.",
            1: "Power vendor status 2 bit 2 error.",
            2: "Power vendor status 2 bit 3 error.",
            3: "Power vendor status 2 bit 4 error.",
            4: "Power vendor status 2 bit 5 error.",
            5: "Power vendor status 2 bit 6 error.",
            6: "Power vendor status 2 bit 7 error.",
        },
    },
    3: {  # temperature error
        0: {  # sensor detection error
            "n": "Slot {n} temperature sensor detection error.",
        },
        2: {  # temperature reading error
            "n": "Slot {n} temperature reading error.",
            9: "Control board temperature sensor communication error.",
        },
        5: {
            "n": "Slot {n} temperature protecting.",
        },  # temperature protection
        6: {
            0: "Hashboard high temperature error.",
            1: "Hashboard high temperature error.",
            2: "Hashboard high temperature error.",
            3: "Hashboard high temperature error.",
        },  # high temp
        7: {
            0: "The environment temperature fluctuates too much.",
        },  # env temp
        8: {
            0: "Humidity sensor not found.",
            1: "Humidity sensor read error.",
            2: "Humidity sensor read error.",
            3: "Humidity sensor protecting.",
        },  # humidity
    },
    4: {  # EEPROM error
        0: {
            0: "Eeprom unknown error.",
        },
        1: {
            "n": "Slot {n} eeprom detection error.",
        },  # EEPROM detection error
        2: {
            "n": "Slot {n} eeprom parsing error.",
        },  # EEPROM parsing error
        3: {
            "n": "Slot {n} chip bin type error.",
        },  # chip bin error
        4: {
            "n": "Slot {n} eeprom chip number X error.",
        },  # EEPROM chip number error
        5: {
            "n": "Slot {n} eeprom xfer error.",
        },  # EEPROM xfer error
    },
    5: {  # hashboard error
        0: {
            0: "Board unknown error.",
        },
        1: {
            "n": "Slot {n} miner type error.",
        },  # board miner type error
        2: {
            "n": "Slot {n} bin type error.",
        },  # chip bin type error
        3: {
            "n": "Slot {n} not found.",
        },  # board not found error
        4: {
            "n": "Slot {n} error reading chip id.",
        },  # reading chip id error
        5: {
            "n": "Slot {n} has bad chips.",
        },  # board has bad chips error
        6: {
            "n": "Slot {n} loss of balance error.",
        },  # loss of balance error
        7: {
            "n": "Slot {n} xfer error chip.",
        },  # xfer error
        8: {
            "n": "Slot {n} reset error.",
        },  # reset error
        9: {
            "n": "Slot {n} frequency too low.",
        },  # freq error
    },
    6: {  # env temp error
        0: {
            0: "Environment temperature is too high.",
        },  # normal env temp error
        1: {  # high power env temp error
            0: "Environment temperature is too high for high performance mode.",
        },
    },
    7: {  # control board error
        0: {
            0: "MAC address invalid",
            1: "Control board no support chip.",
        },
        1: {
            0: "Control board rebooted as an exception.",
            1: "Control board rebooted as exception and cpufreq reduced, please upgrade the firmware",
            2: "Control board rebooted as an exception.",
            3: "The network is unstable, change time.",
            4: "Unknown error.",
        },
        2: {
            "n": "Control board slot {n} frame error.",
        },
    },
    8: {  # checksum error
        0: {
            0: "CGMiner checksum error.",
            1: "System monitor checksum error.",
            2: "Remote daemon checksum error.",
        },
        1: {0: "Air to liquid PCB serial # does not match."},
    },
    9: {
        0: {0: "Unknown error.", 1: "Power rate error.", 2: "Unknown error."}
    },  # power rate error
    20: {  # pool error
        0: {
            0: "No pool information configured.",
        },
        1: {
            0: "All pools are disabled.",
        },  # all disabled error
        2: {
            "n": "Pool {n} connection failed.",
        },  # pool connection failed error
        3: {
            0: "High rejection rate on pool.",
        },  # rejection rate error
        4: {  # asicboost not supported error
            0: "The pool does not support asicboost mode.",
        },
    },
    21: {
        1: {
            "n": "Slot {n} factory test step failed.",
        }
    },
    23: {  # hashrate error
        1: {
            0: "Hashrate is too low.",
        },
        2: {
            0: "Hashrate is too low.",
        },
        3: {
            0: "Hashrate loss is too high.",
        },
        4: {
            0: "Hashrate loss is too high.",
        },
        5: {
            0: "Hashrate loss.",
        },
    },
    50: {  # water velocity error/voltage error
        1: {
            "n": "Slot {n} chip voltage too low.",
        },
        2: {
            "n": "Slot {n} chip voltage changed.",
        },
        3: {
            "n": "Slot {n} chip temperature difference is too large.",
        },
        4: {
            "n": "Slot {n} chip hottest temperature difference is too large.",
        },
        5: {"n": "Slot {n} stopped hashing, chips temperature protecting."},
        7: {
            "n": "Slot {n} water velocity is abnormal.",
        },  # abnormal water velocity
        8: {
            0: "Chip temp calibration failed, please restore factory settings.",
        },
        9: {
            "n": "Slot {n} chip temp calibration check no balance.",
        },
    },
    51: {  # frequency error
        1: {
            "n": "Slot {n} frequency up timeout.",
        },  # frequency up timeout
        2: {"n": "Slot {n} too many CRC errors."},
        3: {"n": "Slot {n} unstable."},
        7: {
            "n": "Slot {n} frequency up timeout.",
        },  # frequency up timeout
    },
    # Types 52-56 use per-slot ("n") and per-chip ("c") templates.
    52: {
        "n": {
            "c": "Slot {n} chip {c} error nonce.",
        },
    },
    53: {
        "n": {
            "c": "Slot {n} chip {c} too few nonce.",
        },
    },
    54: {
        "n": {
            "c": "Slot {n} chip {c} temp protected.",
        },
    },
    55: {
        "n": {
            "c": "Slot {n} chip {c} has been reset.",
        },
    },
    56: {
        "n": {
            "c": "Slot {n} chip {c} zero nonce.",
        },
    },
    80: {
        0: {
            0: "The tool version is too low, please update.",
        },
        1: {
            0: "Low freq.",
        },
        2: {
            0: "Low hashrate.",
        },
        3: {
            5: "High env temp.",
        },
    },
    81: {
        0: {
            0: "Chip data error.",
        },
    },
    82: {
        0: {
            0: "Power version error.",
        },
        1: {
            0: "Miner type error.",
        },
        2: {
            0: "Version info error.",
        },
    },
    83: {
        0: {
            0: "Empty level error.",
        },
    },
    84: {
        0: {
            0: "Old firmware.",
        },
        1: {
            0: "Software version error.",
        },
    },
    85: {
        "n": {
            0: "Hashrate substandard L{n}.",
            1: "Power consumption substandard L{n}.",
            2: "Fan speed substandard L{n}.",
            3: "Fan speed substandard L{n}.",
            4: "Voltage substandard L{n}.",
        },
    },
    86: {
        0: {
            0: "Missing product serial #.",
        },
        1: {
            0: "Missing product type.",
        },
        2: {
            0: "Missing miner serial #.",
            1: "Wrong miner serial # length.",
        },
        3: {
            0: "Missing power serial #.",
            1: "Wrong power serial #.",
            2: "Fault miner serial #.",
        },
        4: {
            0: "Missing power model.",
            1: "Wrong power model name.",
            2: "Wrong power model vout.",
            3: "Wrong power model rate.",
            4: "Wrong power model format.",
        },
        5: {
            0: "Wrong hash board struct.",
        },
        6: {
            0: "Wrong miner cooling type.",
        },
        7: {
            0: "Missing PCB serial #.",
        },
    },
    87: {
        0: {
            0: "Miner power mismatch.",
        },
    },
    90: {
        0: {
            0: "Process error, exited with signal: 3.",
        },
        1: {
            0: "Process error, exited with signal: 3.",
        },
    },
    99: {
        9: {
            9: "Miner unknown error.",
        },
    },
    # 1000+ codes are security/firmware-integrity errors.
    1000: {
        0: {
            0: "Security library error, please upgrade firmware",
            1: "/antiv/signature illegal.",
            2: "/antiv/dig/init.d illegal.",
            3: "/antiv/dig/pf_partial.dig illegal.",
        },
    },
    1001: {
        0: {
            0: "Security BTMiner removed, please upgrade firmware.",
        },
    },
    1100: {
        0: {
            0: "Security illegal file, please upgrade firmware.",
            1: "Security virus 0001 is removed, please upgrade firmware.",
        }
    },
}
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/data/error_codes/innosilicon.py | pyasic/data/error_codes/innosilicon.py | # ------------------------------------------------------------------------------
# Copyright 2022 Upstream Data Inc -
# -
# Licensed under the Apache License, Version 2.0 (the "License"); -
# you may not use this file except in compliance with the License. -
# You may obtain a copy of the License at -
# -
# http://www.apache.org/licenses/LICENSE-2.0 -
# -
# Unless required by applicable law or agreed to in writing, software -
# distributed under the License is distributed on an "AS IS" BASIS, -
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -
# See the License for the specific language governing permissions and -
# limitations under the License. -
# ------------------------------------------------------------------------------
from pydantic import computed_field
from pyasic.data.error_codes.base import BaseMinerError
class InnosiliconError(BaseMinerError):
    """A Dataclass to handle error codes of Innosilicon miners.

    Attributes:
        error_code: The error code as an int.
        error_message: The error message as a string. Automatically found from the error code.
    """

    error_code: int

    @computed_field  # type: ignore[prop-decorator]
    @property
    def error_message(self) -> str:  # noqa - Skip PyCharm inspection
        """Look up the human-readable message for this code."""
        # Unknown codes fall back to a generic message.
        return ERROR_CODES.get(self.error_code, "Unknown error type.")
# Innosilicon error-code lookup table: flat mapping of numeric code to
# human-readable message, consumed by InnosiliconError.error_message.
ERROR_CODES = {
    21: "The PLUG signal of the hash board is not detected.",
    22: "Power I2C communication is abnormal.",
    23: "The SPI of all hash boards is blocked.",
    24: "Some of the hash boards fail to connect to the SPI'.",
    25: "Hashboard failed to set frequency.",
    26: "Hashboard failed to set voltage.",
    27: "Chip BIST test failed.",
    28: "Hashboard SPI communication is abnormal.",
    29: "Power I2C communication is abnormal.",
    30: "Pool connection failed.",
    31: "Individual chips are damaged.",
    32: "Over temperature protection.",
    33: "Hashboard fault.",
    34: "The data cables are not connected in the correct order.",
    35: "No power output.",
    36: "Hashboard fault.",
    37: "Control board and/or hashboard do not match.",
    40: "Power output is abnormal.",
    41: "Power output is abnormal.",
    42: "Hashboard fault.",
}
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/data/error_codes/bos.py | pyasic/data/error_codes/bos.py | # ------------------------------------------------------------------------------
# Copyright 2022 Upstream Data Inc -
# -
# Licensed under the Apache License, Version 2.0 (the "License"); -
# you may not use this file except in compliance with the License. -
# You may obtain a copy of the License at -
# -
# http://www.apache.org/licenses/LICENSE-2.0 -
# -
# Unless required by applicable law or agreed to in writing, software -
# distributed under the License is distributed on an "AS IS" BASIS, -
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -
# See the License for the specific language governing permissions and -
# limitations under the License. -
# ------------------------------------------------------------------------------
from pyasic.data.error_codes.base import BaseMinerError
class BraiinsOSError(BaseMinerError):
    """A Dataclass to handle error codes of BraiinsOS+ miners.

    Attributes:
        error_message: The error message as a string.
        error_code: The error code as an int. 0 if the message is not assigned a code.
    """

    # The message is the primary datum; the numeric code defaults to 0
    # (unassigned) when the firmware does not provide one.
    error_message: str
    error_code: int = 0
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/data/error_codes/__init__.py | pyasic/data/error_codes/__init__.py | # ------------------------------------------------------------------------------
# Copyright 2022 Upstream Data Inc -
# -
# Licensed under the Apache License, Version 2.0 (the "License"); -
# you may not use this file except in compliance with the License. -
# You may obtain a copy of the License at -
# -
# http://www.apache.org/licenses/LICENSE-2.0 -
# -
# Unless required by applicable law or agreed to in writing, software -
# distributed under the License is distributed on an "AS IS" BASIS, -
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -
# See the License for the specific language governing permissions and -
# limitations under the License. -
# ------------------------------------------------------------------------------
from typing import TypeVar
from .bos import BraiinsOSError
from .innosilicon import InnosiliconError
from .vnish import VnishError
from .whatsminer import WhatsminerError
from .X19 import X19Error
# TypeVar constrained to every supported miner error type, used to annotate
# APIs that work generically over miner errors.
# NOTE(review): a constrained TypeVar binds to ONE of these types per usage
# site, not a per-element union — confirm a Union alias was not intended.
MinerErrorData = TypeVar(
    "MinerErrorData",
    WhatsminerError,
    BraiinsOSError,
    X19Error,
    InnosiliconError,
    VnishError,
)
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/data/error_codes/base.py | pyasic/data/error_codes/base.py | from pydantic import BaseModel
class BaseMinerError(BaseModel):
    """Common base class for miner error dataclasses.

    Subclasses either declare an ``error_message`` field or compute it from
    ``error_code``.
    """

    error_code: int | None = None

    @classmethod
    def fields(cls):
        """Return the names of the model's declared fields."""
        return list(cls.model_fields.keys())

    def asdict(self) -> dict:
        """Get this dataclass as a dictionary."""
        return self.model_dump()

    def as_dict(self) -> dict:
        """Get this dataclass as a dictionary.

        Returns:
            A dictionary version of this class.
        """
        return self.asdict()

    def as_influxdb(self, root_key: str, level_delimiter: str = ".") -> str:
        """Serialize this error as comma-joined influxdb line-protocol
        field fragments rooted at ``root_key``."""
        parts = []
        if self.error_code is not None:
            parts.append(f"{root_key}{level_delimiter}error_code={self.error_code}")
        # error_message may be a regular field or a computed property on the
        # subclass; absent and None are both treated as "nothing to emit".
        message = getattr(self, "error_message", None)
        if message is not None:
            parts.append(f'{root_key}{level_delimiter}error_message="{message}"')
        return ",".join(parts)
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/data/error_codes/vnish.py | pyasic/data/error_codes/vnish.py | # ------------------------------------------------------------------------------
# Copyright 2022 Upstream Data Inc -
# -
# Licensed under the Apache License, Version 2.0 (the "License"); -
# you may not use this file except in compliance with the License. -
# You may obtain a copy of the License at -
# -
# http://www.apache.org/licenses/LICENSE-2.0 -
# -
# Unless required by applicable law or agreed to in writing, software -
# distributed under the License is distributed on an "AS IS" BASIS, -
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -
# See the License for the specific language governing permissions and -
# limitations under the License. -
# ------------------------------------------------------------------------------
from pyasic.data.error_codes.base import BaseMinerError
class VnishError(BaseMinerError):
    """A Dataclass to handle error codes of Vnish miners.

    Attributes:
        error_message: The error message as a string.
        error_code: The error code as an int. 0 if the message is not assigned a code.
    """

    # Vnish supplies textual errors; the numeric code defaults to 0 when the
    # message has no assigned code.
    error_message: str
    error_code: int = 0
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/data/error_codes/X19.py | pyasic/data/error_codes/X19.py | # ------------------------------------------------------------------------------
# Copyright 2022 Upstream Data Inc -
# -
# Licensed under the Apache License, Version 2.0 (the "License"); -
# you may not use this file except in compliance with the License. -
# You may obtain a copy of the License at -
# -
# http://www.apache.org/licenses/LICENSE-2.0 -
# -
# Unless required by applicable law or agreed to in writing, software -
# distributed under the License is distributed on an "AS IS" BASIS, -
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -
# See the License for the specific language governing permissions and -
# limitations under the License. -
# ------------------------------------------------------------------------------
from pyasic.data.error_codes.base import BaseMinerError
class X19Error(BaseMinerError):
    """A Dataclass to handle error codes of X19 miners.

    Attributes:
        error_message: The error message as a string.
        error_code: The error code as an int. 0 if the message is not assigned a code.
    """

    # X19 firmware supplies textual errors; the numeric code defaults to 0
    # when the message has no assigned code.
    error_message: str
    error_code: int = 0
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/web/luckyminer.py | pyasic/web/luckyminer.py | from __future__ import annotations
from .bitaxe import ESPMinerWebAPI
class LuckyMinerWebAPI(ESPMinerWebAPI):
    """Web API handler for Lucky Miner devices.

    Behaves identically to ESPMinerWebAPI; exists as a distinct type so
    Lucky Miners can be identified separately.
    """

    pass
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/web/goldshell.py | pyasic/web/goldshell.py | # ------------------------------------------------------------------------------
# Copyright 2022 Upstream Data Inc -
# -
# Licensed under the Apache License, Version 2.0 (the "License"); -
# you may not use this file except in compliance with the License. -
# You may obtain a copy of the License at -
# -
# http://www.apache.org/licenses/LICENSE-2.0 -
# -
# Unless required by applicable law or agreed to in writing, software -
# distributed under the License is distributed on an "AS IS" BASIS, -
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -
# See the License for the specific language governing permissions and -
# limitations under the License. -
# ------------------------------------------------------------------------------
from __future__ import annotations
import json
import warnings
from typing import Any, TypedDict
import httpx
from pyasic import settings
from pyasic.errors import APIError
from pyasic.web.base import BaseWebAPI
PoolPass = TypedDict("PoolPass", {"pass": str})
class GoldshellWebAPI(BaseWebAPI):
def __init__(self, ip: str) -> None:
    """Set up the Goldshell web API handler for the miner at ``ip``."""
    super().__init__(ip)
    # Factory-default credentials; the password can be overridden via the
    # "default_goldshell_web_password" pyasic setting.
    self.username: str = "admin"
    self.pwd: str = settings.get("default_goldshell_web_password", "123456789")
    # JWT bearer token, populated lazily by auth().
    self.token: str | None = None
async def auth(self) -> str | None:
    """Authenticate with the miner's web interface and cache the JWT token.

    First logs out any existing session, then attempts a plaintext login
    with the configured credentials.  If the miner responds with a non-JSON
    body, retries with the encrypted form of the default password.

    Returns:
        The JWT token string, or None when authentication failed.
    """
    async with httpx.AsyncClient(transport=settings.transport()) as client:
        try:
            # Drop any stale session first.
            await client.get(f"http://{self.ip}:{self.port}/user/logout")
            auth = (
                await client.get(
                    f"http://{self.ip}:{self.port}/user/login?username={self.username}&password={self.pwd}&cipher=false"
                )
            ).json()
        except httpx.HTTPError:
            warnings.warn(f"Could not authenticate web token with miner: {self}")
        except json.JSONDecodeError:
            # try again with encrypted normal password
            # NOTE(review): the hex digest appears to be the cipher-encrypted
            # default password expected with cipher=true — confirm.
            try:
                auth = (
                    await client.get(
                        f"http://{self.ip}:{self.port}/user/login?username=admin&password=bbad7537f4c8b6ea31eea0b3d760e257&cipher=true"
                    )
                ).json()
            except (httpx.HTTPError, json.JSONDecodeError):
                warnings.warn(
                    f"Could not authenticate web token with miner: {self}"
                )
            else:
                self.token = auth.get("JWT Token")
        else:
            self.token = auth.get("JWT Token")
    return self.token
async def send_command(
self,
command: str,
ignore_errors: bool = False,
allow_warning: bool = True,
privileged: bool = False,
**parameters: Any,
) -> dict:
if self.token is None:
await self.auth()
async with httpx.AsyncClient(transport=settings.transport()) as client:
retries = settings.get("get_data_retries", 1)
for attempt in range(retries):
if self.token is None:
raise APIError(
f"Could not authenticate web token with miner: {self}"
)
try:
if not parameters == {}:
response = await client.put(
f"http://{self.ip}:{self.port}/mcb/{command}",
headers={"Authorization": "Bearer " + self.token},
timeout=settings.get("api_function_timeout", 5),
json=parameters,
)
else:
response = await client.get(
f"http://{self.ip}:{self.port}/mcb/{command}",
headers={"Authorization": "Bearer " + self.token},
timeout=settings.get("api_function_timeout", 5),
)
json_data = response.json()
return json_data
except TypeError:
await self.auth()
except httpx.HTTPError as e:
if attempt == retries - 1:
raise APIError(
f"HTTP error sending '{command}' to {self.ip}: {e}"
)
except json.JSONDecodeError as e:
if attempt == retries - 1:
response_text = (
response.text if response.text else "empty response"
)
raise APIError(
f"JSON decode error for '{command}' from {self.ip}: {e} - Response: {response_text}"
)
raise APIError(f"Failed to send command to miner: {self}")
async def multicommand(
self, *commands: str, ignore_errors: bool = False, allow_warning: bool = True
) -> dict:
data: dict[str, Any] = {k: None for k in commands}
data["multicommand"] = True
await self.auth()
async with httpx.AsyncClient(transport=settings.transport()) as client:
for command in commands:
if self.token is None:
raise APIError(
f"Could not authenticate web token with miner: {self}"
)
try:
uri_commnand = command
if command == "devs":
uri_commnand = "cgminer?cgminercmd=devs"
response = await client.get(
f"http://{self.ip}:{self.port}/mcb/{uri_commnand}",
headers={"Authorization": "Bearer " + self.token},
timeout=settings.get("api_function_timeout", 5),
)
json_data = response.json()
data[command] = json_data
except httpx.HTTPError:
pass
except json.JSONDecodeError:
pass
except TypeError:
await self.auth()
return data
async def pools(self) -> dict:
return await self.send_command("pools")
async def newpool(self, url: str, user: str, password: str) -> dict:
# looks dumb, but cant pass `pass` since it is a built in type
poolpass: PoolPass = {"pass": password}
return await self.send_command("newpool", url=url, user=user, **poolpass)
async def delpool(
self, url: str, user: str, password: str, dragid: int = 0
) -> dict:
# looks dumb, but cant pass `pass` since it is a built in type
poolpass: PoolPass = {"pass": password}
return await self.send_command(
"delpool", url=url, user=user, dragid=dragid, **poolpass
)
async def setting(self) -> dict:
return await self.send_command("setting")
async def set_setting(self, values: dict) -> None:
await self.send_command("setting", **values)
async def status(self) -> dict:
return await self.send_command("status")
async def devs(self) -> dict:
return await self.send_command("cgminer?cgminercmd=devs")
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/web/innosilicon.py | pyasic/web/innosilicon.py | # ------------------------------------------------------------------------------
# Copyright 2022 Upstream Data Inc -
# -
# Licensed under the Apache License, Version 2.0 (the "License"); -
# you may not use this file except in compliance with the License. -
# You may obtain a copy of the License at -
# -
# http://www.apache.org/licenses/LICENSE-2.0 -
# -
# Unless required by applicable law or agreed to in writing, software -
# distributed under the License is distributed on an "AS IS" BASIS, -
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -
# See the License for the specific language governing permissions and -
# limitations under the License. -
# ------------------------------------------------------------------------------
from __future__ import annotations
import json
import warnings
from typing import Any
import httpx
from pyasic import settings
from pyasic.errors import APIError
from pyasic.web.base import BaseWebAPI
class InnosiliconWebAPI(BaseWebAPI):
    """Web API client for Innosilicon miners.

    Authenticates against ``/api/auth`` for a JWT and issues commands
    against the ``/api/*`` endpoints with bearer-token headers.
    """
    def __init__(self, ip: str) -> None:
        super().__init__(ip)
        self.username: str = "admin"
        # Web password; falls back to the Innosilicon factory default.
        self.pwd: str = settings.get("default_innosilicon_web_password", "admin")
        # JWT bearer token, populated lazily by auth().
        self.token: str | None = None
    async def auth(self) -> str | None:
        """Log in to the miner web API and cache the JWT token.

        Returns:
            The JWT token string on success, otherwise ``None``.
        """
        async with httpx.AsyncClient(transport=settings.transport()) as client:
            try:
                auth = await client.post(
                    f"http://{self.ip}:{self.port}/api/auth",
                    data={"username": self.username, "password": self.pwd},
                )
            except httpx.HTTPError:
                warnings.warn(f"Could not authenticate web token with miner: {self}")
            else:
                # NOTE(review): a non-JSON reply here would raise
                # json.JSONDecodeError uncaught — confirm callers handle it.
                json_auth = auth.json()
                self.token = json_auth.get("jwt")
            return self.token
    async def send_command(
        self,
        command: str,
        ignore_errors: bool = False,
        allow_warning: bool = True,
        privileged: bool = False,
        **parameters: Any,
    ) -> dict:
        """POST a command to ``/api/<command>`` with bearer-token auth.

        Re-authenticates and retries when the miner reports an expired token
        (this consumes one retry attempt).

        Args:
            command: API endpoint name.
            ignore_errors: Accepted for interface compatibility; not used here.
            allow_warning: Accepted for interface compatibility; not used here.
            privileged: Accepted for interface compatibility; not used here.
            **parameters: JSON body for the request.

        Returns:
            The parsed JSON response when the miner reports success.

        Raises:
            APIError: If authentication fails, the miner reports a failure,
                or HTTP/JSON errors persist through all retries.
        """
        if self.token is None:
            await self.auth()
        async with httpx.AsyncClient(transport=settings.transport()) as client:
            retries = settings.get("get_data_retries", 1)
            for attempt in range(retries):
                if self.token is None:
                    raise APIError(
                        f"Could not authenticate web token with miner: {self}"
                    )
                try:
                    response = await client.post(
                        f"http://{self.ip}:{self.port}/api/{command}",
                        headers={"Authorization": "Bearer " + self.token},
                        timeout=settings.get("api_function_timeout", 5),
                        json=parameters,
                    )
                    json_data = response.json()
                    if (
                        not json_data.get("success")
                        and "token" in json_data
                        and json_data.get("token") == "expired"
                    ):
                        # refresh the token, retry
                        await self.auth()
                        continue
                    if not json_data.get("success"):
                        # Prefer the miner's own error message when present.
                        if json_data.get("msg"):
                            raise APIError(json_data["msg"])
                        elif json_data.get("message"):
                            raise APIError(json_data["message"])
                        raise APIError("Innosilicon web api command failed.")
                    return json_data
                except httpx.HTTPError as e:
                    # Transient network failure: retry until attempts run out.
                    if attempt == retries - 1:
                        raise APIError(
                            f"HTTP error sending '{command}' to {self.ip}: {e}"
                        )
                except json.JSONDecodeError as e:
                    # `response` is bound before json() can raise, so it is
                    # safe to read its text here for diagnostics.
                    if attempt == retries - 1:
                        response_text = (
                            response.text if response.text else "empty response"
                        )
                        raise APIError(
                            f"JSON decode error for '{command}' from {self.ip}: {e} - Response: {response_text}"
                        )
        raise APIError(f"Failed to send command to miner: {self}")
    async def multicommand(
        self, *commands: str, ignore_errors: bool = False, allow_warning: bool = True
    ) -> dict:
        """Run several commands sequentially, tolerating per-command failures.

        Returns:
            A dict mapping each command to its response (``None`` on failure),
            with ``multicommand: True`` added.
        """
        data: dict[str, Any] = {k: None for k in commands}
        data["multicommand"] = True
        await self.auth()
        async with httpx.AsyncClient(transport=settings.transport()) as client:
            for command in commands:
                if self.token is None:
                    raise APIError(
                        f"Could not authenticate web token with miner: {self}"
                    )
                try:
                    response = await client.post(
                        f"http://{self.ip}:{self.port}/api/{command}",
                        headers={"Authorization": "Bearer " + self.token},
                        timeout=settings.get("api_function_timeout", 5),
                    )
                    json_data = response.json()
                    data[command] = json_data
                except httpx.HTTPError:
                    # Best-effort: leave this command's result as None.
                    pass
                except json.JSONDecodeError:
                    pass
                except TypeError:
                    # Token became None/invalid; re-authenticate for later commands.
                    await self.auth()
        return data
    async def reboot(self) -> dict:
        """Reboot the miner."""
        return await self.send_command("reboot")
    async def restart_cgminer(self) -> dict:
        """Restart the cgminer process on the miner."""
        return await self.send_command("restartCgMiner")
    async def update_pools(self, conf: dict) -> dict:
        """Apply a new pool configuration."""
        return await self.send_command("updatePools", **conf)
    async def overview(self) -> dict:
        """Get the overview report."""
        return await self.send_command("overview")
    async def type(self) -> dict:
        """Get the miner type report."""
        return await self.send_command("type")
    async def get_all(self) -> dict:
        """Get the combined data report."""
        return await self.send_command("getAll")
    async def summary(self) -> dict:
        """Get the summary report."""
        return await self.send_command("summary")
    async def get_error_detail(self) -> dict:
        """Get detailed error information."""
        return await self.send_command("getErrorDetail")
    async def pools(self) -> dict:
        """Get the configured pools."""
        return await self.send_command("pools")
    async def poweroff(self) -> dict:
        """Power the miner off."""
        return await self.send_command("poweroff")
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/web/hammer.py | pyasic/web/hammer.py | # ------------------------------------------------------------------------------
# Copyright 2022 Upstream Data Inc -
# -
# Licensed under the Apache License, Version 2.0 (the "License"); -
# you may not use this file except in compliance with the License. -
# You may obtain a copy of the License at -
# -
# http://www.apache.org/licenses/LICENSE-2.0 -
# -
# Unless required by applicable law or agreed to in writing, software -
# distributed under the License is distributed on an "AS IS" BASIS, -
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -
# See the License for the specific language governing permissions and -
# limitations under the License. -
# ------------------------------------------------------------------------------
from __future__ import annotations
import asyncio
import json
from typing import Any
import httpx
from pyasic import settings
from pyasic.web.base import BaseWebAPI
class HammerWebAPI(BaseWebAPI):
    def __init__(self, ip: str) -> None:
        """Initialize the modern Hammer API client with a specific IP address.

        Args:
            ip (str): IP address of the Hammer device.
        """
        super().__init__(ip)
        self.username: str = "root"
        self.pwd: str = settings.get("default_hammer_web_password", "root")

    async def send_command(
        self,
        command: str,
        ignore_errors: bool = False,
        allow_warning: bool = True,
        privileged: bool = False,
        **parameters: Any,
    ) -> dict:
        """Send a command to the Hammer device using HTTP digest authentication.

        Args:
            command (str): The CGI command to send.
            ignore_errors (bool): If True, ignore any HTTP errors.
            allow_warning (bool): If True, proceed with warnings.
            privileged (bool): If set to True, requires elevated privileges.
            **parameters: Arbitrary keyword arguments to be sent as parameters in the request.

        Returns:
            dict: The JSON response from the device or an error dictionary if an error occurs.
        """
        url = f"http://{self.ip}:{self.port}/cgi-bin/{command}.cgi"
        auth = httpx.DigestAuth(self.username, self.pwd)
        try:
            async with httpx.AsyncClient(transport=settings.transport()) as client:
                if parameters:
                    data = await client.post(
                        url,
                        auth=auth,
                        timeout=settings.get("api_function_timeout", 3),
                        json=parameters,
                    )
                else:
                    data = await client.get(url, auth=auth)
        except httpx.HTTPError as e:
            return {"success": False, "message": f"HTTP error occurred: {str(e)}"}
        else:
            if data.status_code == 200:
                try:
                    return data.json()
                except json.decoder.JSONDecodeError:
                    return {"success": False, "message": "Failed to decode JSON"}
            return {"success": False, "message": "Unknown error occurred"}

    async def multicommand(
        self, *commands: str, ignore_errors: bool = False, allow_warning: bool = True
    ) -> dict:
        """Execute multiple commands simultaneously.

        Args:
            *commands (str): Multiple command strings to be executed.
            ignore_errors (bool): If True, ignore any HTTP errors.
            allow_warning (bool): If True, proceed with warnings.

        Returns:
            dict: A dictionary containing the results of all commands executed.
        """
        async with httpx.AsyncClient(transport=settings.transport()) as client:
            tasks = [
                asyncio.create_task(self._handle_multicommand(client, command))
                for command in commands
            ]
            all_data = await asyncio.gather(*tasks)
        data = {}
        for item in all_data:
            data.update(item)
        data["multicommand"] = True
        return data

    async def _handle_multicommand(
        self, client: httpx.AsyncClient, command: str
    ) -> dict:
        """Helper function for handling individual commands in a multicommand execution.

        Args:
            client (httpx.AsyncClient): The HTTP client to use for the request.
            command (str): The command to be executed.

        Returns:
            dict: A dictionary containing the response of the executed command.
        """
        auth = httpx.DigestAuth(self.username, self.pwd)
        try:
            # Include the configured port: previously this URL omitted
            # `self.port` (unlike send_command), which broke multicommand
            # for miners on non-default web ports.
            url = f"http://{self.ip}:{self.port}/cgi-bin/{command}.cgi"
            ret = await client.get(url, auth=auth)
        except httpx.HTTPError:
            pass
        else:
            if ret.status_code == 200:
                try:
                    json_data = ret.json()
                    return {command: json_data}
                except json.decoder.JSONDecodeError:
                    pass
        return {command: {}}

    async def get_miner_conf(self) -> dict:
        """Retrieve the miner configuration from the Hammer device.

        Returns:
            dict: A dictionary containing the current configuration of the miner.
        """
        return await self.send_command("get_miner_conf")

    async def set_miner_conf(self, conf: dict) -> dict:
        """Set the configuration for the miner.

        Args:
            conf (dict): A dictionary of configuration settings to apply to the miner.

        Returns:
            dict: A dictionary response from the device after setting the configuration.
        """
        return await self.send_command("set_miner_conf", **conf)

    async def blink(self, blink: bool) -> dict:
        """Control the blinking of the LED on the miner device.

        Args:
            blink (bool): True to start blinking, False to stop.

        Returns:
            dict: A dictionary response from the device after the command execution.
        """
        if blink:
            return await self.send_command("blink", blink="true")
        return await self.send_command("blink", blink="false")

    async def reboot(self) -> dict:
        """Reboot the miner device.

        Returns:
            dict: A dictionary response from the device confirming the reboot command.
        """
        return await self.send_command("reboot")

    async def get_system_info(self) -> dict:
        """Retrieve system information from the miner.

        Returns:
            dict: A dictionary containing system information of the miner.
        """
        return await self.send_command("get_system_info")

    async def get_network_info(self) -> dict:
        """Retrieve network configuration information from the miner.

        Returns:
            dict: A dictionary containing the network configuration of the miner.
        """
        return await self.send_command("get_network_info")

    async def summary(self) -> dict:
        """Get a summary of the miner's status and performance.

        Returns:
            dict: A summary of the miner's current operational status.
        """
        return await self.send_command("summary")

    async def get_blink_status(self) -> dict:
        """Check the status of the LED blinking on the miner.

        Returns:
            dict: A dictionary indicating whether the LED is currently blinking.
        """
        return await self.send_command("get_blink_status")

    async def set_network_conf(
        self,
        ip: str,
        dns: str,
        gateway: str,
        subnet_mask: str,
        hostname: str,
        protocol: int,
    ) -> dict:
        """Set the network configuration of the miner.

        Args:
            ip (str): IP address of the device.
            dns (str): DNS server IP address.
            gateway (str): Gateway IP address.
            subnet_mask (str): Network subnet mask.
            hostname (str): Hostname of the device.
            protocol (int): Network protocol used.

        Returns:
            dict: A dictionary response from the device after setting the network configuration.
        """
        return await self.send_command(
            "set_network_conf",
            ipAddress=ip,
            ipDns=dns,
            ipGateway=gateway,
            ipHost=hostname,
            ipPro=protocol,
            ipSub=subnet_mask,
        )
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/web/elphapex.py | pyasic/web/elphapex.py | # ------------------------------------------------------------------------------
# Copyright 2022 Upstream Data Inc -
# -
# Licensed under the Apache License, Version 2.0 (the "License"); -
# you may not use this file except in compliance with the License. -
# You may obtain a copy of the License at -
# -
# http://www.apache.org/licenses/LICENSE-2.0 -
# -
# Unless required by applicable law or agreed to in writing, software -
# distributed under the License is distributed on an "AS IS" BASIS, -
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -
# See the License for the specific language governing permissions and -
# limitations under the License. -
# ------------------------------------------------------------------------------
from __future__ import annotations
import asyncio
import json
from typing import Any
import httpx
from pyasic import settings
from pyasic.web.base import BaseWebAPI
class ElphapexWebAPI(BaseWebAPI):
    def __init__(self, ip: str) -> None:
        """Initialize the modern Elphapex API client with a specific IP address.

        Args:
            ip (str): IP address of the Elphapex device.
        """
        super().__init__(ip)
        self.username: str = "root"
        self.pwd: str = settings.get("default_elphapex_web_password", "root")

    async def send_command(
        self,
        command: str,
        ignore_errors: bool = False,
        allow_warning: bool = True,
        privileged: bool = False,
        **parameters: Any,
    ) -> dict:
        """Send a command to the Elphapex device using HTTP digest authentication.

        Args:
            command (str): The CGI command to send.
            ignore_errors (bool): If True, ignore any HTTP errors.
            allow_warning (bool): If True, proceed with warnings.
            privileged (bool): If set to True, requires elevated privileges.
            **parameters: Arbitrary keyword arguments to be sent as parameters in the request.

        Returns:
            dict: The JSON response from the device or an error dictionary if an error occurs.
        """
        url = f"http://{self.ip}:{self.port}/cgi-bin/{command}.cgi"
        auth = httpx.DigestAuth(self.username, self.pwd)
        try:
            async with httpx.AsyncClient(transport=settings.transport()) as client:
                if parameters:
                    data = await client.post(
                        url,
                        auth=auth,
                        timeout=settings.get("api_function_timeout", 3),
                        json=parameters,
                    )
                else:
                    data = await client.get(url, auth=auth)
        except httpx.HTTPError as e:
            return {"success": False, "message": f"HTTP error occurred: {str(e)}"}
        else:
            if data.status_code == 200:
                try:
                    return data.json()
                except json.decoder.JSONDecodeError:
                    return {"success": False, "message": "Failed to decode JSON"}
            return {"success": False, "message": "Unknown error occurred"}

    async def multicommand(
        self, *commands: str, ignore_errors: bool = False, allow_warning: bool = True
    ) -> dict:
        """Execute multiple commands simultaneously.

        Args:
            *commands (str): Multiple command strings to be executed.
            ignore_errors (bool): If True, ignore any HTTP errors.
            allow_warning (bool): If True, proceed with warnings.

        Returns:
            dict: A dictionary containing the results of all commands executed.
        """
        async with httpx.AsyncClient(transport=settings.transport()) as client:
            tasks = [
                asyncio.create_task(self._handle_multicommand(client, command))
                for command in commands
            ]
            all_data = await asyncio.gather(*tasks)
        data = {}
        for item in all_data:
            data.update(item)
        data["multicommand"] = True
        return data

    async def _handle_multicommand(
        self, client: httpx.AsyncClient, command: str
    ) -> dict:
        """Helper function for handling individual commands in a multicommand execution.

        Args:
            client (httpx.AsyncClient): The HTTP client to use for the request.
            command (str): The command to be executed.

        Returns:
            dict: A dictionary containing the response of the executed command.
        """
        auth = httpx.DigestAuth(self.username, self.pwd)

        async def _send():
            try:
                # Include the configured port: previously this URL omitted
                # `self.port` (unlike send_command), which broke multicommand
                # for miners on non-default web ports.
                url = f"http://{self.ip}:{self.port}/cgi-bin/{command}.cgi"
                ret = await client.get(url, auth=auth)
            except httpx.HTTPError:
                pass
            else:
                if ret.status_code == 200:
                    try:
                        json_data = ret.json()
                        # Only "S" (success) / "I" (info) statuses are usable.
                        if json_data.get("STATUS", {}).get("STATUS") not in ["S", "I"]:
                            return None
                        return {command: json_data}
                    except json.decoder.JSONDecodeError:
                        pass
            return None

        # retry 3 times
        for _ in range(3):
            res = await _send()
            if res is not None:
                return res
        return {command: {}}

    async def get_miner_conf(self) -> dict:
        """Retrieve the miner configuration from the Elphapex device.

        Returns:
            dict: A dictionary containing the current configuration of the miner.
        """
        return await self.send_command("get_miner_conf")

    async def set_miner_conf(self, conf: dict) -> dict:
        """Set the configuration for the miner.

        Args:
            conf (dict): A dictionary of configuration settings to apply to the miner.

        Returns:
            dict: A dictionary response from the device after setting the configuration.
        """
        return await self.send_command("set_miner_conf", **conf)

    async def blink(self, blink: bool) -> dict:
        """Control the blinking of the LED on the miner device.

        Args:
            blink (bool): True to start blinking, False to stop.

        Returns:
            dict: A dictionary response from the device after the command execution.
        """
        if blink:
            return await self.send_command("blink", blink="true")
        return await self.send_command("blink", blink="false")

    async def reboot(self) -> dict:
        """Reboot the miner device.

        Returns:
            dict: A dictionary response from the device confirming the reboot command.
        """
        return await self.send_command("reboot")

    async def get_system_info(self) -> dict:
        """Retrieve system information from the miner.

        Returns:
            dict: A dictionary containing system information of the miner.
        """
        return await self.send_command("get_system_info")

    async def get_network_info(self) -> dict:
        """Retrieve network configuration information from the miner.

        Returns:
            dict: A dictionary containing the network configuration of the miner.
        """
        return await self.send_command("get_network_info")

    async def summary(self) -> dict:
        """Get a summary of the miner's status and performance.

        Returns:
            dict: A summary of the miner's current operational status.
        """
        return await self.send_command("summary")

    async def stats(self) -> dict:
        """Get miners stats.

        Returns:
            dict: A summary of the miner's current operational status.
        """
        return await self.send_command("stats")

    async def get_blink_status(self) -> dict:
        """Check the status of the LED blinking on the miner.

        Returns:
            dict: A dictionary indicating whether the LED is currently blinking.
        """
        return await self.send_command("get_blink_status")

    async def pools(self) -> dict:
        """Check the status of the miner's pools.

        Returns:
            dict: A dictionary containing the pool status as information.
        """
        return await self.send_command("pools")
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/web/avalonminer.py | pyasic/web/avalonminer.py | # ------------------------------------------------------------------------------
# Copyright 2022 Upstream Data Inc -
# -
# Licensed under the Apache License, Version 2.0 (the "License"); -
# you may not use this file except in compliance with the License. -
# You may obtain a copy of the License at -
# -
# http://www.apache.org/licenses/LICENSE-2.0 -
# -
# Unless required by applicable law or agreed to in writing, software -
# distributed under the License is distributed on an "AS IS" BASIS, -
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -
# See the License for the specific language governing permissions and -
# limitations under the License. -
# ------------------------------------------------------------------------------
from __future__ import annotations
import asyncio
import hashlib
import json
from typing import Any
import httpx
from pyasic import settings
from pyasic.web.base import BaseWebAPI
class AvalonMinerWebAPI(BaseWebAPI):
    """Web API client for Avalonminer devices.

    The miner authenticates with a password-derived cookie and wraps its
    JSON replies in a ``minerinfoCallback(...)`` JSONP shell, which this
    client strips before parsing.
    """

    def __init__(self, ip: str) -> None:
        """Set up an Avalonminer web client for the device at *ip*."""
        super().__init__(ip)
        self.username: str = "root"
        self.pwd: str = settings.get("default_avalonminer_web_password", "root")

    async def send_command(
        self,
        command: str,
        ignore_errors: bool = False,
        allow_warning: bool = True,
        privileged: bool = False,
        **parameters: Any,
    ) -> dict:
        """Fetch ``/<command>.cgi`` from the miner and return its parsed JSON.

        Any HTTP or JSON failure yields an empty dict.
        """
        digest = hashlib.sha256(self.pwd.encode()).hexdigest()
        auth_cookie = "ff0000ff" + digest[:24]
        endpoint = f"http://{self.ip}:{self.port}/{command}.cgi"
        try:
            async with httpx.AsyncClient(transport=settings.transport()) as client:
                client.cookies.set("auth", auth_cookie)
                reply = await client.get(endpoint)
                # Strip the JSONP wrapper before decoding.
                body = reply.text.replace("minerinfoCallback(", "").replace(");", "")
                return json.loads(body)
        except (httpx.HTTPError, json.JSONDecodeError):
            return {}

    async def _send_with_fallbacks(self, *commands: str) -> dict:
        """Return the first non-empty response among *commands*, in order.

        Errors and empty replies fall through to the next candidate; an
        empty dict means every attempt failed.
        """
        for name in commands:
            payload = await self.send_command(name)
            if payload:
                return payload
        return {}

    async def multicommand(
        self, *commands: str, ignore_errors: bool = False, allow_warning: bool = True
    ) -> dict:
        """Run several CGI commands concurrently and merge their responses.

        The merged dict carries ``multicommand: True``.
        """
        async with httpx.AsyncClient(transport=settings.transport()) as client:
            digest = hashlib.sha256(self.pwd.encode()).hexdigest()
            client.cookies.set("auth", "ff0000ff" + digest[:24])
            results = await asyncio.gather(
                *(self._handle_multicommand(client, cmd) for cmd in commands)
            )
        merged: dict = {}
        for chunk in results:
            merged.update(chunk)
        merged["multicommand"] = True
        return merged

    async def _handle_multicommand(
        self, client: httpx.AsyncClient, command: str
    ) -> dict:
        """Resolve *command* (plus any alias spellings) to a parsed response.

        Tries the given endpoint first, then known firmware alias names,
        returning the first non-empty parse; {} means every attempt failed.
        """
        aliases = {
            "get_minerinfo": ("get_miner_info",),
            "get_miner_info": ("get_minerinfo",),
            "get_status": ("status",),
            "status": ("get_status",),
            "get_pool": ("pool",),
            "pool": ("get_pool",),
            "summary": ("get_summary",),
            "get_summary": ("summary",),
        }
        for endpoint in (command, *aliases.get(command, ())):
            try:
                reply = await client.get(
                    f"http://{self.ip}:{self.port}/{endpoint}.cgi"
                )
                text = reply.text.replace("minerinfoCallback(", "").replace(");", "")
                payload = json.loads(text)
            except (httpx.HTTPError, json.JSONDecodeError):
                continue
            if payload:
                return payload
        return {}

    async def minerinfo(self):
        """Miner info, preferring ``get_minerinfo`` over ``get_miner_info``."""
        return await self._send_with_fallbacks("get_minerinfo", "get_miner_info")

    async def miner_info(self):
        """Miner info via the ``get_miner_info`` endpoint only."""
        return await self.send_command("get_miner_info")

    async def status(self):
        """Device status, preferring ``get_status`` over ``status``."""
        return await self._send_with_fallbacks("get_status", "status")

    async def summary(self):
        """Summary data, preferring ``summary`` over ``get_summary``."""
        return await self._send_with_fallbacks("summary", "get_summary")

    async def pools(self):
        """Pool data, preferring ``get_pool`` over ``pool``."""
        return await self._send_with_fallbacks("get_pool", "pool")

    async def home(self):
        """Home-page data via ``get_home``."""
        return await self.send_command("get_home")
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/web/mskminer.py | pyasic/web/mskminer.py | # ------------------------------------------------------------------------------
# Copyright 2024 Upstream Data Inc -
# -
# Licensed under the Apache License, Version 2.0 (the "License"); -
# you may not use this file except in compliance with the License. -
# You may obtain a copy of the License at -
# -
# http://www.apache.org/licenses/LICENSE-2.0 -
# -
# Unless required by applicable law or agreed to in writing, software -
# distributed under the License is distributed on an "AS IS" BASIS, -
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -
# See the License for the specific language governing permissions and -
# limitations under the License. -
# ------------------------------------------------------------------------------
from __future__ import annotations
import asyncio
import warnings
from typing import Any
import httpx
from pyasic import settings
from pyasic.errors import APIError
from pyasic.web.base import BaseWebAPI
class MSKMinerWebAPI(BaseWebAPI):
    """Web API client for MSKMiner-firmware miners.

    Authenticates via the ``/admin/login`` form, then issues commands
    against the ``/api/*`` endpoints.
    """

    def __init__(self, ip: str) -> None:
        super().__init__(ip)
        self.username = "admin"
        # Web password; falls back to the firmware default.
        self.pwd = settings.get("default_mskminer_web_password", "root")

    async def multicommand(
        self, *commands: str, ignore_errors: bool = False, allow_warning: bool = True
    ) -> dict:
        """Run multiple commands concurrently.

        Each name in *commands* must be a method of this class; the result
        maps each command name to its response.
        """
        tasks = {cmd: asyncio.create_task(getattr(self, cmd)()) for cmd in commands}
        await asyncio.gather(*tasks.values())
        return {cmd: task.result() for cmd, task in tasks.items()}

    async def send_command(
        self,
        command: str,
        ignore_errors: bool = False,
        allow_warning: bool = True,
        privileged: bool = False,
        **parameters: Any,
    ) -> dict:
        """Send a request to the miner's ``/api/<command>`` endpoint.

        Args:
            command: Endpoint name under ``/api/``.
            ignore_errors: If True, warn instead of raising on a non-200 status.
            allow_warning: Accepted for interface compatibility; not used here.
            privileged: Accepted for interface compatibility; not used here.
            **parameters: Query parameters for the request.

        Returns:
            The parsed JSON response.

        Raises:
            APIError: On HTTP failure, or a non-200 status when
                ``ignore_errors`` is False.
        """
        async with httpx.AsyncClient(transport=settings.transport()) as client:
            try:
                # Authenticate first; the session cookie stays on the client.
                await client.post(
                    f"http://{self.ip}:{self.port}/admin/login",
                    data={"username": self.username, "password": self.pwd},
                )
            except httpx.HTTPError:
                warnings.warn(f"Could not authenticate with miner web: {self}")
            try:
                resp = await client.post(
                    f"http://{self.ip}:{self.port}/api/{command}", params=parameters
                )
                if resp.status_code != 200:
                    if not ignore_errors:
                        raise APIError(f"Command failed: {command}")
                    warnings.warn(f"Command failed: {command}")
                return resp.json()
            except httpx.HTTPError as e:
                # Chain the underlying error for easier debugging.
                raise APIError(f"Command failed: {command}") from e

    async def info_v1(self):
        """Get the ``info_v1`` report from the miner."""
        return await self.send_command("info_v1")
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/web/iceriver.py | pyasic/web/iceriver.py | # ------------------------------------------------------------------------------
# Copyright 2024 Upstream Data Inc -
# -
# Licensed under the Apache License, Version 2.0 (the "License"); -
# you may not use this file except in compliance with the License. -
# You may obtain a copy of the License at -
# -
# http://www.apache.org/licenses/LICENSE-2.0 -
# -
# Unless required by applicable law or agreed to in writing, software -
# distributed under the License is distributed on an "AS IS" BASIS, -
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -
# See the License for the specific language governing permissions and -
# limitations under the License. -
# ------------------------------------------------------------------------------
from __future__ import annotations
import asyncio
import warnings
from typing import Any
import httpx
from pyasic import settings
from pyasic.errors import APIError
from pyasic.web.base import BaseWebAPI
class IceRiverWebAPI(BaseWebAPI):
    """Web API client for IceRiver miners.

    Logs in via ``/user/loginpost`` and issues commands against the
    ``/user/*`` endpoints.
    """

    def __init__(self, ip: str) -> None:
        super().__init__(ip)
        self.username = "admin"
        # Web password; falls back to the IceRiver factory default.
        self.pwd = settings.get("default_iceriver_web_password", "12345678")

    async def multicommand(
        self, *commands: str, ignore_errors: bool = False, allow_warning: bool = True
    ) -> dict:
        """Run multiple commands concurrently.

        Each name in *commands* must be a method of this class; the result
        maps each command name to its response.
        """
        tasks = {cmd: asyncio.create_task(getattr(self, cmd)()) for cmd in commands}
        await asyncio.gather(*tasks.values())
        return {cmd: task.result() for cmd, task in tasks.items()}

    async def send_command(
        self,
        command: str,
        ignore_errors: bool = False,
        allow_warning: bool = True,
        privileged: bool = False,
        **parameters: Any,
    ) -> dict:
        """Send a request to the miner's ``/user/<command>`` endpoint.

        Args:
            command: Endpoint name under ``/user/``.
            ignore_errors: If True, warn instead of raising on a non-200 status.
            allow_warning: Accepted for interface compatibility; not used here.
            privileged: Accepted for interface compatibility; not used here.
            **parameters: Query parameters for the request.

        Returns:
            The parsed JSON response.

        Raises:
            APIError: On HTTP failure, or a non-200 status when
                ``ignore_errors`` is False.
        """
        async with httpx.AsyncClient(transport=settings.transport()) as client:
            try:
                # Authenticate first; the session cookie stays on the client.
                await client.post(
                    f"http://{self.ip}:{self.port}/user/loginpost",
                    params={"post": "6", "user": self.username, "pwd": self.pwd},
                )
            except httpx.HTTPError:
                warnings.warn(f"Could not authenticate with miner web: {self}")
            try:
                resp = await client.post(
                    f"http://{self.ip}:{self.port}/user/{command}", params=parameters
                )
                if resp.status_code != 200:
                    if not ignore_errors:
                        raise APIError(f"Command failed: {command}")
                    warnings.warn(f"Command failed: {command}")
                return resp.json()
            except httpx.HTTPError as e:
                # Chain the underlying error for easier debugging.
                raise APIError(f"Command failed: {command}") from e

    async def locate(self, enable: bool):
        """Toggle the locate LED on the miner."""
        return await self.send_command(
            "userpanel", post="5", locate="1" if enable else "0"
        )

    async def userpanel(self):
        """Get the user panel data report."""
        return await self.send_command("userpanel", post="4")
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/web/espminer.py | pyasic/web/espminer.py | from __future__ import annotations
import asyncio
import json
from typing import Any
import httpx
from pyasic import APIError, settings
from pyasic.web.base import BaseWebAPI
class ESPMinerWebAPI(BaseWebAPI):
    """Web API client for ESPMiner-based devices (e.g. BitAxe)."""

    async def send_command(
        self,
        command: str,
        ignore_errors: bool = False,
        allow_warning: bool = True,
        privileged: bool = False,
        **parameters: Any,
    ) -> dict:
        """Send a single command to the miner's ``/api/`` HTTP endpoint.

        The reserved ``post``/``patch`` keyword parameters select the HTTP
        verb; all remaining parameters are sent as the JSON body. Without
        either flag a plain GET is issued.

        Args:
            command (str): The API path under ``/api/``.
            ignore_errors (bool): Unused; kept for interface compatibility.
            allow_warning (bool): Unused; kept for interface compatibility.
            privileged (bool): Unused; kept for interface compatibility.
            **parameters: JSON body fields, plus optional ``post``/``patch``.

        Returns:
            dict: The decoded JSON response.

        Raises:
            APIError: On HTTP failure, undecodable JSON, or exhausted retries.
        """
        url = f"http://{self.ip}:{self.port}/api/{command}"
        # Resolve the HTTP verb once, BEFORE the retry loop. The original
        # popped "post"/"patch" inside the loop body, so a retry after an
        # HTTPError found the flag already removed and silently fell back
        # to a GET request.
        do_post = bool(parameters.pop("post", False))
        do_patch = bool(parameters.pop("patch", False))
        async with httpx.AsyncClient(transport=settings.transport()) as client:
            retries = settings.get("get_data_retries", 1)
            for attempt in range(retries):
                try:
                    if do_post:
                        data = await client.post(
                            url,
                            timeout=settings.get("api_function_timeout", 3),
                            json=parameters,
                        )
                    elif do_patch:
                        data = await client.patch(
                            url,
                            timeout=settings.get("api_function_timeout", 3),
                            json=parameters,
                        )
                    else:
                        data = await client.get(
                            url,
                            timeout=settings.get("api_function_timeout", 5),
                        )
                except httpx.HTTPError as e:
                    if attempt == retries - 1:
                        raise APIError(
                            f"HTTP error sending '{command}' to {self.ip}: {e}"
                        )
                else:
                    if data.status_code == 200:
                        try:
                            return data.json()
                        except json.decoder.JSONDecodeError as e:
                            response_text = data.text if data.text else "empty response"
                            if attempt == retries - 1:
                                raise APIError(
                                    f"JSON decode error for '{command}' from {self.ip}: {e} - Response: {response_text}"
                                )
        raise APIError(f"Failed to send command to miner API: {url}")

    async def multicommand(
        self, *commands: str, ignore_errors: bool = False, allow_warning: bool = True
    ) -> dict:
        """Execute multiple commands simultaneously on the BitAxe miner.

        Args:
            *commands (str): Commands to execute.
            ignore_errors (bool): Whether to ignore errors during command execution.
            allow_warning (bool): Whether to proceed despite warnings.

        Returns:
            dict: A dictionary containing responses for all commands executed.
        """
        # send all commands individually, in parallel
        tasks = {
            cmd: asyncio.create_task(self.send_command(cmd, allow_warning=allow_warning))
            for cmd in commands
        }
        results = await asyncio.gather(
            *[tasks[cmd] for cmd in tasks], return_exceptions=True
        )
        data: dict[str, Any] = {"multicommand": True}
        for cmd, result in zip(tasks.keys(), results):
            # APIError is an Exception subclass, so a single isinstance
            # check against Exception is sufficient here.
            if not isinstance(result, Exception):
                data[cmd] = result if result else {}
        return data

    async def system_info(self) -> dict:
        """Fetch general system information."""
        return await self.send_command("system/info")

    async def swarm_info(self) -> dict:
        """Fetch swarm (multi-unit) information."""
        return await self.send_command("swarm/info")

    async def restart(self) -> dict:
        """Restart the miner system (POST)."""
        return await self.send_command("system/restart", post=True)

    async def update_settings(self, **config: Any) -> dict:
        """Apply system settings via PATCH."""
        return await self.send_command("system", patch=True, **config)

    async def asic_info(self) -> dict:
        """Fetch ASIC chip information."""
        return await self.send_command("system/asic")
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/web/__init__.py | pyasic/web/__init__.py | # ------------------------------------------------------------------------------
# Copyright 2022 Upstream Data Inc -
# -
# Licensed under the Apache License, Version 2.0 (the "License"); -
# you may not use this file except in compliance with the License. -
# You may obtain a copy of the License at -
# -
# http://www.apache.org/licenses/LICENSE-2.0 -
# -
# Unless required by applicable law or agreed to in writing, software -
# distributed under the License is distributed on an "AS IS" BASIS, -
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -
# See the License for the specific language governing permissions and -
# limitations under the License. -
# ------------------------------------------------------------------------------
from .antminer import AntminerModernWebAPI, AntminerOldWebAPI
from .auradine import AuradineWebAPI
from .base import BaseWebAPI
from .braiins_os import BOSerWebAPI, BOSMinerWebAPI
from .epic import ePICWebAPI
from .goldshell import GoldshellWebAPI
from .hammer import HammerWebAPI
from .iceriver import IceRiverWebAPI
from .innosilicon import InnosiliconWebAPI
from .vnish import VNishWebAPI
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/web/base.py | pyasic/web/base.py | # ------------------------------------------------------------------------------
# Copyright 2022 Upstream Data Inc -
# -
# Licensed under the Apache License, Version 2.0 (the "License"); -
# you may not use this file except in compliance with the License. -
# You may obtain a copy of the License at -
# -
# http://www.apache.org/licenses/LICENSE-2.0 -
# -
# Unless required by applicable law or agreed to in writing, software -
# distributed under the License is distributed on an "AS IS" BASIS, -
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -
# See the License for the specific language governing permissions and -
# limitations under the License. -
# ------------------------------------------------------------------------------
from __future__ import annotations
import warnings
from abc import ABC, abstractmethod
from typing import Any
from pyasic.errors import APIWarning
class BaseWebAPI(ABC):
    """Abstract base for all miner web-API clients.

    Subclasses implement `send_command` / `multicommand` for their firmware's
    HTTP interface. Instantiating this class directly raises TypeError.
    """

    def __init__(self, ip: str) -> None:
        # ip address of the miner
        self.ip = ip
        # credentials; subclasses overwrite these with firmware defaults
        self.username: str | None = None
        self.pwd: str | None = None
        # default web port; subclasses may override (e.g. 8080)
        self.port: int = 80
        # cached auth token for token-based firmwares, None until auth
        self.token: str | None = None

    def __new__(cls, *args: Any, **kwargs: Any) -> BaseWebAPI:
        # Guard against direct instantiation of the abstract base.
        if cls is BaseWebAPI:
            raise TypeError(f"Only children of '{cls.__name__}' may be instantiated")
        return object.__new__(cls)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}: {str(self.ip)}"

    @abstractmethod
    async def send_command(
        self,
        command: str,
        ignore_errors: bool = False,
        allow_warning: bool = True,
        privileged: bool = False,
        **parameters: Any,
    ) -> dict:
        """Send a single command to the miner's web API and return its JSON."""
        pass

    @abstractmethod
    async def multicommand(
        self, *commands: str, ignore_errors: bool = False, allow_warning: bool = True
    ) -> dict:
        """Execute several commands and return a merged result mapping."""
        pass

    def _check_commands(self, *commands: str) -> list[str]:
        """Filter *commands* down to those this API actually exposes.

        Unknown commands are dropped with an APIWarning rather than raising.
        """
        allowed_commands = self.get_commands()
        return_commands = []
        for command in [*commands]:
            if command in allowed_commands:
                return_commands.append(command)
            else:
                warnings.warn(
                    f"""Removing incorrect command: {command}
If you are sure you want to use this command please use WebAPI.send_command("{command}", ignore_errors=True) instead.""",
                    APIWarning,
                )
        return return_commands

    @property
    def commands(self) -> list[str]:
        """Convenience property wrapping `get_commands`."""
        return self.get_commands()

    def get_commands(self) -> list[str]:
        """Get a list of command accessible to a specific type of web API on the miner.

        Returns:
            A list of all web commands that the miner supports.
        """
        # NOTE: the `not func == "commands"` filter MUST run before the
        # callable(getattr(...)) check — getattr on the `commands` property
        # would call this method again and recurse infinitely.
        return [
            func
            for func in
            # each function in self
            dir(self)
            if not func == "commands"
            if callable(getattr(self, func))
            and
            # no __ or _ methods
            not func.startswith("__")
            and not func.startswith("_")
            and
            # remove all functions that are in this base class
            func
            not in [
                func for func in dir(BaseWebAPI) if callable(getattr(BaseWebAPI, func))
            ]
        ]
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/web/auradine.py | pyasic/web/auradine.py | # ------------------------------------------------------------------------------
# Copyright 2022 Upstream Data Inc -
# -
# Licensed under the Apache License, Version 2.0 (the "License"); -
# you may not use this file except in compliance with the License. -
# You may obtain a copy of the License at -
# -
# http://www.apache.org/licenses/LICENSE-2.0 -
# -
# Unless required by applicable law or agreed to in writing, software -
# distributed under the License is distributed on an "AS IS" BASIS, -
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -
# See the License for the specific language governing permissions and -
# limitations under the License. -
# ------------------------------------------------------------------------------
from __future__ import annotations
import asyncio
import json
import warnings
from typing import Any
import httpx
from pyasic import settings
from pyasic.errors import APIError
from pyasic.misc import validate_command_output
from pyasic.web.base import BaseWebAPI
class AuradineWebAPI(BaseWebAPI):
    """Web API client for Auradine miners (token-authenticated, port 8080)."""

    def __init__(self, ip: str) -> None:
        """Initializes the API client for interacting with Auradine mining devices.

        Args:
            ip (str): IP address of the Auradine miner.
        """
        super().__init__(ip)
        self.username = "admin"
        self.pwd = settings.get("default_auradine_web_password", "admin")
        self.port = 8080
        self.token = None

    async def auth(self) -> str | None:
        """Authenticate and retrieve a web token from the Auradine miner.

        Returns:
            str | None: A token if authentication is successful, None otherwise.
        """
        async with httpx.AsyncClient(transport=settings.transport()) as client:
            try:
                auth = await client.post(
                    f"http://{self.ip}:{self.port}/token",
                    json={
                        "command": "token",
                        "user": self.username,
                        "password": self.pwd,
                    },
                )
            except httpx.HTTPError:
                warnings.warn(f"Could not authenticate web token with miner: {self}")
            else:
                json_auth = auth.json()
                try:
                    self.token = json_auth["Token"][0]["Token"]
                except LookupError:
                    return None
            return self.token

    async def send_command(
        self,
        command: str,
        ignore_errors: bool = False,
        allow_warning: bool = True,
        privileged: bool = False,
        **parameters: Any,
    ) -> dict:
        """Send a command to the Auradine miner, handling authentication and retries.

        Args:
            command (str): The specific command to execute.
            ignore_errors (bool): Whether to ignore HTTP errors.
            allow_warning (bool): Whether to proceed with warnings.
            privileged (bool): Whether the command requires privileged access.
            **parameters: Additional parameters for the command.

        Returns:
            dict: The JSON response from the device.

        Raises:
            APIError: If authentication fails, validation fails on the final
                attempt, or all retries are exhausted.
        """
        post = privileged or not parameters == {}
        if not parameters == {}:
            parameters["command"] = command
        if self.token is None:
            await self.auth()
        async with httpx.AsyncClient(transport=settings.transport()) as client:
            # Hoist the retry count; the original re-read the setting inside
            # the loop AND compared `i == retries`, which range(retries) can
            # never produce — making the informative validation error
            # unreachable. Compare against retries - 1 (the final attempt).
            retries = settings.get("get_data_retries", 1)
            for i in range(retries):
                if self.token is None:
                    raise APIError(
                        f"Could not authenticate web token with miner: {self}"
                    )
                try:
                    if post:
                        response = await client.post(
                            f"http://{self.ip}:{self.port}/{command}",
                            headers={"Token": self.token},
                            timeout=settings.get("api_function_timeout", 5),
                            json=parameters,
                        )
                    else:
                        response = await client.get(
                            f"http://{self.ip}:{self.port}/{command}",
                            headers={"Token": self.token},
                            timeout=settings.get("api_function_timeout", 5),
                        )
                    json_data = response.json()
                    validation = validate_command_output(json_data)
                    if not validation[0]:
                        if i == retries - 1:
                            raise APIError(validation[1])
                        # refresh the token, retry
                        await self.auth()
                        continue
                    return json_data
                except (httpx.HTTPError, json.JSONDecodeError):
                    pass
        raise APIError(f"Failed to send command to miner: {self}")

    async def multicommand(
        self, *commands: str, ignore_errors: bool = False, allow_warning: bool = True
    ) -> dict:
        """Execute multiple commands simultaneously on the Auradine miner.

        Args:
            *commands (str): Commands to execute.
            ignore_errors (bool): Whether to ignore errors during command execution.
            allow_warning (bool): Whether to proceed despite warnings.

        Returns:
            dict: A dictionary containing responses for all commands executed.
        """
        tasks = {}
        # send all commands individually
        for cmd in commands:
            tasks[cmd] = asyncio.create_task(
                self.send_command(cmd, allow_warning=allow_warning)
            )
        results = await asyncio.gather(
            *[tasks[cmd] for cmd in tasks], return_exceptions=True
        )
        data: dict[str, Any] = {"multicommand": True}
        for cmd, result in zip(tasks.keys(), results):
            if isinstance(result, dict):
                data[cmd] = result
        return data

    async def factory_reset(self) -> dict:
        """Perform a factory reset on the Auradine miner.

        Returns:
            dict: A dictionary indicating the result of the reset operation.
        """
        return await self.send_command("factory-reset", privileged=True)

    async def get_fan(self) -> dict:
        """Retrieve the current fan status from the Auradine miner.

        Returns:
            dict: A dictionary containing the current fan status.
        """
        return await self.send_command("fan")

    async def set_fan(self, fan: int, speed_pct: int) -> dict:
        """Set the speed of a specific fan on the Auradine miner.

        Args:
            fan (int): The index of the fan to control.
            speed_pct (int): The speed percentage to set for the fan.

        Returns:
            dict: A dictionary indicating the result of the operation.
        """
        return await self.send_command("fan", index=fan, percentage=speed_pct)

    async def firmware_upgrade(
        self, url: str | None = None, version: str = "latest"
    ) -> dict:
        """Upgrade the firmware of the Auradine miner.

        Args:
            url (str, optional): The URL to download the firmware from.
            version (str, optional): The version of the firmware to upgrade to, defaults to 'latest'.

        Returns:
            dict: A dictionary indicating the result of the firmware upgrade.
        """
        if url is not None:
            return await self.send_command("firmware-upgrade", url=url)
        return await self.send_command("firmware-upgrade", version=version)

    async def get_frequency(self) -> dict:
        """Retrieve the current frequency settings of the Auradine miner.

        Returns:
            dict: A dictionary containing the frequency settings.
        """
        return await self.send_command("frequency")

    async def set_frequency(self, board: int, frequency: float) -> dict:
        """Set the frequency for a specific board on the Auradine miner.

        Args:
            board (int): The index of the board to configure.
            frequency (float): The frequency in MHz to set for the board.

        Returns:
            dict: A dictionary indicating the result of setting the frequency.
        """
        return await self.send_command("frequency", board=board, frequency=frequency)

    async def ipreport(self) -> dict:
        """Generate an IP report for the Auradine miner.

        Returns:
            dict: A dictionary containing the IP report details.
        """
        return await self.send_command("ipreport")

    async def get_led(self) -> dict:
        """Retrieve the current LED status from the Auradine miner.

        Returns:
            dict: A dictionary containing the current status of the LED settings.
        """
        return await self.send_command("led")

    async def set_led(self, code: int) -> dict:
        """Set the LED code on the Auradine miner.

        Args:
            code (int): The code that determines the LED behavior.

        Returns:
            dict: A dictionary indicating the result of the operation.
        """
        return await self.send_command("led", code=code)

    async def set_led_custom(self, code: int, led_1: int, led_2: int, msg: str) -> dict:
        """Set custom LED configurations including messages.

        Args:
            code (int): The LED code to set.
            led_1 (int): The first LED indicator number.
            led_2 (int): The second LED indicator number.
            msg (str): The message to display or represent with LEDs.

        Returns:
            dict: A dictionary indicating the result of the custom LED configuration.
        """
        return await self.send_command(
            "led", code=code, led1=led_1, led2=led_2, msg=msg
        )

    async def get_mode(self) -> dict:
        """Retrieve the current operational mode of the Auradine miner.

        Returns:
            dict: A dictionary containing the current mode settings.
        """
        return await self.send_command("mode")

    async def set_mode(self, **kwargs: Any) -> dict:
        """Set the operational mode of the Auradine miner.

        Args:
            **kwargs (Any): Mode settings specified as keyword arguments.

        Returns:
            dict: A dictionary indicating the result of the mode setting operation.
        """
        return await self.send_command("mode", **kwargs)

    async def get_network(self) -> dict:
        """Retrieve the network configuration settings of the Auradine miner.

        Returns:
            dict: A dictionary containing the network configuration details.
        """
        return await self.send_command("network")

    async def set_network(self, **kwargs: Any) -> dict:
        """Set the network configuration of the Auradine miner.

        Args:
            **kwargs (Any): Network settings specified as keyword arguments.

        Returns:
            dict: A dictionary indicating the result of the network configuration.
        """
        return await self.send_command("network", **kwargs)

    async def password(self, password: str) -> dict:
        """Change the password used for accessing the Auradine miner.

        Args:
            password (str): The new password to set.

        Returns:
            dict: A dictionary indicating the result of the password change operation.
        """
        res = await self.send_command(
            "password", user=self.username, old=self.pwd, new=password
        )
        self.pwd = password
        return res

    async def get_psu(self) -> dict:
        """Retrieve the status of the power supply unit (PSU) from the Auradine miner.

        Returns:
            dict: A dictionary containing the PSU status.
        """
        return await self.send_command("psu")

    async def set_psu(self, voltage: float) -> dict:
        """Set the voltage for the power supply unit of the Auradine miner.

        Args:
            voltage (float): The voltage level to set for the PSU.

        Returns:
            dict: A dictionary indicating the result of setting the PSU voltage.
        """
        return await self.send_command("psu", voltage=voltage)

    async def get_register(self) -> dict:
        """Retrieve registration information from the Auradine miner.

        Returns:
            dict: A dictionary containing the registration details.
        """
        return await self.send_command("register")

    async def set_register(self, company: str) -> dict:
        """Set the registration information for the Auradine miner.

        Args:
            company (str): The company name to register the miner under.

        Returns:
            dict: A dictionary indicating the result of the registration operation.
        """
        return await self.send_command("register", parameter=company)

    async def reboot(self) -> dict:
        """Reboot the Auradine miner.

        Returns:
            dict: A dictionary indicating the result of the reboot operation.
        """
        return await self.send_command("restart", privileged=True)

    async def restart_gcminer(self) -> dict:
        """Restart the GCMiner application on the Auradine miner.

        Returns:
            dict: A dictionary indicating the result of the GCMiner restart operation.
        """
        return await self.send_command("restart", parameter="gcminer")

    async def restart_api_server(self) -> dict:
        """Restart the API server on the Auradine miner.

        Returns:
            dict: A dictionary indicating the result of the API server restart operation.
        """
        return await self.send_command("restart", parameter="api-server")

    async def temperature(self) -> dict:
        """Retrieve the current temperature readings from the Auradine miner.

        Returns:
            dict: A dictionary containing temperature data.
        """
        return await self.send_command("temperature")

    async def timedate(self, ntp: str, timezone: str) -> dict:
        """Set the time and date settings for the Auradine miner.

        Args:
            ntp (str): The NTP server to use for time synchronization.
            timezone (str): The timezone setting.

        Returns:
            dict: A dictionary indicating the result of setting the time and date.
        """
        return await self.send_command("timedate", ntp=ntp, timezone=timezone)

    async def get_token(self) -> dict:
        """Retrieve the current authentication token for the Auradine miner.

        Returns:
            dict: A dictionary containing the authentication token.
        """
        return await self.send_command("token", user=self.username, password=self.pwd)

    async def update_pools(self, pools: list[dict]) -> dict:
        """Update the mining pools configuration on the Auradine miner.

        Args:
            pools (list[dict]): A list of dictionaries, each representing a pool configuration.

        Returns:
            dict: A dictionary indicating the result of the update operation.
        """
        return await self.send_command("updatepools", pools=pools)

    async def voltage(self) -> dict:
        """Retrieve the voltage settings of the Auradine miner.

        Returns:
            dict: A dictionary containing the voltage details.
        """
        return await self.send_command("voltage")

    async def get_ztp(self) -> dict:
        """Retrieve the zero-touch provisioning status from the Auradine miner.

        Returns:
            dict: A dictionary containing the ZTP status.
        """
        return await self.send_command("ztp")

    async def set_ztp(self, enable: bool) -> dict:
        """Enable or disable zero-touch provisioning (ZTP) on the Auradine miner.

        Args:
            enable (bool): True to enable ZTP, False to disable.

        Returns:
            dict: A dictionary indicating the result of the ZTP setting operation.
        """
        return await self.send_command("ztp", parameter="on" if enable else "off")
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/web/vnish.py | pyasic/web/vnish.py | # ------------------------------------------------------------------------------
# Copyright 2022 Upstream Data Inc -
# -
# Licensed under the Apache License, Version 2.0 (the "License"); -
# you may not use this file except in compliance with the License. -
# You may obtain a copy of the License at -
# -
# http://www.apache.org/licenses/LICENSE-2.0 -
# -
# Unless required by applicable law or agreed to in writing, software -
# distributed under the License is distributed on an "AS IS" BASIS, -
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -
# See the License for the specific language governing permissions and -
# limitations under the License. -
# ------------------------------------------------------------------------------
from __future__ import annotations
import json
import warnings
from typing import Any
import httpx
from pyasic import settings
from pyasic.errors import APIError
from pyasic.web.base import BaseWebAPI
class VNishWebAPI(BaseWebAPI):
    """Web API client for VNish firmware (token-authenticated `/api/v1/`)."""

    def __init__(self, ip: str) -> None:
        super().__init__(ip)
        self.username = "admin"
        self.pwd = settings.get("default_vnish_web_password", "admin")
        # lazily populated by auth() on first send_command
        self.token = None

    async def auth(self) -> str | None:
        """Unlock the API with the web password and cache the session token.

        Returns:
            str | None: The token on success, None otherwise.
        """
        async with httpx.AsyncClient(transport=settings.transport()) as client:
            try:
                auth = await client.post(
                    f"http://{self.ip}:{self.port}/api/v1/unlock",
                    json={"pw": self.pwd},
                )
            except httpx.HTTPError:
                warnings.warn(f"Could not authenticate web token with miner: {self}")
            else:
                if not auth.status_code == 200:
                    warnings.warn(
                        f"Could not authenticate web token with miner: {self}"
                    )
                    return None
                json_auth = auth.json()
                self.token = json_auth["token"]
            return self.token

    async def send_command(
        self,
        command: str,
        ignore_errors: bool = False,
        allow_warning: bool = True,
        privileged: bool = False,
        **parameters: Any,
    ) -> dict:
        """Send one command, refreshing the auth token on non-200 responses.

        POST is used when the call is privileged or has parameters; GET
        otherwise. ``system/*`` endpoints require a ``Bearer`` prefix.

        Raises:
            APIError: On persistent HTTP/JSON/auth failure after retries.
        """
        # parameters present (or privileged) means a POST with a JSON body
        post = privileged or not parameters == {}
        if self.token is None:
            await self.auth()
        async with httpx.AsyncClient(transport=settings.transport()) as client:
            retries = settings.get("get_data_retries", 1)
            for attempt in range(retries):
                try:
                    auth = self.token
                    if auth is None:
                        # not caught below (APIError is none of the handled
                        # exception types), so this aborts immediately
                        raise APIError(
                            f"Could not authenticate web token with miner: {self}"
                        )
                    if command.startswith("system"):
                        auth = "Bearer " + auth
                    if post:
                        response = await client.post(
                            f"http://{self.ip}:{self.port}/api/v1/{command}",
                            headers={"Authorization": auth},
                            timeout=settings.get("api_function_timeout", 5),
                            json=parameters,
                        )
                    else:
                        response = await client.get(
                            f"http://{self.ip}:{self.port}/api/v1/{command}",
                            headers={"Authorization": auth},
                            timeout=settings.get("api_function_timeout", 5),
                        )
                    if not response.status_code == 200:
                        # refresh the token, retry
                        await self.auth()
                        continue
                    json_data = response.json()
                    if json_data:
                        return json_data
                    # empty body with a 200 status is treated as success
                    return {"success": True}
                except httpx.HTTPError as e:
                    if attempt == retries - 1:
                        raise APIError(
                            f"HTTP error sending '{command}' to {self.ip}: {e}"
                        )
                except json.JSONDecodeError as e:
                    if attempt == retries - 1:
                        response_text = (
                            response.text if response.text else "empty response"
                        )
                        raise APIError(
                            f"JSON decode error for '{command}' from {self.ip}: {e} - Response: {response_text}"
                        )
                except AttributeError as e:
                    if attempt == retries - 1:
                        raise APIError(
                            f"Attribute error sending '{command}' to {self.ip}: {e}"
                        )
        raise APIError(f"Failed to send command to miner: {self}")

    async def multicommand(
        self, *commands: str, ignore_errors: bool = False, allow_warning: bool = True
    ) -> dict:
        """Run the given commands sequentially and return a merged mapping.

        NOTE(review): unlike sibling APIs this is sequential and does not
        swallow per-command APIErrors — a single failure raises. Confirm
        callers rely on this before changing it.
        """
        data: dict[str, Any] = {k: None for k in commands}
        data["multicommand"] = True
        for command in commands:
            data[command] = await self.send_command(command)
        return data

    async def restart_vnish(self) -> dict:
        """Restart the VNish mining process."""
        return await self.send_command("mining/restart", privileged=True)

    async def reboot(self) -> dict:
        """Reboot the miner system."""
        return await self.send_command("system/reboot", privileged=True)

    async def pause_mining(self) -> dict:
        """Pause mining without stopping the process."""
        return await self.send_command("mining/pause", privileged=True)

    async def resume_mining(self) -> dict:
        """Resume mining after a pause."""
        return await self.send_command("mining/resume", privileged=True)

    async def stop_mining(self) -> dict:
        """Stop the mining process."""
        return await self.send_command("mining/stop", privileged=True)

    async def start_mining(self) -> dict:
        """Start the mining process."""
        return await self.send_command("mining/start", privileged=True)

    async def info(self) -> dict:
        """Fetch general miner information."""
        return await self.send_command("info")

    async def summary(self) -> dict:
        """Fetch the mining summary."""
        return await self.send_command("summary")

    async def perf_summary(self) -> dict:
        """Fetch the performance summary."""
        return await self.send_command("perf-summary")

    async def chips(self) -> dict:
        """Fetch per-chip status data."""
        return await self.send_command("chips")

    async def layout(self) -> dict:
        """Fetch the board/chip layout."""
        return await self.send_command("layout")

    async def status(self) -> dict:
        """Fetch the miner status."""
        return await self.send_command("status")

    async def settings(self) -> dict:
        """Fetch the full settings document."""
        return await self.send_command("settings")

    async def set_power_limit(self, wattage: int) -> dict:
        # Can only set power limit to tuned preset
        # NOTE(review): the local name `settings` shadows the module-level
        # `settings` import for the rest of this method — harmless today,
        # but rename if this method ever needs the settings module.
        settings = await self.settings()
        settings["miner"]["overclock"]["preset"] = str(wattage)
        miner = {"overclock": settings["miner"]["overclock"]}
        # response will always be {"restart_required":false,"reboot_required":false} even if unsuccessful
        return await self.send_command("settings", privileged=True, miner=miner)

    async def autotune_presets(self) -> dict:
        """Fetch the available autotune power presets."""
        return await self.send_command("autotune/presets")

    async def find_miner(self) -> dict:
        """Trigger the locate/find-miner indicator."""
        return await self.send_command("find-miner", privileged=True)

    async def post_settings(self, miner_settings: dict):
        # NOTE(review): `post=True` is not popped by send_command, so a
        # literal "post": true key is included in the JSON body sent to the
        # settings endpoint — confirm the firmware ignores it.
        return await self.send_command("settings", post=True, **miner_settings)
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/web/marathon.py | pyasic/web/marathon.py | from __future__ import annotations
import asyncio
import json
from typing import Any
import httpx
from pyasic import APIError, settings
from pyasic.web.base import BaseWebAPI
class MaraWebAPI(BaseWebAPI):
    """Web API client for MARA (Marathon) firmware.

    Talks to the ``/kaonsu/v1/`` HTTP endpoints using digest authentication.
    """

    def __init__(self, ip: str) -> None:
        super().__init__(ip)
        self.username: str = "root"
        self.pwd: str = "root"

    async def multicommand(
        self, *commands: str, ignore_errors: bool = False, allow_warning: bool = True
    ) -> dict:
        """Run several GET commands concurrently and merge their results."""
        async with httpx.AsyncClient(transport=settings.transport()) as client:
            pending = [
                asyncio.create_task(self._handle_multicommand(client, cmd))
                for cmd in commands
            ]
            responses = await asyncio.gather(*pending)
        merged: dict = {}
        for partial in responses:
            merged.update(partial)
        merged["multicommand"] = True
        return merged

    async def _handle_multicommand(
        self, client: httpx.AsyncClient, command: str
    ) -> dict:
        """Fetch one command, mapping any failure to ``{command: {}}``."""
        digest = httpx.DigestAuth(self.username, self.pwd)
        result: dict = {}
        try:
            resp = await client.get(
                f"http://{self.ip}:{self.port}/kaonsu/v1/{command}", auth=digest
            )
        except httpx.HTTPError:
            pass
        else:
            if resp.status_code == 200:
                try:
                    result = resp.json()
                except json.decoder.JSONDecodeError:
                    pass
        return {command: result}

    async def send_command(
        self,
        command: str,
        ignore_errors: bool = False,
        allow_warning: bool = True,
        privileged: bool = False,
        **parameters: Any,
    ) -> dict:
        """Send one command; POST with a JSON body when parameters are given.

        Raises:
            APIError: On HTTP failure, non-200 status, or undecodable JSON.
        """
        url = f"http://{self.ip}:{self.port}/kaonsu/v1/{command}"
        digest = httpx.DigestAuth(self.username, self.pwd)
        response = None
        try:
            async with httpx.AsyncClient(
                transport=settings.transport(),
            ) as client:
                if not parameters:
                    response = await client.get(url, auth=digest)
                else:
                    response = await client.post(
                        url,
                        auth=digest,
                        timeout=settings.get("api_function_timeout", 3),
                        json=parameters,
                    )
        except httpx.HTTPError:
            response = None
        if response is not None and response.status_code == 200:
            try:
                return response.json()
            except json.decoder.JSONDecodeError as e:
                response_text = response.text if response.text else "empty response"
                raise APIError(
                    f"JSON decode error for '{command}' from {self.ip}: {e} - Response: {response_text}"
                )
        raise APIError(f"Failed to send command to miner API: {url}")

    async def brief(self):
        """Fetch the brief status summary."""
        return await self.send_command("brief")

    async def ping(self):
        """Ping the miner API."""
        return await self.send_command("ping")

    async def get_locate_miner(self):
        """Fetch the locate-light state."""
        return await self.send_command("locate_miner")

    async def set_locate_miner(self, blinking: bool):
        """Turn the locate light on or off."""
        return await self.send_command("locate_miner", blinking=blinking)

    async def reboot(self):
        """Reboot the miner."""
        return await self.send_command("maintenance", type="reboot")

    async def reset(self):
        """Factory-reset the miner."""
        return await self.send_command("maintenance", type="reset")

    async def reload(self):
        """Reload the mining service."""
        return await self.send_command("maintenance", type="reload")

    async def set_password(self, new_pwd: str):
        """Change the web password.

        Note: the cached ``self.pwd`` attribute is not updated here.
        """
        # NOTE(review): confirmPwd is sent as the *current* password —
        # verify against the firmware API spec.
        return await self.send_command(
            "maintenance",
            type="passwd",
            params={"curPwd": self.pwd, "confirmPwd": self.pwd, "newPwd": new_pwd},
        )

    async def get_network_config(self):
        """Fetch the network configuration."""
        return await self.send_command("network_config")

    async def set_network_config(self, **params):
        """Apply network configuration settings."""
        return await self.send_command("network_config", **params)

    async def get_miner_config(self):
        """Fetch the miner configuration."""
        return await self.send_command("miner_config")

    async def set_miner_config(self, **params):
        """Apply miner configuration settings."""
        return await self.send_command("miner_config", **params)

    async def fans(self):
        """Fetch fan telemetry."""
        return await self.send_command("fans")

    async def log(self):
        """Fetch the miner log."""
        return await self.send_command("log")

    async def overview(self):
        """Fetch the overview data."""
        return await self.send_command("overview")

    async def connections(self):
        """Fetch active connection data."""
        return await self.send_command("connections")

    async def controlboard_info(self):
        """Fetch control board information."""
        return await self.send_command("controlboard_info")

    async def event_chart(self):
        """Fetch the event chart data."""
        return await self.send_command("event_chart")

    async def hashboards(self):
        """Fetch hashboard telemetry."""
        return await self.send_command("hashboards")

    async def pools(self):
        """Fetch configured pool data."""
        return await self.send_command("pools")
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/web/antminer.py | pyasic/web/antminer.py | # ------------------------------------------------------------------------------
# Copyright 2022 Upstream Data Inc -
# -
# Licensed under the Apache License, Version 2.0 (the "License"); -
# you may not use this file except in compliance with the License. -
# You may obtain a copy of the License at -
# -
# http://www.apache.org/licenses/LICENSE-2.0 -
# -
# Unless required by applicable law or agreed to in writing, software -
# distributed under the License is distributed on an "AS IS" BASIS, -
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -
# See the License for the specific language governing permissions and -
# limitations under the License. -
# ------------------------------------------------------------------------------
from __future__ import annotations
import asyncio
import json
from pathlib import Path
from typing import Any
import aiofiles
import httpx
from pyasic import settings
from pyasic.errors import APIError
from pyasic.web.base import BaseWebAPI
class AntminerModernWebAPI(BaseWebAPI):
    """Web API client for modern Antminer firmware (CGI endpoints, digest auth)."""

    def __init__(self, ip: str) -> None:
        """Initialize the modern Antminer API client with a specific IP address.

        Args:
            ip (str): IP address of the Antminer device.
        """
        super().__init__(ip)
        self.username: str = "root"
        # web password; falls back to the stock firmware default
        self.pwd: str = settings.get("default_antminer_web_password", "root")
async def send_command(
self,
command: str,
ignore_errors: bool = False,
allow_warning: bool = True,
privileged: bool = False,
**parameters: Any,
) -> dict:
"""Send a command to the Antminer device using HTTP digest authentication.
Args:
command (str): The CGI command to send.
ignore_errors (bool): If True, ignore any HTTP errors.
allow_warning (bool): If True, proceed with warnings.
privileged (bool): If set to True, requires elevated privileges.
**parameters: Arbitrary keyword arguments to be sent as parameters in the request.
Returns:
dict: The JSON response from the device or an empty dictionary if an error occurs.
"""
url = f"http://{self.ip}:{self.port}/cgi-bin/{command}.cgi"
auth = httpx.DigestAuth(self.username, self.pwd)
try:
async with httpx.AsyncClient(transport=settings.transport()) as client:
if parameters:
data = await client.post(
url,
auth=auth,
timeout=settings.get("api_function_timeout", 3),
json=parameters,
)
else:
data = await client.get(url, auth=auth)
except httpx.HTTPError as e:
return {"success": False, "message": f"HTTP error occurred: {str(e)}"}
else:
if data.status_code == 200:
try:
return data.json()
except json.decoder.JSONDecodeError:
return {"success": False, "message": "Failed to decode JSON"}
return {"success": False, "message": "Unknown error occurred"}
async def multicommand(
self, *commands: str, ignore_errors: bool = False, allow_warning: bool = True
) -> dict:
"""Execute multiple commands simultaneously.
Args:
*commands (str): Multiple command strings to be executed.
ignore_errors (bool): If True, ignore any HTTP errors.
allow_warning (bool): If True, proceed with warnings.
Returns:
dict: A dictionary containing the results of all commands executed.
"""
async with httpx.AsyncClient(transport=settings.transport()) as client:
tasks = [
asyncio.create_task(self._handle_multicommand(client, command))
for command in commands
]
all_data = await asyncio.gather(*tasks)
data = {}
for item in all_data:
data.update(item)
data["multicommand"] = True
return data
async def _handle_multicommand(
self, client: httpx.AsyncClient, command: str
) -> dict:
"""Helper function for handling individual commands in a multicommand execution.
Args:
client (httpx.AsyncClient): The HTTP client to use for the request.
command (str): The command to be executed.
Returns:
dict: A dictionary containing the response of the executed command.
"""
auth = httpx.DigestAuth(self.username, self.pwd)
try:
url = f"http://{self.ip}/cgi-bin/{command}.cgi"
ret = await client.get(url, auth=auth)
except httpx.HTTPError:
pass
else:
if ret.status_code == 200:
try:
json_data = ret.json()
return {command: json_data}
except json.decoder.JSONDecodeError:
pass
return {command: {}}
async def get_miner_conf(self) -> dict:
"""Retrieve the miner configuration from the Antminer device.
Returns:
dict: A dictionary containing the current configuration of the miner.
"""
return await self.send_command("get_miner_conf")
async def set_miner_conf(self, conf: dict) -> dict:
"""Set the configuration for the miner.
Args:
conf (dict): A dictionary of configuration settings to apply to the miner.
Returns:
dict: A dictionary response from the device after setting the configuration.
"""
return await self.send_command("set_miner_conf", **conf)
async def blink(self, blink: bool) -> dict:
"""Control the blinking of the LED on the miner device.
Args:
blink (bool): True to start blinking, False to stop.
Returns:
dict: A dictionary response from the device after the command execution.
"""
if blink:
return await self.send_command("blink", blink="true")
return await self.send_command("blink", blink="false")
async def reboot(self) -> dict:
"""Reboot the miner device.
Returns:
dict: A dictionary response from the device confirming the reboot command.
"""
return await self.send_command("reboot")
async def get_system_info(self) -> dict:
"""Retrieve system information from the miner.
Returns:
dict: A dictionary containing system information of the miner.
"""
return await self.send_command("get_system_info")
async def get_network_info(self) -> dict:
"""Retrieve network configuration information from the miner.
Returns:
dict: A dictionary containing the network configuration of the miner.
"""
return await self.send_command("get_network_info")
async def summary(self) -> dict:
"""Get a summary of the miner's status and performance.
Returns:
dict: A summary of the miner's current operational status.
"""
return await self.send_command("summary")
async def get_blink_status(self) -> dict:
"""Check the status of the LED blinking on the miner.
Returns:
dict: A dictionary indicating whether the LED is currently blinking.
"""
return await self.send_command("get_blink_status")
async def set_network_conf(
self,
ip: str,
dns: str,
gateway: str,
subnet_mask: str,
hostname: str,
protocol: int,
) -> dict:
"""Set the network configuration of the miner.
Args:
ip (str): IP address of the device.
dns (str): DNS server IP address.
gateway (str): Gateway IP address.
subnet_mask (str): Network subnet mask.
hostname (str): Hostname of the device.
protocol (int): Network protocol used.
Returns:
dict: A dictionary response from the device after setting the network configuration.
"""
return await self.send_command(
"set_network_conf",
ipAddress=ip,
ipDns=dns,
ipGateway=gateway,
ipHost=hostname,
ipPro=protocol,
ipSub=subnet_mask,
)
class AntminerOldWebAPI(BaseWebAPI):
    """Web API client for legacy stock-firmware Antminer devices.

    Uses the older form-encoded ``/cgi-bin/*.cgi`` interface with HTTP
    digest authentication. Failed commands raise ``APIError``.
    """

    def __init__(self, ip: str) -> None:
        """Initialize the old Antminer API client with a specific IP address.

        Args:
            ip (str): IP address of the Antminer device.
        """
        super().__init__(ip)
        self.username: str = "root"
        self.pwd: str = settings.get("default_antminer_web_password", "root")

    async def send_command(
        self,
        command: str,
        ignore_errors: bool = False,
        allow_warning: bool = True,
        privileged: bool = False,
        **parameters: Any,
    ) -> dict:
        """Send a command to the Antminer device using HTTP digest authentication.

        Args:
            command (str): The CGI command to send.
            ignore_errors (bool): If True, ignore any HTTP errors.
            allow_warning (bool): If True, proceed with warnings.
            privileged (bool): If set to True, requires elevated privileges.
            **parameters: Arbitrary keyword arguments to be sent as parameters in the request.

        Returns:
            dict: The JSON response from the device.

        Raises:
            APIError: If the request fails or the response is not valid JSON.
        """
        url = f"http://{self.ip}:{self.port}/cgi-bin/{command}.cgi"
        auth = httpx.DigestAuth(self.username, self.pwd)
        timeout = settings.get("api_function_timeout", 3)
        try:
            async with httpx.AsyncClient(transport=settings.transport()) as client:
                if parameters:
                    data = await client.post(
                        url,
                        data=parameters,
                        auth=auth,
                        timeout=timeout,
                    )
                else:
                    # Fix: the GET path previously used httpx's default
                    # timeout; use the same configured timeout as POST.
                    data = await client.get(url, auth=auth, timeout=timeout)
        except httpx.HTTPError:
            pass
        else:
            if data.status_code == 200:
                try:
                    return data.json()
                except json.decoder.JSONDecodeError:
                    pass
        raise APIError(f"Failed to send command to miner: {self}")

    async def multicommand(
        self, *commands: str, ignore_errors: bool = False, allow_warning: bool = True
    ) -> dict:
        """Execute multiple commands sequentially over one HTTP session.

        Args:
            *commands (str): Multiple command strings to be executed.
            ignore_errors (bool): If True, ignore any HTTP errors.
            allow_warning (bool): If True, proceed with warnings.

        Returns:
            dict: A dictionary containing the results of all commands executed;
                commands that failed are left mapped to ``None``.
        """
        data = {k: None for k in commands}
        auth = httpx.DigestAuth(self.username, self.pwd)
        timeout = settings.get("api_function_timeout", 3)
        async with httpx.AsyncClient(transport=settings.transport()) as client:
            for command in commands:
                try:
                    # Fix: include the configured port and timeout, matching
                    # send_command (previously hard-wired to port 80 and the
                    # httpx default timeout).
                    url = f"http://{self.ip}:{self.port}/cgi-bin/{command}.cgi"
                    ret = await client.get(url, auth=auth, timeout=timeout)
                except httpx.HTTPError:
                    pass
                else:
                    if ret.status_code == 200:
                        try:
                            json_data = ret.json()
                            data[command] = json_data
                        except json.decoder.JSONDecodeError:
                            pass
        return data

    async def get_system_info(self) -> dict:
        """Retrieve system information from the miner.

        Returns:
            dict: A dictionary containing system information of the miner.
        """
        return await self.send_command("get_system_info")

    async def blink(self, blink: bool) -> dict:
        """Control the blinking of the LED on the miner device.

        Args:
            blink (bool): True to start blinking, False to stop.

        Returns:
            dict: A dictionary response from the device after the command execution.
        """
        if blink:
            return await self.send_command("blink", action="startBlink")
        return await self.send_command("blink", action="stopBlink")

    async def reboot(self) -> dict:
        """Reboot the miner device.

        Returns:
            dict: A dictionary response from the device confirming the reboot command.
        """
        return await self.send_command("reboot")

    async def get_blink_status(self) -> dict:
        """Check the status of the LED blinking on the miner.

        Returns:
            dict: A dictionary indicating whether the LED is currently blinking.
        """
        return await self.send_command("blink", action="onPageLoaded")

    async def get_miner_conf(self) -> dict:
        """Retrieve the miner configuration from the Antminer device.

        Returns:
            dict: A dictionary containing the current configuration of the miner.
        """
        return await self.send_command("get_miner_conf")

    async def set_miner_conf(self, conf: dict) -> dict:
        """Set the configuration for the miner.

        Args:
            conf (dict): A dictionary of configuration settings to apply to the miner.

        Returns:
            dict: A dictionary response from the device after setting the configuration.
        """
        return await self.send_command("set_miner_conf", **conf)

    async def stats(self) -> dict:
        """Retrieve detailed statistical data of the mining operation.

        Returns:
            dict: Detailed statistics of the miner's operation.
        """
        return await self.send_command("miner_stats")

    async def summary(self) -> dict:
        """Get a summary of the miner's status and performance.

        Returns:
            dict: A summary of the miner's current operational status.
        """
        return await self.send_command("miner_summary")

    async def pools(self) -> dict:
        """Retrieve current pool information associated with the miner.

        Returns:
            dict: Information about the mining pools configured in the miner.
        """
        return await self.send_command("miner_pools")

    async def update_firmware(self, file: Path, keep_settings: bool = True) -> dict:
        """Perform a system update by uploading a firmware file and sending a command to initiate the update."""
        async with aiofiles.open(file, "rb") as firmware:
            file_content = await firmware.read()
        return await self.send_command(
            command="upgrade",
            file=(file.name, file_content, "application/octet-stream"),
            filename=file.name,
            keep_settings=keep_settings,
        )
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/web/hiveon.py | pyasic/web/hiveon.py | # ------------------------------------------------------------------------------
# Copyright 2022 Upstream Data Inc -
# -
# Licensed under the Apache License, Version 2.0 (the "License"); -
# you may not use this file except in compliance with the License. -
# You may obtain a copy of the License at -
# -
# http://www.apache.org/licenses/LICENSE-2.0 -
# -
# Unless required by applicable law or agreed to in writing, software -
# distributed under the License is distributed on an "AS IS" BASIS, -
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -
# See the License for the specific language governing permissions and -
# limitations under the License. -
# ------------------------------------------------------------------------------
from __future__ import annotations
import json
from pathlib import Path
from typing import Any
import aiofiles
import httpx
from pyasic import APIError, settings
from pyasic.web.base import BaseWebAPI
class HiveonWebAPI(BaseWebAPI):
    """Web API client for Hiveon-firmware Antminer devices.

    Uses the Antminer-style ``/cgi-bin/*.cgi`` interface with HTTP digest
    authentication; failures raise ``APIError`` with a descriptive message.
    """

    def __init__(self, ip: str) -> None:
        """Initialize the Hiveon API client with a specific IP address.

        Args:
            ip (str): IP address of the Hiveon device.
        """
        super().__init__(ip)
        self.username: str = "root"
        self.pwd: str = settings.get("default_hive_web_password", "root")

    async def send_command(
        self,
        command: str,
        ignore_errors: bool = False,
        allow_warning: bool = True,
        privileged: bool = False,
        **parameters: Any,
    ) -> dict:
        """Send a command to the device using HTTP digest authentication.

        Args:
            command (str): The CGI command to send.
            ignore_errors (bool): If True, ignore any HTTP errors.
            allow_warning (bool): If True, proceed with warnings.
            privileged (bool): If set to True, requires elevated privileges.
            **parameters: Arbitrary keyword arguments to be sent as parameters in the request.

        Returns:
            dict: The JSON response from the device.

        Raises:
            APIError: On HTTP failure, JSON decode failure, or a non-200 status.
        """
        url = f"http://{self.ip}:{self.port}/cgi-bin/{command}.cgi"
        auth = httpx.DigestAuth(self.username, self.pwd)
        timeout = settings.get("api_function_timeout", 3)
        try:
            async with httpx.AsyncClient(transport=settings.transport()) as client:
                if parameters:
                    response = await client.post(
                        url,
                        data=parameters,
                        auth=auth,
                        timeout=timeout,
                    )
                else:
                    # Fix: the GET path previously used httpx's default
                    # timeout; use the same configured timeout as POST.
                    response = await client.get(url, auth=auth, timeout=timeout)
        except httpx.HTTPError as e:
            raise APIError(f"HTTP error sending '{command}' to {self.ip}: {e}")
        else:
            if response.status_code == 200:
                try:
                    return response.json()
                except json.decoder.JSONDecodeError as e:
                    response_text = response.text if response.text else "empty response"
                    raise APIError(
                        f"JSON decode error for '{command}' from {self.ip}: {e} - Response: {response_text}"
                    )
            raise APIError(f"Failed to send command to miner API: {url}")

    async def multicommand(
        self, *commands: str, ignore_errors: bool = False, allow_warning: bool = True
    ) -> dict:
        """Execute multiple commands sequentially over one HTTP session.

        Args:
            *commands (str): Multiple command strings to be executed.
            ignore_errors (bool): If True, ignore any HTTP errors.
            allow_warning (bool): If True, proceed with warnings.

        Returns:
            dict: A dictionary containing the results of all commands executed;
                commands that failed are left mapped to ``None``.
        """
        data = {k: None for k in commands}
        auth = httpx.DigestAuth(self.username, self.pwd)
        timeout = settings.get("api_function_timeout", 3)
        async with httpx.AsyncClient(transport=settings.transport()) as client:
            for command in commands:
                try:
                    # Fix: include the configured port and timeout, matching
                    # send_command (previously hard-wired to port 80 and the
                    # httpx default timeout).
                    url = f"http://{self.ip}:{self.port}/cgi-bin/{command}.cgi"
                    ret = await client.get(url, auth=auth, timeout=timeout)
                except httpx.HTTPError:
                    pass
                else:
                    if ret.status_code == 200:
                        try:
                            json_data = ret.json()
                            data[command] = json_data
                        except json.decoder.JSONDecodeError:
                            pass
        return data

    async def get_system_info(self) -> dict:
        """Retrieve system information from the miner.

        Returns:
            dict: A dictionary containing system information of the miner.
        """
        return await self.send_command("get_system_info")

    async def get_network_info(self) -> dict:
        """Retrieve network configuration information from the miner.

        Returns:
            dict: A dictionary containing the network configuration of the miner.
        """
        return await self.send_command("get_network_info")

    async def blink(self, blink: bool) -> dict:
        """Control the blinking of the LED on the miner device.

        Args:
            blink (bool): True to start blinking, False to stop.

        Returns:
            dict: A dictionary response from the device after the command execution.
        """
        if blink:
            return await self.send_command("blink", action="startBlink")
        return await self.send_command("blink", action="stopBlink")

    async def reboot(self) -> dict:
        """Reboot the miner device.

        Returns:
            dict: A dictionary response from the device confirming the reboot command.
        """
        return await self.send_command("reboot")

    async def get_blink_status(self) -> dict:
        """Check the status of the LED blinking on the miner.

        Returns:
            dict: A dictionary indicating whether the LED is currently blinking.
        """
        return await self.send_command("blink", action="onPageLoaded")

    async def get_miner_conf(self) -> dict:
        """Retrieve the miner configuration from the device.

        Returns:
            dict: A dictionary containing the current configuration of the miner.
        """
        return await self.send_command("get_miner_conf")

    async def set_miner_conf(self, conf: dict) -> dict:
        """Set the configuration for the miner.

        Args:
            conf (dict): A dictionary of configuration settings to apply to the miner.

        Returns:
            dict: A dictionary response from the device after setting the configuration.
        """
        return await self.send_command("set_miner_conf", **conf)

    async def stats(self) -> dict:
        """Retrieve detailed statistical data of the mining operation.

        Returns:
            dict: Detailed statistics of the miner's operation.
        """
        return await self.send_command("miner_stats")

    async def summary(self) -> dict:
        """Get a summary of the miner's status and performance.

        Returns:
            dict: A summary of the miner's current operational status.
        """
        return await self.send_command("miner_summary")

    async def pools(self) -> dict:
        """Retrieve current pool information associated with the miner.

        Returns:
            dict: Information about the mining pools configured in the miner.
        """
        return await self.send_command("miner_pools")

    async def update_firmware(self, file: Path, keep_settings: bool = True) -> dict:
        """Perform a system update by uploading a firmware file and sending a command to initiate the update."""
        async with aiofiles.open(file, "rb") as firmware:
            file_content = await firmware.read()
        return await self.send_command(
            "upgrade",
            file=(file.name, file_content, "application/octet-stream"),
            filename=file.name,
            keep_settings=keep_settings,
        )
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/web/epic.py | pyasic/web/epic.py | # ------------------------------------------------------------------------------
# Copyright 2022 Upstream Data Inc -
# -
# Licensed under the Apache License, Version 2.0 (the "License"); -
# you may not use this file except in compliance with the License. -
# You may obtain a copy of the License at -
# -
# http://www.apache.org/licenses/LICENSE-2.0 -
# -
# Unless required by applicable law or agreed to in writing, software -
# distributed under the License is distributed on an "AS IS" BASIS, -
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -
# See the License for the specific language governing permissions and -
# limitations under the License. -
# ------------------------------------------------------------------------------
from __future__ import annotations
import hashlib
import json
from pathlib import Path
from typing import Any
import aiofiles
import httpx
from pyasic import settings
from pyasic.errors import APIError
from pyasic.web.base import BaseWebAPI
class ePICWebAPI(BaseWebAPI):
    """Web API client for ePIC-firmware miners.

    Sends JSON (or multipart) requests to the miner's HTTP API on port 4028.
    Privileged commands and commands with parameters are POSTed with the
    web password included in the payload.
    """

    def __init__(self, ip: str) -> None:
        """Initialize the ePIC API client for the given miner IP."""
        super().__init__(ip)
        self.username = "root"
        self.pwd = settings.get("default_epic_web_password", "letmein")
        self.port = 4028
        self.token = None

    async def send_command(
        self,
        command: str,
        ignore_errors: bool = False,
        allow_warning: bool = True,
        privileged: bool = False,
        **parameters: Any,
    ) -> dict:
        """Send a command to the miner, retrying on soft failures.

        Args:
            command: URL path segment of the API endpoint.
            ignore_errors: If True, return ``{}`` / skip raising on failures.
            allow_warning: Unused here.
            privileged: If True, force a POST (password included) even with
                no parameters.
            **parameters: Request payload; a special ``files`` key triggers a
                multipart upload with optional ``data`` form fields.

        Returns:
            dict: The decoded JSON body, or ``{"success": True}`` for empty
                responses.

        Raises:
            APIError: On non-200 status (unless ``ignore_errors``), an API
                ``result: False`` response after retries, or when every retry
                attempt failed with a transport/decoding error.
        """
        # Any parameters (or explicit privilege) switch the request to POST.
        post = privileged or not parameters == {}
        async with httpx.AsyncClient(transport=settings.transport()) as client:
            for retry_cnt in range(settings.get("get_data_retries", 1)):
                try:
                    if parameters.get("files") is not None:
                        # Multipart upload path (e.g. firmware updates); the
                        # password travels in the form fields, not in JSON.
                        files = parameters["files"]
                        data_fields = parameters.get("data", {})
                        data_fields["password"] = self.pwd
                        response = await client.post(
                            f"http://{self.ip}:{self.port}/{command}",
                            timeout=5,
                            files=files,
                            data=data_fields,
                        )
                    elif post:
                        response = await client.post(
                            f"http://{self.ip}:{self.port}/{command}",
                            timeout=5,
                            json={
                                **parameters,
                                "password": self.pwd,
                            },
                        )
                    else:
                        response = await client.get(
                            f"http://{self.ip}:{self.port}/{command}",
                            timeout=5,
                        )
                    if not response.status_code == 200:
                        if not ignore_errors:
                            raise APIError(
                                f"Web command {command} failed with status code {response.status_code}"
                            )
                        return {}
                    json_data = response.json()
                    if json_data:
                        # The API can return a fail status if the miner cannot return the requested data. Catch this and pass
                        if not json_data.get("result", True) and not post:
                            if retry_cnt < settings.get("get_data_retries", 1) - 1:
                                # Soft failure on a read — try again.
                                continue
                            if not ignore_errors:
                                raise APIError(json_data["error"])
                        return json_data
                    return {"success": True}
                except (httpx.HTTPError, json.JSONDecodeError, AttributeError):
                    # Transport/decoding errors consume a retry attempt.
                    pass
        raise APIError(f"Failed to send command to miner: {self}")

    async def multicommand(
        self, *commands: str, ignore_errors: bool = False, allow_warning: bool = True
    ) -> dict:
        """Run several commands sequentially, keyed by command name.

        Commands that raise will propagate; ``multicommand`` is always True
        in the result.
        """
        data: dict[str, Any] = {k: None for k in commands}
        data["multicommand"] = True
        for command in commands:
            data[command] = await self.send_command(command)
        return data

    async def restart_epic(self) -> dict:
        """Restart the ePIC mining software (soft reboot)."""
        return await self.send_command("softreboot", privileged=True)

    async def reboot(self) -> dict:
        """Reboot the miner hardware."""
        return await self.send_command("reboot", privileged=True)

    async def set_shutdown_temp(self, params: int) -> dict:
        """Set the shutdown temperature threshold."""
        return await self.send_command("shutdowntemp", param=params)

    async def set_critical_temp(self, params: int) -> dict:
        """Set the critical temperature threshold."""
        return await self.send_command("criticaltemp", param=params)

    async def set_fan(self, params: dict) -> dict:
        """Set the fan speed configuration."""
        return await self.send_command("fanspeed", param=params)

    async def set_ptune_enable(self, params: bool) -> dict:
        """Enable or disable perpetual tuning."""
        return await self.send_command("perpetualtune", param=params)

    async def set_ptune_algo(self, params: dict) -> dict:
        """Set the perpetual tuning algorithm parameters."""
        return await self.send_command("perpetualtune/algo", param=params)

    async def set_pools(self, params: dict) -> dict:
        """Set the pool/coin configuration."""
        return await self.send_command("coin", param=params)

    async def pause_mining(self) -> dict:
        """Pause mining (sends the same "Stop" action as stop_mining)."""
        return await self.send_command("miner", param="Stop")

    async def resume_mining(self) -> dict:
        """Resume mining (sends the same "Autostart" action as start_mining)."""
        return await self.send_command("miner", param="Autostart")

    async def stop_mining(self) -> dict:
        """Stop mining."""
        return await self.send_command("miner", param="Stop")

    async def start_mining(self) -> dict:
        """Start mining via autostart."""
        return await self.send_command("miner", param="Autostart")

    async def summary(self) -> dict:
        """Get the miner's status summary."""
        return await self.send_command("summary")

    async def hashrate(self) -> dict:
        """Get the miner's hashrate data."""
        return await self.send_command("hashrate")

    async def network(self) -> dict:
        """Get the miner's network configuration."""
        return await self.send_command("network")

    async def capabilities(self) -> dict:
        """Get the miner's reported capabilities."""
        return await self.send_command("capabilities")

    async def system_update(self, file: Path | str, keep_settings: bool = True) -> None:
        """Perform a system update by uploading a firmware file and sending a
        command to initiate the update."""
        # calculate the SHA256 checksum of the firmware file
        sha256_hash = hashlib.sha256()
        async with aiofiles.open(str(file), "rb") as f:
            while chunk := await f.read(8192):
                sha256_hash.update(chunk)
        checksum = sha256_hash.hexdigest()
        # prepare the multipart/form-data request
        # NOTE(review): this uses a synchronous open() inside an async
        # function, which blocks the event loop for the duration of the
        # upload — consider aiofiles here as well; confirm before changing.
        with open(file, "rb") as f:
            files = {"update.zip": ("update.zip", f, "application/zip")}
            data = {"checksum": checksum, "keepsettings": str(keep_settings).lower()}
            await self.send_command("systemupdate", files=files, data=data)
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/web/braiins_os/bosminer.py | pyasic/web/braiins_os/bosminer.py | # ------------------------------------------------------------------------------
# Copyright 2022 Upstream Data Inc -
# -
# Licensed under the Apache License, Version 2.0 (the "License"); -
# you may not use this file except in compliance with the License. -
# You may obtain a copy of the License at -
# -
# http://www.apache.org/licenses/LICENSE-2.0 -
# -
# Unless required by applicable law or agreed to in writing, software -
# distributed under the License is distributed on an "AS IS" BASIS, -
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -
# See the License for the specific language governing permissions and -
# limitations under the License. -
# ------------------------------------------------------------------------------
from __future__ import annotations
import json
from typing import Any
import httpx
from pyasic import settings
from pyasic.errors import APIError
from pyasic.web.base import BaseWebAPI
class BOSMinerWebAPI(BaseWebAPI):
    """LuCI web-interface client for BOSMiner (Braiins OS) devices."""

    def __init__(self, ip: str) -> None:
        """Set up the LuCI client for the given miner IP."""
        super().__init__(ip)
        self.username = "root"
        self.pwd = settings.get("default_bosminer_password", "root")
        self.port = 80

    async def send_command(
        self,
        command: str,
        ignore_errors: bool = False,
        allow_warning: bool = True,
        privileged: bool = False,
        **parameters: Any,
    ) -> dict:
        """Run a LuCI CGI command and return its decoded JSON payload.

        Raises:
            APIError: On non-200 status, transport failure, or bad JSON,
                unless ``ignore_errors`` is True (then returns ``{}``).
        """
        try:
            async with httpx.AsyncClient(transport=settings.transport()) as client:
                await self.auth(client)
                response = await client.get(
                    f"http://{self.ip}:{self.port}/cgi-bin/luci/{command}",
                    headers={"User-Agent": "BTC Tools v0.1"},
                )
                if response.status_code == 200:
                    return response.json()
                if ignore_errors:
                    return {}
                raise APIError(
                    f"LUCI web command failed: command={command}, code={response.status_code}"
                )
        except (httpx.HTTPError, json.JSONDecodeError):
            if ignore_errors:
                return {}
            raise APIError(f"LUCI web command failed: command={command}")

    async def multicommand(
        self, *commands: str, ignore_errors: bool = False, allow_warning: bool = True
    ) -> dict:
        """Run several LuCI commands sequentially, keyed by command name."""
        return {
            command: await self.send_command(command, ignore_errors=ignore_errors)
            for command in commands
        }

    async def auth(self, session: httpx.AsyncClient) -> None:
        """Log in to LuCI so the session carries an authenticated cookie."""
        headers = {
            "User-Agent": (
                "BTC Tools v0.1"
            ),  # only seems to respond if this user-agent is set
            "Content-Type": "application/x-www-form-urlencoded",
        }
        credentials = {"luci_username": self.username, "luci_password": self.pwd}
        await session.post(
            f"http://{self.ip}:{self.port}/cgi-bin/luci",
            headers=headers,
            data=credentials,
        )

    async def get_net_conf(self) -> dict:
        """Fetch the LAN network interface status."""
        return await self.send_command("admin/network/iface_status/lan")

    async def get_cfg_metadata(self) -> dict:
        """Fetch the miner configuration metadata."""
        return await self.send_command("admin/miner/cfg_metadata")

    async def get_cfg_data(self) -> dict:
        """Fetch the miner configuration data."""
        return await self.send_command("admin/miner/cfg_data")

    async def get_bos_info(self) -> dict:
        """Fetch Braiins OS version/info."""
        return await self.send_command("bos/info")

    async def get_overview(self) -> dict:
        """Fetch the status overview page data."""
        return await self.send_command(
            "admin/status/overview?status=1"
        )  # needs status=1 or it fails

    async def get_api_status(self) -> dict:
        """Fetch the miner API status."""
        return await self.send_command("admin/miner/api_status")
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/web/braiins_os/boser.py | pyasic/web/braiins_os/boser.py | # ------------------------------------------------------------------------------
# Copyright 2022 Upstream Data Inc -
# -
# Licensed under the Apache License, Version 2.0 (the "License"); -
# you may not use this file except in compliance with the License. -
# You may obtain a copy of the License at -
# -
# http://www.apache.org/licenses/LICENSE-2.0 -
# -
# Unless required by applicable law or agreed to in writing, software -
# distributed under the License is distributed on an "AS IS" BASIS, -
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -
# See the License for the specific language governing permissions and -
# limitations under the License. -
# ------------------------------------------------------------------------------
from __future__ import annotations
import asyncio
import logging
from datetime import timedelta
from typing import Any
from grpclib import GRPCError, Status
from grpclib.client import Channel
from pyasic import settings
from pyasic.errors import APIError
from pyasic.web.base import BaseWebAPI
from pyasic.web.braiins_os.better_monkey import patch
patch()
from .proto.braiins.bos import *
from .proto.braiins.bos.v1 import *
class BOSMinerGRPCStub(
    ApiVersionServiceStub,
    AuthenticationServiceStub,
    CoolingServiceStub,
    ConfigurationServiceStub,
    MinerServiceStub,
    PoolServiceStub,
    LicenseServiceStub,
    ActionsServiceStub,
    PerformanceServiceStub,
):
    """Aggregate gRPC stub exposing every BOS service's methods on one object.

    Combining the generated service stubs via multiple inheritance lets
    callers look up any RPC endpoint by name on a single stub instance.
    """

    pass
class BOSerWebAPI(BaseWebAPI):
def __init__(self, ip: str) -> None:
    """Set up a BOSer gRPC client for the given miner IP (port 50051)."""
    super().__init__(ip)
    self.username: str = "root"
    self.pwd: str = settings.get("default_bosminer_password", "root")
    self.port = 50051
    # Timestamp of the last successful login; used by auth() to reuse the
    # bearer token for ~59 minutes before re-authenticating.
    # NOTE(review): `datetime` is not imported by name in this module's
    # header (only `timedelta` is); the annotation works because of
    # `from __future__ import annotations`, and the runtime name presumably
    # comes from the proto wildcard imports — confirm.
    self._auth_time: datetime | None = None
@property
def commands(self) -> list:
    """Names of the public gRPC wrapper methods exposed by this API."""
    return self.get_commands()
def get_commands(self) -> list:
    """Return the names of all public, callable command methods.

    Excludes the plumbing methods (send_command, multicommand, auth, and
    the command-listing helpers) and anything private or dunder.
    """
    return [
        func
        for func in
        # each function in self
        dir(self)
        if func
        not in ["send_command", "multicommand", "auth", "commands", "get_commands"]
        if callable(getattr(self, func))
        and
        # no __ or _ methods
        not func.startswith("__")
        and not func.startswith("_")
    ]
async def multicommand(
    self, *commands: str, ignore_errors: bool = False, allow_warning: bool = True
) -> dict:
    """Run several named command methods concurrently.

    Args:
        *commands: Names of zero-argument methods on this class to invoke.
        ignore_errors: Unused here; all failures are silently dropped.
        allow_warning: Unused here.

    Returns:
        dict: ``{"multicommand": True}`` plus one key per command whose task
            completed and returned a dict; commands that raised, or names
            that don't exist on this class, are simply omitted.
    """
    result: dict[str, Any] = {"multicommand": True}
    tasks = {}
    for command in commands:
        try:
            # getattr raises AttributeError for unknown command names,
            # which skips scheduling that task entirely.
            tasks[command] = asyncio.create_task(getattr(self, command)())
        except AttributeError:
            pass
    results = await asyncio.gather(
        *[t for t in tasks.values()], return_exceptions=True
    )
    for cmd, task_result in zip(tasks.keys(), results):
        # return_exceptions=True converts failures into exception objects,
        # which fail this isinstance check and are dropped from the result.
        if isinstance(task_result, dict):
            result[cmd] = task_result
    return result
async def send_command(
    self,
    command: str,
    ignore_errors: bool = False,
    allow_warning: bool = True,
    privileged: bool = False,
    **parameters: Any,
) -> dict:
    """Send a single gRPC request to the miner.

    Args:
        command: Name of the stub method on ``BOSMinerGRPCStub`` to call.
        ignore_errors: If True, return ``{}`` instead of raising when the
            command name is unknown.
        allow_warning: Unused here.
        privileged: If True, attach the auth token to the request metadata.
        **parameters: Must include ``message``, the betterproto request
            message for the endpoint.

    Returns:
        dict: The response message converted via ``to_pydict()``.

    Raises:
        APIError: If the command is unknown (and ``ignore_errors`` is
            False), or if the gRPC call or connection fails.
    """
    message: betterproto.Message = parameters["message"]
    metadata = []
    if privileged:
        metadata.append(("authorization", await self.auth()))
    try:
        async with Channel(self.ip, self.port) as c:
            # Fix: pass a default to getattr so an unknown command yields
            # None and hits the error branch below; previously getattr
            # raised AttributeError, making the None check unreachable.
            endpoint = getattr(BOSMinerGRPCStub(c), command, None)
            if endpoint is None:
                if not ignore_errors:
                    raise APIError(f"Command not found - {command}")
                return {}
            try:
                return (await endpoint(message, metadata=metadata)).to_pydict()
            except GRPCError as e:
                # Token may have expired — re-authenticate once and retry.
                if e.status == Status.UNAUTHENTICATED:
                    await self._get_auth()
                    metadata = [("authorization", await self.auth())]
                    return (await endpoint(message, metadata=metadata)).to_pydict()
                raise e
    except (GRPCError, ConnectionError) as e:
        # Fix: report `command`, not `endpoint` — `endpoint` is unbound if
        # opening the channel itself fails.
        raise APIError(f"gRPC command failed - {command}") from e
async def auth(self) -> str | None:
    """Return a cached auth token, logging in again if stale or missing.

    A token is reused for up to 3540 seconds (59 minutes) after the last
    successful login before a fresh login is performed.
    NOTE(review): `datetime` is not imported by name in this module's
    header; presumably it arrives via the proto wildcard imports — confirm.
    """
    if (
        self.token is not None
        and self._auth_time is not None
        and datetime.now() - self._auth_time < timedelta(seconds=3540)
    ):
        return self.token
    await self._get_auth()
    return self.token
async def _get_auth(self) -> str | None:
    """Log in over gRPC and capture the auth token from response metadata.

    The BOS Login RPC returns the bearer token in the `authorization`
    header of the initial gRPC metadata rather than in the response body,
    so this drives the stream manually instead of using the stub.

    Returns:
        str | None: The token (also cached on ``self.token``), or None if
            no authorization header was present.
    """
    async with Channel(self.ip, self.port) as c:
        req = LoginRequest(username=self.username, password=self.pwd)
        # NOTE(review): the `grpclib` module itself is not imported by name
        # in this file's header (only GRPCError/Status are); presumably
        # `grpclib.const` resolves via the proto wildcard imports — confirm.
        async with c.request(
            "/braiins.bos.v1.AuthenticationService/Login",
            grpclib.const.Cardinality.UNARY_UNARY,
            type(req),
            LoginResponse,
        ) as stream:
            await stream.send_message(req, end=True)
            await stream.recv_initial_metadata()
            if stream.initial_metadata is not None:
                auth = stream.initial_metadata.get("authorization")
                if auth is not None and isinstance(auth, str):
                    self.token = auth
                    self._auth_time = datetime.now()
                    return self.token
    return None
async def get_api_version(self) -> dict:
    """Get the BOS gRPC API version (no authentication required)."""
    return await self.send_command(
        "get_api_version", message=ApiVersionRequest(), privileged=False
    )
async def start(self) -> dict:
    """Start the miner (privileged)."""
    return await self.send_command("start", message=StartRequest(), privileged=True)
async def stop(self) -> dict:
    """Stop the miner (privileged)."""
    return await self.send_command("stop", message=StopRequest(), privileged=True)
async def pause_mining(self) -> dict:
    """Pause mining without stopping the miner process (privileged)."""
    return await self.send_command(
        "pause_mining", message=PauseMiningRequest(), privileged=True
    )
async def resume_mining(self) -> dict:
    """Resume mining after a pause (privileged)."""
    return await self.send_command(
        "resume_mining", message=ResumeMiningRequest(), privileged=True
    )
async def restart(self) -> dict:
    """Restart the mining software (privileged)."""
    return await self.send_command(
        "restart", message=RestartRequest(), privileged=True
    )
async def reboot(self) -> dict:
    """Reboot the miner hardware (privileged)."""
    return await self.send_command(
        "reboot", message=RebootRequest(), privileged=True
    )
async def set_locate_device_status(self, enable: bool) -> dict:
    """Enable or disable the locate-device (fault light) indicator (privileged)."""
    return await self.send_command(
        "set_locate_device_status",
        message=SetLocateDeviceStatusRequest(enable=enable),
        privileged=True,
    )
async def get_locate_device_status(self) -> dict:
    """Get the current locate-device (fault light) status (privileged)."""
    return await self.send_command(
        "get_locate_device_status",
        message=GetLocateDeviceStatusRequest(),
        privileged=True,
    )
async def set_password(self, password: str | None = None) -> dict:
    """Set (or with None, clear) the miner's password (privileged)."""
    return await self.send_command(
        "set_password",
        message=SetPasswordRequest(password=password),
        privileged=True,
    )
async def get_cooling_state(self) -> dict:
    """Get the current cooling (fan/temperature) state (privileged)."""
    return await self.send_command(
        "get_cooling_state", message=GetCoolingStateRequest(), privileged=True
    )
async def set_immersion_mode(
    self,
    enable: bool,
    save_action: SaveAction = SaveAction(SaveAction.SAVE_AND_APPLY),
) -> dict:
    """Enable or disable immersion cooling mode (privileged)."""
    return await self.send_command(
        "set_immersion_mode",
        message=SetImmersionModeRequest(
            enable_immersion_mode=enable, save_action=save_action
        ),
        privileged=True,
    )
async def get_tuner_state(self) -> dict:
    """Get the autotuner's current state (privileged)."""
    return await self.send_command(
        "get_tuner_state", message=GetTunerStateRequest(), privileged=True
    )
async def list_target_profiles(self) -> dict:
    """List the available tuning target profiles (privileged)."""
    return await self.send_command(
        "list_target_profiles", message=ListTargetProfilesRequest(), privileged=True
    )
async def set_default_power_target(
    self, save_action: SaveAction = SaveAction(SaveAction.SAVE_AND_APPLY)
) -> dict:
    """Reset the tuner's power target to its default (privileged)."""
    return await self.send_command(
        "set_default_power_target",
        message=SetDefaultPowerTargetRequest(save_action=save_action),
        privileged=True,
    )
async def set_power_target(
self,
power_target: int,
save_action: SaveAction = SaveAction(SaveAction.SAVE_AND_APPLY),
) -> dict:
return await self.send_command(
"set_power_target",
message=SetPowerTargetRequest(
power_target=Power(watt=power_target), save_action=save_action
),
privileged=True,
)
async def increment_power_target(
self,
power_target_increment: int,
save_action: SaveAction = SaveAction(SaveAction.SAVE_AND_APPLY),
) -> dict:
return await self.send_command(
"increment_power_target",
message=IncrementPowerTargetRequest(
power_target_increment=Power(watt=power_target_increment),
save_action=save_action,
),
privileged=True,
)
async def decrement_power_target(
self,
power_target_decrement: int,
save_action: SaveAction = SaveAction(SaveAction.SAVE_AND_APPLY),
) -> dict:
return await self.send_command(
"decrement_power_target",
message=DecrementPowerTargetRequest(
power_target_decrement=Power(watt=power_target_decrement),
save_action=save_action,
),
privileged=True,
)
async def set_default_hashrate_target(
self, save_action: SaveAction = SaveAction(SaveAction.SAVE_AND_APPLY)
) -> dict:
return await self.send_command(
"set_default_hashrate_target",
message=SetDefaultHashrateTargetRequest(save_action=save_action),
privileged=True,
)
async def set_hashrate_target(
self,
hashrate_target: float,
save_action: SaveAction = SaveAction(SaveAction.SAVE_AND_APPLY),
) -> dict:
return await self.send_command(
"set_hashrate_target",
message=SetHashrateTargetRequest(
hashrate_target=TeraHashrate(terahash_per_second=hashrate_target),
save_action=save_action,
),
privileged=True,
)
async def increment_hashrate_target(
self,
hashrate_target_increment: int,
save_action: SaveAction = SaveAction(SaveAction.SAVE_AND_APPLY),
) -> dict:
return await self.send_command(
"increment_hashrate_target",
message=IncrementHashrateTargetRequest(
hashrate_target_increment=TeraHashrate(
terahash_per_second=hashrate_target_increment
),
save_action=save_action,
),
privileged=True,
)
async def decrement_hashrate_target(
self,
hashrate_target_decrement: int,
save_action: SaveAction = SaveAction(SaveAction.SAVE_AND_APPLY),
) -> dict:
return await self.send_command(
"decrement_hashrate_target",
message=DecrementHashrateTargetRequest(
hashrate_target_decrement=TeraHashrate(
terahash_per_second=hashrate_target_decrement
),
save_action=save_action,
),
privileged=True,
)
async def set_dps(
self,
enable: bool,
power_step: int,
min_power_target: int,
enable_shutdown: bool | None = None,
shutdown_duration: int | None = None,
) -> dict:
return await self.send_command(
"set_dps",
message=SetDpsRequest(
enable=enable,
enable_shutdown=enable_shutdown,
shutdown_duration=(
Hours(hours=shutdown_duration)
if shutdown_duration is not None
else None
),
target=DpsTarget(
power_target=DpsPowerTarget(
power_step=Power(power_step),
min_power_target=Power(min_power_target),
)
),
),
privileged=True,
)
async def set_performance_mode(
self,
wattage_target: int | None = None,
hashrate_target: int | None = None,
save_action: SaveAction = SaveAction(SaveAction.SAVE_AND_APPLY),
) -> dict:
if wattage_target is not None and hashrate_target is not None:
logging.error(
"Cannot use both wattage_target and hashrate_target, using wattage_target."
)
hashrate_target = None
tuner_mode: TunerPerformanceMode
if wattage_target is not None:
tuner_mode = TunerPerformanceMode(
power_target=PowerTargetMode(power_target=Power(watt=wattage_target))
)
elif hashrate_target is not None:
tuner_mode = TunerPerformanceMode(
hashrate_target=HashrateTargetMode(
hashrate_target=TeraHashrate(terahash_per_second=hashrate_target)
)
)
else:
raise APIError(
"No target supplied, please supply either wattage_target or hashrate_target."
)
return await self.send_command(
"set_performance_mode",
message=SetPerformanceModeRequest(
save_action=save_action,
mode=PerformanceMode(tuner_mode=tuner_mode),
),
privileged=True,
)
async def get_active_performance_mode(self) -> dict:
return await self.send_command(
"get_active_performance_mode",
message=GetPerformanceModeRequest(),
privileged=True,
)
async def get_pool_groups(self) -> dict:
return await self.send_command(
"get_pool_groups", message=GetPoolGroupsRequest(), privileged=True
)
async def get_miner_configuration(self) -> dict:
return await self.send_command(
"get_miner_configuration",
message=GetMinerConfigurationRequest(),
privileged=True,
)
async def get_constraints(self) -> dict:
return await self.send_command(
"get_constraints", message=GetConstraintsRequest(), privileged=True
)
async def get_license_state(self) -> dict:
return await self.send_command(
"get_license_state", message=GetLicenseStateRequest(), privileged=True
)
async def get_miner_status(self) -> dict:
return await self.send_command(
"get_miner_status", message=GetMinerStatusRequest(), privileged=True
)
async def get_miner_details(self) -> dict:
return await self.send_command(
"get_miner_details", message=GetMinerDetailsRequest(), privileged=True
)
async def get_miner_stats(self) -> dict:
return await self.send_command(
"get_miner_stats", message=GetMinerStatsRequest(), privileged=True
)
async def get_hashboards(self) -> dict:
return await self.send_command(
"get_hashboards", message=GetHashboardsRequest(), privileged=True
)
async def get_support_archive(self) -> dict:
return await self.send_command(
"get_support_archive", message=GetSupportArchiveRequest(), privileged=True
)
async def enable_hashboards(
self,
hashboard_ids: list[str],
save_action: SaveAction = SaveAction(SaveAction.SAVE_AND_APPLY),
) -> dict:
return await self.send_command(
"enable_hashboards",
message=EnableHashboardsRequest(
hashboard_ids=hashboard_ids, save_action=save_action
),
privileged=True,
)
async def disable_hashboards(
self,
hashboard_ids: list[str],
save_action: SaveAction = SaveAction(SaveAction.SAVE_AND_APPLY),
) -> dict:
return await self.send_command(
"disable_hashboards",
message=DisableHashboardsRequest(
hashboard_ids=hashboard_ids, save_action=save_action
),
privileged=True,
)
async def set_pool_groups(
self,
pool_groups: list[PoolGroupConfiguration],
save_action: SaveAction = SaveAction(SaveAction.SAVE_AND_APPLY),
) -> dict:
return await self.send_command(
"set_pool_groups",
message=SetPoolGroupsRequest(
save_action=save_action, pool_groups=pool_groups
),
)
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/web/braiins_os/__init__.py | pyasic/web/braiins_os/__init__.py | from .boser import BOSerWebAPI
from .bosminer import BOSMinerWebAPI
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/web/braiins_os/better_monkey.py | pyasic/web/braiins_os/better_monkey.py | from datetime import datetime, timedelta
from typing import Any
from betterproto import DATETIME_ZERO, TYPE_MAP, TYPE_MESSAGE, Casing, Message
# https://github.com/danielgtaylor/python-betterproto/pull/609
def to_pydict(
self, casing: Casing = Casing.CAMEL, include_default_values: bool = False
) -> dict[str, Any]:
"""
Returns a python dict representation of this object.
Parameters
-----------
casing: :class:`Casing`
The casing to use for key values. Default is :attr:`Casing.CAMEL` for
compatibility purposes.
include_default_values: :class:`bool`
If ``True`` will include the default values of fields. Default is ``False``.
E.g. an ``int32`` field will be included with a value of ``0`` if this is
set to ``True``, otherwise this would be ignored.
Returns
--------
dict[:class:`str`, Any]
The python dict representation of this object.
"""
output: dict[str, Any] = {}
defaults = self._betterproto.default_gen
for field_name, meta in self._betterproto.meta_by_field_name.items():
field_is_repeated = defaults[field_name] is list
try:
value = getattr(self, field_name)
except AttributeError:
value = self._get_field_default(field_name)
cased_name = casing(field_name).rstrip("_") # type: ignore
if meta.proto_type == TYPE_MESSAGE:
if isinstance(value, datetime):
if (
value != DATETIME_ZERO
or include_default_values
or self._include_default_value_for_oneof(
field_name=field_name, meta=meta
)
):
output[cased_name] = value
elif isinstance(value, timedelta):
if (
value != timedelta(0)
or include_default_values
or self._include_default_value_for_oneof(
field_name=field_name, meta=meta
)
):
output[cased_name] = value
elif meta.wraps:
if value is not None or include_default_values:
output[cased_name] = value
elif field_is_repeated:
# Convert each item.
value = [i.to_pydict(casing, include_default_values) for i in value]
if value or include_default_values:
output[cased_name] = value
elif value is None:
if include_default_values:
output[cased_name] = None
elif (
value._serialized_on_wire
or include_default_values
or self._include_default_value_for_oneof(
field_name=field_name, meta=meta
)
):
output[cased_name] = value.to_pydict(casing, include_default_values)
elif meta.proto_type == TYPE_MAP:
for k in value:
if hasattr(value[k], "to_pydict"):
value[k] = value[k].to_pydict(casing, include_default_values)
if value or include_default_values:
output[cased_name] = value
elif (
value != self._get_field_default(field_name)
or include_default_values
or self._include_default_value_for_oneof(field_name=field_name, meta=meta)
):
output[cased_name] = value
return output
def patch():
Message.to_pydict = to_pydict
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/web/braiins_os/proto/__init__.py | pyasic/web/braiins_os/proto/__init__.py | python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false | |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/web/braiins_os/proto/braiins/__init__.py | pyasic/web/braiins_os/proto/braiins/__init__.py | python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false | |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/web/braiins_os/proto/braiins/bos/__init__.py | pyasic/web/braiins_os/proto/braiins/bos/__init__.py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# sources: bos/version.proto
# plugin: python-betterproto
# This file has been @generated
from dataclasses import dataclass
from typing import TYPE_CHECKING, Dict, Optional
import betterproto
import grpclib
from betterproto.grpc.grpclib_server import ServiceBase
if TYPE_CHECKING:
import grpclib.server
from betterproto.grpc.grpclib_client import MetadataLike
from grpclib.metadata import Deadline
@dataclass(eq=False, repr=False)
class ApiVersion(betterproto.Message):
"""LATEST_API_VERSION=1.3.0"""
major: int = betterproto.uint64_field(1)
minor: int = betterproto.uint64_field(2)
patch: int = betterproto.uint64_field(3)
pre: str = betterproto.string_field(4)
build: str = betterproto.string_field(5)
@dataclass(eq=False, repr=False)
class ApiVersionRequest(betterproto.Message):
pass
class ApiVersionServiceStub(betterproto.ServiceStub):
async def get_api_version(
self,
api_version_request: "ApiVersionRequest",
*,
timeout: Optional[float] = None,
deadline: Optional["Deadline"] = None,
metadata: Optional["MetadataLike"] = None,
) -> "ApiVersion":
return await self._unary_unary(
"/braiins.bos.ApiVersionService/GetApiVersion",
api_version_request,
ApiVersion,
timeout=timeout,
deadline=deadline,
metadata=metadata,
)
class ApiVersionServiceBase(ServiceBase):
async def get_api_version(
self, api_version_request: "ApiVersionRequest"
) -> "ApiVersion":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def __rpc_get_api_version(
self, stream: "grpclib.server.Stream[ApiVersionRequest, ApiVersion]"
) -> None:
request = await stream.recv_message()
response = await self.get_api_version(request)
await stream.send_message(response)
def __mapping__(self) -> Dict[str, grpclib.const.Handler]:
return {
"/braiins.bos.ApiVersionService/GetApiVersion": grpclib.const.Handler(
self.__rpc_get_api_version,
grpclib.const.Cardinality.UNARY_UNARY,
ApiVersionRequest,
ApiVersion,
),
}
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/pyasic/web/braiins_os/proto/braiins/bos/v1/__init__.py | pyasic/web/braiins_os/proto/braiins/bos/v1/__init__.py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# sources: bos/v1/actions.proto, bos/v1/authentication.proto, bos/v1/common.proto, bos/v1/configuration.proto, bos/v1/constraints.proto, bos/v1/cooling.proto, bos/v1/license.proto, bos/v1/miner.proto, bos/v1/network.proto, bos/v1/performance.proto, bos/v1/pool.proto, bos/v1/units.proto, bos/v1/work.proto
# plugin: python-betterproto
# This file has been @generated
import warnings
from dataclasses import dataclass
from datetime import datetime
from typing import TYPE_CHECKING, AsyncIterator, Dict, List, Optional
import betterproto
import grpclib
from betterproto.grpc.grpclib_server import ServiceBase
if TYPE_CHECKING:
import grpclib.server
from betterproto.grpc.grpclib_client import MetadataLike
from grpclib.metadata import Deadline
class SaveAction(betterproto.Enum):
"""Save action for different operations"""
UNSPECIFIED = 0
SAVE = 1
SAVE_AND_APPLY = 2
SAVE_AND_FORCE_APPLY = 3
class CoolingMode(betterproto.Enum):
UNSPECIFIED = 0
AUTO = 1
MANUAL = 2
DISABLED = 3
class SensorLocation(betterproto.Enum):
UNSPECIFIED = 0
CHIP = 1
PCB = 2
class TunerMode(betterproto.Enum):
UNSPECIFIED = 0
POWER_TARGET = 1
HASHRATE_TARGET = 2
class TunerState(betterproto.Enum):
UNSPECIFIED = 0
DISABLED = 1
STABLE = 2
TUNING = 3
ERROR = 4
class LicenseType(betterproto.Enum):
UNSPECIFIED = 0
STANDARD = 1
CUSTOM = 2
class Platform(betterproto.Enum):
"""Supported platforms"""
UNSPECIFIED = 0
AM1_S9 = 1
AM2_S17 = 2
AM3_BBB = 3
AM3_AML = 4
STM32MP157C_II1_AM2 = 5
CVITEK_BM1_AM2 = 6
ZYNQ_BM3_AM2 = 7
STM32MP157C_II2_BMM1 = 8
class BosMode(betterproto.Enum):
"""BOS modes enumeration"""
UNSPECIFIED = 0
UPGRADE = 1
RECOVERY = 2
SD = 3
NAND = 4
EMMC = 5
class MinerBrand(betterproto.Enum):
UNSPECIFIED = 0
ANTMINER = 1
WHATSMINER = 2
class MinerModel(betterproto.Enum):
"""Deprecated: This enumeration is not longer maintained"""
UNSPECIFIED = 0
ANTMINER_S9 = 1
ANTMINER_X17 = 2
ANTMINER_S17 = 3
ANTMINER_S17_PLUS = 4
ANTMINER_S17_PRO = 5
ANTMINER_S17E = 6
ANTMINER_T17 = 7
ANTMINER_T17E = 8
ANTMINER_T17_PLUS = 9
ANTMINER_X19 = 10
ANTMINER_S19 = 11
ANTMINER_S19_PRO = 12
ANTMINER_S19_PLUS = 13
ANTMINER_S19J = 14
ANTMINER_S19J_PRO = 15
ANTMINER_S19A = 16
ANTMINER_S19A_PRO = 17
ANTMINER_S19XP = 18
ANTMINER_T19 = 19
ANTMINER_S19J_PRO_PLUS = 20
class MinerStatus(betterproto.Enum):
UNSPECIFIED = 0
NOT_STARTED = 1
NORMAL = 2
PAUSED = 3
SUSPENDED = 4
RESTRICTED = 5
class SupportArchiveFormat(betterproto.Enum):
"""Enumeration for support archive format"""
UNSPECIFIED = 0
ZIP = 1
"""Compressed zip format"""
BOS = 2
"""BOS custom format"""
ZIP_ENCRYPTED = 3
"""Compressed encrypted zip format"""
class NetworkProtocol(betterproto.Enum):
UNSPECIFIED = 0
DHCP = 1
STATIC = 2
@dataclass(eq=False, repr=False)
class StartRequest(betterproto.Message):
"""Request for start bosminer action."""
pass
@dataclass(eq=False, repr=False)
class StartResponse(betterproto.Message):
"""Response for start bosminer action."""
already_running: bool = betterproto.bool_field(1)
"""Flag that bosminer was already running"""
@dataclass(eq=False, repr=False)
class RestartRequest(betterproto.Message):
"""Request for restart bosminer action."""
pass
@dataclass(eq=False, repr=False)
class RestartResponse(betterproto.Message):
"""Response for restart bosminer action."""
already_running: bool = betterproto.bool_field(1)
"""Flag that bosminer was already running"""
@dataclass(eq=False, repr=False)
class RebootRequest(betterproto.Message):
"""Request for reboot bosminer action."""
pass
@dataclass(eq=False, repr=False)
class RebootResponse(betterproto.Message):
"""Response for reboot bosminer action."""
pass
@dataclass(eq=False, repr=False)
class StopRequest(betterproto.Message):
"""Request for stop bosminer action."""
pass
@dataclass(eq=False, repr=False)
class StopResponse(betterproto.Message):
"""Response for stop bosminer action."""
already_stopped: bool = betterproto.bool_field(1)
"""Flag that bosminer was already stopped"""
@dataclass(eq=False, repr=False)
class PauseMiningRequest(betterproto.Message):
"""Request for pause mining action."""
pass
@dataclass(eq=False, repr=False)
class PauseMiningResponse(betterproto.Message):
"""Response for pause mining action."""
already_paused: bool = betterproto.bool_field(1)
"""Flag that miner mining was already paused"""
@dataclass(eq=False, repr=False)
class ResumeMiningRequest(betterproto.Message):
"""Response for resume mining action."""
pass
@dataclass(eq=False, repr=False)
class ResumeMiningResponse(betterproto.Message):
"""Response for resume mining action."""
already_mining: bool = betterproto.bool_field(1)
"""Flag that miner was already mining"""
@dataclass(eq=False, repr=False)
class SetLocateDeviceStatusRequest(betterproto.Message):
"""Request message to enable/disable locate device"""
enable: bool = betterproto.bool_field(1)
@dataclass(eq=False, repr=False)
class LocateDeviceStatusResponse(betterproto.Message):
"""Response with locate device status"""
enabled: bool = betterproto.bool_field(1)
@dataclass(eq=False, repr=False)
class GetLocateDeviceStatusRequest(betterproto.Message):
"""Request for locate device status action."""
pass
@dataclass(eq=False, repr=False)
class LoginRequest(betterproto.Message):
"""Request for login action."""
username: str = betterproto.string_field(1)
password: str = betterproto.string_field(2)
@dataclass(eq=False, repr=False)
class LoginResponse(betterproto.Message):
"""Response for login action."""
token: str = betterproto.string_field(1)
"""Token to be used for authentication"""
timeout_s: int = betterproto.uint32_field(2)
"""
Authentication token validity/timeout in seconds.
Token validity refreshed to this value with each request.
"""
@dataclass(eq=False, repr=False)
class SetPasswordRequest(betterproto.Message):
"""Request for set password action."""
password: Optional[str] = betterproto.string_field(1, optional=True)
@dataclass(eq=False, repr=False)
class SetPasswordResponse(betterproto.Message):
"""Response for set password action."""
pass
@dataclass(eq=False, repr=False)
class MegaHashrate(betterproto.Message):
megahash_per_second: float = betterproto.double_field(1)
@dataclass(eq=False, repr=False)
class GigaHashrate(betterproto.Message):
gigahash_per_second: float = betterproto.double_field(1)
@dataclass(eq=False, repr=False)
class TeraHashrate(betterproto.Message):
terahash_per_second: float = betterproto.double_field(1)
@dataclass(eq=False, repr=False)
class Frequency(betterproto.Message):
hertz: float = betterproto.double_field(1)
@dataclass(eq=False, repr=False)
class Voltage(betterproto.Message):
volt: float = betterproto.double_field(1)
@dataclass(eq=False, repr=False)
class Power(betterproto.Message):
watt: int = betterproto.uint64_field(1)
@dataclass(eq=False, repr=False)
class PowerEfficiency(betterproto.Message):
joule_per_terahash: float = betterproto.double_field(1)
@dataclass(eq=False, repr=False)
class Temperature(betterproto.Message):
degree_c: float = betterproto.double_field(1)
@dataclass(eq=False, repr=False)
class BasesPoints(betterproto.Message):
"""Structure representing Basis Points"""
bsp: int = betterproto.uint32_field(1)
"""
A basis point is one hundredth of 1 percentage point.
For example: 1bps = 0.01%, 250bps = 2.5%
"""
@dataclass(eq=False, repr=False)
class Hours(betterproto.Message):
hours: int = betterproto.uint32_field(1)
@dataclass(eq=False, repr=False)
class UInt32Constraints(betterproto.Message):
default: int = betterproto.uint32_field(1)
min: int = betterproto.uint32_field(2)
max: int = betterproto.uint32_field(3)
@dataclass(eq=False, repr=False)
class DoubleConstraints(betterproto.Message):
default: float = betterproto.double_field(1)
min: float = betterproto.double_field(2)
max: float = betterproto.double_field(3)
@dataclass(eq=False, repr=False)
class PowerConstraints(betterproto.Message):
default: "Power" = betterproto.message_field(1)
min: "Power" = betterproto.message_field(2)
max: "Power" = betterproto.message_field(3)
@dataclass(eq=False, repr=False)
class HashrateConstraints(betterproto.Message):
default: "TeraHashrate" = betterproto.message_field(1)
min: "TeraHashrate" = betterproto.message_field(2)
max: "TeraHashrate" = betterproto.message_field(3)
@dataclass(eq=False, repr=False)
class TemperatureConstraints(betterproto.Message):
default: "Temperature" = betterproto.message_field(1)
min: "Temperature" = betterproto.message_field(2)
max: "Temperature" = betterproto.message_field(3)
@dataclass(eq=False, repr=False)
class BooleanConstraint(betterproto.Message):
default: bool = betterproto.bool_field(1)
@dataclass(eq=False, repr=False)
class DurationConstraints(betterproto.Message):
default: "Hours" = betterproto.message_field(1)
min: "Hours" = betterproto.message_field(2)
max: "Hours" = betterproto.message_field(3)
@dataclass(eq=False, repr=False)
class FrequencyConstraints(betterproto.Message):
default: "Frequency" = betterproto.message_field(1)
min: "Frequency" = betterproto.message_field(2)
max: "Frequency" = betterproto.message_field(3)
@dataclass(eq=False, repr=False)
class VoltageConstraints(betterproto.Message):
default: "Voltage" = betterproto.message_field(1)
min: "Voltage" = betterproto.message_field(2)
max: "Voltage" = betterproto.message_field(3)
@dataclass(eq=False, repr=False)
class CoolingAutoMode(betterproto.Message):
"""
The temperature control modes.
Miner software tries to regulate the fan speed so that miner temperature is approximately at the target temperature.
The allowed temperature range is 0-200 degree Celsius.
"""
target_temperature: "Temperature" = betterproto.message_field(1)
"""Temperature that the miner will try to maintain"""
hot_temperature: "Temperature" = betterproto.message_field(2)
"""Temperature threshold at which the fans start to run at 100%."""
dangerous_temperature: "Temperature" = betterproto.message_field(3)
"""
Temperature threshold at which BOSMiner shuts down in order to prevent overheating and damaging the miner.
"""
@dataclass(eq=False, repr=False)
class CoolingManualMode(betterproto.Message):
"""
Fans are kept at a fixed, user-defined speed, no matter the temperature.
"""
fan_speed_ratio: Optional[float] = betterproto.double_field(1, optional=True)
"""
User defined fan speed expressed as a ratio between 0.0 and 1.0
where 0.0 means completely turned off and
1.0 means running at full speed possible
"""
hot_temperature: "Temperature" = betterproto.message_field(2)
"""Temperature threshold at which the fans start to run at 100%."""
dangerous_temperature: "Temperature" = betterproto.message_field(3)
"""
Temperature threshold at which BOSMiner shuts down in order to prevent overheating and damaging the miner.
"""
@dataclass(eq=False, repr=False)
class CoolingDisabledMode(betterproto.Message):
"""Disable temperature control. May be dangerous."""
fan_speed_ratio: Optional[float] = betterproto.double_field(1, optional=True)
"""
User defined fan speed expressed as a ratio between 0.0 and 1.0
where 0.0 means completely turned off and
1.0 means running at full speed possible
"""
@dataclass(eq=False, repr=False)
class CoolingConfiguration(betterproto.Message):
minimum_required_fans: Optional[int] = betterproto.uint32_field(1, optional=True)
auto: "CoolingAutoMode" = betterproto.message_field(2, group="mode")
manual: "CoolingManualMode" = betterproto.message_field(3, group="mode")
disabled: "CoolingDisabledMode" = betterproto.message_field(4, group="mode")
@dataclass(eq=False, repr=False)
class CoolingConstraints(betterproto.Message):
default_cooling_mode: "CoolingMode" = betterproto.enum_field(1)
target_temperature: "TemperatureConstraints" = betterproto.message_field(2)
hot_temperature: "TemperatureConstraints" = betterproto.message_field(3)
dangerous_temperature: "TemperatureConstraints" = betterproto.message_field(4)
fan_speed_ratio: "DoubleConstraints" = betterproto.message_field(5)
minimum_required_fans: "UInt32Constraints" = betterproto.message_field(6)
@dataclass(eq=False, repr=False)
class FanState(betterproto.Message):
"""Structure which contain info about one specific miner fan."""
position: Optional[int] = betterproto.uint32_field(1, optional=True)
"""Fan positions/ID"""
rpm: int = betterproto.uint32_field(2)
"""Actual fan RPM (Revolutions/Rotation Per Minute)"""
target_speed_ratio: Optional[float] = betterproto.double_field(3, optional=True)
"""Actual fan speed ratio(PWM) in range 0.0 - 1.0"""
@dataclass(eq=False, repr=False)
class TemperatureSensor(betterproto.Message):
id: Optional[int] = betterproto.uint32_field(1, optional=True)
"""Sensor id"""
location: "SensorLocation" = betterproto.enum_field(2)
"""Sensor location"""
temperature: "Temperature" = betterproto.message_field(3)
"""Temperature"""
@dataclass(eq=False, repr=False)
class GetCoolingStateRequest(betterproto.Message):
"""Request to get current temperature and fans measurements"""
pass
@dataclass(eq=False, repr=False)
class GetCoolingStateResponse(betterproto.Message):
"""
Response to get current fan states and
temperature measurements
"""
fans: List["FanState"] = betterproto.message_field(1)
"""All Fans state"""
highest_temperature: "TemperatureSensor" = betterproto.message_field(2)
"""Sensor with current highest temperature"""
@dataclass(eq=False, repr=False)
class SetImmersionModeRequest(betterproto.Message):
"""Request to set immersion mode"""
save_action: "SaveAction" = betterproto.enum_field(1)
enable_immersion_mode: bool = betterproto.bool_field(2)
"""Flag to enable or disable immersion mode"""
@dataclass(eq=False, repr=False)
class SetImmersionModeResponse(betterproto.Message):
"""Response for set immersion mode action."""
immersion_mode: bool = betterproto.bool_field(1)
"""The resulting immersion mode"""
@dataclass(eq=False, repr=False)
class TunerConfiguration(betterproto.Message):
enabled: Optional[bool] = betterproto.bool_field(1, optional=True)
"""Flag if tuner is enabled"""
tuner_mode: Optional["TunerMode"] = betterproto.enum_field(2, optional=True)
"""Tuner mode"""
power_target: "Power" = betterproto.message_field(3)
"""Tuner power target"""
hashrate_target: "TeraHashrate" = betterproto.message_field(4)
"""Tuner hashrate target"""
@dataclass(eq=False, repr=False)
class TunerConstraints(betterproto.Message):
power_target: "PowerConstraints" = betterproto.message_field(1)
"""Tuner power target mode constraints"""
hashrate_target: "HashrateConstraints" = betterproto.message_field(2)
"""Tuner hashrate target mode constraints"""
enabled: "BooleanConstraint" = betterproto.message_field(3)
"""Tuner enabled enabled default value"""
default_mode: "TunerMode" = betterproto.enum_field(4)
"""Default tuner mode"""
@dataclass(eq=False, repr=False)
class DpsConfiguration(betterproto.Message):
enabled: Optional[bool] = betterproto.bool_field(1, optional=True)
"""Flag if Dynamic Performance Scaling is enabled"""
power_step: "Power" = betterproto.message_field(2)
"""Dynamic Performance Scaling power step"""
hashrate_step: "TeraHashrate" = betterproto.message_field(3)
"""Dynamic Performance Scaling hashrate step"""
min_power_target: "Power" = betterproto.message_field(4)
"""Dynamic Performance Scaling minimal power target"""
min_hashrate_target: "TeraHashrate" = betterproto.message_field(5)
"""Dynamic Performance Scaling minimal hashrate target"""
shutdown_enabled: Optional[bool] = betterproto.bool_field(6, optional=True)
"""Flag if shutdown for Dynamic Performance Scaling is enabled"""
shutdown_duration: "Hours" = betterproto.message_field(7)
"""Dynamic Performance Scaling shutdown duration"""
@dataclass(eq=False, repr=False)
class HashboardPerformanceConfiguration(betterproto.Message):
global_frequency: "Frequency" = betterproto.message_field(1)
"""Common frequency for all HB"""
global_voltage: "Voltage" = betterproto.message_field(2)
"""Common voltage for all HB"""
hashboards: List["HashboardConfig"] = betterproto.message_field(3)
"""
Per hashboard frequency/voltage. It has higher priority than global one
"""
@dataclass(eq=False, repr=False)
class DpsConstraints(betterproto.Message):
power_step: "PowerConstraints" = betterproto.message_field(1)
"""Dynamic Performance Scaling power step constraints"""
hashrate_step: "HashrateConstraints" = betterproto.message_field(2)
"""Dynamic Performance Scaling hashrate step constraints"""
min_power_target: "PowerConstraints" = betterproto.message_field(3)
"""Dynamic Performance Scaling minimal power target constraints"""
min_hashrate_target: "HashrateConstraints" = betterproto.message_field(4)
"""Dynamic Performance Scaling minimal hashrate target constraints"""
shutdown_enabled: "BooleanConstraint" = betterproto.message_field(5)
"""Dynamic Performance Scaling enabled shutdown default value"""
shutdown_duration: "DurationConstraints" = betterproto.message_field(6)
"""Dynamic Performance Scaling shutdown duration constraints"""
enabled: "BooleanConstraint" = betterproto.message_field(7)
"""Dynamic Performance Scaling enabled default value"""
@dataclass(eq=False, repr=False)
class HashboardConstraints(betterproto.Message):
hashboard_ids: List[str] = betterproto.string_field(1)
"""List of possible HB indices"""
enabled: "BooleanConstraint" = betterproto.message_field(2)
"""Default value for flag if hashboards are enabled"""
frequency: "FrequencyConstraints" = betterproto.message_field(3)
"""HB frequency constraints"""
voltage: "VoltageConstraints" = betterproto.message_field(4)
"""HB frequency constraints"""
@dataclass(eq=False, repr=False)
class PowerTargetProfile(betterproto.Message):
"""Structure to handle power target profile"""
created: datetime = betterproto.message_field(1)
"""Creation timestamp"""
target: "Power" = betterproto.message_field(2)
"""Tuned power target"""
measured_hashrate: "GigaHashrate" = betterproto.message_field(3)
"""Measured hashrate"""
estimated_power_consumption: "Power" = betterproto.message_field(4)
"""Estimated power consumption"""
@dataclass(eq=False, repr=False)
class HashrateTargetProfile(betterproto.Message):
"""Structure to handle hashrate target profile"""
created: datetime = betterproto.message_field(1)
"""Creation timestamp"""
target: "TeraHashrate" = betterproto.message_field(2)
"""Tuned hashrate target"""
measured_hashrate: "GigaHashrate" = betterproto.message_field(3)
"""Measured hashrate"""
estimated_power_consumption: "Power" = betterproto.message_field(4)
"""Estimated power consumption"""
@dataclass(eq=False, repr=False)
class GetTunerStateRequest(betterproto.Message):
"""Request for getting the current performance data"""
pass
@dataclass(eq=False, repr=False)
class GetTunerStateResponse(betterproto.Message):
"""Response with the current tuner details"""
overall_tuner_state: "TunerState" = betterproto.enum_field(1)
"""Tuner state"""
power_target_mode_state: "PowerTargetModeState" = betterproto.message_field(
2, group="mode_state"
)
hashrate_target_mode_state: "HashrateTargetModeState" = betterproto.message_field(
3, group="mode_state"
)
@dataclass(eq=False, repr=False)
class PowerTargetModeState(betterproto.Message):
profile: "PowerTargetProfile" = betterproto.message_field(1)
"""current power target profile"""
current_target: "Power" = betterproto.message_field(2)
"""Current power target"""
@dataclass(eq=False, repr=False)
class HashrateTargetModeState(betterproto.Message):
profile: "HashrateTargetProfile" = betterproto.message_field(1)
"""Currently used profile"""
current_target: "TeraHashrate" = betterproto.message_field(2)
"""Current hashrate target"""
@dataclass(eq=False, repr=False)
class ListTargetProfilesRequest(betterproto.Message):
pass
@dataclass(eq=False, repr=False)
class ListTargetProfilesResponse(betterproto.Message):
power_target_profiles: List["PowerTargetProfile"] = betterproto.message_field(1)
"""Tuner profiles for power target mode"""
hashrate_target_profiles: List["HashrateTargetProfile"] = betterproto.message_field(
2
)
@dataclass(eq=False, repr=False)
class SetDefaultPowerTargetRequest(betterproto.Message):
"""Request for set default power target action."""
save_action: "SaveAction" = betterproto.enum_field(1)
"""Save action"""
@dataclass(eq=False, repr=False)
class SetPowerTargetRequest(betterproto.Message):
"""Request for set absolute power target action."""
save_action: "SaveAction" = betterproto.enum_field(1)
"""Save action"""
power_target: "Power" = betterproto.message_field(2)
"""Absolute value of power target"""
@dataclass(eq=False, repr=False)
class IncrementPowerTargetRequest(betterproto.Message):
"""Request for increment power target action."""
save_action: "SaveAction" = betterproto.enum_field(1)
"""Save action"""
power_target_increment: "Power" = betterproto.message_field(2)
"""Incremental value of power target"""
@dataclass(eq=False, repr=False)
class DecrementPowerTargetRequest(betterproto.Message):
"""Request for decrement power target action."""
save_action: "SaveAction" = betterproto.enum_field(1)
"""Save action"""
power_target_decrement: "Power" = betterproto.message_field(2)
"""Decremental value of power target"""
@dataclass(eq=False, repr=False)
class SetPowerTargetResponse(betterproto.Message):
"""Response for set power target action."""
power_target: "Power" = betterproto.message_field(1)
"""New value of power target"""
@dataclass(eq=False, repr=False)
class SetDefaultHashrateTargetRequest(betterproto.Message):
"""Request for set default hashrate target action."""
save_action: "SaveAction" = betterproto.enum_field(1)
"""Save action"""
@dataclass(eq=False, repr=False)
class SetHashrateTargetRequest(betterproto.Message):
"""Request for set absolute hashrate target action."""
save_action: "SaveAction" = betterproto.enum_field(1)
"""Save action"""
hashrate_target: "TeraHashrate" = betterproto.message_field(2)
"""Absolute value of hashrate target"""
@dataclass(eq=False, repr=False)
class IncrementHashrateTargetRequest(betterproto.Message):
"""Request for increment hashrate target action."""
save_action: "SaveAction" = betterproto.enum_field(1)
"""Save action"""
hashrate_target_increment: "TeraHashrate" = betterproto.message_field(2)
"""Incremental value of hashrate target"""
@dataclass(eq=False, repr=False)
class DecrementHashrateTargetRequest(betterproto.Message):
"""Request for decrement hashrate target action."""
save_action: "SaveAction" = betterproto.enum_field(1)
"""Save action"""
hashrate_target_decrement: "TeraHashrate" = betterproto.message_field(2)
"""Decremental value of hashrate target"""
@dataclass(eq=False, repr=False)
class SetHashrateTargetResponse(betterproto.Message):
"""Response for set hashrate target action."""
hashrate_target: "TeraHashrate" = betterproto.message_field(1)
"""New value of hashrate target"""
@dataclass(eq=False, repr=False)
class DpsPowerTarget(betterproto.Message):
power_step: "Power" = betterproto.message_field(1)
"""Dynamic Performance Scaling power step"""
min_power_target: "Power" = betterproto.message_field(2)
"""Dynamic Performance Scaling minimal power target"""
@dataclass(eq=False, repr=False)
class DpsHashrateTarget(betterproto.Message):
hashrate_step: "TeraHashrate" = betterproto.message_field(1)
"""Dynamic Performance Scaling hashrate step"""
min_hashrate_target: "TeraHashrate" = betterproto.message_field(2)
"""Dynamic Performance Scaling minimal hashrate target"""
@dataclass(eq=False, repr=False)
class DpsTarget(betterproto.Message):
power_target: "DpsPowerTarget" = betterproto.message_field(1, group="target")
"""Power target settings for Dynamic Performance Scaling"""
hashrate_target: "DpsHashrateTarget" = betterproto.message_field(2, group="target")
"""Hashrate target settings for Dynamic Performance Scaling"""
@dataclass(eq=False, repr=False)
class SetDpsRequest(betterproto.Message):
save_action: "SaveAction" = betterproto.enum_field(1)
"""Save action"""
enable: Optional[bool] = betterproto.bool_field(2, optional=True)
"""Flag if Dynamic Performance Scaling should be enabled"""
enable_shutdown: Optional[bool] = betterproto.bool_field(3, optional=True)
"""Flag if shutdown for Dynamic Performance Scaling should be enabled"""
shutdown_duration: Optional["Hours"] = betterproto.message_field(4, optional=True)
"""Dynamic Performance Scaling shutdown duration"""
target: "DpsTarget" = betterproto.message_field(5)
"""Dynamic Performance Scaling target"""
@dataclass(eq=False, repr=False)
class SetDpsResponse(betterproto.Message):
enabled: Optional[bool] = betterproto.bool_field(1, optional=True)
"""Flag if Dynamic Performance Scaling is enabled"""
shutdown_enabled: Optional[bool] = betterproto.bool_field(2, optional=True)
"""Flag if shutdown for Dynamic Performance Scaling should be enabled"""
shutdown_duration: Optional["Hours"] = betterproto.message_field(3, optional=True)
"""Dynamic Performance Scaling shutdown duration"""
power_target: "DpsPowerTarget" = betterproto.message_field(4)
"""Dynamic Performance Scaling Power target"""
hashrate_target: "DpsHashrateTarget" = betterproto.message_field(5)
"""Dynamic Performance Scaling hashrate target"""
@dataclass(eq=False, repr=False)
class HashboardPerformanceSettings(betterproto.Message):
id: str = betterproto.string_field(1)
"""Hashboard id"""
frequency: "Frequency" = betterproto.message_field(2)
"""Hashboard frequency"""
voltage: "Voltage" = betterproto.message_field(3)
"""Hashboard voltage"""
@dataclass(eq=False, repr=False)
class HashboardConfig(betterproto.Message):
id: str = betterproto.string_field(1)
"""Hashboard id"""
enabled: Optional[bool] = betterproto.bool_field(2, optional=True)
"""Flag if HB si enabled"""
frequency: "Frequency" = betterproto.message_field(3)
"""Hashboard frequency"""
voltage: "Voltage" = betterproto.message_field(4)
"""Hashboard voltage"""
@dataclass(eq=False, repr=False)
class ManualPerformanceMode(betterproto.Message):
global_frequency: "Frequency" = betterproto.message_field(1)
"""Global hashboard frequency"""
global_voltage: "Voltage" = betterproto.message_field(2)
"""Global hashboard voltage"""
hashboards: List["HashboardPerformanceSettings"] = betterproto.message_field(3)
"""
Per hashboard frequency/voltage. It has higher priority than global one
"""
@dataclass(eq=False, repr=False)
class PowerTargetMode(betterproto.Message):
power_target: "Power" = betterproto.message_field(1)
"""Power target"""
@dataclass(eq=False, repr=False)
class HashrateTargetMode(betterproto.Message):
hashrate_target: "TeraHashrate" = betterproto.message_field(1)
"""Hashrate target"""
@dataclass(eq=False, repr=False)
class TunerPerformanceMode(betterproto.Message):
power_target: "PowerTargetMode" = betterproto.message_field(1, group="target")
"""Tuner power target"""
hashrate_target: "HashrateTargetMode" = betterproto.message_field(2, group="target")
"""Tuner hashrate target"""
@dataclass(eq=False, repr=False)
class SetPerformanceModeRequest(betterproto.Message):
save_action: "SaveAction" = betterproto.enum_field(1)
"""Save action"""
mode: "PerformanceMode" = betterproto.message_field(2)
@dataclass(eq=False, repr=False)
class PerformanceMode(betterproto.Message):
manual_mode: "ManualPerformanceMode" = betterproto.message_field(1, group="mode")
tuner_mode: "TunerPerformanceMode" = betterproto.message_field(2, group="mode")
@dataclass(eq=False, repr=False)
class GetPerformanceModeRequest(betterproto.Message):
pass
@dataclass(eq=False, repr=False)
class RemoveTunedProfilesRequest(betterproto.Message):
pass
@dataclass(eq=False, repr=False)
class RemoveTunedProfilesResponse(betterproto.Message):
pass
@dataclass(eq=False, repr=False)
class Quota(betterproto.Message):
"""Structure for quota load balance strategy"""
value: int = betterproto.uint32_field(1)
@dataclass(eq=False, repr=False)
class FixedShareRatio(betterproto.Message):
"""
Structure for fixed share ratio load balance strategy
Fixed share ratio is value between 0.0 to 1.0 where 1.0 represents that all work is
generated from the group
"""
value: float = betterproto.double_field(1)
@dataclass(eq=False, repr=False)
class PoolGroupConfiguration(betterproto.Message):
"""Structure handle configured mining group"""
uid: str = betterproto.string_field(1)
"""Group id"""
name: str = betterproto.string_field(2)
"""Group name"""
quota: "Quota" = betterproto.message_field(3, group="load_balance_strategy")
fixed_share_ratio: "FixedShareRatio" = betterproto.message_field(
4, group="load_balance_strategy"
)
pools: List["PoolConfiguration"] = betterproto.message_field(5)
"""Group pools"""
@dataclass(eq=False, repr=False)
class PoolConfiguration(betterproto.Message):
"""Structure handle information about configured pool"""
uid: str = betterproto.string_field(1)
"""Pool connection id"""
url: str = betterproto.string_field(2)
"""Pool connection URL"""
user: str = betterproto.string_field(3)
"""Pool connection user"""
password: Optional[str] = betterproto.string_field(4, optional=True)
"""Pool connection password if set"""
enabled: Optional[bool] = betterproto.bool_field(5, optional=True)
"""Flag if pool connection is enabled"""
@dataclass(eq=False, repr=False)
class PoolGroup(betterproto.Message):
"""Structure handle all pool group details"""
name: str = betterproto.string_field(1)
"""Group name"""
quota: "Quota" = betterproto.message_field(2, group="strategy")
fixed_share_ratio: "FixedShareRatio" = betterproto.message_field(
3, group="strategy"
)
pools: List["Pool"] = betterproto.message_field(4)
"""Group pools"""
@dataclass(eq=False, repr=False)
class Pool(betterproto.Message):
"""Structure handle information about configured pool"""
uid: str = betterproto.string_field(1)
"""Pool connection id"""
url: str = betterproto.string_field(2)
"""Pool connection URL"""
user: str = betterproto.string_field(3)
"""Pool connection user"""
enabled: bool = betterproto.bool_field(4)
"""Flag if pool connection is enabled"""
alive: bool = betterproto.bool_field(5)
"""Flag if pool is alive"""
active: bool = betterproto.bool_field(6)
"""Flag if pool is active (running)"""
stats: "PoolStats" = betterproto.message_field(7)
"""Pool stats"""
@dataclass(eq=False, repr=False)
class PoolStats(betterproto.Message):
"""Structure handle pool statistics"""
accepted_shares: int = betterproto.uint64_field(1)
"""Accepted shares"""
rejected_shares: int = betterproto.uint64_field(2)
"""Rejected shares"""
stale_shares: int = betterproto.uint64_field(3)
"""Stale shares"""
last_difficulty: int = betterproto.uint64_field(4)
"""Last difficulty"""
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | true |
UpstreamData/pyasic | https://github.com/UpstreamData/pyasic/blob/820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3/docs/generate_miners.py | docs/generate_miners.py | import importlib
import os
import warnings
from pathlib import Path
from typing import Any
from pyasic.miners.factory import MINER_CLASSES, MinerTypes
warnings.filterwarnings("ignore")
def path(cls):
module = importlib.import_module(cls.__module__)
return module.__name__ + "." + cls.__name__
def make(cls):
p = path(cls)
return p.split(".")[2]
def model_type(cls):
p = path(cls)
return p.split(".")[4]
def backend_str(backend: MinerTypes) -> str:
match backend:
case MinerTypes.ANTMINER:
return "Stock Firmware Antminers"
case MinerTypes.AURADINE:
return "Stock Firmware Auradine Miners"
case MinerTypes.AVALONMINER:
return "Stock Firmware Avalonminers"
case MinerTypes.VNISH:
return "Vnish Firmware Miners"
case MinerTypes.EPIC:
return "ePIC Firmware Miners"
case MinerTypes.BRAIINS_OS:
return "BOS+ Firmware Miners"
case MinerTypes.HIVEON:
return "HiveOS Firmware Miners"
case MinerTypes.INNOSILICON:
return "Stock Firmware Innosilicons"
case MinerTypes.WHATSMINER:
return "Stock Firmware Whatsminers"
case MinerTypes.GOLDSHELL:
return "Stock Firmware Goldshells"
case MinerTypes.LUX_OS:
return "LuxOS Firmware Miners"
case MinerTypes.MARATHON:
return "Mara Firmware Miners"
case MinerTypes.BITAXE:
return "Stock Firmware BitAxe Miners"
case MinerTypes.LUCKYMINER:
return "Stock Firmware Lucky Miners"
case MinerTypes.ICERIVER:
return "Stock Firmware IceRiver Miners"
case MinerTypes.HAMMER:
return "Stock Firmware Hammer Miners"
case MinerTypes.VOLCMINER:
return "Stock Firmware Volcminers"
case MinerTypes.ELPHAPEX:
return "Stock Firmware Elphapex Miners"
case MinerTypes.MSKMINER:
return "MSKMiner Firmware Miners"
raise TypeError("Unknown miner backend, cannot generate docs")
def create_url_str(mtype: str):
return (
mtype.lower()
.replace(" ", "-")
.replace("(", "")
.replace(")", "")
.replace("+", "_1")
)
HEADER_FORMAT = "# pyasic\n## {} Models\n\n"
MINER_HEADER_FORMAT = "## {}\n"
DATA_FORMAT = """
- [{}] Shutdowns
- [{}] Power Modes
- [{}] Setpoints
- [{}] Presets
::: {}
handler: python
options:
show_root_heading: false
heading_level: 0
"""
SUPPORTED_TYPES_HEADER = """# pyasic
## Supported Miners
Supported miner types are here on this list. If your miner (or miner version) is not on this list, please feel free to [open an issue on GitHub](https://github.com/UpstreamData/pyasic/issues) to get it added.
Keep in mind that some functionality is only supported for specific miners or firmwares, please check the page for your miner to make sure the functionality you need is supported.
##### pyasic currently supports the following miners and subtypes:
<style>
details {
margin:0px;
padding-top:0px;
padding-bottom:0px;
}
</style>
"""
BACKEND_TYPE_HEADER = """
<details>
<summary>{}:</summary>
<ul>"""
MINER_TYPE_HEADER = """
<details>
<summary>{} Series:</summary>
<ul>"""
MINER_DETAILS = """
<li><a href="../{}/{}#{}">{}</a></li>"""
MINER_TYPE_CLOSER = """
</ul>
</details>"""
BACKEND_TYPE_CLOSER = """
</ul>
</details>"""
m_data: dict[str, dict[str, list[type[Any]]]] = {}
done = []
for m in MINER_CLASSES:
for t in sorted(MINER_CLASSES[m], key=lambda x: x or ""):
if t is not None and MINER_CLASSES[m][t] not in done:
miner = MINER_CLASSES[m][t]
if make(miner) not in m_data:
m_data[make(miner)] = {}
if model_type(miner) not in m_data[make(miner)]:
m_data[make(miner)][model_type(miner)] = []
m_data[make(miner)][model_type(miner)].append(miner)
done.append(miner)
def create_directory_structure(directory, data):
if not os.path.exists(directory):
os.makedirs(directory)
for key, value in data.items():
subdirectory = os.path.join(directory, key)
if isinstance(value, dict):
create_directory_structure(subdirectory, value)
elif isinstance(value, list):
file_path = os.path.join(subdirectory + ".md")
with open(file_path, "w") as file:
file.write(HEADER_FORMAT.format(key))
for item in value:
obj = item("1.1.1.1")
header = obj.model
file.write(MINER_HEADER_FORMAT.format(header))
file.write(
DATA_FORMAT.format(
"x" if obj.supports_shutdown else " ",
"x" if obj.supports_power_modes else " ",
"x" if obj.supports_autotuning else " ",
"x" if obj.supports_presets else " ",
path(item),
)
)
def create_supported_types(directory):
with open(os.path.join(directory, "supported_types.md"), "w") as file:
file.write(SUPPORTED_TYPES_HEADER)
for mback in MINER_CLASSES:
backend_types = {}
file.write(BACKEND_TYPE_HEADER.format(backend_str(mback)))
for mtype in MINER_CLASSES[mback]:
if mtype is None:
continue
m = MINER_CLASSES[mback][mtype]
if model_type(m) not in backend_types:
backend_types[model_type(m)] = []
backend_types[model_type(m)].append(m)
for mtype in backend_types:
file.write(MINER_TYPE_HEADER.format(mtype))
for minstance in backend_types[mtype]:
model = minstance("1.1.1.1").model
file.write(
MINER_DETAILS.format(
make(minstance), mtype, create_url_str(model), model
)
)
file.write(MINER_TYPE_CLOSER)
file.write(BACKEND_TYPE_CLOSER)
if __name__ == "__main__":
root_directory = Path(__file__).parent.joinpath("miners")
create_directory_structure(root_directory, m_data)
create_supported_types(root_directory)
| python | Apache-2.0 | 820d2aafdaa6bf2b046f94c017bf7ea58b7c50f3 | 2026-01-05T07:14:50.237218Z | false |
reallyrehan/flask-fileexplorer | https://github.com/reallyrehan/flask-fileexplorer/blob/74f3f33900191e82460e8a515bc071267efc3ba5/setup.py | setup.py | from flask import Flask, render_template, request, send_file, redirect, session, jsonify
from werkzeug.utils import secure_filename
from hurry.filesize import size
from datetime import datetime
from flask_fontawesome import FontAwesome
from flask_qrcode import QRcode
from pathlib import Path
import os
import mimetypes
import sys
import re
import json
import zipfile
import filetype
from urllib.parse import unquote
import socket
hostname = socket.gethostname()
IPAddr = socket.gethostbyname(hostname)
print("Your Computer Name is: " + hostname)
print("Your Computer IP Address is: " + IPAddr)
app = Flask(__name__)
#app.config["SERVER_NAME"] = "wifile.com"
app.secret_key = 'my_secret_key'
# FoNT AWESOME
fa = FontAwesome(app)
# QRcode
qrcode = QRcode(app)
# Config file
config = os.path.abspath(os.path.join(os.path.dirname(__file__), "config.json"))
with open(config) as json_data_file:
config_data = json.load(json_data_file)
hiddenList = config_data["Hidden"]
favList = config_data["Favorites"]
password = config_data["Password"]
maxFileNameLength = config_data["maxFileNameLength"]
sort_by_selected = config_data["sort_by_selected"]
sorted_label_dict = {0:"Alphabetical", 1:"Date Created", 2:"Date Modified", 3:"Size"}
sorted_label = sorted_label_dict[sort_by_selected]
currentDirectory = config_data["rootDir"]
osWindows = False # Not Windows
default_view = 0
tp_dict = {'image': [['png', "jpg", 'svg'], 'image-icon.png'],
'audio': [['mp3', 'wav'], 'audio-icon.png'],
'video': [['mp4', 'flv'], 'video-icon.png'],
"pdf": [['pdf'], 'pdf-icon.png'],
"word": [['docx', 'doc'], 'doc-icon.png'],
"txt": [['txt'], 'txt-icon.png'],
"compressed":[["zip", "rar"], 'copressed-icon.png'],
"code": [['css', 'scss', 'html', 'py', 'js', 'cpp'], 'code-icon.png']
}
supported_formats = video_types = ['mp4', "webm", "opgg",'mp3', 'pdf', 'txt', 'html', 'css', 'svg', 'js', 'png', 'jpg']
if 'win32' in sys.platform or 'win64' in sys.platform:
# import win32api
osWindows = True
# WINDOWS FEATURE
# drives = win32api.GetLogicalDriveStrings()
# drives=drives.replace('\\','')
# drives = drives.split('\000')[:-1]
# drives.extend(favList)
# favList=drives
if(len(favList) > 3):
favList = favList[0:3]
# print(favList)
# if(len(favList)>0):
# for i in range(0,len(favList)):
# favList[i]=favList[i].replace('\\','>') #CHANGE FOR MAC
# WINDOWS FEATURE
# drives = win32api.GetLogicalDriveStrings()
# drives=drives.replace('\\','')
# drives = drives.split('\000')[:-1]
# drives.extend(favList)
# favList=drives
def make_zipfile(output_filename, source_dir):
relroot = os.path.abspath(os.path.join(source_dir, os.pardir))
with zipfile.ZipFile(output_filename, "w", zipfile.ZIP_DEFLATED) as zip:
for root, dirs, files in os.walk(source_dir):
# add directory (needed for empty dirs)
zip.write(root, os.path.relpath(root, relroot))
for file in files:
filename = os.path.join(root, file)
if os.path.isfile(filename): # regular files only
arcname = os.path.join(
os.path.relpath(root, relroot), file)
zip.write(filename, arcname)
@app.route('/login/')
@app.route('/login/<path:var>')
def loginMethod(var=""):
global password
# print("LOGGING IN")
# print(var)
if(password == ''):
session['login'] = True
if('login' in session):
return redirect('/'+var)
else:
return render_template('login.html')
@app.route('/login/', methods=['POST'])
@app.route('/login/<path:var>', methods=['POST'])
def loginPost(var=""):
global password
text = request.form['text']
if(text == password):
session['login'] = True
return redirect('/'+var)
else:
return redirect('/login/'+var)
@app.route('/logout/')
def logoutMethod():
if('login' in session):
session.pop('login', None)
return redirect('/login/')
# @app.route('/exit/')
# def exitMethod():
# exit()
def hidden(path):
for i in hiddenList:
if i != '' and i in path:
return True
return False
def changeDirectory(path):
global currentDirectory, osWindows
pathC = path.split('/')
# print(path)
if(osWindows):
myPath = '//'.join(pathC)+'//'
else:
myPath = '/'+'/'.join(pathC)
# print(myPath)
myPath = unquote(myPath)
# print("HELLO")
# print(myPath)
try:
os.chdir(myPath)
ans = True
if (osWindows):
if(currentDirectory.replace('/', '\\') not in os.getcwd()):
ans = False
else:
if(currentDirectory not in os.getcwd()):
ans = False
except Exception as e:
ans = False
return ans
# def getDirList():
# dList= list(filter(lambda x: os.path.isdir(x), os.listdir('.')))
# finalList = []
# curDir=os.getcwd()
# for i in dList:
# if(hidden(curDir+'/'+i)==False):
# finalList.append(i)
# return(finalList)
@app.route('/changeView')
def changeView():
global default_view
# print('view received')
v = int(request.args.get('view', 0))
if v in [0, 1]:
default_view = v
else:
default_view = 0
return jsonify({
"txt": default_view,
})
@app.route('/changeSort')
def toggleSort():
global sort_by_selected
# 0 == alphabetical
# 1 == date created
# 2 == date modified
# 3 == size+alphabetical
sort_by_selected = sort_by_selected + 1 if sort_by_selected < 3 else 0
session['sorting_default'] = sort_by_selected
return jsonify({
"txt": sort_by_selected,
})
def getDirList():
# print(default_view)
global maxFileNameLength, tp_dict, hostname
dList = list(os.listdir('.'))
dList = list(filter(lambda x: os.path.isdir(x), os.listdir('.')))
dir_list_dict = []
fList = list(filter(lambda x: not os.path.isdir(x), os.listdir('.')))
file_list_dict = []
curDir = os.getcwd()
# print(os.stat(os.getcwd()))
for i in dList:
if(hidden(curDir+'/'+i) == False):
image = 'folder5.png'
if len(i) > maxFileNameLength:
dots = "..."
else:
dots = ""
dir_stats = os.stat(i)
temp_dir = {}
temp_dir['f'] = i[0:maxFileNameLength]+dots
temp_dir['f_url'] = re.sub("#", "|HASHTAG|", i)
temp_dir['currentDir'] = curDir
temp_dir['f_complete'] = i
temp_dir['image'] = image
temp_dir['dtc'] = datetime.utcfromtimestamp(dir_stats.st_ctime).strftime('%Y-%m-%d %H:%M:%S')
temp_dir['dtm'] = datetime.utcfromtimestamp(dir_stats.st_mtime).strftime('%Y-%m-%d %H:%M:%S')
temp_dir['size'] = "---"
dir_list_dict.append(temp_dir)
from utils import get_file_extension
for i in fList:
if(hidden(curDir+'/'+i) == False):
image = None
try:
tp = get_file_extension(i)
for file_type in tp_dict.values():
if tp in file_type[0]:
image = "files_icon/"+file_type[1]
break
tp = "" if not tp else tp
except Exception as e:
pass
if not image:
image = 'files_icon/unknown-icon.png'
if len(i) > maxFileNameLength:
dots = "..."
else:
dots = ""
temp_file = {}
temp_file['f'] = i[0:maxFileNameLength]+dots
temp_file['f_url'] = re.sub("#", "|HASHTAG|", i)
temp_file['currentDir'] = curDir
temp_file['f_complete'] = i
temp_file['image'] = image
temp_file['supported'] = True if tp.lower() in supported_formats else False
try:
dir_stats = os.stat(i)
temp_file['dtc'] = datetime.utcfromtimestamp(dir_stats.st_ctime).strftime('%Y-%m-%d %H:%M:%S')
temp_file['dtm'] = datetime.utcfromtimestamp(dir_stats.st_mtime).strftime('%Y-%m-%d %H:%M:%S')
temp_file['size'] = size(dir_stats.st_size)
temp_file['size_b'] = dir_stats.st_size
except Exception as e:
temp_file['dtc'] = "---"
temp_file['dtm'] = "---"
temp_file['size'] = "---"
temp_file['size_b'] = -1
file_list_dict.append(temp_file)
return sort_structure(dir_list_dict, file_list_dict)
def sort_structure(dir_list_dict, file_list_dict):
global sort_by_selected, sorted_label_dict
sort_by_selected = session.get('sorting_default', sort_by_selected)
if sort_by_selected == 0:
dir_list_dict = sorted(dir_list_dict, key=lambda x: x['f'].lower())
file_list_dict = sorted(file_list_dict, key=lambda x: x['f'].lower())
elif sort_by_selected == 1:
dir_list_dict = sorted(dir_list_dict, key=lambda x: x['dtc'])
file_list_dict = sorted(file_list_dict, key=lambda x: x['dtc'])
elif sort_by_selected == 2:
dir_list_dict = sorted(dir_list_dict, key=lambda x: x['dtm'])
file_list_dict = sorted(file_list_dict, key=lambda x: x['dtm'])
elif sort_by_selected == 3:
dir_list_dict = sorted(dir_list_dict, key=lambda x: x['f'].lower())
file_list_dict = sorted(file_list_dict, key=lambda x: x['size_b'])
sorted_by_label = sorted_label_dict[sort_by_selected]
return dir_list_dict, file_list_dict, sorted_by_label
def getFileList():
dList = list(filter(lambda x: os.path.isfile(x), os.listdir('.')))
finalList = []
curDir = os.getcwd()
for i in dList:
if(hidden(curDir+'/'+i) == False):
finalList.append(i)
return(finalList)
@app.route('/files/', methods=['GET'])
@app.route('/files/<path:var>', methods=['GET'])
def filePage(var=""):
global default_view
if('login' not in session):
return redirect('/login/files/'+var)
# print(var)
if(changeDirectory(var) == False):
# Invalid Directory
print("Directory Doesn't Exist")
return render_template('404.html', errorCode=300, errorText='Invalid Directory Path', favList=favList)
try:
dir_dict, file_dict, sorted_label_current = getDirList()
if default_view == 0:
var1, var2 = "DISABLED", ""
default_view_css_1, default_view_css_2 = '', 'style=display:none'
else:
var1, var2 = "", "DISABLED"
default_view_css_1, default_view_css_2 = 'style=display:none', ''
except Exception as e:
return render_template('404.html', errorCode=200, errorText='Permission Denied {}'.format(e), favList=favList)
if osWindows:
cList = var.split('/')
var_path = '<a style = "color:black;"href = "/files/' + \
cList[0]+'">'+unquote(cList[0])+'</a>'
for c in range(1, len(cList)):
var_path += ' / <a style = "color:black;"href = "/files/' + \
'/'.join(cList[0:c+1])+'">'+unquote(cList[c])+'</a>'
else:
cList = var.split('/')
var_path = '<a href = "/files/"><img src = "/static/root.png" style = "height:25px;width: 25px;"> </a>'
for c in range(0, len(cList)):
var_path += ' / <a style = "color:black;"href = "/files/' + \
'/'.join(cList[0:c+1])+'">'+unquote(cList[c])+'</a>'
return render_template('home.html', currentDir=var, favList=favList, default_view_css_1=default_view_css_1, default_view_css_2=default_view_css_2, view0_button=var1, view1_button=var2, currentDir_path=var_path, dir_dict=dir_dict, file_dict=file_dict, sorted_label_current=sorted_label_current)
@app.route('/', methods=['GET'])
def homePage():
global currentDirectory, osWindows
if('login' not in session):
return redirect('/login/')
if osWindows:
if(currentDirectory == ""):
return redirect('/files/C:')
else:
# cura = currentDirectory
cura = '>'.join(currentDirectory.split('\\'))
return redirect('/files/'+cura)
else:
return redirect('/files/'+currentDirectory)
# REDIRECT TO UNTITLED OR C DRIVE FOR WINDOWS OR / FOR MAC
@app.route('/browse/<path:var>', defaults={"browse":True})
@app.route('/download/<path:var>', defaults={"browse":False})
def browseFile(var, browse):
var = var.replace("|HASHTAG|", "#")
if('login' not in session):
return redirect('/login/download/'+var)
# os.chdir(currentDirectory)
pathC = unquote(var).split('/')
#print(var)
if(pathC[0] == ''):
pathC.remove(pathC[0])
# if osWindows:
# fPath = currentDirectory+'//'.join(pathC)
# else:
# fPath = '/'+currentDirectory+'//'.join(pathC)
if osWindows:
fPath = '//'.join(pathC)
else:
fPath = '/'+'//'.join(pathC)
# print("HELLO")
# print('//'.join(fPath.split("//")[0:-1]))
# print(hidden('//'.join(fPath.split("//")[0:-1])))
f_path_hidden = '//'.join(fPath.split("//")[0:-1])
if(hidden(f_path_hidden) == True or changeDirectory(f_path_hidden) == False):
# FILE HIDDEN
return render_template('404.html', errorCode=100, errorText='File Hidden', favList=favList)
fName = pathC[len(pathC)-1]
#print(fPath)
if browse:
from utils import is_media
is_media_file = is_media(fPath)
if is_media_file:
from utils import get_file
return get_file(fPath, is_media_file)
return send_file(fPath)
try:
return send_file(fPath, download_name=fName)
except Exception as e:
return render_template('404.html', errorCode=200, errorText='Permission Denied {}'.format(e), favList=favList)
@app.route('/downloadFolder/<path:var>')
def downloadFolder(var):
if('login' not in session):
return redirect('/login/downloadFolder/'+var)
pathC = var.split('/')
if(pathC[0] == ''):
pathC.remove(pathC[0])
if osWindows:
fPath = '//'.join(pathC)
else:
fPath = '/'+'//'.join(pathC)
f_path_hidden = '//'.join(fPath.split("//")[0:-1])
if(hidden(f_path_hidden) == True or changeDirectory(f_path_hidden) == False):
# FILE HIDDEN
return render_template('404.html', errorCode=100, errorText='File Hidden', favList=favList)
fName = pathC[len(pathC)-1]+'.zip'
downloads_folder = str(Path.home() / "Downloads\\temp")
if not os.path.exists(downloads_folder):
os.mkdir(downloads_folder)
try:
make_zipfile(downloads_folder+'\\abc.zip', os.getcwd())
return send_file(downloads_folder+'\\abc.zip', attachment_filename=fName)
except Exception as e:
print(e)
return render_template('404.html', errorCode=200, errorText='Permission Denied {}'.format(e), favList=favList)
@app.errorhandler(404)
def page_not_found(e):
if('login' not in session):
return redirect('/login/')
# note that we set the 404 status explicitly
return render_template('404.html', errorCode=404, errorText='Page Not Found', favList=favList), 404
@app.route('/upload/', methods=['GET', 'POST'])
@app.route('/upload/<path:var>', methods=['GET', 'POST'])
def uploadFile(var=""):
if('login' not in session):
return render_template('login.html')
text = ""
if request.method == 'POST':
pathC = var.split('/')
if(pathC[0] == ''):
pathC.remove(pathC[0])
# if osWindows:
# fPath = currentDirectory+'//'.join(pathC)
# else:
# fPath = '/'+currentDirectory+'//'.join(pathC)
if osWindows:
fPath = '//'.join(pathC)
else:
fPath = '/'+'//'.join(pathC)
f_path_hidden = fPath
# print(f_path_hidden)
# print(hidden(f_path_hidden))
if(hidden(f_path_hidden) == True or changeDirectory(f_path_hidden) == False):
# FILE HIDDEN
return render_template('404.html', errorCode=100, errorText='File Hidden', favList=favList)
files = request.files.getlist('files[]')
fileNo = 0
for file in files:
file.filename = secure_filename(file.filename) # ensure file name is secure
fupload = os.path.join(fPath, file.filename)
if not os.path.exists(fupload):
try:
file.save(fupload)
print(file.filename + ' Uploaded')
text = text + file.filename + ' Uploaded<br>'
fileNo = fileNo + 1
except Exception as e:
print(file.filename + ' Failed with Exception '+str(e))
text = text + file.filename + \
' Failed with Exception '+str(e) + '<br>'
continue
else:
print(file.filename +
' Failed because File Already Exists or File Type Issue')
text = text + file.filename + \
' Failed because File Already Exists or File Type not secure <br>'
fileNo2 = len(files)-fileNo
return render_template('uploadsuccess.html', text=text, fileNo=fileNo, fileNo2=fileNo2, favList=favList)
@app.route('/qr/<path:var>')
def qrFile(var):
global hostname
if('login' not in session):
return redirect('/login/qr/'+var)
# os.chdir(currentDirectory)
pathC = unquote(var).split('/')
if(pathC[0] == ''):
pathC.remove(pathC[0])
if osWindows:
fPath = '//'.join(pathC)
else:
fPath = '/'+'//'.join(pathC)
f_path_hidden = '//'.join(fPath.split("//")[0:-1])
if(hidden(f_path_hidden) == True or changeDirectory(f_path_hidden) == False):
# FILE HIDDEN
return render_template('404.html', errorCode=100, errorText='File Hidden', favList=favList)
fName = pathC[len(pathC)-1]
qr_text = 'http://'+hostname+"//download//"+fPath
return send_file(qrcode(qr_text, mode="raw"), mimetype="image/png")
return send_file(fPath, attachment_filename=fName)
if __name__ == '__main__':
local = "127.0.0.1"
public = '0.0.0.0'
app.run(host=public, debug=True, port=80) | python | MIT | 74f3f33900191e82460e8a515bc071267efc3ba5 | 2026-01-05T07:14:45.262249Z | false |
reallyrehan/flask-fileexplorer | https://github.com/reallyrehan/flask-fileexplorer/blob/74f3f33900191e82460e8a515bc071267efc3ba5/utils.py | utils.py | from setup import app
from flask import request, Response
import os
import re
# File extensions served with a video/<ext> vs audio/<ext> MIME type.
# NOTE(review): "opgg" looks like a typo for "ogg"/"ogv" — confirm before use.
video_types = ['mp4', "webm", "opgg"]
audio_types = ['mp3', "wav", "ogg", "mpeg", "aac", "3gpp", "3gpp2", "aiff", "x-aiff", "amr", "mpga"]
@app.after_request
def after_request(response):
    """Advertise HTTP byte-range support on every outgoing response."""
    response.headers.add('Accept-Ranges', 'bytes')
    return response
def get_chunk(start_byte=0, end_byte=None, full_path=None):
    """Read a byte range from the file at *full_path*.

    start_byte: first byte to read. BUG FIX: the old default of None crashed
        in the length arithmetic and in f.seek(); 0 is the sane default.
    end_byte: last byte to read (inclusive), or None to read to end of file.
        NOTE: an explicit end_byte of 0 is treated as "no end" (falsy check),
        matching the original behaviour relied on by get_file().
    full_path: path of the file to read.

    Returns (chunk_bytes, start_byte, length, total_file_size).
    """
    file_size = os.stat(full_path).st_size
    if end_byte:
        length = end_byte + 1 - start_byte
    else:
        length = file_size - start_byte
    with open(full_path, 'rb') as f:
        f.seek(start_byte)
        chunk = f.read(length)
    return chunk, start_byte, length, file_size
def get_file(file_path, mimetype):
    """Serve *file_path* as an HTTP 206 partial response, honouring Range.

    file_path: absolute path of the media file.
    mimetype: full MIME type string (e.g. 'video/mp4' or 'audio/mp3'),
        as produced by is_media().
    """
    range_header = request.headers.get('Range', None)
    start_byte, end_byte = 0, None
    if range_header:
        # Range header looks like "bytes=<start>-<end>"; <end> may be empty.
        match = re.search(r'(\d+)-(\d*)', range_header)
        groups = match.groups()

        if groups[0]:
            start_byte = int(groups[0])
        if groups[1]:
            end_byte = int(groups[1])

    chunk, start, length, file_size = get_chunk(start_byte, end_byte, file_path)
    # BUG FIX: the Response used to also pass mimetype=f'video/{mimetype}',
    # producing malformed values like 'video/audio/mp3'. content_type (which
    # takes precedence over mimetype in flask.Response) already carries the
    # correct full type, so the bogus kwarg and the debug print are removed.
    resp = Response(chunk, 206, content_type=mimetype, direct_passthrough=True)
    resp.headers.add('Content-Range',
                     'bytes {0}-{1}/{2}'.format(start, start + length - 1, file_size))
    return resp
def is_media(filepath):
    """Return the MIME type for *filepath* if it is a known media file.

    Returns 'video/<ext>' or 'audio/<ext>' for recognised extensions,
    False otherwise.
    """
    found_media = re.search(r"\.mp4$|\.mp3$", filepath, re.IGNORECASE)
    if not found_media:
        return False
    extension = found_media[0].lower()[1:]
    # BUG FIX: the original compared the re.Match object (found_media) against
    # video_types, which is never true, so even .mp4 files were served as
    # 'audio/mp4'. Compare the extension string instead.
    if extension in video_types:
        return f"video/{extension}"
    return f"audio/{extension}"
def get_file_extension(fname):
    """Return *fname*'s trailing extension, lowercased and without the dot.

    Returns None when the name has no dot-suffix at all.
    """
    match = re.search("\.[A-Za-z0-9]*$", fname, re.IGNORECASE)
    if match is None:
        return None
    return match[0][1:].lower()
reallyrehan/flask-fileexplorer | https://github.com/reallyrehan/flask-fileexplorer/blob/74f3f33900191e82460e8a515bc071267efc3ba5/build/setup.py | build/setup.py | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 22 18:17:01 2019
@author: rehan.ahmed
"""
from cx_Freeze import setup, Executable
# cx_Freeze build configuration for the frozen Windows executable.
includefiles = [ ]  # extra data files to bundle (none currently)
includes = ['jinja2.ext'] # add jinja2.ext here
excludes = []  # modules to leave out of the frozen build
setup(
    name = 'WiFile',
    version = '1.1',
    description = 'File explorer over wifi',
    author = 'Rehan Ahmed',
    author_email = 'rhnahdshk@gmail.com',
    # Add includes to the options
    options = {'build_exe': {'excludes':excludes,'include_files':includefiles, 'includes':includes}},
    executables = [Executable('setupWin.py')]
)
reallyrehan/flask-fileexplorer | https://github.com/reallyrehan/flask-fileexplorer/blob/74f3f33900191e82460e8a515bc071267efc3ba5/backup/nosession-setup.py | backup/nosession-setup.py | from flask import Flask, render_template, request, send_file, redirect
import os
import sys
app = Flask(__name__)
def _load_list_file(fname):
    """Return the '\\n'-separated lines of *fname*, or [] if unreadable."""
    try:
        with open(fname, 'r') as fh:
            return fh.read().split('\n')
    except OSError:
        return []

# BUG FIX: the original called f.close() unconditionally after the try/except,
# so a missing hidden.txt / favorites.txt raised NameError on startup. The
# helper above closes the handle via `with` and returns [] on failure.
hiddenList = _load_list_file('hidden.txt')

# Favorites are stored with '/' separators on disk but used with '>' in URLs.
favList = [line.replace('/', '>') for line in _load_list_file('favorites.txt')]
if(len(favList)>3):
    favList=favList[0:3]

# Root directory served by the app (hard-coded development value; the
# original also had a dead `currentDirectory='/'` that was overwritten).
currentDirectory='/Users/rehan/Downloads'
def hidden(path):
    """Return True when *path* contains any configured hidden-path fragment."""
    return any(entry != '' and entry in path for entry in hiddenList)
def changeDirectory(path):
    """Change cwd to the '>'-encoded *path* under the served root.

    Returns True when the chdir succeeded and the resulting cwd is still
    inside currentDirectory, False otherwise.
    """
    global currentDirectory
    parts = path.split('>')
    if(parts[0]==""):
        parts.pop(0)
    target = currentDirectory+'/'+'/'.join(parts)
    print(target)
    try:
        os.chdir(target)
    except Exception:
        return False
    return currentDirectory in os.getcwd()
def getDirList():
    """List non-hidden subdirectories of the current working directory."""
    base = os.getcwd()
    return [name for name in os.listdir('.')
            if os.path.isdir(name) and not hidden(base+'/'+name)]
def getFileList():
    """List non-hidden regular files in the current working directory."""
    base = os.getcwd()
    return [name for name in os.listdir('.')
            if os.path.isfile(name) and not hidden(base+'/'+name)]
@app.route('/<var>', methods=['GET'])
def filePage(var):
    """Render the directory listing for the '>'-encoded path *var*."""
    if not changeDirectory(var):
        # Unknown directory, or a path escaping the served root.
        print("Directory Doesn't Exist")
        return render_template('404.html',errorCode=300,errorText='Invalid Directory Path')
    try:
        dirs = getDirList()
        files = getFileList()
    except Exception:
        return render_template('404.html',errorCode=200,errorText='Permission Denied')
    return render_template('home.html',dirList=dirs,fileList=files,currentDir=var,favList=favList)
@app.route('/', methods=['GET'])
def homePage():
    """Serve the listing of the configured root directory."""
    global currentDirectory
    os.chdir(currentDirectory)
    return render_template('home.html', dirList=getDirList(),
                           fileList=getFileList(), currentDir="",
                           favList=favList)
@app.route('/download/<var>')
def downloadFile(var):
    """Send the file at the '>'-encoded path *var* as an attachment."""
    global currentDirectory
    parts = var.split('>')
    if(parts[0]==''):
        parts.pop(0)
    fPath = currentDirectory+'/'+'/'.join(parts)
    if(hidden(fPath)):
        # Configured as hidden -- refuse to serve it.
        return render_template('404.html',errorCode=100,errorText='File Hidden')
    fName = parts[-1]
    try:
        return send_file(fPath, attachment_filename=fName)
    except Exception:
        return render_template('404.html',errorCode=200,errorText='Permission Denied')
@app.errorhandler(404)
def page_not_found(e):
    """Render the custom 404 page, setting the status code explicitly."""
    return render_template('404.html',errorCode=404,errorText='Page Not Found'), 404
if __name__ == '__main__':
    # NOTE(review): debug=True with host 0.0.0.0 exposes the Werkzeug debugger
    # (arbitrary code execution) to the whole network -- development only.
    app.run(host= '0.0.0.0',debug=True)
reallyrehan/flask-fileexplorer | https://github.com/reallyrehan/flask-fileexplorer/blob/74f3f33900191e82460e8a515bc071267efc3ba5/backup/setupWin.py | backup/setupWin.py | from flask import Flask, render_template, request, send_file, redirect, session
import os
import sys
import json
from flask_fontawesome import FontAwesome
import zipfile
import win32api
from werkzeug import secure_filename
import socket
hostname = socket.gethostname()  # machine name, used to build shareable URLs
IPAddr = socket.gethostbyname(hostname)  # best-effort LAN address of this host
print("Your Computer Name is: " + hostname)
print("Your Computer IP Address is: " + IPAddr)
app = Flask(__name__)

# Font Awesome assets for the templates.
fa = FontAwesome(app)

# NOTE(review): hard-coded session secret -- acceptable for LAN use, but it
# should be randomized or read from config for any real deployment.
app.secret_key = 'my_secret_key'

# User configuration: hidden path fragments, favorites, password, root dir.
with open('config.json') as json_data_file:
    data = json.load(json_data_file)

hiddenList = data["Hidden"]
favList = data["Favorites"]
password = data["Password"]
currentDirectory = data["rootDir"]

# BUG FIX: the original test `'win' in sys.platform` also matches 'darwin'
# (macOS); startswith only matches real Windows platforms such as 'win32'.
osWindows = sys.platform.startswith('win')

if(len(favList)>3):
    favList=favList[0:3]
if(len(favList)>0):
    for i in range(0,len(favList)):
        favList[i]=favList[i].replace('\\','>')  # encode separators for URLs
    # WINDOWS FEATURE: prepend the available drive letters to the favorites.
    drives = win32api.GetLogicalDriveStrings()
    drives=drives.replace('\\','')
    drives = drives.split('\000')[:-1]
    drives.extend(favList)
    favList=drives
def make_zipfile(output_filename, source_dir):
    """Zip *source_dir* (including empty directories) into *output_filename*.

    Entries are stored relative to source_dir's parent, so the archive
    unpacks into a single top-level folder named after source_dir.
    """
    relroot = os.path.abspath(os.path.join(source_dir, os.pardir))
    # Renamed from `zip`, which shadowed the builtin of the same name.
    with zipfile.ZipFile(output_filename, "w", zipfile.ZIP_DEFLATED) as archive:
        for root, dirs, files in os.walk(source_dir):
            # Add the directory entry itself (needed for empty dirs).
            archive.write(root, os.path.relpath(root, relroot))
            for file in files:
                filename = os.path.join(root, file)
                if os.path.isfile(filename):  # regular files only
                    arcname = os.path.join(os.path.relpath(root, relroot), file)
                    archive.write(filename, arcname)
@app.route('/login/')
def loginMethod():
    """Show the login form, or skip straight in when no password is set."""
    global password
    if password == '':
        session['login'] = True
    if 'login' in session:
        return redirect('/')
    return render_template('login.html')
@app.route('/login/', methods=['POST'])
def loginPost():
    """Validate the submitted password and mark the session logged in."""
    global password
    if request.form['text'] == password:
        session['login'] = True
        return redirect('/')
    return redirect('/login/')
@app.route('/logout/')
def logoutMethod():
    """Drop the login flag (if any) and return to the login page."""
    session.pop('login', None)
    return redirect('/login/')
#@app.route('/exit/')
#def exitMethod():
# exit()
def hidden(path):
    """Return True when *path* contains any configured hidden-path fragment."""
    return any(entry != '' and entry in path for entry in hiddenList)
def changeDirectory(path):
    """Change cwd to the '>'-encoded *path*; True on success inside root."""
    global currentDirectory, osWindows
    parts = path.split('>')
    print(parts)
    if osWindows:
        target = '//'.join(parts)+'//'
    else:
        target = '/'+'/'.join(parts)
    print(target)
    try:
        os.chdir(target)
    except Exception:
        return False
    return currentDirectory in os.getcwd()
def getDirList():
    """List non-hidden subdirectories of the current working directory."""
    base = os.getcwd()
    return [name for name in os.listdir('.')
            if os.path.isdir(name) and not hidden(base+'/'+name)]
def getFileList():
    """List non-hidden regular files in the current working directory."""
    base = os.getcwd()
    return [name for name in os.listdir('.')
            if os.path.isfile(name) and not hidden(base+'/'+name)]
@app.route('/<var>', methods=['GET'])
def filePage(var):
    """Render the directory listing for the '>'-encoded path *var*."""
    if 'login' not in session:
        return redirect('/login/')
    if not changeDirectory(var):
        # Unknown directory, or a path escaping the served root.
        print("Directory Doesn't Exist")
        return render_template('404.html',errorCode=300,errorText='Invalid Directory Path',favList=favList)
    try:
        dirs = getDirList()
        files = getFileList()
    except Exception:
        return render_template('404.html',errorCode=200,errorText='Permission Denied',favList=favList)
    return render_template('home.html',dirList=dirs,fileList=files,currentDir=var,favList=favList)
@app.route('/', methods=['GET'])
def homePage():
    """Redirect to the platform-appropriate root listing.

    Windows: C: drive (or the configured root, '\\' re-encoded as '>');
    other OSes: the filesystem root ('>').
    """
    global currentDirectory, osWindows
    if 'login' not in session:
        return redirect('/login/')
    if not osWindows:
        return redirect('/>')
    if currentDirectory == "":
        return redirect('/C:')
    return redirect('/' + '>'.join(currentDirectory.split('\\')))
#REDIRECT TO UNTITLED OR C DRIVE FOR WINDOWS OR / FOR MAC
@app.route('/download/<var>')
def downloadFile(var):
    """Send the file at the '>'-encoded path *var* as an attachment."""
    if 'login' not in session:
        return redirect('/login/')
    parts = var.split('>')
    if(parts[0]==''):
        parts.pop(0)
    fPath = '//'.join(parts)
    if(hidden(fPath)):
        # Configured as hidden -- refuse to serve it.
        return render_template('404.html',errorCode=100,errorText='File Hidden',favList=favList)
    fName = parts[-1]
    try:
        return send_file(fPath, attachment_filename=fName)
    except Exception:
        return render_template('404.html',errorCode=200,errorText='Permission Denied',favList=favList)
@app.route('/downloadFolder/<var>')
def downloadFolder(var):
    """Zip the current working directory and send it as <folder>.zip.

    NOTE(review): the zip is built from os.getcwd(), presumably set by a
    prior changeDirectory() call for this session -- confirm against callers.
    """
    if('login' not in session):
        return redirect('/login/')
    pathC = var.split('>')
    if(pathC[0]==''):
        pathC.remove(pathC[0])
    fPath = '//'.join(pathC)
    if(hidden(fPath)):
        # FILE HIDDEN
        return render_template('404.html',errorCode=100,errorText='File Hidden',favList=favList)
    fName = pathC[len(pathC)-1]+'.zip'
    # BUG FIX: the temp archive used to go to a hard-coded personal path
    # (C:\Users\reall\Downloads\temp\abc.zip); use the system temp dir.
    import tempfile
    archive_path = os.path.join(tempfile.gettempdir(), 'wifile_folder.zip')
    try:
        make_zipfile(archive_path, os.getcwd())
        return send_file(archive_path, attachment_filename=fName)
    except Exception:
        return render_template('404.html',errorCode=200,errorText='Permission Denied',favList=favList)
@app.errorhandler(404)
def page_not_found(e):
    """Render the custom 404 page (login first if not authenticated)."""
    if 'login' not in session:
        return redirect('/login/')
    # Set the 404 status explicitly.
    return render_template('404.html',errorCode=404,errorText='Page Not Found',favList=favList), 404
@app.route('/upload/<var>', methods = ['GET', 'POST'])
def uploadFile(var):
    """Accept multi-file uploads into the '>'-encoded directory *var*.

    Builds a per-file status report and renders it with success/failure
    counts. (A plain GET currently renders nothing, as before.)
    """
    if 'login' not in session:
        return render_template('login.html')
    text = ""
    if request.method == 'POST':
        parts = var.split('>')
        if parts[0] == '':
            parts.pop(0)
        fPath = '//'.join(parts)
        if hidden(fPath):
            # Target directory is configured as hidden.
            return render_template('404.html',errorCode=100,errorText='File Hidden',favList=favList)
        uploads = request.files.getlist('files[]')
        saved = 0
        for upload in uploads:
            upload.filename = secure_filename(upload.filename)  # ensure file name is secure
            destination = os.path.join(fPath, upload.filename)
            if os.path.exists(destination):
                print(upload.filename +
                      ' Failed because File Already Exists or File Type Issue')
                text = text + upload.filename + \
                    ' Failed because File Already Exists or File Type not secure <br>'
                continue
            try:
                upload.save(destination)
            except Exception as e:
                print(upload.filename + ' Failed with Exception '+str(e))
                text = text + upload.filename + \
                    ' Failed with Exception '+str(e) + '<br>'
                continue
            print(upload.filename + ' Uploaded')
            text = text + upload.filename + ' Uploaded<br>'
            saved = saved + 1
        return render_template('uploadsuccess.html', text=text, fileNo=saved,
                               fileNo2=len(uploads)-saved, favList=favList)
if __name__ == '__main__':
    # NOTE(review): debug=True with host 0.0.0.0 exposes the Werkzeug debugger
    # (arbitrary code execution) to the whole network -- development only.
    app.run(host= '0.0.0.0',debug=True)
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/setup.py | setup.py | # Copyright 2024, LLM-PySC2 Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import distutils.command.build
from setuptools import setup
# Long description used as the package's PyPI text (grammar fixed:
# "not yet publish" -> "not yet published").
description = """LLM-PySC2 - LLM StarCraft II Learning Environment
LLM-PySC2 is NKAI Decision Team and NUDT Decision Team's Python component of the StarCraft II
LLM Decision Environment. It exposes Deepmind's PySC2 Learning Environment API as a Python LLM
Environment. This is a collaboration between NKAI and NUDT to develop StarCraft II into a rich
environment for LLM research. LLM-PySC2 provides an interface for LLM agents to interact with
StarCraft 2, getting textual or multimodal observations and textual actions.
Consider that we have not yet published our paper, if you use the LLM-PySC2 environment or
LLM-SMAC tasks in your research, please cite the LLM StarCraft II github page temporarily.
Read the README for more information.
"""
class BuildCommand(distutils.command.build.build):
  """Build command that writes its output under `_build`."""

  def initialize_options(self):
    # Delegate to the standard build command, then redirect the output
    # directory to avoid conflicting with the Bazel BUILD file.
    super().initialize_options()
    self.build_base = '_build'
setup(
    name='llm-pysc2',
    version='0.1',
    description='LLM Starcraft II environment and library for training agents.',
    long_description=description,
    author='NKAI',
    author_email='734162621@qq.com',
    cmdclass={'build': BuildCommand},
    license='Apache License, Version 2.0',
    keywords='StarCraft AI',
    url='',
    packages=[
        'pysc2',
        'pysc2.agents',
        'pysc2.bin',
        'pysc2.env',
        'pysc2.lib',
        'pysc2.maps',
        'pysc2.run_configs',
        'pysc2.tests',
        'llm_pysc2',
        'llm_pysc2.agents',
        'llm_pysc2.bin',
        'llm_pysc2.lib',
    ],
    install_requires=[
        'absl-py>=0.1.0',
        'deepdiff',
        'dm_env',
        'enum34',
        'mock',
        'mpyq',
        'numpy>=1.10',
        'portpicker>=1.2.0',
        # NOTE(review): exact pins below (protobuf, openai) look deliberate
        # compatibility pins -- confirm before upgrading either.
        'protobuf==3.20.0',
        'openai==0.28',
        'pygame',
        'requests',
        's2clientprotocol>=4.10.1.75800.0',
        's2protocol',
        'sk-video',
        'websocket-client',
        'loguru',
        'pillow',
        'llamaapi',
        'zhipuai',
        # 'google-generativeai',
        # 'anthropic',
        # 'google',
    ],
    entry_points={
        'console_scripts': [
            'pysc2_agent = pysc2.bin.agent:entry_point',
            'pysc2_play = pysc2.bin.play:entry_point',
            'pysc2_replay_info = pysc2.bin.replay_info:entry_point',
        ],
    },
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: POSIX :: Linux',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: MacOS :: MacOS X',
        'Programming Language :: Python :: 3.9',  # llamaapi requires python > 3.9
        'Programming Language :: Python :: 3.10',
        'Programming Language :: Python :: 3.11',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
    ],
)
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/__init__.py | pysc2/__init__.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PySC2 module: https://github.com/deepmind/pysc2 ."""
import os
def load_tests(loader, standard_tests, unused_pattern):
  """Our tests end in `_test.py`, so need to override the test discovery."""
  package_dir = os.path.dirname(__file__)
  discovered = loader.discover(start_dir=package_dir, pattern="*_test.py")
  standard_tests.addTests(discovered)
  return standard_tests
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/env/enums.py | pysc2/env/enums.py | # Copyright 2021 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Enumerations used for configuring the SC2 environment."""
import enum
from s2clientprotocol import common_pb2 as sc_common
from s2clientprotocol import sc2api_pb2 as sc_pb
class Race(enum.IntEnum):
  """Playable races, mirroring the sc_common Race constants."""
  random = sc_common.Random
  protoss = sc_common.Protoss
  terran = sc_common.Terran
  zerg = sc_common.Zerg
class Difficulty(enum.IntEnum):
  """Bot difficulties, mirroring the sc2api difficulty constants.

  The cheat_* levels correspond to the proto's Cheat* values (their names
  indicate vision / resource cheating for the built-in AI).
  """
  very_easy = sc_pb.VeryEasy
  easy = sc_pb.Easy
  medium = sc_pb.Medium
  medium_hard = sc_pb.MediumHard
  hard = sc_pb.Hard
  harder = sc_pb.Harder
  very_hard = sc_pb.VeryHard
  cheat_vision = sc_pb.CheatVision
  cheat_money = sc_pb.CheatMoney
  cheat_insane = sc_pb.CheatInsane
class BotBuild(enum.IntEnum):
  """Bot build strategies, mirroring the sc2api *Build constants."""
  random = sc_pb.RandomBuild
  rush = sc_pb.Rush
  timing = sc_pb.Timing
  power = sc_pb.Power
  macro = sc_pb.Macro
  air = sc_pb.Air
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/env/mock_sc2_env_comparison_test.py | pysc2/env/mock_sc2_env_comparison_test.py | #!/usr/bin/python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests that mock environment has same shape outputs as true environment."""
from absl.testing import absltest
from pysc2.env import mock_sc2_env
from pysc2.env import sc2_env
class TestCompareEnvironments(absltest.TestCase):
  """Checks the mock env exposes the same specs as the real SC2Env."""

  @classmethod
  def setUpClass(cls):
    super(TestCompareEnvironments, cls).setUpClass()

    players = [
        sc2_env.Agent(race=sc2_env.Race.terran),
        sc2_env.Agent(race=sc2_env.Race.protoss),
    ]

    # Both environments are constructed from identical kwargs so their
    # observation/action specs should match exactly.
    kwargs = {
        'map_name': 'Flat64',
        'players': players,
        'agent_interface_format': [
            sc2_env.AgentInterfaceFormat(
                feature_dimensions=sc2_env.Dimensions(
                    screen=(32, 64),
                    minimap=(8, 16)
                ),
                rgb_dimensions=sc2_env.Dimensions(
                    screen=(31, 63),
                    minimap=(7, 15)
                ),
                action_space=sc2_env.ActionSpace.FEATURES
            ),
            sc2_env.AgentInterfaceFormat(
                rgb_dimensions=sc2_env.Dimensions(screen=64, minimap=32)
            )
        ]
    }

    cls._env = sc2_env.SC2Env(**kwargs)
    cls._mock_env = mock_sc2_env.SC2TestEnv(**kwargs)

  @classmethod
  def tearDownClass(cls):
    super(TestCompareEnvironments, cls).tearDownClass()
    cls._env.close()
    cls._mock_env.close()

  def test_observation_spec(self):
    self.assertEqual(self._env.observation_spec(),
                     self._mock_env.observation_spec())

  def test_action_spec(self):
    self.assertEqual(self._env.action_spec(), self._mock_env.action_spec())
if __name__ == '__main__':
  absltest.main()  # run the comparison test cases above
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/env/mock_sc2_env.py | pysc2/env/mock_sc2_env.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mocking the Starcraft II environment."""
import numpy as np
from pysc2.env import environment
from pysc2.env import sc2_env
from pysc2.lib import features
from pysc2.lib import units
from pysc2.tests import dummy_observation
from s2clientprotocol import common_pb2
from s2clientprotocol import raw_pb2
from s2clientprotocol import sc2api_pb2
DUMMY_MAP_SIZE = 256
class _TestEnvironment(environment.Base):
  """A simple generic test environment.

  This class is a lightweight implementation of `environment.Base` that returns
  the same timesteps on every observation call. By default, each returned
  timestep (one per agent) is reward 0., discount 1., and the observations are
  zero `np.ndarrays` of dtype `np.int32` and the shape specified by the
  environment's spec.

  However, the behavior of the `TestEnvironment` can be configured using the
  object's attributes.

  Attributes:
    next_timestep: The `environment.TimeStep`s to return on the next call to
      `step`. When necessary, some fields will be overridden to ensure the
      `step_type` contract.
    episode_length: if the episode length (number of transitions) exceeds
      `episode_length` on a call to `step`, the `step-type` will be set to
      `environment.StepType.LAST`, forcing an end of episode. This allows a
      stub of a production environment to have end_episodes. Will be ignored if
      set to `float('inf')` (the default).
  """

  def __init__(self, num_agents, observation_spec, action_spec):
    """Initializes the TestEnvironment.

    The `next_observation` is initialized to be reward = 0., discount = 1.,
    and an appropriately sized observation of all zeros. `episode_length` is set
    to `float('inf')`.

    Args:
      num_agents: The number of agents.
      observation_spec: The observation specs for each player.
      action_spec: The action specs for each player.
    """
    self._num_agents = num_agents
    self._observation_spec = observation_spec
    self._action_spec = action_spec
    self._episode_steps = 0  # transitions taken in the current episode
    self.next_timestep = []
    for agent_index, obs_spec in enumerate(observation_spec):
      self.next_timestep.append(environment.TimeStep(
          step_type=environment.StepType.MID,
          reward=0.,
          discount=1.,
          observation=self._default_observation(obs_spec, agent_index)))
    self.episode_length = float('inf')

  def reset(self):
    """Restarts episode and returns `next_observation` with `StepType.FIRST`."""
    self._episode_steps = 0
    # Stepping with dummy (None) actions produces the FIRST timesteps.
    return self.step([None] * self._num_agents)

  def step(self, actions, step_mul=None):
    """Returns `next_observation` modifying its `step_type` if necessary."""
    del step_mul  # ignored currently

    if len(actions) != self._num_agents:
      raise ValueError(
          'Expected %d actions, received %d.' % (
              self._num_agents, len(actions)))

    # Determine where we are in the FIRST/MID/LAST episode contract.
    if self._episode_steps == 0:
      step_type = environment.StepType.FIRST
    elif self._episode_steps >= self.episode_length:
      step_type = environment.StepType.LAST
    else:
      step_type = environment.StepType.MID

    # Override reward/discount where the step_type contract requires it.
    timesteps = []
    for timestep in self.next_timestep:
      if step_type is environment.StepType.FIRST:
        timesteps.append(timestep._replace(
            step_type=step_type,
            reward=0.,
            discount=0.))
      elif step_type is environment.StepType.LAST:
        timesteps.append(timestep._replace(
            step_type=step_type,
            discount=0.))
      else:
        timesteps.append(timestep)

    if timesteps[0].step_type is environment.StepType.LAST:
      self._episode_steps = 0
    else:
      self._episode_steps += 1

    return timesteps

  def action_spec(self):
    """See base class."""
    return self._action_spec

  def observation_spec(self):
    """See base class."""
    return self._observation_spec

  def _default_observation(self, obs_spec, agent_index):
    """Returns an observation based on the observation spec."""
    observation = {}
    for key, spec in obs_spec.items():
      observation[key] = np.zeros(shape=spec, dtype=np.int32)
    return observation
class SC2TestEnv(_TestEnvironment):
  """A TestEnvironment to swap in for `starcraft2.env.sc2_env.SC2Env`.

  Repeatedly returns a mock observation for 10 calls to `step` whereupon it
  sets discount to 0. and changes state to READY_TO_END_EPISODE.

  Example:
  ```
  @mock.patch(
      'starcraft2.env.sc2_env.SC2Env',
      mock_sc2_env.SC2TestEnv)
  def test_method(self):
    env = sc2_env.SC2Env('nonexisting map')  # Really a SC2TestEnv.
    ...
  ```

  See base class for more details.
  """

  def __init__(self,
               *,
               map_name=None,
               players=None,
               agent_interface_format=None,
               discount=1.,
               discount_zero_after_timeout=False,
               visualize=False,
               step_mul=None,
               realtime=False,
               save_replay_episodes=0,
               replay_dir=None,
               game_steps_per_episode=None,
               score_index=None,
               score_multiplier=None,
               random_seed=None,
               disable_fog=False,
               ensure_available_actions=True,
               version=None):
    """Initializes an SC2TestEnv.

    Args:
      map_name: Map name. Ignored.
      players: A list of Agent and Bot instances that specify who will play.
      agent_interface_format: A sequence containing one AgentInterfaceFormat per
        agent, matching the order of agents specified in the players list. Or
        a single AgentInterfaceFormat to be used for all agents. Note that
        InterfaceOptions may be supplied in place of AgentInterfaceFormat, in
        which case no action or observation processing will be carried out by
        PySC2. The sc_pb.ResponseObservation proto will be returned as the
        observation for the agent and passed actions must be instances of
        sc_pb.Action. This is intended for agents which use custom environment
        conversion code.
      discount: Unused.
      discount_zero_after_timeout: Unused.
      visualize: Unused.
      step_mul: Unused.
      realtime: Not supported by the mock environment, throws if set to true.
      save_replay_episodes: Unused.
      replay_dir: Unused.
      game_steps_per_episode: Unused.
      score_index: Unused.
      score_multiplier: Unused.
      random_seed: Unused.
      disable_fog: Unused.
      ensure_available_actions: Whether to throw an exception when an
        unavailable action is passed to step().
      version: Unused.

    Raises:
      ValueError: if args are passed.
    """
    del map_name  # Unused.
    del discount  # Unused.
    del discount_zero_after_timeout  # Unused.
    del visualize  # Unused.
    del step_mul  # Unused.
    del save_replay_episodes  # Unused.
    del replay_dir  # Unused.
    del game_steps_per_episode  # Unused.
    del score_index  # Unused.
    del score_multiplier  # Unused.
    del random_seed  # Unused.
    del disable_fog  # Unused.
    del ensure_available_actions  # Unused.
    del version  # Unused.

    if realtime:
      raise ValueError('realtime mode is not supported by the mock env.')

    if not players:
      players = [sc2_env.Agent(sc2_env.Race.random)]

    num_agents = sum(1 for p in players if isinstance(p, sc2_env.Agent))

    if agent_interface_format is None:
      raise ValueError('Please specify agent_interface_format.')

    # A single format is broadcast to all agents.
    if isinstance(agent_interface_format,
                  (sc2_env.AgentInterfaceFormat, sc2api_pb2.InterfaceOptions)):
      agent_interface_format = [agent_interface_format] * num_agents

    if len(agent_interface_format) != num_agents:
      raise ValueError(
          'The number of entries in agent_interface_format should '
          'correspond 1-1 with the number of agents.')

    self._game_info = _make_dummy_game_info(players, agent_interface_format)
    self._agent_interface_formats = agent_interface_format
    self._features = [
        features.features_from_game_info(
            game_info=g, agent_interface_format=aif)
        for g, aif in zip(self._game_info, self._agent_interface_formats)]

    super(SC2TestEnv, self).__init__(
        num_agents=num_agents,
        action_spec=tuple(f.action_spec() for f in self._features),
        observation_spec=tuple(f.observation_spec() for f in self._features))
    self.episode_length = 10  # forces an episode end every 10 transitions

  @property
  def game_info(self):
    return self._game_info

  def save_replay(self, *args, **kwargs):
    """Does nothing."""

  def _default_observation(self, obs_spec, agent_index):
    """Returns a mock observation from an SC2Env."""
    builder = dummy_observation.Builder(obs_spec).game_loop(0)

    aif = self._agent_interface_formats[agent_index]
    if (isinstance(aif, sc2_env.AgentInterfaceFormat) and
        (aif.use_feature_units or aif.use_raw_units)):
      # Provide a single neutral unit so unit-observation code paths run.
      feature_units = [
          dummy_observation.FeatureUnit(
              units.Neutral.LabBot,
              features.PlayerRelative.NEUTRAL,
              owner=16,
              pos=common_pb2.Point(x=10, y=10, z=0),
              radius=1.0,
              health=5,
              health_max=5,
              is_on_screen=True,
          )
      ]

      builder.feature_units(feature_units)

    response_observation = builder.build()
    features_ = self._features[agent_index]
    observation = features_.transform_obs(response_observation)

    # Add bounding box for the minimap camera in top left of feature screen.
    if hasattr(observation, 'feature_minimap'):
      minimap_camera = observation.feature_minimap.camera
      minimap_camera.fill(0)
      height, width = [dim // 2 for dim in minimap_camera.shape]
      minimap_camera[:height, :width].fill(1)

    return observation
def _make_dummy_game_info(players, interface_formats):
  """Builds dummy `ResponseGameInfo` protos for the given players/formats."""
  player_info = []
  for player_id, player in enumerate(players, start=1):
    if isinstance(player, sc2_env.Agent):
      player_info.append(
          sc2api_pb2.PlayerInfo(
              player_id=player_id,
              type=sc2api_pb2.PlayerType.Participant,
              race_requested=player.race[0],
              player_name=player.name))
    else:
      player_info.append(
          sc2api_pb2.PlayerInfo(
              player_id=player_id,
              type=sc2api_pb2.PlayerType.Computer,
              race_requested=player.race[0],
              difficulty=player.difficulty,
              ai_build=player.build[0],
              player_name=player.difficulty.name))

  game_infos = []
  # zip() truncates to the shorter of players/interface_formats, mirroring
  # the original pairing behaviour.
  for _, aif in zip(players, interface_formats):
    info = sc2api_pb2.ResponseGameInfo(
        player_info=player_info,
        start_raw=raw_pb2.StartRaw(
            map_size=common_pb2.Size2DI(x=DUMMY_MAP_SIZE, y=DUMMY_MAP_SIZE)))
    if isinstance(aif, sc2api_pb2.InterfaceOptions):
      # Raw interface options are copied through untouched.
      info.options.CopyFrom(aif)
    else:
      if aif.feature_dimensions is not None:
        fd = aif.feature_dimensions
        info.options.feature_layer.resolution.x = fd.screen.x
        info.options.feature_layer.resolution.y = fd.screen.y
        info.options.feature_layer.minimap_resolution.x = fd.minimap.x
        info.options.feature_layer.minimap_resolution.y = fd.minimap.y
        info.options.feature_layer.width = aif.camera_width_world_units
      if aif.rgb_dimensions is not None:
        rd = aif.rgb_dimensions
        info.options.render.resolution.x = rd.screen.x
        info.options.render.resolution.y = rd.screen.y
        info.options.render.minimap_resolution.x = rd.minimap.x
        info.options.render.minimap_resolution.y = rd.minimap.y
        info.options.render.width = aif.camera_width_world_units
    game_infos.append(info)

  return game_infos
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/env/sc2_env_test.py | pysc2/env/sc2_env_test.py | #!/usr/bin/python
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for sc2_env."""
from absl.testing import absltest
from absl.testing import parameterized
from pysc2.env import sc2_env
class TestNameCroppingAndDeduplication(parameterized.TestCase):
  """Tests for sc2_env.crop_and_deduplicate_names.

  Each case is (test_name, input_names, expected_output). Expected values
  assume a 32-character crop limit and a "(n) " prefix for duplicates.
  """

  @parameterized.named_parameters(
      ("empty", [], []),
      ("single_no_crop", ["agent_1"], ["agent_1"]),
      ("single_cropped",
       ["very_long_agent_name_experimental_1"],
       ["very_long_agent_name_experimenta"]),
      ("no_dupes_no_crop",
       ["agent_1", "agent_2"],
       ["agent_1", "agent_2"]),
      ("no_dupes_cropped",
       ["a_very_long_agent_name_experimental",
        "b_very_long_agent_name_experimental"],
       ["a_very_long_agent_name_experimen",
        "b_very_long_agent_name_experimen"]),
      ("dupes_no_crop",
       ["agent_1", "agent_1"],
       ["(1) agent_1", "(2) agent_1"]),
      ("dupes_cropped",
       # Names that only collide after cropping must still be de-duplicated.
       ["very_long_agent_name_experimental_c123",
        "very_long_agent_name_experimental_c456"],
       ["(1) very_long_agent_name_experim",
        "(2) very_long_agent_name_experim"]),
  )
  def test(self, names, expected_output):
    self.assertEqual(sc2_env.crop_and_deduplicate_names(names), expected_output)
if __name__ == "__main__":
absltest.main()
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/env/sc2_env.py | pysc2/env/sc2_env.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Starcraft II environment."""
# pylint: disable=g-complex-comprehension
import collections
import copy
import random
import time
from absl import logging
from pysc2 import maps
from pysc2 import run_configs
from pysc2.env import enums
from pysc2.env import environment
from pysc2.lib import actions as actions_lib
from pysc2.lib import features
from pysc2.lib import metrics
from pysc2.lib import portspicker
from pysc2.lib import renderer_human
from pysc2.lib import run_parallel
from pysc2.lib import stopwatch
from s2clientprotocol import sc2api_pb2 as sc_pb
sw = stopwatch.sw  # Shared stopwatch; `@sw.decorate` below profiles env methods.

# Map game-result protos to scalar episode outcomes (used in `_observe`).
possible_results = {
    sc_pb.Victory: 1,
    sc_pb.Defeat: -1,
    sc_pb.Tie: 0,
    sc_pb.Undecided: 0,
}

# Convenience aliases for the enums used when declaring Agents and Bots.
Race = enums.Race
Difficulty = enums.Difficulty
BotBuild = enums.BotBuild

# Re-export these names to make it easy to construct the environment.
ActionSpace = actions_lib.ActionSpace  # pylint: disable=invalid-name
Dimensions = features.Dimensions  # pylint: disable=invalid-name
AgentInterfaceFormat = features.AgentInterfaceFormat  # pylint: disable=invalid-name
parse_agent_interface_format = features.parse_agent_interface_format
def to_list(arg):
  """Wrap `arg` in a single-element list unless it already is a list."""
  if isinstance(arg, list):
    return arg
  return [arg]
def get_default(a, b):
  """Return `a`, falling back to `b` only when `a` is None."""
  if a is None:
    return b
  return a
class Agent(collections.namedtuple("Agent", ["race", "name"])):
  """Define an Agent. It can have a single race or a list of races."""

  def __new__(cls, race, name=None):
    # Normalize `race` to a list; default the display name when absent.
    races = race if isinstance(race, list) else [race]
    return super().__new__(cls, races, name or "<unknown>")
class Bot(collections.namedtuple("Bot", ["race", "difficulty", "build"])):
  """Define a Bot. It can have a single or list of races or builds."""

  def __new__(cls, race, difficulty, build=None):
    races = race if isinstance(race, list) else [race]
    # A falsy `build` (e.g. None) falls back to a random build, matching the
    # original `build or BotBuild.random` semantics.
    chosen_build = build or BotBuild.random
    builds = chosen_build if isinstance(chosen_build, list) else [chosen_build]
    return super().__new__(cls, races, difficulty, builds)
# An action bound to the future game loop at which it should be sent
# (scheduled by SC2Env._apply_action_delays, drained by _send_delayed_actions).
_DelayedAction = collections.namedtuple(
    "DelayedAction", ["game_loop", "action"])

# Realtime mode advances at 22.4 game loops per real-time second.
REALTIME_GAME_LOOP_SECONDS = 1 / 22.4

MAX_STEP_COUNT = 524000  # The game fails above 2^19=524288 steps.

# Number of histogram buckets used to track realtime action delays.
NUM_ACTION_DELAY_BUCKETS = 10
class SC2Env(environment.Base):
"""A Starcraft II environment.
The implementation details of the action and observation specs are in
lib/features.py
"""
def __init__(self,
*,
map_name=None,
battle_net_map=False,
players=None,
agent_interface_format=None,
discount=1.,
discount_zero_after_timeout=False,
visualize=False,
step_mul=None,
realtime=False,
save_replay_episodes=0,
replay_dir=None,
replay_prefix=None,
game_steps_per_episode=None,
score_index=None,
score_multiplier=None,
random_seed=None,
disable_fog=False,
ensure_available_actions=True,
version=None):
"""Create a SC2 Env.
You must pass a resolution that you want to play at. You can send either
feature layer resolution or rgb resolution or both. If you send both you
must also choose which to use as your action space. Regardless of which you
choose you must send both the screen and minimap resolutions.
For each of the 4 resolutions, either specify size or both width and
height. If you specify size then both width and height will take that value.
Args:
map_name: Name of a SC2 map. Run bin/map_list to get the full list of
known maps. Alternatively, pass a Map instance. Take a look at the
docs in maps/README.md for more information on available maps. Can
also be a list of map names or instances, in which case one will be
chosen at random per episode.
battle_net_map: Whether to use the battle.net versions of the map(s).
players: A list of Agent and Bot instances that specify who will play.
agent_interface_format: A sequence containing one AgentInterfaceFormat per
agent, matching the order of agents specified in the players list. Or
a single AgentInterfaceFormat to be used for all agents. Note that
InterfaceOptions may be supplied in place of AgentInterfaceFormat, in
which case no action or observation processing will be carried out by
PySC2. The sc_pb.ResponseObservation proto will be returned as the
observation for the agent and passed actions must be instances of
sc_pb.Action. This is intended for agents which use custom environment
conversion code.
discount: Returned as part of the observation.
discount_zero_after_timeout: If True, the discount will be zero
after the `game_steps_per_episode` timeout.
visualize: Whether to pop up a window showing the camera and feature
layers. This won't work without access to a window manager.
step_mul: How many game steps per agent step (action/observation). None
means use the map default.
realtime: Whether to use realtime mode. In this mode the game simulation
automatically advances (at 22.4 gameloops per second) rather than
being stepped manually. The number of game loops advanced with each
call to step() won't necessarily match the step_mul specified. The
environment will attempt to honour step_mul, returning observations
with that spacing as closely as possible. Game loops will be skipped
if they cannot be retrieved and processed quickly enough.
save_replay_episodes: Save a replay after this many episodes. Default of 0
means don't save replays.
replay_dir: Directory to save replays. Required with save_replay_episodes.
replay_prefix: An optional prefix to use when saving replays.
game_steps_per_episode: Game steps per episode, independent of the
step_mul. 0 means no limit. None means use the map default.
score_index: -1 means use the win/loss reward, >=0 is the index into the
score_cumulative with 0 being the curriculum score. None means use
the map default.
score_multiplier: How much to multiply the score by. Useful for negating.
random_seed: Random number seed to use when initializing the game. This
lets you run repeatable games/tests.
disable_fog: Whether to disable fog of war.
ensure_available_actions: Whether to throw an exception when an
unavailable action is passed to step().
version: The version of SC2 to use, defaults to the latest.
Raises:
ValueError: if no map is specified.
ValueError: if wrong number of players are requested for a map.
ValueError: if the resolutions aren't specified correctly.
"""
if not players:
raise ValueError("You must specify the list of players.")
for p in players:
if not isinstance(p, (Agent, Bot)):
raise ValueError(
"Expected players to be of type Agent or Bot. Got: %s." % p)
num_players = len(players)
self._num_agents = sum(1 for p in players if isinstance(p, Agent))
self._players = players
if not 1 <= num_players <= 2 or not self._num_agents:
raise ValueError("Only 1 or 2 players with at least one agent is "
"supported at the moment.")
if not map_name:
raise ValueError("Missing a map name.")
self._battle_net_map = battle_net_map
self._maps = [maps.get(name) for name in to_list(map_name)]
min_players = min(m.players for m in self._maps)
max_players = max(m.players for m in self._maps)
if self._battle_net_map:
for m in self._maps:
if not m.battle_net:
raise ValueError("%s isn't known on Battle.net" % m.name)
if max_players == 1:
if self._num_agents != 1:
raise ValueError("Single player maps require exactly one Agent.")
elif not 2 <= num_players <= min_players:
raise ValueError(
"Maps support 2 - %s players, but trying to join with %s" % (
min_players, num_players))
if save_replay_episodes and not replay_dir:
raise ValueError("Missing replay_dir")
self._realtime = realtime
self._last_step_time = None
self._save_replay_episodes = save_replay_episodes
self._replay_dir = replay_dir
self._replay_prefix = replay_prefix
self._random_seed = random_seed
self._disable_fog = disable_fog
self._ensure_available_actions = ensure_available_actions
self._discount = discount
self._discount_zero_after_timeout = discount_zero_after_timeout
self._default_step_mul = step_mul
self._default_score_index = score_index
self._default_score_multiplier = score_multiplier
self._default_episode_length = game_steps_per_episode
self._run_config = run_configs.get(version=version)
self._parallel = run_parallel.RunParallel() # Needed for multiplayer.
self._game_info = None
self._requested_races = None
if agent_interface_format is None:
raise ValueError("Please specify agent_interface_format.")
if isinstance(agent_interface_format,
(AgentInterfaceFormat, sc_pb.InterfaceOptions)):
agent_interface_format = [agent_interface_format] * self._num_agents
if len(agent_interface_format) != self._num_agents:
raise ValueError(
"The number of entries in agent_interface_format should "
"correspond 1-1 with the number of agents.")
self._action_delay_fns = [
aif.action_delay_fn if isinstance(aif, AgentInterfaceFormat) else None
for aif in agent_interface_format
]
self._interface_formats = agent_interface_format
self._interface_options = [
self._get_interface(interface_format, require_raw=visualize and i == 0)
for i, interface_format in enumerate(agent_interface_format)]
self._launch_game()
self._create_join()
self._finalize(visualize)
def _finalize(self, visualize):
self._delayed_actions = [collections.deque()
for _ in self._action_delay_fns]
if visualize:
self._renderer_human = renderer_human.RendererHuman()
self._renderer_human.init(
self._controllers[0].game_info(),
self._controllers[0].data())
else:
self._renderer_human = None
self._metrics = metrics.Metrics(self._map_name)
self._metrics.increment_instance()
self._last_score = None
self._total_steps = 0
self._episode_steps = 0
self._episode_count = 0
self._obs = [None] * self._num_agents
self._agent_obs = [None] * self._num_agents
self._state = environment.StepType.LAST # Want to jump to `reset`.
logging.info("Environment is ready")
  @staticmethod
  def _get_interface(interface_format, require_raw):
    """Build the sc_pb.InterfaceOptions to request for one agent.

    Args:
      interface_format: Either an sc_pb.InterfaceOptions used (nearly)
        verbatim, or an AgentInterfaceFormat to convert into one.
      require_raw: Whether the raw interface must be enabled regardless of
        the requested format (set for agent 0 when visualizing).

    Returns:
      An sc_pb.InterfaceOptions proto.
    """
    if isinstance(interface_format, sc_pb.InterfaceOptions):
      if require_raw and not interface_format.raw:
        # Deep-copy so the caller's proto isn't mutated when forcing raw on.
        interface_options = copy.deepcopy(interface_format)
        interface_options.raw = True
        return interface_options
      else:
        return interface_format
    aif = interface_format
    # Raw data is needed both when the format asks for raw-derived features
    # and when the caller demands it (e.g. for rendering).
    interface = sc_pb.InterfaceOptions(
        raw=(aif.use_feature_units or
             aif.use_unit_counts or
             aif.use_raw_units or
             require_raw),
        show_cloaked=aif.show_cloaked,
        show_burrowed_shadows=aif.show_burrowed_shadows,
        show_placeholders=aif.show_placeholders,
        raw_affects_selection=True,
        raw_crop_to_playable_area=aif.raw_crop_to_playable_area,
        score=True)
    # Copy feature-layer (screen + minimap) resolutions when requested.
    if aif.feature_dimensions:
      interface.feature_layer.width = aif.camera_width_world_units
      aif.feature_dimensions.screen.assign_to(
          interface.feature_layer.resolution)
      aif.feature_dimensions.minimap.assign_to(
          interface.feature_layer.minimap_resolution)
      interface.feature_layer.crop_to_playable_area = aif.crop_to_playable_area
      interface.feature_layer.allow_cheating_layers = aif.allow_cheating_layers
    # Copy RGB render resolutions when requested.
    if aif.rgb_dimensions:
      aif.rgb_dimensions.screen.assign_to(interface.render.resolution)
      aif.rgb_dimensions.minimap.assign_to(interface.render.minimap_resolution)
    return interface
def _launch_game(self):
# Reserve a whole bunch of ports for the weird multiplayer implementation.
if self._num_agents > 1:
self._ports = portspicker.pick_unused_ports(self._num_agents * 2)
logging.info("Ports used for multiplayer: %s", self._ports)
else:
self._ports = []
# Actually launch the game processes.
self._sc2_procs = [
self._run_config.start(want_rgb=interface.HasField("render"))
# self._run_config.start(extra_ports=self._ports,
# want_rgb=interface.HasField("render"))
for interface in self._interface_options]
self._controllers = [p.controller for p in self._sc2_procs]
if self._battle_net_map:
available_maps = self._controllers[0].available_maps()
available_maps = set(available_maps.battlenet_map_names)
unavailable = [m.name for m in self._maps
if m.battle_net not in available_maps]
if unavailable:
raise ValueError("Requested map(s) not in the battle.net cache: %s"
% ",".join(unavailable))
def _create_join(self):
"""Create the game, and join it."""
map_inst = random.choice(self._maps)
self._map_name = map_inst.name
self._step_mul = max(1, self._default_step_mul or map_inst.step_mul)
self._score_index = get_default(self._default_score_index,
map_inst.score_index)
self._score_multiplier = get_default(self._default_score_multiplier,
map_inst.score_multiplier)
self._episode_length = get_default(self._default_episode_length,
map_inst.game_steps_per_episode)
if self._episode_length <= 0 or self._episode_length > MAX_STEP_COUNT:
self._episode_length = MAX_STEP_COUNT
# Create the game. Set the first instance as the host.
create = sc_pb.RequestCreateGame(
disable_fog=self._disable_fog,
realtime=self._realtime)
if self._battle_net_map:
create.battlenet_map_name = map_inst.battle_net
else:
create.local_map.map_path = map_inst.path
map_data = map_inst.data(self._run_config)
if self._num_agents == 1:
create.local_map.map_data = map_data
else:
# Save the maps so they can access it. Don't do it in parallel since SC2
# doesn't respect tmpdir on windows, which leads to a race condition:
# https://github.com/Blizzard/s2client-proto/issues/102
for c in self._controllers:
c.save_map(map_inst.path, map_data)
if self._random_seed is not None:
create.random_seed = self._random_seed
for p in self._players:
if isinstance(p, Agent):
create.player_setup.add(type=sc_pb.Participant)
else:
create.player_setup.add(
type=sc_pb.Computer, race=random.choice(p.race),
difficulty=p.difficulty, ai_build=random.choice(p.build))
self._controllers[0].create_game(create)
# Create the join requests.
agent_players = [p for p in self._players if isinstance(p, Agent)]
sanitized_names = crop_and_deduplicate_names(p.name for p in agent_players)
join_reqs = []
for p, name, interface in zip(agent_players, sanitized_names,
self._interface_options):
join = sc_pb.RequestJoinGame(options=interface)
join.race = random.choice(p.race)
join.player_name = name
if self._ports:
join.shared_port = 0 # unused
join.server_ports.game_port = self._ports[0]
join.server_ports.base_port = self._ports[1]
for i in range(self._num_agents - 1):
join.client_ports.add(game_port=self._ports[i * 2 + 2],
base_port=self._ports[i * 2 + 3])
join_reqs.append(join)
# Join the game. This must be run in parallel because Join is a blocking
# call to the game that waits until all clients have joined.
self._parallel.run((c.join_game, join)
for c, join in zip(self._controllers, join_reqs))
self._game_info = self._parallel.run(c.game_info for c in self._controllers)
for g, interface in zip(self._game_info, self._interface_options):
if g.options.render != interface.render:
logging.warning(
"Actual interface options don't match requested options:\n"
"Requested:\n%s\n\nActual:\n%s", interface, g.options)
self._features = [
features.features_from_game_info(
game_info=g, agent_interface_format=aif, map_name=self._map_name)
for g, aif in zip(self._game_info, self._interface_formats)]
self._requested_races = {
info.player_id: info.race_requested
for info in self._game_info[0].player_info
if info.type != sc_pb.Observer
}
  @property
  def map_name(self):
    """Name of the map chosen for the current episode (see _create_join)."""
    return self._map_name

  @property
  def game_info(self):
    """A list of ResponseGameInfo, one per agent."""
    return self._game_info

  def static_data(self):
    """Static game data, fetched from the first controller."""
    return self._controllers[0].data()

  def observation_spec(self):
    """Look at Features for full specs."""
    return tuple(f.observation_spec() for f in self._features)

  def action_spec(self):
    """Look at Features for full specs."""
    return tuple(f.action_spec() for f in self._features)
def action_delays(self):
"""In realtime we track the delay observation -> action executed.
Returns:
A list per agent of action delays, where action delays are a list where
the index in the list corresponds to the delay in game loops, the value
at that index the count over the course of an episode.
Raises:
ValueError: If called when not in realtime mode.
"""
if not self._realtime:
raise ValueError("This method is only supported in realtime mode")
return self._action_delays
  def _restart(self):
    """Restart the episode: in-place when possible, else re-create/join.

    A fast in-place restart is only valid when nothing can change between
    episodes (single player, single fixed race, single map); otherwise the
    clients leave and a fresh game is created and joined.
    """
    if (len(self._players) == 1 and len(self._players[0].race) == 1 and
        len(self._maps) == 1):
      # Need to support restart for fast-restart of mini-games.
      self._controllers[0].restart()
    else:
      if len(self._controllers) > 1:
        self._parallel.run(c.leave for c in self._controllers)
      self._create_join()
@sw.decorate
def reset(self):
"""Start a new episode."""
self._episode_steps = 0
if self._episode_count:
# No need to restart for the first episode.
self._restart()
self._episode_count += 1
races = [Race(r).name for _, r in sorted(self._requested_races.items())]
logging.info("Starting episode %s: [%s] on %s",
self._episode_count, ", ".join(races), self._map_name)
self._metrics.increment_episode()
self._last_score = [0] * self._num_agents
self._state = environment.StepType.FIRST
if self._realtime:
self._last_step_time = time.time()
self._last_obs_game_loop = None
self._action_delays = [[0] * NUM_ACTION_DELAY_BUCKETS] * self._num_agents
return self._observe(target_game_loop=0)
@sw.decorate("step_env")
def step(self, actions, step_mul=None):
"""Apply actions, step the world forward, and return observations.
Args:
actions: A list of actions meeting the action spec, one per agent, or a
list per agent. Using a list allows multiple actions per frame, but
will still check that they're valid, so disabling
ensure_available_actions is encouraged.
step_mul: If specified, use this rather than the environment's default.
Returns:
A tuple of TimeStep namedtuples, one per agent.
"""
if self._state == environment.StepType.LAST:
return self.reset()
skip = not self._ensure_available_actions
actions = [[f.transform_action(o.observation, a, skip_available=skip)
for a in to_list(acts)]
for f, o, acts in zip(self._features, self._obs, actions)]
if not self._realtime:
actions = self._apply_action_delays(actions)
self._parallel.run((c.actions, sc_pb.RequestAction(actions=a))
for c, a in zip(self._controllers, actions))
self._state = environment.StepType.MID
return self._step(step_mul)
  def _step(self, step_mul=None):
    """Advance the simulation by `step_mul` game loops and observe.

    In non-realtime mode, first sends any queued delayed actions scheduled
    before the target game loop, stepping incrementally as needed.
    """
    step_mul = step_mul or self._step_mul
    if step_mul <= 0:
      raise ValueError("step_mul should be positive, got {}".format(step_mul))
    target_game_loop = self._episode_steps + step_mul
    if not self._realtime:
      # Send any delayed actions that were scheduled up to the target game loop.
      current_game_loop = self._send_delayed_actions(
          up_to_game_loop=target_game_loop,
          current_game_loop=self._episode_steps)
      self._step_to(game_loop=target_game_loop,
                    current_game_loop=current_game_loop)
    return self._observe(target_game_loop=target_game_loop)
  def _apply_action_delays(self, actions):
    """Apply action delays to the requested actions, if configured to.

    For each agent, each non-empty action is either queued onto that agent's
    `_delayed_actions` deque (when its delay_fn returns a delay > 1) or kept
    for immediate sending.

    Args:
      actions: A list (per agent) of lists of transformed actions.

    Returns:
      The per-agent lists of actions to send this frame (delayed ones
      removed; they are sent later by `_send_delayed_actions`).
    """
    assert not self._realtime
    actions_now = []
    for actions_for_player, delay_fn, delayed_actions in zip(
        actions, self._action_delay_fns, self._delayed_actions):
      actions_now_for_player = []
      for action in actions_for_player:
        delay = delay_fn() if delay_fn else 1
        if delay > 1 and action.ListFields():  # Skip no-ops.
          game_loop = self._episode_steps + delay - 1
          # Randomized delays mean that 2 delay actions can be reversed.
          # Make sure that doesn't happen.
          if delayed_actions:
            game_loop = max(game_loop, delayed_actions[-1].game_loop)
          # Don't send an action this frame.
          delayed_actions.append(_DelayedAction(game_loop, action))
        else:
          actions_now_for_player.append(action)
      actions_now.append(actions_now_for_player)
    return actions_now
def _send_delayed_actions(self, up_to_game_loop, current_game_loop):
"""Send any delayed actions scheduled for up to the specified game loop."""
assert not self._realtime
while True:
if not any(self._delayed_actions): # No queued actions
return current_game_loop
act_game_loop = min(d[0].game_loop for d in self._delayed_actions if d)
if act_game_loop > up_to_game_loop:
return current_game_loop
self._step_to(act_game_loop, current_game_loop)
current_game_loop = act_game_loop
if self._controllers[0].status_ended:
# We haven't observed and may have hit game end.
return current_game_loop
actions = []
for d in self._delayed_actions:
if d and d[0].game_loop == current_game_loop:
delayed_action = d.popleft()
actions.append(delayed_action.action)
else:
actions.append(None)
self._parallel.run((c.act, a) for c, a in zip(self._controllers, actions))
def _step_to(self, game_loop, current_game_loop):
step_mul = game_loop - current_game_loop
if step_mul < 0:
raise ValueError("We should never need to step backwards")
if step_mul > 0:
with self._metrics.measure_step_time(step_mul):
if not self._controllers[0].status_ended: # May already have ended.
self._parallel.run((c.step, step_mul) for c in self._controllers)
def _get_observations(self, target_game_loop):
# Transform in the thread so it runs while waiting for other observations.
def parallel_observe(c, f):
obs = c.observe(target_game_loop=target_game_loop)
agent_obs = f.transform_obs(obs)
return obs, agent_obs
with self._metrics.measure_observation_time():
self._obs, self._agent_obs = zip(*self._parallel.run(
(parallel_observe, c, f)
for c, f in zip(self._controllers, self._features)))
game_loop = _get_game_loop(self._agent_obs[0])
if (game_loop < target_game_loop and
not any(o.player_result for o in self._obs)):
raise ValueError(
("The game didn't advance to the expected game loop. "
"Expected: %s, got: %s") % (target_game_loop, game_loop))
elif game_loop > target_game_loop and target_game_loop > 0:
logging.warning(
"Received observation %d step(s) late: %d rather than %d.",
game_loop - target_game_loop, game_loop, target_game_loop)
if self._realtime:
# Track delays on executed actions.
# Note that this will underestimate e.g. action sent, new observation
# taken before action executes, action executes, observation taken
# with action. This is difficult to avoid without changing the SC2
# binary - e.g. send the observation game loop with each action,
# return them in the observation action proto.
if self._last_obs_game_loop is not None:
for i, obs in enumerate(self._obs):
for action in obs.actions:
if action.HasField("game_loop"):
delay = action.game_loop - self._last_obs_game_loop
if delay > 0:
num_slots = len(self._action_delays[i])
delay = min(delay, num_slots - 1) # Cap to num buckets.
self._action_delays[i][delay] += 1
break
self._last_obs_game_loop = game_loop
def _observe(self, target_game_loop):
self._get_observations(target_game_loop)
# TODO(tewalds): How should we handle more than 2 agents and the case where
# the episode can end early for some agents?
outcome = [0] * self._num_agents
discount = self._discount
episode_complete = any(o.player_result for o in self._obs)
if episode_complete:
self._state = environment.StepType.LAST
discount = 0
for i, o in enumerate(self._obs):
player_id = o.observation.player_common.player_id
for result in o.player_result:
if result.player_id == player_id:
outcome[i] = possible_results.get(result.result, 0)
if self._score_index >= 0: # Game score, not win/loss reward.
cur_score = [_get_score(o, self._score_index) for o in self._agent_obs]
if self._episode_steps == 0: # First reward is always 0.
reward = [0] * self._num_agents
else:
reward = [cur - last for cur, last in zip(cur_score, self._last_score)]
self._last_score = cur_score
else:
reward = outcome
if self._renderer_human:
self._renderer_human.render(self._obs[0])
cmd = self._renderer_human.get_actions(
self._run_config, self._controllers[0])
if cmd == renderer_human.ActionCmd.STEP:
pass
elif cmd == renderer_human.ActionCmd.RESTART:
self._state = environment.StepType.LAST
elif cmd == renderer_human.ActionCmd.QUIT:
raise KeyboardInterrupt("Quit?")
game_loop = _get_game_loop(self._agent_obs[0])
self._total_steps += game_loop - self._episode_steps
self._episode_steps = game_loop
if self._episode_steps >= self._episode_length:
self._state = environment.StepType.LAST
if self._discount_zero_after_timeout:
discount = 0.0
if self._episode_steps >= MAX_STEP_COUNT:
logging.info("Cut short to avoid SC2's max step count of 2^19=524288.")
if self._state == environment.StepType.LAST:
if (self._save_replay_episodes > 0 and
self._episode_count % self._save_replay_episodes == 0):
self.save_replay(self._replay_dir, self._replay_prefix)
logging.info(("Episode %s finished after %s game steps. "
"Outcome: %s, reward: %s, score: %s"),
self._episode_count, self._episode_steps, outcome, reward,
[_get_score(o) for o in self._agent_obs])
def zero_on_first_step(value):
return 0.0 if self._state == environment.StepType.FIRST else value
return tuple(environment.TimeStep(
step_type=self._state,
reward=zero_on_first_step(r * self._score_multiplier),
discount=zero_on_first_step(discount),
observation=o) for r, o in zip(reward, self._agent_obs))
def send_chat_messages(self, messages, broadcast=True):
"""Useful for logging messages into the replay."""
self._parallel.run(
(c.chat,
message,
sc_pb.ActionChat.Broadcast if broadcast else sc_pb.ActionChat.Team)
for c, message in zip(self._controllers, messages))
def save_replay(self, replay_dir, prefix=None):
if prefix is None:
prefix = self._map_name
replay_path = self._run_config.save_replay(
self._controllers[0].save_replay(), replay_dir, prefix)
logging.info("Wrote replay to: %s", replay_path)
return replay_path
  def close(self):
    """Shut down renderer, game processes and helpers; release ports.

    The `hasattr`/truthiness guards let close() run safely even if __init__
    raised before all attributes were assigned, and make a second call a
    no-op (each resource is set to None after release).
    """
    logging.info("Environment Close")
    if hasattr(self, "_metrics") and self._metrics:
      self._metrics.close()
      self._metrics = None
    if hasattr(self, "_renderer_human") and self._renderer_human:
      self._renderer_human.close()
      self._renderer_human = None
    # Don't use parallel since it might be broken by an exception.
    if hasattr(self, "_controllers") and self._controllers:
      for c in self._controllers:
        c.quit()
      self._controllers = None
    if hasattr(self, "_sc2_procs") and self._sc2_procs:
      for p in self._sc2_procs:
        p.close()
      self._sc2_procs = None
    if hasattr(self, "_ports") and self._ports:
      # Return the multiplayer ports to the shared pool for reuse.
      portspicker.return_ports(self._ports)
      self._ports = None
    if hasattr(self, "_parallel") and self._parallel is not None:
      self._parallel.shutdown()
      self._parallel = None
    self._game_info = None
def crop_and_deduplicate_names(names):
  """Crops and de-duplicates the passed names.

  SC2 gets confused in a multi-agent game when agents have the same
  name. We check for name duplication to avoid this, but - SC2 also
  crops player names to a hard character limit, which can again lead
  to duplicate names. To avoid this we unique-ify names if they are
  equivalent after cropping. Ideally SC2 would handle duplicate names,
  making this unnecessary.

  TODO(b/121092563): Fix this in the SC2 binary.

  Args:
    names: Iterable of names.

  Returns:
    De-duplicated names cropped to 32 characters.

  Raises:
    ValueError: If names still collide after the "(n) " prefixing pass.
  """
  max_name_length = 32

  # Crop. Also materializes `names` (it may be a generator) into a list.
  cropped = [n[:max_name_length] for n in names]

  # De-duplicate: names that are unique after cropping pass through; the
  # rest get a "(1) ", "(2) ", ... occurrence prefix.
  deduplicated = []
  name_counts = collections.Counter(cropped)  # was Counter(n for n in cropped)
  name_index = collections.defaultdict(lambda: 1)
  for n in cropped:
    if name_counts[n] == 1:
      deduplicated.append(n)
    else:
      deduplicated.append("({}) {}".format(name_index[n], n))
      name_index[n] += 1

  # Crop again, since the prefix may have pushed names over the limit.
  recropped = [n[:max_name_length] for n in deduplicated]
  if len(set(recropped)) != len(recropped):
    raise ValueError("Failed to de-duplicate names")

  return recropped
def _get_game_loop(agent_obs):
  """Extract the current game loop from either observation representation."""
  if isinstance(agent_obs, sc_pb.ResponseObservation):
    # Raw passthrough observation (InterfaceOptions agents).
    return agent_obs.observation.game_loop
  # Transformed feature observation.
  return agent_obs.game_loop[0]
def _get_score(agent_obs, score_index=0):
  """Extract a score value from either observation representation."""
  if not isinstance(agent_obs, sc_pb.ResponseObservation):
    # Transformed feature observation: index into the cumulative scores.
    return agent_obs["score_cumulative"][score_index]
  # Raw passthrough observation only exposes the overall score.
  if score_index != 0:
    raise ValueError(
        "Non-zero score index isn't supported for passthrough agents, "
        "currently")
  return agent_obs.observation.score.score
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/env/base_env_wrapper.py | pysc2/env/base_env_wrapper.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A base env wrapper so we don't need to override everything every time."""
from pysc2.env import environment
class BaseEnvWrapper(environment.Base):
  """A base env wrapper so we don't need to override everything every time.

  Forwards each part of the environment API to the wrapped `env`;
  subclasses override only the methods they care about.
  """

  def __init__(self, env):
    self._env = env  # The wrapped environment all calls delegate to.

  def close(self, *args, **kwargs):
    return self._env.close(*args, **kwargs)

  def action_spec(self, *args, **kwargs):
    return self._env.action_spec(*args, **kwargs)

  def observation_spec(self, *args, **kwargs):
    return self._env.observation_spec(*args, **kwargs)

  def reset(self, *args, **kwargs):
    return self._env.reset(*args, **kwargs)

  def step(self, *args, **kwargs):
    return self._env.step(*args, **kwargs)

  def save_replay(self, *args, **kwargs):
    return self._env.save_replay(*args, **kwargs)

  @property
  def state(self):
    return self._env.state
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/env/available_actions_printer.py | pysc2/env/available_actions_printer.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An env wrapper to print the available actions."""
from pysc2.env import base_env_wrapper
class AvailableActionsPrinter(base_env_wrapper.BaseEnvWrapper):
  """An env wrapper that prints each available action the first time seen."""

  def __init__(self, env):
    super().__init__(env)
    self._seen = set()  # Function ids already printed.
    self._action_spec = self.action_spec()[0]

  def step(self, *args, **kwargs):
    all_obs = super().step(*args, **kwargs)
    for obs in all_obs:
      for fn_id in obs.observation["available_actions"]:
        if fn_id in self._seen:
          continue
        self._seen.add(fn_id)
        self._print(self._action_spec.functions[fn_id].str(True))
    return all_obs

  def _print(self, s):
    print(s)
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/env/remote_sc2_env.py | pysc2/env/remote_sc2_env.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Starcraft II environment for playing using remote SC2 instances."""
from typing import Sequence
from absl import logging
from pysc2 import maps
from pysc2 import run_configs
from pysc2.env import sc2_env
from pysc2.lib import features
from pysc2.lib import remote_controller
from pysc2.lib import run_parallel
from s2clientprotocol import sc2api_pb2 as sc_pb
class RestartError(Exception):
  """Raised when an episode restart is requested but cannot be performed."""
class RemoteSC2Env(sc2_env.SC2Env):
  """A Remote Starcraft II environment for playing vs other agents or humans.

  Unlike SC2Env, this doesn't actually start any instances and only connects
  to a remote instance.

  This assumes a 2 player game, and works best with play_vs_agent.py.
  """

  def __init__(self,
               *,
               map_name=None,
               save_map=True,
               host="127.0.0.1",
               host_port=None,
               lan_port=None,
               race=None,
               name="<unknown>",
               agent_interface_format=None,
               discount=1.,
               visualize=False,
               step_mul=None,
               realtime=False,
               replay_dir=None,
               replay_prefix=None):
    """Create a SC2 Env that connects to a remote instance of the game.

    This assumes that the game is already up and running, and that it only
    needs to join the game - and leave once the game has ended. You need some
    other script to launch the SC2 process and call RequestCreateGame. Note
    that you must call close to leave the game when finished. Not doing so
    will lead to issues when attempting to create another game on the same
    SC2 process.

    This class assumes that the game is multiplayer. LAN ports may be
    specified either as a base port (from which the others will be implied),
    or as an explicit list.

    You must specify an agent_interface_format. See the `AgentInterfaceFormat`
    documentation for further detail.

    Args:
      map_name: Name of a SC2 map. Run bin/map_list to get the full list of
        known maps. Alternatively, pass a Map instance. Take a look at the
        docs in maps/README.md for more information on available maps.
      save_map: Whether to save map data before joining the game.
      host: Host where the SC2 process we're connecting to is running.
      host_port: The WebSocket port for the SC2 process we're connecting to.
      lan_port: Either an explicit sequence of LAN ports corresponding to
        [server game port, ...base port, client game port, ...base port],
        or an int specifying base port - equivalent to specifying the
        sequence [lan_port, lan_port+1, lan_port+2, lan_port+3].
      race: Race for this agent.
      name: The name of this agent, for saving in the replay.
      agent_interface_format: AgentInterfaceFormat object describing the
        format of communication between the agent and the environment, else
        just InterfaceOptions to use passthrough.
      discount: Returned as part of the observation.
      visualize: Whether to pop up a window showing the camera and feature
        layers. This won't work without access to a window manager.
      step_mul: How many game steps per agent step (action/observation). None
        means use the map default.
      realtime: Whether to use realtime mode. In this mode the game simulation
        automatically advances (at 22.4 gameloops per second) rather than
        being stepped manually. The number of game loops advanced with each
        call to step() won't necessarily match the step_mul specified. The
        environment will attempt to honour step_mul, returning observations
        with that spacing as closely as possible. Game loops will be skipped
        if they cannot be retrieved and processed quickly enough.
      replay_dir: Directory to save a replay.
      replay_prefix: An optional prefix to use when saving replays.

    Raises:
      ValueError: if the race is invalid.
      ValueError: if the resolutions aren't specified correctly.
      ValueError: if lan_port is a sequence but its length != 4.
    """
    if agent_interface_format is None:
      raise ValueError("Please specify agent_interface_format.")

    if not race:
      race = sc2_env.Race.random

    map_inst = map_name and maps.get(map_name)
    self._map_name = map_name
    self._game_info = None
    self._num_agents = 1
    self._discount = discount
    self._step_mul = step_mul or (map_inst.step_mul if map_inst else 8)
    self._realtime = realtime
    self._last_step_time = None
    self._save_replay_episodes = 1 if replay_dir else 0
    self._replay_dir = replay_dir
    self._replay_prefix = replay_prefix

    self._score_index = -1  # Win/loss only.
    self._score_multiplier = 1
    self._episode_length = sc2_env.MAX_STEP_COUNT
    self._ensure_available_actions = False
    self._discount_zero_after_timeout = False

    self._run_config = run_configs.get()
    self._parallel = run_parallel.RunParallel()  # Needed for multiplayer.
    self._in_game = False
    self._action_delay_fns = [None]

    interface = self._get_interface(
        agent_interface_format=agent_interface_format, require_raw=visualize)

    if isinstance(lan_port, Sequence):
      if len(lan_port) != 4:
        raise ValueError("lan_port sequence must be of length 4")
      # Copy, since _connect_remote consumes the list via pop().
      ports = lan_port[:]
    else:
      ports = [lan_port + p for p in range(4)]  # 2 * num players *in the game*.

    self._connect_remote(
        host, host_port, ports, race, name, map_inst, save_map, interface,
        agent_interface_format)

    self._finalize(visualize)

  def close(self):
    """Leaves the game and disconnects. Safe to call more than once."""
    # Leave the game so that another may be created in the same SC2 process.
    if self._in_game:
      logging.info("Leaving game.")
      self._controllers[0].leave()
      self._in_game = False
      logging.info("Left game.")

    # Guard so a second close() (e.g. explicit close followed by teardown)
    # doesn't index into None after _controllers was cleared below.
    if self._controllers:
      self._controllers[0].close()

    if hasattr(self, "_parallel") and self._parallel is not None:
      self._parallel.shutdown()
      self._parallel = None

    # We don't own the SC2 process, we shouldn't call quit in the super class.
    self._controllers = None
    self._game_info = None
    super(RemoteSC2Env, self).close()

  def _connect_remote(self, host, host_port, lan_ports, race, name, map_inst,
                      save_map, interface, agent_interface_format):
    """Make sure this stays synced with bin/agent_remote.py."""
    # Connect!
    logging.info("Connecting...")
    self._controllers = [remote_controller.RemoteController(host, host_port)]
    logging.info("Connected")

    if map_inst and save_map:
      run_config = run_configs.get()
      self._controllers[0].save_map(map_inst.path, map_inst.data(run_config))

    # Create the join request.
    join = sc_pb.RequestJoinGame(options=interface)
    join.race = race
    join.player_name = name
    join.shared_port = 0  # unused
    # lan_ports is consumed in order: server game/base, then client game/base.
    join.server_ports.game_port = lan_ports.pop(0)
    join.server_ports.base_port = lan_ports.pop(0)
    join.client_ports.add(
        game_port=lan_ports.pop(0), base_port=lan_ports.pop(0))

    logging.info("Joining game.")
    self._controllers[0].join_game(join)

    self._game_info = [self._controllers[0].game_info()]
    if not self._map_name:
      self._map_name = self._game_info[0].map_name

    self._features = [features.features_from_game_info(
        game_info=self._game_info[0],
        agent_interface_format=agent_interface_format)]

    self._in_game = True
    logging.info("Game joined.")

  def _restart(self):
    # Can't restart since it's not clear how you'd coordinate that with the
    # other players.
    raise RestartError("Can't restart")
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/env/host_remote_agent.py | pysc2/env/host_remote_agent.py | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates SC2 processes and games for remote agents to connect into."""
from pysc2 import maps
from pysc2 import run_configs
from pysc2.lib import portspicker
from pysc2.lib import protocol
from pysc2.lib import remote_controller
from s2clientprotocol import common_pb2 as sc_common
from s2clientprotocol import sc2api_pb2 as sc_pb
class VsAgent(object):
  """Host a remote agent vs remote agent game.

  Starts two SC2 processes, one for each of two remote agents to connect to.
  Call create_game, then have the agents connect to their respective port in
  host_ports, specifying lan_ports in the join game request.

  Agents should leave the game once it has finished, then another game can
  be created. Note that failure of either agent to leave prior to creating
  the next game will lead to SC2 crashing.

  Best used as a context manager for simple and timely resource release.

  **NOTE THAT** currently re-connecting to the same SC2 process is flaky.
  If you experience difficulties the workaround is to only create one game
  per instantiation of VsAgent.
  """

  def __init__(self):
    self._num_agents = 2
    self._run_config = run_configs.get()
    self._processes = []
    self._controllers = []
    self._saved_maps = set()

    # Reserve LAN ports (two per agent).
    self._lan_ports = portspicker.pick_unused_ports(self._num_agents * 2)

    # Start SC2 processes, one per agent; each is told about all LAN ports.
    for _ in range(self._num_agents):
      process = self._run_config.start(extra_ports=self._lan_ports)
      self._processes.append(process)
      self._controllers.append(process.controller)

  def __enter__(self):
    return self

  def __exit__(self, exception_type, exception_value, traceback):
    self.close()

  def __del__(self):
    # Best-effort cleanup; prefer using the context manager.
    self.close()

  def create_game(self, map_name):
    """Create a game for the agents to join.

    Args:
      map_name: The map to use.
    """
    self._reconnect()

    map_inst = maps.get(map_name)
    map_data = map_inst.data(self._run_config)
    # Only upload each map to the SC2 processes once.
    if map_name not in self._saved_maps:
      for controller in self._controllers:
        controller.save_map(map_inst.path, map_data)
      self._saved_maps.add(map_name)

    # Form the create game message.
    create = sc_pb.RequestCreateGame(
        local_map=sc_pb.LocalMap(map_path=map_inst.path),
        disable_fog=False)

    # Set up for two agents.
    for _ in range(self._num_agents):
      create.player_setup.add(type=sc_pb.Participant)

    # Create the game.
    self._controllers[0].create_game(create)
    # Drop our own connections; the remote agents connect to these ports
    # instead (see class docstring).
    self._disconnect()

  def _disconnect(self):
    # Close our controller connections without stopping the SC2 processes.
    for c in self._controllers:
      c.close()
    self._controllers = []

  def _reconnect(self, **kwargs):
    # Re-establish controller connections if _disconnect dropped them.
    if not self._controllers:
      self._controllers = [
          remote_controller.RemoteController(p.host, p.port, p, **kwargs)
          for p in self._processes]

  def save_replay(self, replay_dir, replay_name):
    """Saves a replay of the first process's game to `replay_dir`."""
    self._reconnect()
    return self._run_config.save_replay(
        self._controllers[0].save_replay(), replay_dir, replay_name)

  @property
  def hosts(self):
    """The hosts that the remote agents should connect to."""
    return [process.host for process in self._processes]

  @property
  def host_ports(self):
    """The WebSocket ports that the remote agents should connect to."""
    return [process.port for process in self._processes]

  @property
  def lan_ports(self):
    """The LAN ports which the remote agents should specify when joining."""
    return self._lan_ports

  def close(self):
    """Shutdown and free all resources."""
    try:
      # Short timeout: the processes may already be gone at teardown.
      self._reconnect(timeout_seconds=1)
      for controller in self._controllers:
        controller.quit()
    except (remote_controller.ConnectError, protocol.ConnectionError):
      # Process already dead or unreachable; nothing left to quit.
      pass
    self._controllers = []

    for process in self._processes:
      process.close()
    self._processes = []

    portspicker.return_ports(self._lan_ports)
    self._lan_ports = []
class VsBot(object):
  """Host a remote agent vs bot game.

  Starts a single SC2 process. Call create_game, then have the agent connect
  to host_port.

  The agent should leave the game once it has finished, then another game can
  be created. Note that failure of the agent to leave prior to creating
  the next game will lead to SC2 crashing.

  Best used as a context manager for simple and timely resource release.

  **NOTE THAT** currently re-connecting to the same SC2 process is flaky.
  If you experience difficulties the workaround is to only create one game
  per instantiation of VsBot.
  """

  def __init__(self):
    # Start the SC2 process.
    self._run_config = run_configs.get()
    self._process = self._run_config.start()
    self._controller = self._process.controller
    self._saved_maps = set()

  def __enter__(self):
    return self

  def __exit__(self, exception_type, exception_value, traceback):
    self.close()

  def __del__(self):
    # Best-effort cleanup; prefer using the context manager.
    self.close()

  def create_game(
      self,
      map_name,
      bot_difficulty=sc_pb.VeryEasy,
      bot_race=sc_common.Random,
      bot_first=False):
    """Create a game, one remote agent vs the specified bot.

    Args:
      map_name: The map to use.
      bot_difficulty: The difficulty of the bot to play against.
      bot_race: The race for the bot.
      bot_first: Whether the bot should be player 1 (else is player 2).
    """
    self._reconnect()
    self._controller.ping()

    # Form the create game message.
    map_inst = maps.get(map_name)
    map_data = map_inst.data(self._run_config)
    # Only upload each map to the SC2 process once.
    if map_name not in self._saved_maps:
      self._controller.save_map(map_inst.path, map_data)
      self._saved_maps.add(map_name)

    create = sc_pb.RequestCreateGame(
        local_map=sc_pb.LocalMap(map_path=map_inst.path, map_data=map_data),
        disable_fog=False)

    # Set up for one bot, one agent. The order of player_setup entries
    # determines which participant is player 1.
    if not bot_first:
      create.player_setup.add(type=sc_pb.Participant)

    create.player_setup.add(
        type=sc_pb.Computer, race=bot_race, difficulty=bot_difficulty)

    if bot_first:
      create.player_setup.add(type=sc_pb.Participant)

    # Create the game.
    self._controller.create_game(create)
    # Drop our connection so the remote agent can connect to host_port.
    self._disconnect()

  def _disconnect(self):
    # Close our controller connection without stopping the SC2 process.
    self._controller.close()
    self._controller = None

  def _reconnect(self, **kwargs):
    # Re-establish the controller connection if _disconnect dropped it.
    if not self._controller:
      self._controller = remote_controller.RemoteController(
          self._process.host, self._process.port, self._process, **kwargs)

  def save_replay(self, replay_dir, replay_name):
    """Saves a replay of the current game to `replay_dir`."""
    self._reconnect()
    return self._run_config.save_replay(
        self._controller.save_replay(), replay_dir, replay_name)

  @property
  def host(self):
    """The host that the remote agent should connect to."""
    return self._process.host

  @property
  def host_port(self):
    """The WebSocket port that the remote agent should connect to."""
    return self._process.port

  def close(self):
    """Shutdown and free all resources."""
    # Guard for repeated close() and for __del__ after a failed __init__.
    if hasattr(self, "_process") and self._process is not None:
      try:
        # Short timeout: the process may already be gone at teardown.
        self._reconnect(timeout_seconds=1)
        self._controller.quit()
      except (remote_controller.ConnectError, protocol.ConnectionError):
        # Process already dead or unreachable; nothing left to quit.
        pass
      self._controller = None
      self._process.close()
      self._process = None
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/env/converted_env.py | pysc2/env/converted_env.py | # Copyright 2021 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Environment which uses converters to transform an underlying environment."""
import functools
import threading
from typing import Any, Mapping, NamedTuple, Sequence
import dm_env
import numpy as np
from pysc2.env.converter import converter as converter_lib
from pysc2.env.converter.proto import converter_pb2
from pysc2.lib import actions as sc2_actions
import tree
import typing_extensions
from s2clientprotocol import common_pb2
from s2clientprotocol import raw_pb2
from s2clientprotocol import sc2api_pb2
_BARRIER_TIMEOUT = 30.0
def squeeze_if_necessary(x: np.ndarray) -> np.ndarray:
  """Drops a trailing singleton dimension from `x`, if it has one."""
  has_trailing_one = bool(x.shape) and x.shape[-1] == 1
  if has_trailing_one:
    return np.squeeze(x, axis=-1)
  return x
def squeeze_spec_if_necessary(x):
  """Drops a trailing singleton dimension from spec `x`, if it has one."""
  if not x.shape or x.shape[-1] != 1:
    return x
  return x.replace(shape=x.shape[:-1])
class ConverterFactory(typing_extensions.Protocol):
  """Structural type for callables that build a converter from game info."""

  def __call__(self, game_info: sc2api_pb2.ResponseGameInfo) -> Any:
    """Returns an environment converter given a game info."""
class ConvertedEnvironment(dm_env.Environment):
  """Env which uses converters to transform an underlying environment.

  Note that this is a multiplayer environment. The returned timesteps contain
  lists for their reward and observation fields, with entries in those lists
  corresponding to the players in the game. A list of actions must be passed
  to step - an action per player. Note, however, that this is expected to
  be None where the player isn't expected to act on the current game loop
  because their previous action delay hasn't expired yet.

  If you would prefer to access the environment in a singleplayer fashion,
  see `make_streams`, below.
  """

  def __init__(self,
               env,
               converter_factories: Sequence[ConverterFactory],
               allow_out_of_turn_actions=False):
    """Initializes the environment.

    Args:
      env: The underlying environment which is being converted.
      converter_factories: One for each agent player in the game.
      allow_out_of_turn_actions: Whether to allow agents to act when it's not
        their turns. Used for testing.
    """
    self._env = env
    self._num_players = len(converter_factories)
    self._converter_factories = converter_factories
    self._initialized = False
    # Specs don't depend on real game info, so build them once up front from
    # throwaway converters fed a dummy game info (see _dummy_game_info).
    converters = [f(_dummy_game_info()) for f in converter_factories]
    self._action_specs = [c.action_spec() for c in converters]
    self._obs_specs = [c.observation_spec() for c in converters]
    # Per-player game loop at which that player is next expected to act.
    self._target_game_loops = [0] * self._num_players
    # Real converters are created per-episode in reset() from real game info.
    self._converters = [None] * self._num_players
    self._game_loop = None
    self._allow_out_of_turn_actions = allow_out_of_turn_actions

  def reset(self) -> dm_env.TimeStep:
    """Resets the environment."""
    self._initialized = True
    self._game_loop = 0
    self._target_game_loops = [0] * self._num_players
    self._converters = [
        f(g) for f, g in zip(self._converter_factories, self._env.game_info)
    ]
    return self._convert_timesteps(self._env.reset())

  def step(self, actions) -> dm_env.TimeStep:
    """Steps the environment."""
    if not self._initialized:
      return self.reset()

    converted_actions = []
    for i, (action, converter) in enumerate(zip(actions, self._converters)):
      if action is None:
        # A player may only skip when its previous action delay is still
        # pending (target game loop not yet reached).
        if self._target_game_loops[i] <= self._game_loop:
          raise RuntimeError('No action specified when its your turn.')
        converted_actions.append(sc2_actions.FUNCTIONS.no_op())
      else:
        if (self._target_game_loops[i] > self._game_loop and
            not self._allow_out_of_turn_actions):
          raise RuntimeError('Can\'t act when not your turn.')
        action_with_delay = converter.convert_action(action)
        # The action's delay decides when this player must act next.
        self._target_game_loops[i] = self._game_loop + action_with_delay.delay
        num_actions = len(action_with_delay.request_action.actions)
        if not num_actions:
          converted_actions.append(sc2api_pb2.Action())
        else:
          # NOTE(review): this repeats the *first* converted action
          # num_actions times rather than forwarding each entry; presumably
          # all entries are identical - confirm against the converter output.
          converted_actions.append(
              [action_with_delay.request_action.actions[0]] * num_actions)

    # Advance only until the earliest player is due to act again.
    min_delay = min(g for g in self._target_game_loops) - self._game_loop
    timestep = self._convert_timesteps(
        self._env.step(converted_actions, min_delay))
    self._game_loop = max(int(obs['game_loop']) for obs in timestep.observation)

    if timestep.last():
      # Episode over: require a reset before the next step.
      self._initialized = False
      self._target_game_loops = [0] * len(self._target_game_loops)

    return timestep

  def observation_spec(self):
    return tree.map_structure(squeeze_spec_if_necessary, self._obs_specs)

  def action_spec(self):
    return self._action_specs

  def close(self):
    self._env.close()
    self._env = None

  def send_chat_messages(self, messages: Sequence[str], broadcast: bool = True):
    # Optional on the underlying env; silently skipped if unsupported.
    fn = getattr(self._env, 'send_chat_messages', None)
    if fn:
      # Make sure that chat messages are less than 255 characters
      messages = [x[:254] for x in messages]
      fn(messages, broadcast)

  def save_replay(self, replay_dir, prefix=None):
    return self._env.save_replay(replay_dir, prefix)

  def action_delays(self):
    return self._env.action_delays()

  def num_players(self):
    return self._num_players

  def is_player_turn(self):
    """Per-player bools: True where the player must act this game loop."""
    return [t <= self._game_loop for t in self._target_game_loops]

  def _convert_timesteps(self, timesteps):
    """Converts per-player raw timesteps into one multi-player timestep."""

    def _convert_obs(obs, converter):
      # The observation may arrive as a dict wrapping a thunk that yields
      # the ResponseObservation proto.
      if not isinstance(obs, sc2api_pb2.ResponseObservation):
        obs = obs['_response_observation']()
      env_obs = converter_pb2.Observation(player=obs)
      env_obs = converter.convert_observation(observation=env_obs)
      return tree.map_structure(squeeze_if_necessary, env_obs)

    # Merge the timesteps from a sequence to a single timestep
    return dm_env.TimeStep(
        step_type=dm_env.StepType(timesteps[0].step_type),
        reward=[timestep.reward for timestep in timesteps],
        discount=timesteps[0].discount,
        observation=[
            _convert_obs(ts.observation, t)
            for ts, t in zip(timesteps, self._converters)
        ])
class _Stream(dm_env.Environment):
  """A stream for a single player interacting with a multiplayer environment."""

  def __init__(self, player: int, environment: '_StreamedEnvironment'):
    self._player = player
    self._environment = environment

  def reset(self) -> dm_env.TimeStep:
    """Resets the shared environment; returns this player's timestep."""
    return self._environment.reset(self._player)

  def step(self, action) -> dm_env.TimeStep:
    """Steps the shared environment with this player's action."""
    return self._environment.step(action, self._player)

  def action_spec(self):
    """This player's action spec."""
    return self._environment.action_spec(self._player)

  def observation_spec(self):
    """This player's observation spec."""
    return self._environment.observation_spec(self._player)

  def close(self):
    """Marks this player closed; the shared env closes once all players do."""
    self._environment.close(self._player)

  def save_replay(self, replay_dir, prefix=None):
    return self._environment.save_replay(replay_dir, prefix)
class _StreamedEnvironment:
  """Env presenting ConvertedEnvironment as multiple single player streams.

  Player threads rendezvous at a barrier; player 0 is the designated driver
  that performs the underlying env call between two sync points.
  """

  def __init__(self, underlying_env: ConvertedEnvironment):
    if not 1 <= underlying_env.num_players() <= 2:
      raise ValueError(
          f'Unsupported number of players: {underlying_env.num_players()}')

    self._underlying_env = underlying_env
    self._num_players = underlying_env.num_players()
    # Only waited on in 2-player games; _wait_for_other_player no-ops for 1.
    self._barrier = threading.Barrier(parties=2)
    self._lock = threading.Lock()
    self._timestep = None
    self._actions = [None] * self._num_players
    self._closed = [False] * self._num_players
    self._closed_lock = threading.Lock()

  def reset(self, player: int) -> dm_env.TimeStep:
    """Resets the underlying environment, syncing players."""
    self._wait_for_other_player()
    if player == 0:
      # Player 0 drives the reset; the other player waits at the barrier
      # below until the shared timestep is published.
      self._timestep = self._underlying_env.reset()
    self._wait_for_other_player()
    return self._player_timestep(player)

  def step(self, action, player: int) -> dm_env.TimeStep:
    """Steps the underlying environment, syncing players."""
    self._actions[player] = action
    # Keep stepping (with None actions for players whose delay is pending)
    # until it is this player's turn to observe/act again.
    while True:
      self._wait_for_other_player()
      if player == 0:
        self._timestep = self._underlying_env.step(self._actions)
        self._actions = [None] * self._num_players
      self._wait_for_other_player()
      if self._underlying_env.is_player_turn()[player]:
        break
    return self._player_timestep(player)

  def action_spec(self, player: int):
    return self._underlying_env.action_spec()[player]

  def observation_spec(self, player: int):
    return self._underlying_env.observation_spec()[player]

  def close(self, player: int):
    # The underlying env is only closed once every player has closed.
    with self._closed_lock:
      self._closed[player] = True
      if all(self._closed):
        self._underlying_env.close()

  def save_replay(self, replay_dir, prefix=None):
    with self._lock:
      return self._underlying_env.save_replay(replay_dir, prefix)

  def _wait_for_other_player(self):
    """Waits for the other player (if there is one) to reach this point."""
    if self._num_players == 1:
      return
    try:
      self._barrier.wait(_BARRIER_TIMEOUT)
    except threading.BrokenBarrierError:
      raise TimeoutError('Timed out waiting for other player')

  def _player_timestep(self, player: int):
    # dm_env convention: no reward/discount on the episode's first step.
    first_step = self._timestep.step_type is dm_env.StepType.FIRST
    return dm_env.TimeStep(
        step_type=self._timestep.step_type,
        reward=float(self._timestep.reward[player]) if not first_step else None,
        discount=self._timestep.discount if not first_step else None,
        observation=self._timestep.observation[player])
def make_streams(converted_environment: ConvertedEnvironment):
  """Makes single player environment streams out of a ConvertedEnvironment.

  Each stream is expected to be run in a separate thread as steps involving
  multiple player must be executed concurrently. Where multiple players are
  expected to act but don't within _BARRIER_TIMEOUT, an exception will be
  raised.

  Args:
    converted_environment: A converted environment configured for 1 or 2
      players.

  Returns:
    A dm_env.Environment for each player.
  """
  shared_env = _StreamedEnvironment(converted_environment)
  num_players = converted_environment.num_players()
  return [_Stream(player, shared_env) for player in range(num_players)]
def _dummy_game_info() -> sc2api_pb2.ResponseGameInfo:
  """Returns a dummy game info object.

  The converter *specs* don't depend on the game info (this is not true for
  the converted data). So, rather than instantiating the game to have the
  converter generate specs, we can supply this dummy game info instead.
  """
  map_size = common_pb2.Size2DI(x=256, y=256)
  players = [
      sc2api_pb2.PlayerInfo(race_requested=common_pb2.Protoss)
      for _ in range(2)
  ]
  return sc2api_pb2.ResponseGameInfo(
      start_raw=raw_pb2.StartRaw(map_size=map_size),
      player_info=players)
class EnvironmentSpec(NamedTuple):
  """Bundles an environment's observation spec and action spec."""
  obs_spec: Mapping[str, Any]
  action_spec: Mapping[str, Any]
def get_environment_spec(
    converter_settings: converter_pb2.ConverterSettings,) -> EnvironmentSpec:
  """Gets observation and action spec for the specified converter settings.

  Args:
    converter_settings: The converter settings to get specs for.

  Returns:
    (observation spec, action spec).
  """
  converter = converter_lib.Converter(
      converter_settings,
      converter_pb2.EnvironmentInfo(game_info=_dummy_game_info()))
  squeezed_obs_spec = tree.map_structure(
      squeeze_spec_if_necessary, converter.observation_spec())
  return EnvironmentSpec(
      obs_spec=squeezed_obs_spec, action_spec=converter.action_spec())
def make_converter_factories(
    all_converter_settings: Sequence[converter_pb2.ConverterSettings]):
  """Makes converter factories from converter settings."""

  def converter_factory(settings: converter_pb2.ConverterSettings,
                        game_info: sc2api_pb2.ResponseGameInfo):
    return converter_lib.Converter(
        settings, converter_pb2.EnvironmentInfo(game_info=game_info))

  factories = []
  for settings in all_converter_settings:
    # Bind the settings now; the game info is supplied at call time.
    factories.append(functools.partial(converter_factory, settings))
  return factories
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/env/run_loop.py | pysc2/env/run_loop.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A run loop for agent/environment interaction."""
import time
def run_loop(agents, env, max_frames=0, max_episodes=0):
  """A run loop to have agents and an environment interact.

  Args:
    agents: A list of agents, one per player, each with `setup`, `reset` and
      `step` methods.
    env: An environment with `reset`/`step` and per-agent
      `observation_spec`/`action_spec` entries.
    max_frames: Stop after this many total frames; 0 means no limit.
    max_episodes: Stop after this many episodes; 0 means no limit.
  """
  total_frames = 0
  total_episodes = 0
  start_time = time.time()

  observation_spec = env.observation_spec()
  action_spec = env.action_spec()
  for agent, obs_spec, act_spec in zip(agents, observation_spec, action_spec):
    agent.setup(obs_spec, act_spec)

  try:
    while not max_episodes or total_episodes < max_episodes:
      total_episodes += 1
      timesteps = env.reset()
      for a in agents:
        a.reset()
      while True:
        total_frames += 1
        actions = [agent.step(timestep)
                   for agent, timestep in zip(agents, timesteps)]
        if max_frames and total_frames >= max_frames:
          return
        if timesteps[0].last():
          break
        timesteps = env.step(actions)
  except KeyboardInterrupt:
    pass
  finally:
    elapsed_time = time.time() - start_time
    # Guard against division by zero when the loop exits within a single
    # clock tick (e.g. an immediate exception, or a coarse platform timer).
    fps = total_frames / elapsed_time if elapsed_time > 0 else float("inf")
    print("Took %.3f seconds for %s steps: %.3f fps" % (
        elapsed_time, total_frames, fps))
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/env/__init__.py | pysc2/env/__init__.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/env/environment.py | pysc2/env/environment.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python RL Environment API."""
import abc
import collections
import enum
class TimeStep(collections.namedtuple(
    'TimeStep', ['step_type', 'reward', 'discount', 'observation'])):
  """Returned with every call to `step` and `reset` on an environment.

  A `TimeStep` contains the data emitted by an environment at each step of
  interaction. A `TimeStep` holds a `step_type`, an `observation`, and an
  associated `reward` and `discount`.

  The first `TimeStep` in a sequence will have `StepType.FIRST`. The final
  `TimeStep` will have `StepType.LAST`. All other `TimeStep`s in a sequence
  will have `StepType.MID`.

  Attributes:
    step_type: A `StepType` enum value.
    reward: A scalar, or 0 if `step_type` is `StepType.FIRST`, i.e. at the
      start of a sequence.
    discount: A discount value in the range `[0, 1]`, or 0 if `step_type`
      is `StepType.FIRST`, i.e. at the start of a sequence.
    observation: A NumPy array, or a dict, list or tuple of arrays.
  """
  # No per-instance __dict__: keeps instances as light as the base namedtuple.
  __slots__ = ()

  def first(self):
    """True if this is the first `TimeStep` of a sequence."""
    return self.step_type is StepType.FIRST

  def mid(self):
    """True if this `TimeStep` is neither first nor last in its sequence."""
    return self.step_type is StepType.MID

  def last(self):
    """True if this is the final `TimeStep` of a sequence."""
    return self.step_type is StepType.LAST
class StepType(enum.IntEnum):
  """Defines the status of a `TimeStep` within a sequence.

  An `IntEnum`, so members also compare equal to the plain ints 0, 1, 2.
  """
  # Denotes the first `TimeStep` in a sequence.
  FIRST = 0
  # Denotes any `TimeStep` in a sequence that is not FIRST or LAST.
  MID = 1
  # Denotes the last `TimeStep` in a sequence.
  LAST = 2
class Base(metaclass=abc.ABCMeta):  # pytype: disable=ignored-abstractmethod
  """Abstract base class for Python RL environments."""

  @abc.abstractmethod
  def reset(self):
    """Starts a new sequence and returns the first `TimeStep` of this sequence.

    Returns:
      A `TimeStep` namedtuple containing:
        step_type: A `StepType` of `FIRST`.
        reward: Zero.
        discount: Zero.
        observation: A NumPy array, or a dict, list or tuple of arrays
          corresponding to `observation_spec()`.
    """

  @abc.abstractmethod
  def step(self, action):
    """Updates the environment according to the action and returns a `TimeStep`.

    If the environment returned a `TimeStep` with `StepType.LAST` at the
    previous step, this call to `step` will start a new sequence and `action`
    will be ignored.

    This method will also start a new sequence if called after the environment
    has been constructed and `reset` has not been called. Again, in this case
    `action` will be ignored.

    Args:
      action: A NumPy array, or a dict, list or tuple of arrays corresponding to
        `action_spec()`.

    Returns:
      A `TimeStep` namedtuple containing:
        step_type: A `StepType` value.
        reward: Reward at this timestep.
        discount: A discount in the range [0, 1].
        observation: A NumPy array, or a dict, list or tuple of arrays
          corresponding to `observation_spec()`.
    """

  @abc.abstractmethod
  def observation_spec(self):
    """Defines the observations provided by the environment.

    Returns:
      A tuple of specs (one per agent), where each spec is a dict of shape
      tuples.
    """

  @abc.abstractmethod
  def action_spec(self):
    """Defines the actions that should be provided to `step`.

    Returns:
      A tuple of specs (one per agent), where each spec is something that
      defines the shape of the actions.
    """

  def close(self):
    """Frees any resources used by the environment.

    Implement this method for an environment backed by an external process.

    This method can be used directly

    ```python
    env = Env(...)
    # Use env.
    env.close()
    ```

    or via a context manager

    ```python
    with Env(...) as env:
      # Use env.
    ```
    """
    pass

  def __enter__(self):
    """Allows the environment to be used in a with-statement context."""
    return self

  def __exit__(self, unused_exception_type, unused_exc_value, unused_traceback):
    """Allows the environment to be used in a with-statement context."""
    self.close()

  def __del__(self):
    # Best-effort cleanup; subclasses holding external resources override
    # close() and rely on it being called here as a last resort.
    self.close()
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/env/converted_env_test.py | pysc2/env/converted_env_test.py | # Copyright 2021 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import concurrent.futures
import random
from absl.testing import absltest
import dm_env
from dm_env import test_utils
import numpy as np
from pysc2.env import converted_env
from pysc2.env import mock_sc2_env
from pysc2.env import sc2_env
from pysc2.env.converter import converter
from pysc2.env.converter.proto import converter_pb2
from pysc2.lib import features
from s2clientprotocol import common_pb2
from s2clientprotocol import sc2api_pb2
def _action(delay: int):
return {
'function': np.int32(1),
'world': np.int32(2949),
'queued': np.int32(0),
'unit_tags': np.array([1] + [255] * 63, dtype=np.int32),
'target_unit_tag': np.int32(0),
'repeat': np.int32(0),
'delay': np.int32(delay)
}
def _converter_factory(game_info: sc2api_pb2.ResponseGameInfo):
  """Creates a raw-interface converter matching the given game info."""
  raw_settings = converter_pb2.ConverterSettings.RawSettings(
      num_unit_features=40,
      max_unit_selection_size=64,
      max_unit_count=512,
      resolution=common_pb2.Size2DI(x=128, y=128))
  settings = converter_pb2.ConverterSettings(
      raw_settings=raw_settings,
      num_action_types=540,
      num_unit_types=217,
      num_upgrade_types=86,
      max_num_upgrades=40)
  return converter.Converter(
      settings,
      environment_info=converter_pb2.EnvironmentInfo(game_info=game_info))
def _agent_interface_format():
  """Raw-units interface format that also forwards the observation proto."""
  return features.AgentInterfaceFormat(
      use_raw_units=True,
      use_raw_actions=True,
      send_observation_proto=True)
class StreamedEnvTest(absltest.TestCase):
  """End-to-end stream tests over a mock converted environment."""

  def _check_episode(self, stream):
    """Plays one episode on `stream` with random delays until it ends."""
    timestep = stream.reset()
    self.assertIsNotNone(timestep)
    timestep = stream.step(_action(random.randint(1, 5)))
    while timestep.step_type != dm_env.StepType.LAST:
      timestep = stream.step(_action(random.randint(1, 5)))
    self.assertIsNotNone(timestep)

  def test_single_player(self):
    mock_env = mock_sc2_env.SC2TestEnv(
        players=[
            sc2_env.Agent(race=sc2_env.Race.protoss),
            sc2_env.Bot(
                race=sc2_env.Race.zerg,
                difficulty=sc2_env.Difficulty.very_easy),
        ],
        agent_interface_format=_agent_interface_format(),
        game_steps_per_episode=30,
    )
    env = converted_env.ConvertedEnvironment(
        converter_factories=[_converter_factory], env=mock_env)
    stream = converted_env.make_streams(env)[0]
    with stream:
      self._check_episode(stream)

  def test_two_player(self):
    mock_env = mock_sc2_env.SC2TestEnv(
        players=[
            sc2_env.Agent(race=sc2_env.Race.protoss),
            sc2_env.Agent(race=sc2_env.Race.zerg),
        ],
        agent_interface_format=[_agent_interface_format() for _ in range(2)],
        game_steps_per_episode=30,
    )
    env = converted_env.ConvertedEnvironment(
        converter_factories=[_converter_factory, _converter_factory],
        env=mock_env)
    s0, s1 = converted_env.make_streams(env)
    with s0, s1:
      # Both streams must be driven concurrently: each blocks on the other.
      with concurrent.futures.ThreadPoolExecutor() as executor:
        futures = [
            executor.submit(self._check_episode, s) for s in (s0, s1)
        ]
        concurrent.futures.wait(futures)
        for future in futures:
          future.result()
class StreamedEnvConformanceTest(test_utils.EnvironmentTestMixin,
                                 absltest.TestCase):
  """dm_env conformance suite run against a single converted stream."""

  def make_object_under_test(self):
    """Builds one stream over a mock single-player converted environment."""
    mock_env = mock_sc2_env.SC2TestEnv(
        players=[
            sc2_env.Agent(race=sc2_env.Race.protoss),
            sc2_env.Bot(
                race=sc2_env.Race.zerg,
                difficulty=sc2_env.Difficulty.very_easy),
        ],
        agent_interface_format=_agent_interface_format(),
        game_steps_per_episode=10,
    )
    env = converted_env.ConvertedEnvironment(
        env=mock_env, converter_factories=[_converter_factory])
    return converted_env.make_streams(env)[0]
# Run the test suite when executed as a script.
if __name__ == '__main__':
  absltest.main()
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/env/lan_sc2_env.py | pysc2/env/lan_sc2_env.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Starcraft II environment for playing LAN games vs humans.
Check pysc2/bin/play_vs_agent.py for documentation.
"""
import binascii
import collections
import hashlib
import json
import os
import shutil
import socket
import struct
import subprocess
import threading
import time
from absl import logging
from pysc2 import run_configs
from pysc2.env import sc2_env
from pysc2.lib import features
from pysc2.lib import run_parallel
from s2clientprotocol import sc2api_pb2 as sc_pb
class Addr(collections.namedtuple("Addr", ["ip", "port"])):
  """An (ip, port) pair; IPv6 addresses are bracketed when printed."""

  def __str__(self):
    if ":" in self.ip:
      return "[%s]:%s" % (self.ip, self.port)
    return "%s:%s" % (self.ip, self.port)
def daemon_thread(target, args):
  """Starts `target(*args)` on a daemon thread and returns the thread."""
  thread = threading.Thread(target=target, args=args, daemon=True)
  thread.start()
  return thread
def udp_server(addr):
  """Binds and returns a UDP socket on `addr` (IPv4 or IPv6 by ip format)."""
  is_ipv6 = ":" in addr.ip
  family = socket.AF_INET6 if is_ipv6 else socket.AF_INET
  sock = socket.socket(family, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
  sock.bind(addr)
  return sock
def tcp_server(tcp_addr, settings):
  """Start up the tcp server, send the settings."""
  family = socket.AF_INET6 if ":" in tcp_addr.ip else socket.AF_INET
  listener = socket.socket(family, socket.SOCK_STREAM, socket.IPPROTO_TCP)
  listener.bind(tcp_addr)
  listener.listen(1)
  logging.info("Waiting for connection on %s", tcp_addr)
  conn, addr = listener.accept()
  logging.info("Accepted connection from %s", Addr(*addr[:2]))
  # Send map_data independently for py2/3 and json encoding reasons.
  write_tcp(conn, settings["map_data"])
  send_settings = dict(settings)
  send_settings.pop("map_data")  # Binary payload already sent above.
  logging.debug("settings: %s", send_settings)
  write_tcp(conn, json.dumps(send_settings).encode())
  return conn
def tcp_client(tcp_addr):
  """Connect to the tcp server, and return the settings."""
  family = socket.AF_INET6 if ":" in tcp_addr.ip else socket.AF_INET
  sock = socket.socket(family, socket.SOCK_STREAM, socket.IPPROTO_TCP)
  connected = False
  for attempt in range(300):
    logging.info("Connecting to: %s, attempt %d", tcp_addr, attempt)
    try:
      sock.connect(tcp_addr)
      connected = True
      break
    except socket.error:
      time.sleep(1)
  if not connected:
    sock.connect(tcp_addr)  # One last try, but don't catch this error.
  logging.info("Connected.")
  map_data = read_tcp(sock)
  settings_str = read_tcp(sock)
  if not settings_str:
    raise socket.error("Failed to read")
  settings = json.loads(settings_str.decode())
  logging.info("Got settings. map_name: %s.", settings["map_name"])
  logging.debug("settings: %s", settings)
  settings["map_data"] = map_data
  return sock, settings
def log_msg(prefix, msg):
  """Debug-logs the length, md5 prefix and leading bytes of a message."""
  digest = hashlib.md5(msg).hexdigest()[:6]
  head = binascii.hexlify(msg[:25])
  logging.debug("%s: len: %s, hash: %s, msg: 0x%s",
                prefix, len(msg), digest, head)
def udp_to_tcp(udp_sock, tcp_conn):
  """Forwards datagrams from `udp_sock` to `tcp_conn` until an empty one."""
  while True:
    payload, _ = udp_sock.recvfrom(2**16)
    log_msg("read_udp", payload)
    if not payload:
      return
    write_tcp(tcp_conn, payload)
def tcp_to_udp(tcp_conn, udp_sock, udp_to_addr):
  """Forwards framed messages from `tcp_conn` to `udp_to_addr` until EOF."""
  while True:
    payload = read_tcp(tcp_conn)
    if not payload:
      return
    log_msg("write_udp", payload)
    udp_sock.sendto(payload, udp_to_addr)
def read_tcp(conn):
  """Reads one length-prefixed message from `conn`.

  Returns:
    The message bytes, or None if the stream ended before a complete
    header or body arrived.
  """
  read_size = read_tcp_size(conn, 4)
  if not read_size:
    return
  size = struct.unpack("@I", read_size)[0]
  msg = read_tcp_size(conn, size)
  if msg is None:
    # Stream closed mid-message. Previously this fell through to
    # log_msg(None), which crashed on len(None); callers expect None.
    return
  log_msg("read_tcp", msg)
  return msg
def read_tcp_size(conn, size):
  """Read `size` number of bytes from `conn`, retrying as needed."""
  pieces = []
  received = 0
  while received < size:
    piece = conn.recv(size - received)
    if not piece:
      # Peer closed the connection; warn only if a partial read was lost.
      if received > 0:
        logging.warning("Incomplete read: %s of %s.", received, size)
      return None
    pieces.append(piece)
    received += len(piece)
  return b"".join(pieces)
def write_tcp(conn, msg):
  """Writes `msg` to `conn`, prefixed by its native-order 4-byte length."""
  log_msg("write_tcp", msg)
  header = struct.pack("@I", len(msg))
  conn.sendall(header)
  conn.sendall(msg)
def forward_ports(remote_host, local_host, local_listen_ports,
                  remote_listen_ports):
  """Forwards ports such that multiplayer works between machines.

  Args:
    remote_host: Where to ssh to.
    local_host: "127.0.0.1" or "::1".
    local_listen_ports: Which ports to listen on locally to forward remotely.
    remote_listen_ports: Which ports to listen on remotely to forward locally.

  Returns:
    The ssh process.

  Raises:
    ValueError: if it can't find ssh.
  """
  if ":" in local_host and not local_host.startswith("["):
    local_host = "[%s]" % local_host
  ssh = shutil.which("ssh") or shutil.which("plink")
  if not ssh:
    raise ValueError("Couldn't find an ssh client.")
  args = [ssh, remote_host]
  for port in local_listen_ports:
    args.extend(["-L", "%s:%s:%s:%s" % (local_host, port, local_host, port)])
  for port in remote_listen_ports:
    args.extend(["-R", "%s:%s:%s:%s" % (local_host, port, local_host, port)])
  logging.info("SSH port forwarding: %s", " ".join(args))
  return subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                          stdin=subprocess.PIPE,
                          close_fds=(os.name == "posix"))
class RestartError(Exception):
  """Raised when an in-place restart is requested but cannot be performed."""
class LanSC2Env(sc2_env.SC2Env):
  """A Starcraft II environment for playing vs humans over LAN.

  This owns a single instance, and expects to join a game hosted by some other
  script, likely play_vs_agent.py.
  """

  def __init__(self,
               *,
               host="127.0.0.1",
               config_port=None,
               race=None,
               name="<unknown>",
               agent_interface_format=None,
               discount=1.,
               visualize=False,
               step_mul=None,
               realtime=False,
               replay_dir=None,
               replay_prefix=None):
    """Create a SC2 Env that connects to a remote instance of the game.

    This assumes that the game is already up and running, and it only needs to
    join. You need some other script to launch the process and call
    RequestCreateGame. It also assumes that it's a multiplayer game, and that
    the ports are consecutive.

    You must pass a resolution that you want to play at. You can send either
    feature layer resolution or rgb resolution or both. If you send both you
    must also choose which to use as your action space. Regardless of which you
    choose you must send both the screen and minimap resolutions.

    For each of the 4 resolutions, either specify size or both width and
    height. If you specify size then both width and height will take that
    value.

    Args:
      host: Which ip to use. Either ipv4 or ipv6 localhost.
      config_port: Where to find the config port.
      race: Race for this agent.
      name: The name of this agent, for saving in the replay.
      agent_interface_format: AgentInterfaceFormat object describing the
        format of communication between the agent and the environment, else
        just InterfaceOptions to use passthrough.
      discount: Returned as part of the observation.
      visualize: Whether to pop up a window showing the camera and feature
        layers. This won't work without access to a window manager.
      step_mul: How many game steps per agent step (action/observation). None
        means use the map default.
      realtime: Whether to use realtime mode. In this mode the game simulation
        automatically advances (at 22.4 gameloops per second) rather than
        being stepped manually. The number of game loops advanced with each
        call to step() won't necessarily match the step_mul specified. The
        environment will attempt to honour step_mul, returning observations
        with that spacing as closely as possible. Game loops will be skipped
        if they cannot be retrieved and processed quickly enough.
      replay_dir: Directory to save a replay.
      replay_prefix: An optional prefix to use when saving replays.

    Raises:
      ValueError: if the race is invalid.
      ValueError: if the resolutions aren't specified correctly.
      ValueError: if the host or port are invalid.
    """
    if host not in ("127.0.0.1", "::1"):
      raise ValueError("Bad host arguments. Must be a localhost")
    if not config_port:
      raise ValueError("Must pass a config_port.")
    if agent_interface_format is None:
      raise ValueError("Please specify agent_interface_format.")
    if not race:
      race = sc2_env.Race.random
    # Initialize the state the SC2Env base class expects before launching.
    self._num_agents = 1
    self._discount = discount
    self._step_mul = step_mul or 8
    self._realtime = realtime
    self._last_step_time = None
    self._save_replay_episodes = 1 if replay_dir else 0
    self._replay_dir = replay_dir
    self._replay_prefix = replay_prefix
    self._score_index = -1  # Win/loss only.
    self._score_multiplier = 1
    self._episode_length = sc2_env.MAX_STEP_COUNT
    self._ensure_available_actions = False
    self._discount_zero_after_timeout = False
    self._parallel = run_parallel.RunParallel()  # Needed for multiplayer.
    self._game_info = None
    self._action_delay_fns = [None]
    self._requested_races = None
    # NOTE(review): keyword name differs from the upstream pysc2 signature
    # (commented out below); this matches this fork's SC2Env._get_interface.
    interface = self._get_interface(interface_format=agent_interface_format, require_raw=visualize)
    # interface = self._get_interface(
    #     agent_interface_format=agent_interface_format, require_raw=visualize)
    self._launch_remote(host, config_port, race, name, interface,
                        agent_interface_format)
    self._finalize(visualize)

  def _launch_remote(self, host, config_port, race, name, interface,
                     agent_interface_format):
    """Make sure this stays synced with bin/play_vs_agent.py."""
    # Handshake over TCP: receive map data plus the game/port settings.
    self._tcp_conn, settings = tcp_client(Addr(host, config_port))
    self._map_name = settings["map_name"]
    if settings["remote"]:
      # Remote host: tunnel the game's UDP traffic through the TCP link.
      self._udp_sock = udp_server(
          Addr(host, settings["ports"]["server"]["game"]))
      daemon_thread(tcp_to_udp,
                    (self._tcp_conn, self._udp_sock,
                     Addr(host, settings["ports"]["client"]["game"])))
      daemon_thread(udp_to_tcp, (self._udp_sock, self._tcp_conn))
    # Kept for the commented-out start() variant below that forwards ports.
    extra_ports = [
        settings["ports"]["server"]["game"],
        settings["ports"]["server"]["base"],
        settings["ports"]["client"]["game"],
        settings["ports"]["client"]["base"],
    ]
    self._run_config = run_configs.get(version=settings["game_version"])
    # self._sc2_procs = [self._run_config.start(
    #     extra_ports=extra_ports, host=host, window_loc=(700, 50),
    #     want_rgb=interface.HasField("render"))]
    self._sc2_procs = [self._run_config.start(host=host, window_loc=(700, 50),
                                              want_rgb=interface.HasField("render"))]
    self._controllers = [p.controller for p in self._sc2_procs]
    # Create the join request.
    join = sc_pb.RequestJoinGame(options=interface)
    join.race = race
    join.player_name = name
    join.shared_port = 0  # unused
    join.server_ports.game_port = settings["ports"]["server"]["game"]
    join.server_ports.base_port = settings["ports"]["server"]["base"]
    join.client_ports.add(game_port=settings["ports"]["client"]["game"],
                          base_port=settings["ports"]["client"]["base"])
    # The map must be saved locally before the join request references it.
    self._controllers[0].save_map(settings["map_path"], settings["map_data"])
    self._controllers[0].join_game(join)
    self._game_info = [self._controllers[0].game_info()]
    self._features = [features.features_from_game_info(
        game_info=self._game_info[0],
        agent_interface_format=agent_interface_format)]
    self._requested_races = {
        info.player_id: info.race_requested
        for info in self._game_info[0].player_info
        if info.type != sc_pb.Observer
    }

  def _restart(self):
    # Can't restart since it's not clear how you'd coordinate that with the
    # other players.
    raise RestartError("Can't restart")

  def close(self):
    # hasattr guards: close() may run before __init__ finished (or twice).
    if hasattr(self, "_tcp_conn") and self._tcp_conn:
      self._tcp_conn.close()
      self._tcp_conn = None
    if hasattr(self, "_udp_sock") and self._udp_sock:
      self._udp_sock.close()
      self._udp_sock = None
    self._run_config = None
    if hasattr(self, "_parallel") and self._parallel is not None:
      self._parallel.shutdown()
      self._parallel = None
    super(LanSC2Env, self).close()
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/env/mock_sc2_env_test.py | pysc2/env/mock_sc2_env_test.py | #!/usr/bin/python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests of the StarCraft2 mock environment."""
from absl.testing import absltest
import mock
import numpy as np
from pysc2.env import enums
from pysc2.env import environment
from pysc2.env import mock_sc2_env
from pysc2.env import sc2_env
from pysc2.lib import features
from s2clientprotocol import common_pb2
from s2clientprotocol import raw_pb2
from s2clientprotocol import sc2api_pb2
class _TestMixin(object):
  """Shared assertion helpers for the mock-environment test cases."""

  def assert_spec(self, array, shape, dtype):
    """Checks an array's shape and dtype against expectations."""
    self.assertSequenceEqual(array.shape, shape)
    self.assertEqual(array.dtype, dtype)

  def assert_equal(self, actual, expected):
    np.testing.assert_equal(actual, expected)

  def _expected_timestep(self, env, **overrides):
    """Returns the env's queued timestep with the given fields replaced."""
    return env.next_timestep[0]._replace(**overrides)

  def _set_next_timestep(self, env, **overrides):
    """Rewrites the env's queued timestep with the given fields replaced."""
    env.next_timestep = [env.next_timestep[0]._replace(**overrides)]

  def assert_reset(self, env):
    expected = self._expected_timestep(
        env, step_type=environment.StepType.FIRST, reward=0, discount=0)
    self.assert_equal(env.reset(), [expected])

  def assert_first_step(self, env):
    expected = self._expected_timestep(
        env, step_type=environment.StepType.FIRST, reward=0, discount=0)
    self.assert_equal(env.step([mock.sentinel.action]), [expected])

  def assert_mid_step(self, env):
    expected = self._expected_timestep(
        env, step_type=environment.StepType.MID)
    self.assert_equal(env.step([mock.sentinel.action]), [expected])

  def assert_last_step(self, env):
    expected = self._expected_timestep(
        env, step_type=environment.StepType.LAST, discount=0.)
    self.assert_equal(env.step([mock.sentinel.action]), [expected])

  def _test_episode(self, env):
    self._set_next_timestep(env, step_type=environment.StepType.MID)
    self.assert_first_step(env)
    for step in range(1, 10):
      self._set_next_timestep(env, reward=step, discount=step / 10)
      self.assert_mid_step(env)
    self._set_next_timestep(
        env, step_type=environment.StepType.LAST, reward=10, discount=0.0)
    self.assert_last_step(env)

  def _run_episode_steps(self, env, length):
    """Steps through the remainder of one fixed-length episode."""
    for _ in range(length - 1):
      self.assert_mid_step(env)
    self.assert_last_step(env)

  def _test_episode_length(self, env, length):
    self.assert_reset(env)
    self._run_episode_steps(env, length)
    self.assert_first_step(env)
    self._run_episode_steps(env, length)
class TestTestEnvironment(_TestMixin, absltest.TestCase):
  """Tests for mock_sc2_env._TestEnvironment."""

  def setUp(self):
    super(TestTestEnvironment, self).setUp()
    self._env = mock_sc2_env._TestEnvironment(
        num_agents=1,
        observation_spec=({'mock': [10, 1]},),
        action_spec=(mock.sentinel.action_spec,))

  def test_observation_spec(self):
    self.assertEqual(self._env.observation_spec(), ({'mock': [10, 1]},))

  def test_action_spec(self):
    self.assertEqual(self._env.action_spec(), (mock.sentinel.action_spec,))

  def test_default_observation(self):
    spec = self._env.observation_spec()[0]
    observation = self._env._default_observation(spec, 0)
    self.assert_equal(observation, {'mock': np.zeros([10, 1], dtype=np.int32)})

  def test_episode(self):
    self._env.episode_length = float('inf')
    self._test_episode(self._env)

  def test_two_episodes(self):
    self._env.episode_length = float('inf')
    self._test_episode(self._env)
    self._test_episode(self._env)

  def test_episode_length(self):
    self._env.episode_length = 16
    self._test_episode_length(self._env, length=16)
class TestSC2TestEnv(_TestMixin, absltest.TestCase):
  """Tests for mock_sc2_env.SC2TestEnv."""

  def test_episode(self):
    # The mock env never launches the game, so any map name is accepted.
    env = mock_sc2_env.SC2TestEnv(
        map_name='nonexistant map',
        agent_interface_format=features.AgentInterfaceFormat(
            feature_dimensions=features.Dimensions(screen=64, minimap=32)))
    env.episode_length = float('inf')
    self._test_episode(env)

  def test_episode_length(self):
    # SC2TestEnv defaults to 10-step episodes.
    env = mock_sc2_env.SC2TestEnv(
        map_name='nonexistant map',
        agent_interface_format=features.AgentInterfaceFormat(
            feature_dimensions=features.Dimensions(screen=64, minimap=32)))
    self.assertEqual(env.episode_length, 10)
    self._test_episode_length(env, length=10)

  def test_screen_minimap_size(self):
    # Non-square dimensions: observation layout is (layers, height, width).
    env = mock_sc2_env.SC2TestEnv(
        map_name='nonexistant map',
        agent_interface_format=features.AgentInterfaceFormat(
            feature_dimensions=features.Dimensions(
                screen=(84, 87),
                minimap=(64, 67))))
    timestep = env.reset()
    self.assertLen(timestep, 1)
    self.assert_spec(timestep[0].observation['feature_screen'],
                     [len(features.SCREEN_FEATURES), 87, 84], np.int32)
    self.assert_spec(timestep[0].observation['feature_minimap'],
                     [len(features.MINIMAP_FEATURES), 67, 64], np.int32)

  def test_feature_units_are_supported(self):
    env = mock_sc2_env.SC2TestEnv(
        map_name='nonexistant map',
        agent_interface_format=features.AgentInterfaceFormat(
            feature_dimensions=features.Dimensions(screen=64, minimap=32),
            use_feature_units=True))
    self.assertIn('feature_units', env.observation_spec()[0])

  def test_game_info(self):
    # Checks the synthesized ResponseGameInfo proto field-for-field.
    env = mock_sc2_env.SC2TestEnv(
        agent_interface_format=features.AgentInterfaceFormat(
            feature_dimensions=features.Dimensions(screen=64, minimap=32),
            use_feature_units=True),
        players=[sc2_env.Agent(sc2_env.Race.protoss, 'player'),
                 sc2_env.Bot(sc2_env.Race.random, sc2_env.Difficulty.easy,
                             sc2_env.BotBuild.random)])
    self.assertLen(env.game_info, 1)
    self.assertEqual(
        env.game_info[0],
        sc2api_pb2.ResponseGameInfo(
            start_raw=raw_pb2.StartRaw(
                map_size=common_pb2.Size2DI(
                    x=mock_sc2_env.DUMMY_MAP_SIZE,
                    y=mock_sc2_env.DUMMY_MAP_SIZE)),
            options=sc2api_pb2.InterfaceOptions(
                feature_layer=sc2api_pb2.SpatialCameraSetup(
                    resolution=common_pb2.Size2DI(x=64, y=64),
                    minimap_resolution=common_pb2.Size2DI(x=32, y=32),
                    width=24)),
            player_info=[
                sc2api_pb2.PlayerInfo(
                    player_id=1,
                    type=sc2api_pb2.PlayerType.Participant,
                    race_requested=enums.Race.protoss,
                    player_name='player'),
                sc2api_pb2.PlayerInfo(
                    player_id=2,
                    type=sc2api_pb2.PlayerType.Computer,
                    race_requested=enums.Race.random,
                    difficulty=enums.Difficulty.easy,
                    ai_build=enums.BotBuild.random,
                    player_name='easy')
            ]))
# Run the test suite when executed as a script.
if __name__ == '__main__':
  absltest.main()
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/env/converter/converter.py | pysc2/env/converter/converter.py | # Copyright 2021 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PySC2 environment converter.
This is a thin wrapper around the pybind implementation, supporting dm specs
and numpy arrays in place of dm_env_rpc protos; also supports documentation
more naturally.
"""
from typing import Any, Mapping
from dm_env import specs
from pysc2.env.converter.cc.python import converter
from pysc2.env.converter.proto import converter_pb2
from dm_env_rpc.v1 import dm_env_rpc_pb2
from dm_env_rpc.v1 import dm_env_utils
from dm_env_rpc.v1 import tensor_utils
from s2clientprotocol import sc2api_pb2
class Converter:
  """PySC2 environment converter.

  Converts the PySC2 observation/action interface, supporting more standard
  interaction with an ML agent and providing enriched observations.

  Limited configuration is supported through the `ConverterSettings` proto.
  In particular, clients may choose between 'visual' and 'raw' interfaces.
  The visual interface focuses on spatial features and actions which are close
  to those used by a human when playing the game. The raw interface retains
  some spatial features but focuses on numeric unit data; actions being
  specified to units directly, ignoring e.g. the position of the camera.

  The converter maintains some state throughout an episode. This state relies
  on convert_observation and convert_action being called alternately
  throughout the episode. A new converter should be created for each episode.
  """

  def __init__(self, settings: converter_pb2.ConverterSettings,
               environment_info: converter_pb2.EnvironmentInfo):
    self._converter = converter.MakeConverter(
        settings=settings.SerializeToString(),
        environment_info=environment_info.SerializeToString())

  def _parse_specs(self, serialized_specs) -> Mapping[str, specs.Array]:
    """Deserializes a {name: TensorSpec bytes} map into dm_env specs."""
    spec = {}
    for name, serialized in serialized_specs.items():
      tensor_spec = dm_env_rpc_pb2.TensorSpec()
      tensor_spec.ParseFromString(serialized)
      spec[name] = dm_env_utils.tensor_spec_to_dm_env_spec(tensor_spec)
    return spec

  def observation_spec(self) -> Mapping[str, specs.Array]:
    """Returns the observation spec.

    This is a flat mapping of string label to dm_env array spec and varies
    with the specified converter settings and instantiated environment info.
    """
    return self._parse_specs(self._converter.ObservationSpec())

  def action_spec(self) -> Mapping[str, specs.Array]:
    """Returns the action spec.

    This is a flat mapping of string label to dm_env array spec and varies
    with the specified converter settings and instantiated environment info.
    """
    return self._parse_specs(self._converter.ActionSpec())

  def convert_observation(
      self, observation: converter_pb2.Observation) -> Mapping[str, Any]:
    """Converts a SC2 API observation, enriching it with additional info.

    Args:
      observation: Proto containing the SC2 API observation proto for the
        player, and potentially for his opponent. When operating in supervised
        mode must also contain the action taken by the player in response to
        this observation.

    Returns:
      A flat mapping of string labels to numpy arrays / or scalars, as
      appropriate.

    Raises:
      ValueError: if a converted tensor cannot be unpacked.
    """
    serialized_converted_obs = self._converter.ConvertObservation(
        observation.SerializeToString())
    deserialized_converted_obs = {}
    for k, v in serialized_converted_obs.items():
      value = dm_env_rpc_pb2.Tensor()
      value.ParseFromString(v)
      try:
        deserialized_converted_obs[k] = tensor_utils.unpack_tensor(value)
      except Exception as e:
        # Identify the offending key; keep the original error as __cause__.
        raise ValueError(f'Unpacking failed for {k}:{v}') from e
    return deserialized_converted_obs

  def convert_action(self, action: Mapping[str, Any]) -> converter_pb2.Action:
    """Converts an agent action into an SC2 API action proto.

    Note that the returned action also carries the game loop delay requested
    by this player until the next observation.

    Args:
      action: A flat mapping of string labels to numpy arrays / or scalars.

    Returns:
      An SC2 API action request + game loop delay.
    """
    # TODO(b/210113354): Remove protos serialization over pybind11 boundary.
    serialized_action = {
        k: tensor_utils.pack_tensor(v).SerializeToString()
        for k, v in action.items()
    }
    converted_action = converter_pb2.Action()
    converted_action.ParseFromString(
        self._converter.ConvertAction(serialized_action))
    # Round-trip through bytes to rebuild the proto with local descriptors.
    request_action = sc2api_pb2.RequestAction()
    request_action.ParseFromString(
        converted_action.request_action.SerializeToString())
    return converter_pb2.Action(
        request_action=request_action, delay=converted_action.delay)
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/env/converter/derive_interface_options.py | pysc2/env/converter/derive_interface_options.py | # Copyright 2021 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Derives SC2 interface options from converter settings."""
from pysc2.env.converter.proto import converter_pb2
from s2clientprotocol import common_pb2
from s2clientprotocol import sc2api_pb2
def from_settings(settings: converter_pb2.ConverterSettings):
  """Derives SC2 interface options from converter settings.

  Args:
    settings: Converter settings describing the requested interface
      (raw and/or visual), camera width, minimap size and cropping.

  Returns:
    An `sc2api_pb2.InterfaceOptions` proto matching those settings.
  """
  has_visual = settings.HasField('visual_settings')
  # With no visual settings we still need a (dummy) 1x1 screen resolution
  # for the feature layer setup.
  resolution = (
      settings.visual_settings.screen
      if has_visual else common_pb2.Size2DI(x=1, y=1))
  camera_setup = sc2api_pb2.SpatialCameraSetup(
      width=settings.camera_width_world_units,
      allow_cheating_layers=False,
      resolution=resolution,
      minimap_resolution=settings.minimap,
      crop_to_playable_area=settings.crop_to_playable_area)
  return sc2api_pb2.InterfaceOptions(
      feature_layer=camera_setup,
      raw=settings.HasField('raw_settings'),
      score=True,
      raw_affects_selection=True,
      show_cloaked=True,
      show_placeholders=True,
      show_burrowed_shadows=True,
      raw_crop_to_playable_area=settings.crop_to_playable_area)
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/env/converter/__init__.py | pysc2/env/converter/__init__.py | # Copyright 2021 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/env/converter/converter_test.py | pysc2/env/converter/converter_test.py | # Copyright 2021 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from pysc2.env.converter import converter
from pysc2.env.converter.proto import converter_pb2
from s2clientprotocol import common_pb2
from s2clientprotocol import raw_pb2
from s2clientprotocol import sc2api_pb2
from s2clientprotocol import spatial_pb2
# Sizes fed into the converter settings and asserted against the specs
# produced by the converter in the tests below.
NUM_ACTION_TYPES = 539
MAX_UNIT_COUNT = 16
NUM_UNIT_TYPES = 243
NUM_UNIT_FEATURES = 40
NUM_UPGRADES = 40  # Length of the fixed-size upgrades vectors.
NUM_UPGRADE_TYPES = 86  # Number of distinct upgrade ids.
MAX_UNIT_SELECTION_SIZE = 16
MAP_SIZE = 128
RAW_RESOLUTION = 128
MINIMAP_SIZE = 64
SCREEN_SIZE = 96
def _make_dummy_env_info():
  """Builds minimal environment info: two participants on a square map."""
  players = [
      sc2api_pb2.PlayerInfo(
          player_id=pid, type=sc2api_pb2.PlayerType.Participant)
      for pid in (1, 2)
  ]
  map_size = common_pb2.Size2DI(x=MAP_SIZE, y=MAP_SIZE)
  return converter_pb2.EnvironmentInfo(
      game_info=sc2api_pb2.ResponseGameInfo(
          player_info=players,
          start_raw=raw_pb2.StartRaw(map_size=map_size)))
def _make_converter_settings_common(**kwargs):
  """Returns converter settings shared by the raw and visual variants.

  Args:
    **kwargs: Extra `ConverterSettings` fields (e.g. `raw_settings` or
      `visual_settings`) merged into the common ones.
  """
  minimap = common_pb2.Size2DI(x=MINIMAP_SIZE, y=MINIMAP_SIZE)
  return converter_pb2.ConverterSettings(
      num_action_types=NUM_ACTION_TYPES,
      num_unit_types=NUM_UNIT_TYPES,
      num_upgrade_types=NUM_UPGRADE_TYPES,
      max_num_upgrades=NUM_UPGRADES,
      minimap=minimap,
      minimap_features=['height_map', 'visibility_map'],
      add_opponent_features=True,
      **kwargs)
def _make_converter_settings(mode: str):
  """Builds full converter settings for the requested interface mode.

  Args:
    mode: 'visual' for screen-based settings; any other value yields raw
      (unit-based) settings.
  """
  if mode != 'visual':
    raw = converter_pb2.ConverterSettings.RawSettings(
        resolution=common_pb2.Size2DI(x=RAW_RESOLUTION, y=RAW_RESOLUTION),
        num_unit_features=NUM_UNIT_FEATURES,
        max_unit_count=MAX_UNIT_COUNT,
        max_unit_selection_size=MAX_UNIT_SELECTION_SIZE,
        enable_action_repeat=True)
    return _make_converter_settings_common(raw_settings=raw)
  visual = converter_pb2.ConverterSettings.VisualSettings(
      screen=common_pb2.Size2DI(x=SCREEN_SIZE, y=SCREEN_SIZE),
      screen_features=['height_map', 'player_relative'])
  return _make_converter_settings_common(visual_settings=visual)
def _make_blank_image(side: int) -> common_pb2.ImageData:
  """Returns an all-zero 8-bit square feature layer of the given side length."""
  return common_pb2.ImageData(
      bits_per_pixel=8,
      size=common_pb2.Size2DI(x=side, y=side),
      data=bytes(side * side))


def _make_observation():
  """Builds a minimal player observation with blank feature layers.

  The minimap carries the two layers requested by the common settings
  ('height_map', 'visibility_map'); the screen carries the two layers
  requested by the visual settings ('height_map', 'player_relative').
  The identical ImageData construction was previously copy-pasted four
  times; it is now factored into `_make_blank_image`.
  """
  return converter_pb2.Observation(
      player=sc2api_pb2.ResponseObservation(
          observation=sc2api_pb2.Observation(
              player_common=sc2api_pb2.PlayerCommon(player_id=1),
              feature_layer_data=spatial_pb2.ObservationFeatureLayer(
                  minimap_renders=spatial_pb2.FeatureLayersMinimap(
                      height_map=_make_blank_image(MINIMAP_SIZE),
                      visibility_map=_make_blank_image(MINIMAP_SIZE)),
                  renders=spatial_pb2.FeatureLayers(
                      height_map=_make_blank_image(SCREEN_SIZE),
                      player_relative=_make_blank_image(SCREEN_SIZE))))))
class RawConverterTest(absltest.TestCase):
  """Tests specific to the raw (unit-tag based) interface converter."""

  def test_action_spec(self):
    cvr = converter.Converter(
        settings=_make_converter_settings('raw'),
        environment_info=_make_dummy_env_info())
    action_spec = cvr.action_spec()
    self.assertCountEqual(action_spec.keys(), [
        'queued', 'repeat', 'target_unit_tag', 'unit_tags', 'world', 'delay',
        'function'
    ])
    for k, v in action_spec.items():
      self.assertEqual(k, v.name, msg=k)
      self.assertEqual(v.dtype, np.int32, msg=k)
      # 'unit_tags' is the only vector component; everything else is scalar.
      self.assertEqual(
          v.shape, (MAX_UNIT_SELECTION_SIZE,) if k == 'unit_tags' else (),
          msg=k)
      # Delay must be at least 1; all other components start at 0.
      self.assertEqual(v.minimum, (1,) if k == 'delay' else (0,), msg=k)
    for k, v in {
        'queued': 1,
        'repeat': 2,
        'target_unit_tag': MAX_UNIT_COUNT - 1,
        'world': RAW_RESOLUTION * RAW_RESOLUTION - 1,
        'delay': 127,
        'function': NUM_ACTION_TYPES - 1
    }.items():
      self.assertEqual(action_spec[k].maximum, (v,), msg=k)

  def test_action_move_camera(self):
    cvr = converter.Converter(
        settings=_make_converter_settings('raw'),
        environment_info=_make_dummy_env_info())
    raw_move_camera = {'delay': 17, 'function': 168, 'world': 131}
    action = cvr.convert_action(raw_move_camera)
    # world=131 on the 128-wide grid is cell (x=3, y=1); the expected point
    # is the cell centre with y flipped: x = 3.5, y = 128 - 1 - 0.5 = 126.5.
    expected = converter_pb2.Action(
        delay=17,
        request_action=sc2api_pb2.RequestAction(actions=[
            sc2api_pb2.Action(
                action_raw=raw_pb2.ActionRaw(
                    camera_move=raw_pb2.ActionRawCameraMove(
                        center_world_space=common_pb2.Point(x=3.5, y=126.5))))
        ]))
    self.assertEqual(expected.SerializeToString(), action.SerializeToString())

  def test_action_smart_unit(self):
    cvr = converter.Converter(
        settings=_make_converter_settings('raw'),
        environment_info=_make_dummy_env_info())
    raw_smart_unit = {
        'delay': 31,
        'function': 1,
        'queued': 0,
        'repeat': 0,
        'unit_tags': [4],
        'world': 5
    }
    action = cvr.convert_action(raw_smart_unit)
    # Function 1 converts to ability_id 1; world=5 is cell (5, 0), whose
    # centre after the y flip is (5.5, 127.5).
    expected = converter_pb2.Action(
        delay=31,
        request_action=sc2api_pb2.RequestAction(actions=[
            sc2api_pb2.Action(
                action_raw=raw_pb2.ActionRaw(
                    unit_command=raw_pb2.ActionRawUnitCommand(
                        ability_id=1,
                        unit_tags=(4,),
                        queue_command=False,
                        target_world_space_pos=common_pb2.Point2D(
                            x=5.5, y=127.5))))
        ]))
    self.assertEqual(expected.SerializeToString(), action.SerializeToString())
class VisualConverterTest(absltest.TestCase):
  """Tests specific to the visual (feature-layer) interface converter."""

  def test_action_spec(self):
    cvr = converter.Converter(
        settings=_make_converter_settings('visual'),
        environment_info=_make_dummy_env_info())
    action_spec = cvr.action_spec()
    # Visual actions expose screen/minimap targets and selection components
    # rather than raw unit tags.
    self.assertCountEqual(action_spec, [
        'build_queue_id', 'control_group_act', 'control_group_id', 'minimap',
        'queued', 'screen', 'screen2', 'select_add', 'select_point_act',
        'select_unit_act', 'select_unit_id', 'select_worker', 'unload_id',
        'delay', 'function'
    ])
    for k, v in action_spec.items():
      self.assertEqual(k, v.name, msg=k)
      self.assertEqual(v.dtype, np.int32, msg=k)
      # All visual action components are scalars.
      self.assertEqual(v.shape, (), msg=k)
      # Delay must be at least 1; all other components start at 0.
      self.assertEqual(v.minimum, (1,) if (k == 'delay') else (0,), msg=k)
    for k, v in {
        'build_queue_id': 9,
        'control_group_act': 4,
        'control_group_id': 9,
        'minimap': MINIMAP_SIZE * MINIMAP_SIZE - 1,
        'queued': 1,
        'screen': SCREEN_SIZE * SCREEN_SIZE - 1,
        'screen2': SCREEN_SIZE * SCREEN_SIZE - 1,
        'select_add': 1,
        'select_point_act': 3,
        'select_unit_act': 3,
        'select_unit_id': 499,
        'select_worker': 3,
        'unload_id': 499,
        'delay': 127,
        'function': NUM_ACTION_TYPES - 1
    }.items():
      self.assertEqual(action_spec[k].maximum, (v,), msg=k)

  def test_action_move_camera(self):
    cvr = converter.Converter(
        settings=_make_converter_settings('visual'),
        environment_info=_make_dummy_env_info())
    move_camera = {'delay': 17, 'function': 1, 'minimap': 6}
    action = cvr.convert_action(move_camera)
    # Flat minimap index 6 decodes to (x=6, y=0) on the 64-wide minimap.
    expected = converter_pb2.Action(
        delay=17,
        request_action=sc2api_pb2.RequestAction(actions=[
            sc2api_pb2.Action(
                action_feature_layer=spatial_pb2.ActionSpatial(
                    camera_move=spatial_pb2.ActionSpatialCameraMove(
                        center_minimap=common_pb2.PointI(x=6, y=0))))
        ]))
    self.assertEqual(expected.SerializeToString(), action.SerializeToString())

  def test_action_smart_screen(self):
    cvr = converter.Converter(
        settings=_make_converter_settings('visual'),
        environment_info=_make_dummy_env_info())
    # np.int32 components must be accepted as well as plain ints.
    smart_screen = {
        'delay': np.int32(4),
        'function': np.int32(451),
        'queued': np.int32(1),
        'screen': np.int32(333)
    }
    action = cvr.convert_action(smart_screen)
    expected = converter_pb2.Action(
        delay=4,
        request_action=sc2api_pb2.RequestAction(actions=[
            sc2api_pb2.Action(
                action_feature_layer=spatial_pb2.ActionSpatial(
                    unit_command=spatial_pb2.ActionSpatialUnitCommand(
                        ability_id=1,
                        queue_command=True,
                        # Flat screen index is row-major: x = i % W, y = i // W.
                        target_screen_coord=common_pb2.PointI(
                            x=333 % SCREEN_SIZE, y=333 // SCREEN_SIZE))))
        ]))
    self.assertEqual(expected.SerializeToString(), action.SerializeToString())
@parameterized.parameters(('visual',), ('raw',))
class ConverterTest(parameterized.TestCase):
  """Behaviour common to both converter modes; each test runs in both."""

  def test_construction(self, mode):
    converter.Converter(
        settings=_make_converter_settings(mode),
        environment_info=_make_dummy_env_info())

  def test_convert_action_delay(self, mode):
    cvr = converter.Converter(
        settings=_make_converter_settings(mode),
        environment_info=_make_dummy_env_info())
    # The requested delay (1..127) must be passed through unchanged.
    for delay in range(1, 128):
      action = cvr.convert_action(dict(function=0, delay=delay))
      self.assertEqual(action.delay, delay)

  def test_observation_spec(self, mode):
    cvr = converter.Converter(
        settings=_make_converter_settings(mode),
        environment_info=_make_dummy_env_info())
    obs_spec = cvr.observation_spec()
    # Fields present in both modes.
    expected_fields = [
        'away_race_observed', 'away_race_requested', 'game_loop',
        'home_race_requested', 'minimap_height_map', 'minimap_visibility_map',
        'mmr', 'opponent_player', 'opponent_unit_counts_bow',
        'opponent_upgrades_fixed_length', 'player', 'unit_counts_bow',
        'upgrades_fixed_length'
    ]
    if mode == 'raw':
      expected_fields += ['raw_units']
    else:
      expected_fields += [
          'available_actions', 'screen_height_map', 'screen_player_relative'
      ]
    self.assertCountEqual(list(obs_spec), expected_fields)
    for k, v in obs_spec.items():
      self.assertEqual(k, v.name, msg=k)
      # Spatial layers are uint8 images; everything else is int32.
      if k.startswith('minimap_') or k.startswith('screen_'):
        self.assertEqual(v.dtype, np.uint8, msg=k)
      else:
        self.assertEqual(v.dtype, np.int32, msg=k)
      if 'upgrades_fixed_length' not in k:
        self.assertFalse(hasattr(v, 'min'), msg=k)
        self.assertFalse(hasattr(v, 'max'), msg=k)
    for k, v in {
        'minimap_height_map': 255,
        'minimap_visibility_map': 3,
        'upgrades_fixed_length': NUM_UPGRADE_TYPES + 1,
        'opponent_upgrades_fixed_length': NUM_UPGRADE_TYPES + 1
    }.items():
      self.assertEqual(obs_spec[k].minimum, (0,), msg=k)
      self.assertEqual(obs_spec[k].maximum, (v,), msg=k)
    if mode == 'visual':
      for k, v in {
          'screen_height_map': 255,
          'screen_player_relative': 4
      }.items():
        self.assertEqual(obs_spec[k].minimum, (0,), msg=k)
        self.assertEqual(obs_spec[k].maximum, (v,), msg=k)
    # Race/game-loop fields are length-1 vectors; mmr is a bare scalar.
    for f in [
        'away_race_observed', 'away_race_requested', 'game_loop',
        'home_race_requested'
    ]:
      self.assertEqual(obs_spec[f].shape, (1,), msg=f)
    self.assertEqual(obs_spec['mmr'].shape, ())
    for k, v in {
        'player': 11,
        'opponent_player': 10,
        'unit_counts_bow': NUM_UNIT_TYPES,
        'opponent_unit_counts_bow': NUM_UNIT_TYPES,
        'upgrades_fixed_length': NUM_UPGRADES,
        'opponent_upgrades_fixed_length': NUM_UPGRADES
    }.items():
      self.assertEqual(obs_spec[k].shape, (v,), k)
    if mode == 'raw':
      # Each raw unit row carries its features plus 2 extra columns.
      self.assertEqual(obs_spec['raw_units'].shape,
                       (MAX_UNIT_COUNT, NUM_UNIT_FEATURES + 2))
    else:
      self.assertEqual(obs_spec['available_actions'].shape, (NUM_ACTION_TYPES,))

  def test_observation_matches_spec(self, mode):
    cvr = converter.Converter(
        settings=_make_converter_settings(mode),
        environment_info=_make_dummy_env_info())
    obs_spec = cvr.observation_spec()
    converted = cvr.convert_observation(_make_observation())
    # Spec and converted observation must agree on keys and shapes.
    for k, v in obs_spec.items():
      self.assertIn(k, converted)
      self.assertEqual(v.shape, converted[k].shape)
    for k in converted:
      self.assertIn(k, obs_spec)
# Standard absl test entry point.
if __name__ == '__main__':
  absltest.main()
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/env/converter/proto/__init__.py | pysc2/env/converter/proto/__init__.py | # Copyright 2021 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/env/converter/cc/__init__.py | pysc2/env/converter/cc/__init__.py | # Copyright 2021 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/env/converter/cc/game_data/__init__.py | pysc2/env/converter/cc/game_data/__init__.py | # Copyright 2021 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/env/converter/cc/game_data/python/uint8_lookup_test.py | pysc2/env/converter/cc/game_data/python/uint8_lookup_test.py | # Copyright 2021 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
from pysc2.env.converter.cc.game_data.proto import buffs_pb2
from pysc2.env.converter.cc.game_data.proto import units_pb2
from pysc2.env.converter.cc.game_data.proto import upgrades_pb2
from pysc2.env.converter.cc.game_data.python import uint8_lookup
class Uint8LookupTest(absltest.TestCase):
  """Spot-checks the compact (uint8) id lookups for game data."""

  def test_pysc2_to_uint8(self):
    compact = uint8_lookup.PySc2ToUint8(units_pb2.Zerg.InfestedTerran)
    self.assertEqual(compact, 4)

  def test_pysc2_to_uint8_buffs(self):
    compact = uint8_lookup.PySc2ToUint8Buffs(
        buffs_pb2.Buffs.BlindingCloudStructure)
    self.assertEqual(compact, 3)

  def test_pysc2_to_uint8_upgrades(self):
    compact = uint8_lookup.PySc2ToUint8Upgrades(upgrades_pb2.Upgrades.Blink)
    self.assertEqual(compact, 5)

  def test_uint8_to_pysc2(self):
    full_id = uint8_lookup.Uint8ToPySc2(4)
    self.assertEqual(full_id, units_pb2.Zerg.InfestedTerran)

  def test_uint8_to_pysc2_upgrades(self):
    full_id = uint8_lookup.Uint8ToPySc2Upgrades(5)
    self.assertEqual(full_id, upgrades_pb2.Upgrades.Blink)

  def test_effect_id_identity(self):
    # Effect ids pass through unchanged.
    self.assertEqual(uint8_lookup.EffectIdIdentity(17), 17)
# Standard absl test entry point.
if __name__ == '__main__':
  absltest.main()
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/env/converter/cc/game_data/python/__init__.py | pysc2/env/converter/cc/game_data/python/__init__.py | # Copyright 2021 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/env/converter/cc/game_data/proto/__init__.py | pysc2/env/converter/cc/game_data/proto/__init__.py | # Copyright 2021 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/env/converter/cc/python/__init__.py | pysc2/env/converter/cc/python/__init__.py | # Copyright 2021 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/env/converter/cc/test_data/__init__.py | pysc2/env/converter/cc/test_data/__init__.py | # Copyright 2021 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/agents/base_agent.py | pysc2/agents/base_agent.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A base agent to write custom scripted agents."""
from pysc2.lib import actions
class BaseAgent(object):
  """A base agent to write custom scripted agents.

  It can also act as a passive agent that does nothing but no-ops.
  """

  def __init__(self):
    # Lifetime statistics, accumulated across episodes.
    self.reward = self.episodes = self.steps = 0
    # Provided later via `setup` by the run loop.
    self.obs_spec = self.action_spec = None

  def setup(self, obs_spec, action_spec):
    """Records the observation and action specs given by the environment."""
    self.obs_spec = obs_spec
    self.action_spec = action_spec

  def reset(self):
    """Marks the start of a new episode."""
    self.episodes += 1

  def step(self, obs):
    """Accumulates statistics and returns a no-op action."""
    self.steps += 1
    self.reward += obs.reward
    return actions.FunctionCall(actions.FUNCTIONS.no_op.id, [])
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/agents/no_op_agent.py | pysc2/agents/no_op_agent.py | # Copyright 2021 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A no-op agent for starcraft."""
from pysc2.agents import base_agent
from s2clientprotocol import sc2api_pb2 as sc_pb
class NoOpAgent(base_agent.BaseAgent):
  """An agent that always issues an empty SC2 action.

  Unlike `BaseAgent.step`, which returns a `FunctionCall` no-op, this
  returns a bare `sc_pb.Action` proto.
  """

  def step(self, obs):
    """Updates base-class statistics and returns an empty action."""
    # Python-3 zero-argument super(); same behaviour as
    # super(NoOpAgent, self) but not tied to the class name.
    super().step(obs)
    return sc_pb.Action()
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/agents/scripted_agent.py | pysc2/agents/scripted_agent.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Scripted agents."""
import numpy
from pysc2.agents import base_agent
from pysc2.lib import actions
from pysc2.lib import features
# Aliases for the feature-layer "player_relative" values used below.
_PLAYER_SELF = features.PlayerRelative.SELF
_PLAYER_NEUTRAL = features.PlayerRelative.NEUTRAL  # beacon/minerals
_PLAYER_ENEMY = features.PlayerRelative.ENEMY

# Shorthand for the screen-based and raw action function collections.
FUNCTIONS = actions.FUNCTIONS
RAW_FUNCTIONS = actions.RAW_FUNCTIONS
def _xy_locs(mask):
"""Mask should be a set of bools from comparison with a feature layer."""
y, x = mask.nonzero()
return list(zip(x, y))
class MoveToBeacon(base_agent.BaseAgent):
  """Scripted solver for the MoveToBeacon minigame."""

  def step(self, obs):
    super(MoveToBeacon, self).step(obs)
    if FUNCTIONS.Move_screen.id not in obs.observation.available_actions:
      # Move isn't available until the marine is selected.
      return FUNCTIONS.select_army("select")
    player_relative = obs.observation.feature_screen.player_relative
    beacon_cells = _xy_locs(player_relative == _PLAYER_NEUTRAL)
    if not beacon_cells:
      return FUNCTIONS.no_op()
    # Walk to the centroid of the beacon's footprint.
    target = numpy.mean(beacon_cells, axis=0).round()
    return FUNCTIONS.Move_screen("now", target)
class CollectMineralShards(base_agent.BaseAgent):
  """Scripted solver for the CollectMineralShards minigame."""

  def step(self, obs):
    super(CollectMineralShards, self).step(obs)
    if FUNCTIONS.Move_screen.id not in obs.observation.available_actions:
      # Move isn't available until the marines are selected.
      return FUNCTIONS.select_army("select")
    player_relative = obs.observation.feature_screen.player_relative
    shard_cells = _xy_locs(player_relative == _PLAYER_NEUTRAL)
    if not shard_cells:
      return FUNCTIONS.no_op()
    # Head for the shard closest to the marines' average position.
    squad_xy = numpy.mean(
        _xy_locs(player_relative == _PLAYER_SELF), axis=0).round()
    dists = numpy.linalg.norm(numpy.array(shard_cells) - squad_xy, axis=1)
    return FUNCTIONS.Move_screen("now", shard_cells[numpy.argmin(dists)])
class CollectMineralShardsFeatureUnits(base_agent.BaseAgent):
  """An agent for solving the CollectMineralShards map with feature units.

  Controls the two marines independently:
  - select marine
  - move to nearest mineral shard that wasn't the previous target
  - swap marine and repeat
  """

  def setup(self, obs_spec, action_spec):
    super(CollectMineralShardsFeatureUnits, self).setup(obs_spec, action_spec)
    if "feature_units" not in obs_spec:
      raise Exception("This agent requires the feature_units observation.")

  def reset(self):
    super(CollectMineralShardsFeatureUnits, self).reset()
    # Whether we expect a marine to already be selected; toggled each time a
    # move is issued so the two marines are commanded alternately.
    self._marine_selected = False
    # Shard the *other* marine was last sent to, so both marines don't chase
    # the same shard.
    self._previous_mineral_xy = [-1, -1]

  def step(self, obs):
    super(CollectMineralShardsFeatureUnits, self).step(obs)
    marines = [unit for unit in obs.observation.feature_units
               if unit.alliance == _PLAYER_SELF]
    if not marines:
      return FUNCTIONS.no_op()
    # Prefer the marine whose selection state matches what we expect;
    # fall back to the first marine otherwise.
    marine_unit = next((m for m in marines
                        if m.is_selected == self._marine_selected), marines[0])
    marine_xy = [marine_unit.x, marine_unit.y]

    if not marine_unit.is_selected:
      # Nothing selected or the wrong marine is selected.
      self._marine_selected = True
      return FUNCTIONS.select_point("select", marine_xy)

    if FUNCTIONS.Move_screen.id in obs.observation.available_actions:
      # Find and move to the nearest mineral.
      minerals = [[unit.x, unit.y] for unit in obs.observation.feature_units
                  if unit.alliance == _PLAYER_NEUTRAL]

      if self._previous_mineral_xy in minerals:
        # Don't go for the same mineral shard as other marine.
        minerals.remove(self._previous_mineral_xy)

      if minerals:
        # Find the closest.
        distances = numpy.linalg.norm(
            numpy.array(minerals) - numpy.array(marine_xy), axis=1)
        closest_mineral_xy = minerals[numpy.argmin(distances)]

        # Swap to the other marine.
        self._marine_selected = False
        self._previous_mineral_xy = closest_mineral_xy
        return FUNCTIONS.Move_screen("now", closest_mineral_xy)

    return FUNCTIONS.no_op()
class CollectMineralShardsRaw(base_agent.BaseAgent):
  """An agent for solving CollectMineralShards with raw units and actions.

  Controls the two marines independently:
  - move to nearest mineral shard that wasn't the previous target
  - swap marine and repeat
  """

  def setup(self, obs_spec, action_spec):
    super(CollectMineralShardsRaw, self).setup(obs_spec, action_spec)
    if "raw_units" not in obs_spec:
      raise Exception("This agent requires the raw_units observation.")

  def reset(self):
    super(CollectMineralShardsRaw, self).reset()
    # Tag of the marine commanded on the previous step (None on episode
    # start) and the shard it was sent to, so the marines alternate and
    # don't chase the same shard.
    self._last_marine = None
    self._previous_mineral_xy = [-1, -1]

  def step(self, obs):
    super(CollectMineralShardsRaw, self).step(obs)
    marines = [unit for unit in obs.observation.raw_units
               if unit.alliance == _PLAYER_SELF]
    if not marines:
      return RAW_FUNCTIONS.no_op()
    # Alternate between marines; fall back to the first one when no other
    # marine exists. Previously this `next()` had no default, so it raised
    # StopIteration once only a single marine remained (its tag always
    # equals self._last_marine after the first command).
    marine_unit = next((m for m in marines if m.tag != self._last_marine),
                       marines[0])
    marine_xy = [marine_unit.x, marine_unit.y]
    minerals = [[unit.x, unit.y] for unit in obs.observation.raw_units
                if unit.alliance == _PLAYER_NEUTRAL]
    if self._previous_mineral_xy in minerals:
      # Don't go for the same mineral shard as other marine.
      minerals.remove(self._previous_mineral_xy)
    if minerals:
      # Find the closest.
      distances = numpy.linalg.norm(
          numpy.array(minerals) - numpy.array(marine_xy), axis=1)
      closest_mineral_xy = minerals[numpy.argmin(distances)]
      self._last_marine = marine_unit.tag
      self._previous_mineral_xy = closest_mineral_xy
      return RAW_FUNCTIONS.Move_pt("now", marine_unit.tag, closest_mineral_xy)
    return RAW_FUNCTIONS.no_op()
class DefeatRoaches(base_agent.BaseAgent):
  """Scripted solver for the DefeatRoaches minigame."""

  def step(self, obs):
    super(DefeatRoaches, self).step(obs)
    available = obs.observation.available_actions
    if FUNCTIONS.Attack_screen.id in available:
      player_relative = obs.observation.feature_screen.player_relative
      enemy_cells = _xy_locs(player_relative == _PLAYER_ENEMY)
      if not enemy_cells:
        return FUNCTIONS.no_op()
      # Focus fire on the roach with the largest y coordinate (first one
      # wins on ties, matching numpy.argmax semantics).
      target = max(enemy_cells, key=lambda cell: cell[1])
      return FUNCTIONS.Attack_screen("now", target)
    if FUNCTIONS.select_army.id in available:
      return FUNCTIONS.select_army("select")
    return FUNCTIONS.no_op()
class DefeatRoachesRaw(base_agent.BaseAgent):
  """An agent specifically for solving DefeatRoaches using raw actions."""

  def setup(self, obs_spec, action_spec):
    super(DefeatRoachesRaw, self).setup(obs_spec, action_spec)
    if "raw_units" not in obs_spec:
      raise Exception("This agent requires the raw_units observation.")

  def step(self, obs):
    super(DefeatRoachesRaw, self).step(obs)
    marines = [unit.tag for unit in obs.observation.raw_units
               if unit.alliance == _PLAYER_SELF]
    roaches = [unit for unit in obs.observation.raw_units
               if unit.alliance == _PLAYER_ENEMY]
    if marines and roaches:
      # Find the roach with max y coord, matching DefeatRoaches above.
      # The previous code sorted ascending and took element 0 — the
      # *minimum* y — contradicting both this comment and the
      # feature-layer agent's numpy.argmax.
      target = max(roaches, key=lambda r: r.y).tag
      return RAW_FUNCTIONS.Attack_unit("now", marines, target)
    # Use the raw no-op; FUNCTIONS.no_op belongs to the feature-layer
    # interface (see CollectMineralShardsRaw above).
    return RAW_FUNCTIONS.no_op()
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/agents/__init__.py | pysc2/agents/__init__.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/agents/random_agent.py | pysc2/agents/random_agent.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A random agent for starcraft."""
import os
import random
import time
import numpy as np
from pysc2.agents import base_agent
from pysc2.lib import actions
class RandomAgent(base_agent.BaseAgent):
  """An agent that issues a uniformly random available action each step."""

  def __init__(self):
    super(RandomAgent, self).__init__()

  def step(self, obs):
    """Sample a random available function id and random arguments for it."""
    super(RandomAgent, self).step(obs)
    fn_id = np.random.choice(obs.observation.available_actions)
    fn_spec = self.action_spec.functions[fn_id]
    fn_args = []
    for arg in fn_spec.args:
      # Each argument is a vector of independent uniform samples per axis.
      fn_args.append([np.random.randint(0, size) for size in arg.sizes])
    return actions.FunctionCall(fn_id, fn_args)
# Manual smoke test: run this agent on Simple64 via the pysc2 agent runner.
if __name__ == "__main__":
  os.system("python -m pysc2.bin.agent --map Simple64 --agent_race protoss")
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/tests/actions_test.py | pysc2/tests/actions_test.py | #!/usr/bin/python
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test that various actions do what you'd expect."""
from absl.testing import absltest
from pysc2.lib import actions
from pysc2.lib import units
from pysc2.tests import utils
def raw_ability_ids(obs):
  """Return the non-zero raw unit-command ability ids found in obs.actions."""
  all_ids = [a.action_raw.unit_command.ability_id for a in obs.actions]
  return [ability_id for ability_id in all_ids if ability_id]
class ActionsTest(utils.GameReplayTestCase):
  """In-game checks that issued actions have their expected effects."""

  @utils.GameReplayTestCase.setup()
  def test_general_attack(self):
    """Generic Attack maps to Attack_Attack or Scan_Move depending on unit."""
    self.create_unit(unit_type=units.Protoss.Zealot, owner=1, pos=(30, 30))
    self.create_unit(unit_type=units.Protoss.Observer, owner=1, pos=(30, 30))

    self.step()
    obs = self.observe()

    zealot = utils.get_unit(obs[0], unit_type=units.Protoss.Zealot)
    observer = utils.get_unit(obs[0], unit_type=units.Protoss.Observer)

    # Attack-move both units together; both should end up at the target.
    self.raw_unit_command(0, "Attack_screen", (zealot.tag, observer.tag),
                          (32, 32))

    self.step(64)
    obs = self.observe()

    zealot = utils.get_unit(obs[0], unit_type=units.Protoss.Zealot)
    observer = utils.get_unit(obs[0], unit_type=units.Protoss.Observer)

    self.assert_point(zealot.pos, (32, 32))
    self.assert_point(observer.pos, (32, 32))
    self.assertEqual(
        raw_ability_ids(obs[0]),
        [actions.FUNCTIONS.Attack_Attack_screen.ability_id])

    # Attacking with only the zealot moves just the zealot.
    self.raw_unit_command(0, "Attack_screen", zealot.tag, (34, 34))

    self.step(64)
    obs = self.observe()

    zealot = utils.get_unit(obs[0], unit_type=units.Protoss.Zealot)
    observer = utils.get_unit(obs[0], unit_type=units.Protoss.Observer)

    self.assert_point(zealot.pos, (34, 34))
    self.assert_point(observer.pos, (32, 32))
    self.assertEqual(
        raw_ability_ids(obs[0]),
        [actions.FUNCTIONS.Attack_Attack_screen.ability_id])

    # The observer's generic attack resolves to Scan_Move rather than Attack.
    self.raw_unit_command(0, "Attack_screen", observer.tag, (34, 34))

    self.step(64)
    obs = self.observe()

    zealot = utils.get_unit(obs[0], unit_type=units.Protoss.Zealot)
    observer = utils.get_unit(obs[0], unit_type=units.Protoss.Observer)

    self.assert_point(zealot.pos, (34, 34))
    self.assert_point(observer.pos, (34, 34))
    self.assertEqual(
        raw_ability_ids(obs[0]),
        [actions.FUNCTIONS.Scan_Move_screen.ability_id])
# Run the tests when executed directly.
if __name__ == "__main__":
  absltest.main()
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/tests/multi_player_env_test.py | pysc2/tests/multi_player_env_test.py | #!/usr/bin/python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test that the multiplayer environment works."""
from absl.testing import absltest
from absl.testing import parameterized
from pysc2.agents import no_op_agent
from pysc2.agents import random_agent
from pysc2.env import run_loop
from pysc2.env import sc2_env
from pysc2.tests import utils
from s2clientprotocol import common_pb2 as common_pb
from s2clientprotocol import sc2api_pb2 as sc_pb
class TestMultiplayerEnv(parameterized.TestCase, utils.TestCase):
  """Smoke tests for a two-agent SC2Env under various interface formats."""

  @parameterized.named_parameters(
      ("features",
       sc2_env.AgentInterfaceFormat(
           feature_dimensions=sc2_env.Dimensions(screen=84, minimap=64))),
      ("rgb",
       sc2_env.AgentInterfaceFormat(
           rgb_dimensions=sc2_env.Dimensions(screen=84, minimap=64))),
      ("features_and_rgb", [
          sc2_env.AgentInterfaceFormat(
              feature_dimensions=sc2_env.Dimensions(screen=84, minimap=64)),
          sc2_env.AgentInterfaceFormat(
              rgb_dimensions=sc2_env.Dimensions(screen=128, minimap=32))
      ]),
      ("passthrough_and_features", [
          sc_pb.InterfaceOptions(
              raw=True,
              score=True,
              feature_layer=sc_pb.SpatialCameraSetup(
                  resolution=common_pb.Size2DI(x=84, y=84),
                  minimap_resolution=common_pb.Size2DI(x=64, y=64),
                  width=24)),
          sc2_env.AgentInterfaceFormat(
              feature_dimensions=sc2_env.Dimensions(screen=84, minimap=64))
      ]),
  )
  def test_multi_player_env(self, agent_interface_format):
    """Run two agents for a short game without errors.

    Args:
      agent_interface_format: Either one format shared by both players, or a
        list with one entry per player (an AgentInterfaceFormat or a raw
        InterfaceOptions proto).
    """
    steps = 100
    step_mul = 16
    players = 2
    if not isinstance(agent_interface_format, list):
      agent_interface_format = [agent_interface_format] * players
    with sc2_env.SC2Env(
        map_name="Simple64",
        players=[sc2_env.Agent(sc2_env.Race.random, "random"),
                 sc2_env.Agent(sc2_env.Race.random, "random")],
        step_mul=step_mul,
        game_steps_per_episode=steps * step_mul // 2,
        agent_interface_format=agent_interface_format) as env:
      # AgentInterfaceFormat players get a random agent; raw proto players
      # get a no-op agent.
      agents = [
          random_agent.RandomAgent() if isinstance(
              aif, sc2_env.AgentInterfaceFormat) else no_op_agent.NoOpAgent()
          for aif in agent_interface_format
      ]
      run_loop.run_loop(agents, env, steps)
# Run the tests when executed directly.
if __name__ == "__main__":
  absltest.main()
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/tests/render_test.py | pysc2/tests/render_test.py | #!/usr/bin/python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Verify that the game renders rgb pixels."""
from absl.testing import absltest
import numpy as np
from pysc2 import maps
from pysc2 import run_configs
from pysc2.lib import features
from pysc2.tests import utils
from s2clientprotocol import common_pb2 as sc_common
from s2clientprotocol import sc2api_pb2 as sc_pb
class TestRender(utils.TestCase):
  """Verify that the game produces both rgb pixels and feature layers."""

  def test_render(self):
    """Request rgb + feature layers and check shapes and non-empty content."""
    # Request raw, feature-layer and rendered (rgb) interfaces together.
    interface = sc_pb.InterfaceOptions()
    interface.raw = True
    interface.score = True
    interface.feature_layer.width = 24
    interface.feature_layer.resolution.x = 84
    interface.feature_layer.resolution.y = 84
    interface.feature_layer.minimap_resolution.x = 64
    interface.feature_layer.minimap_resolution.y = 64
    interface.feature_layer.crop_to_playable_area = True
    interface.feature_layer.allow_cheating_layers = True
    interface.render.resolution.x = 256
    interface.render.resolution.y = 256
    interface.render.minimap_resolution.x = 128
    interface.render.minimap_resolution.y = 128

    def or_zeros(layer, size):
      # A feature layer may be absent (None); substitute an all-zero layer so
      # np.stack below always gets consistent shapes.
      if layer is not None:
        return layer.astype(np.int32, copy=False)
      else:
        return np.zeros((size.y, size.x), dtype=np.int32)

    run_config = run_configs.get()
    with run_config.start() as controller:
      map_inst = maps.get("Simple64")
      create = sc_pb.RequestCreateGame(
          realtime=False, disable_fog=False,
          local_map=sc_pb.LocalMap(map_path=map_inst.path,
                                   map_data=map_inst.data(run_config)))
      create.player_setup.add(type=sc_pb.Participant)
      create.player_setup.add(
          type=sc_pb.Computer, race=sc_common.Random, difficulty=sc_pb.VeryEasy)
      join = sc_pb.RequestJoinGame(race=sc_common.Random, options=interface)

      controller.create_game(create)
      controller.join_game(join)

      # The game should echo back the interface options we asked for.
      game_info = controller.game_info()
      self.assertEqual(interface.raw, game_info.options.raw)
      self.assertEqual(interface.feature_layer, game_info.options.feature_layer)
      # Can fail if rendering is disabled.
      self.assertEqual(interface.render, game_info.options.render)

      for _ in range(50):
        controller.step(8)
        observation = controller.observe()

        obs = observation.observation
        rgb_screen = features.Feature.unpack_rgb_image(obs.render_data.map)
        rgb_minimap = features.Feature.unpack_rgb_image(obs.render_data.minimap)
        fl_screen = np.stack(
            [or_zeros(f.unpack(obs), interface.feature_layer.resolution)
             for f in features.SCREEN_FEATURES])
        fl_minimap = np.stack(
            [or_zeros(f.unpack(obs), interface.feature_layer.minimap_resolution)
             for f in features.MINIMAP_FEATURES])

        # Right shapes.
        self.assertEqual(rgb_screen.shape, (256, 256, 3))
        self.assertEqual(rgb_minimap.shape, (128, 128, 3))
        self.assertEqual(fl_screen.shape,
                         (len(features.SCREEN_FEATURES), 84, 84))
        self.assertEqual(fl_minimap.shape,
                         (len(features.MINIMAP_FEATURES), 64, 64))

        # Not all black.
        self.assertTrue(rgb_screen.any())
        self.assertTrue(rgb_minimap.any())
        self.assertTrue(fl_screen.any())
        self.assertTrue(fl_minimap.any())

        if observation.player_result:
          break
# Run the tests when executed directly.
if __name__ == "__main__":
  absltest.main()
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/tests/step_mul_override_test.py | pysc2/tests/step_mul_override_test.py | #!/usr/bin/python
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test that stepping without observing works correctly for multiple players."""
from absl.testing import absltest
from pysc2.env import sc2_env
from pysc2.lib import actions
from pysc2.tests import utils
# Shared interface format for every test below: small 32x32 feature layers.
AGENT_INTERFACE_FORMAT = sc2_env.AgentInterfaceFormat(
    feature_dimensions=sc2_env.Dimensions(screen=32, minimap=32)
)
class StepMulOverrideTest(utils.TestCase):
  """Tests for the per-call step_mul override on SC2Env.step."""

  def test_returns_game_loop_zero_on_first_step_despite_override(self):
    """The first observation is at game loop 0 regardless of the override."""
    with sc2_env.SC2Env(
        map_name="DefeatRoaches",
        players=[sc2_env.Agent(sc2_env.Race.random)],
        step_mul=1,
        agent_interface_format=AGENT_INTERFACE_FORMAT) as env:
      timestep = env.step(
          actions=[actions.FUNCTIONS.no_op()],
          step_mul=1234)

      self.assertEqual(
          timestep[0].observation.game_loop[0],
          0)

  def test_respects_override(self):
    """Each step advances the game loop by exactly the per-call override."""
    with sc2_env.SC2Env(
        map_name="DefeatRoaches",
        players=[sc2_env.Agent(sc2_env.Race.random)],
        step_mul=1,
        agent_interface_format=AGENT_INTERFACE_FORMAT) as env:
      expected_game_loop = 0
      for delta in range(10):
        timestep = env.step(
            actions=[actions.FUNCTIONS.no_op()],
            step_mul=delta)
        expected_game_loop += delta
        self.assertEqual(
            timestep[0].observation.game_loop[0],
            expected_game_loop)
# Run the tests when executed directly.
if __name__ == "__main__":
  absltest.main()
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/tests/obs_test.py | pysc2/tests/obs_test.py | #!/usr/bin/python
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test that various observations do what you'd expect."""
from absl.testing import absltest
from pysc2.lib import actions
from pysc2.lib import buffs
from pysc2.lib import features
from pysc2.lib import units
from pysc2.tests import utils
from s2clientprotocol import debug_pb2 as sc_debug
from s2clientprotocol import raw_pb2 as sc_raw
# It seems the time from issuing an action until it has an effect is 2 frames.
# It'd be nice if that was faster, and it is 1 in single-player, but in
# multi-player it seems it needs to be propagated to the host and back, which
# takes 2 steps minimum. Unfortunately this also includes camera moves.
EXPECTED_ACTION_DELAY = 2  # In game loops; relied on by test_action_delay.
class ObsTest(utils.GameReplayTestCase):
  @utils.GameReplayTestCase.setup()
  def test_hallucination(self):
    """A hallucination is only revealed to the enemy once they have detection."""
    self.god()

    # Create some sentries.
    self.create_unit(unit_type=units.Protoss.Sentry, owner=1, pos=(30, 30))
    self.create_unit(unit_type=units.Protoss.Sentry, owner=2, pos=(30, 28))

    self.step()
    obs = self.observe()

    # Give one enough energy.
    tag = utils.get_unit(obs[0], unit_type=units.Protoss.Sentry, owner=1).tag
    self.debug(unit_value=sc_debug.DebugSetUnitValue(
        unit_value=sc_debug.DebugSetUnitValue.Energy, value=200, unit_tag=tag))

    self.step()
    obs = self.observe()

    # Create a hallucinated archon.
    self.raw_unit_command(0, "Hallucination_Archon_quick", tag)

    self.step()
    obs = self.observe()

    # Verify the owner knows it's a hallucination, but the opponent doesn't.
    p1 = utils.get_unit(obs[0], unit_type=units.Protoss.Archon)
    p2 = utils.get_unit(obs[1], unit_type=units.Protoss.Archon)
    self.assertTrue(p1.is_hallucination)
    self.assertFalse(p2.is_hallucination)

    # Create an observer so the opponent has detection.
    self.create_unit(unit_type=units.Protoss.Observer, owner=2, pos=(28, 30))

    self.step()
    obs = self.observe()

    # Verify the opponent now also knows it's a hallucination.
    p1 = utils.get_unit(obs[0], unit_type=units.Protoss.Archon)
    p2 = utils.get_unit(obs[1], unit_type=units.Protoss.Archon)
    self.assertTrue(p1.is_hallucination)
    self.assertTrue(p2.is_hallucination)
  @utils.GameReplayTestCase.setup(show_cloaked=False)
  def test_hide_cloaked(self):
    """With show_cloaked=False an undetected cloaked enemy is fully hidden."""
    self.assertFalse(self._info.options.show_cloaked)

    self.god()
    self.move_camera(32, 32)

    # Create some units. One cloaked, one to see it without detection.
    self.create_unit(unit_type=units.Protoss.DarkTemplar, owner=1, pos=(30, 30))
    self.create_unit(unit_type=units.Protoss.Sentry, owner=2, pos=(28, 30))

    self.step(16)
    obs = self.observe()

    # The owner sees full details; the opponent sees nothing at all.
    p1 = utils.get_unit(obs[0], unit_type=units.Protoss.DarkTemplar)
    p2 = utils.get_unit(obs[1], unit_type=units.Protoss.DarkTemplar)
    self.assert_unit(p1, display_type=sc_raw.Visible, health=40, shield=80,
                     cloak=sc_raw.CloakedAllied)
    self.assertIsNone(p2)

    screen1 = self._features.transform_obs(obs[0])["feature_screen"]
    screen2 = self._features.transform_obs(obs[1])["feature_screen"]
    dt = utils.xy_locs(screen1.unit_type == units.Protoss.DarkTemplar)[0]
    self.assert_layers(screen1, dt, unit_type=units.Protoss.DarkTemplar,
                       unit_hit_points=40, unit_shields=80, cloaked=1)
    self.assert_layers(screen2, dt, unit_type=0,
                       unit_hit_points=0, unit_shields=0, cloaked=0)

    # Create an observer so the opponent has detection.
    self.create_unit(unit_type=units.Protoss.Observer, owner=2, pos=(28, 28))

    self.step(16)  # It takes a few frames for the observer to detect.
    obs = self.observe()

    # Verify both can see it, with the same details.
    p1 = utils.get_unit(obs[0], unit_type=units.Protoss.DarkTemplar)
    p2 = utils.get_unit(obs[1], unit_type=units.Protoss.DarkTemplar)
    self.assert_unit(p1, display_type=sc_raw.Visible, health=40, shield=80,
                     cloak=sc_raw.CloakedAllied)
    self.assert_unit(p2, display_type=sc_raw.Visible, health=40, shield=80,
                     cloak=sc_raw.CloakedDetected)

    screen1 = self._features.transform_obs(obs[0])["feature_screen"]
    screen2 = self._features.transform_obs(obs[1])["feature_screen"]
    dt = utils.xy_locs(screen1.unit_type == units.Protoss.DarkTemplar)[0]
    self.assert_layers(screen1, dt, unit_type=units.Protoss.DarkTemplar,
                       unit_hit_points=40, unit_shields=80, cloaked=1)
    self.assert_layers(screen2, dt, unit_type=units.Protoss.DarkTemplar,
                       unit_hit_points=40, unit_shields=80, cloaked=1)
  @utils.GameReplayTestCase.setup()
  def test_show_cloaked(self):
    """With show_cloaked=True the enemy sees a cloaked blip without details."""
    self.assertTrue(self._info.options.show_cloaked)

    self.god()
    self.move_camera(32, 32)

    # Create some units. One cloaked, one to see it without detection.
    self.create_unit(unit_type=units.Protoss.DarkTemplar, owner=1, pos=(30, 30))
    self.create_unit(unit_type=units.Protoss.Sentry, owner=2, pos=(28, 30))

    self.step(16)
    obs = self.observe()

    # Verify both can see it, but that only the owner knows details.
    p1 = utils.get_unit(obs[0], unit_type=units.Protoss.DarkTemplar)
    p2 = utils.get_unit(obs[1], unit_type=units.Protoss.DarkTemplar)
    self.assert_unit(p1, display_type=sc_raw.Visible, health=40, shield=80,
                     cloak=sc_raw.CloakedAllied)
    self.assert_unit(p2, display_type=sc_raw.Hidden, health=0, shield=0,
                     cloak=sc_raw.Cloaked)

    screen1 = self._features.transform_obs(obs[0])["feature_screen"]
    screen2 = self._features.transform_obs(obs[1])["feature_screen"]
    dt = utils.xy_locs(screen1.unit_type == units.Protoss.DarkTemplar)[0]
    self.assert_layers(screen1, dt, unit_type=units.Protoss.DarkTemplar,
                       unit_hit_points=40, unit_shields=80, cloaked=1)
    self.assert_layers(screen2, dt, unit_type=units.Protoss.DarkTemplar,
                       unit_hit_points=0, unit_shields=0, cloaked=1)

    # Create an observer so the opponent has detection.
    self.create_unit(unit_type=units.Protoss.Observer, owner=2, pos=(28, 28))

    self.step(16)  # It takes a few frames for the observer to detect.
    obs = self.observe()

    # Verify both can see it, with the same details.
    p1 = utils.get_unit(obs[0], unit_type=units.Protoss.DarkTemplar)
    p2 = utils.get_unit(obs[1], unit_type=units.Protoss.DarkTemplar)
    self.assert_unit(p1, display_type=sc_raw.Visible, health=40, shield=80,
                     cloak=sc_raw.CloakedAllied)
    self.assert_unit(p2, display_type=sc_raw.Visible, health=40, shield=80,
                     cloak=sc_raw.CloakedDetected)

    screen1 = self._features.transform_obs(obs[0])["feature_screen"]
    screen2 = self._features.transform_obs(obs[1])["feature_screen"]
    dt = utils.xy_locs(screen1.unit_type == units.Protoss.DarkTemplar)[0]
    self.assert_layers(screen1, dt, unit_type=units.Protoss.DarkTemplar,
                       unit_hit_points=40, unit_shields=80, cloaked=1)
    self.assert_layers(screen2, dt, unit_type=units.Protoss.DarkTemplar,
                       unit_hit_points=40, unit_shields=80, cloaked=1)
  @utils.GameReplayTestCase.setup()
  def test_pos(self):
    """Raw unit positions, z-order and radii behave as expected."""
    self.create_unit(unit_type=units.Protoss.Archon, owner=1, pos=(20, 30))
    self.create_unit(unit_type=units.Protoss.Observer, owner=1, pos=(40, 30))

    self.step()
    obs = self.observe()

    archon = utils.get_unit(obs[0], unit_type=units.Protoss.Archon)
    observer = utils.get_unit(obs[0], unit_type=units.Protoss.Observer)

    self.assert_point(archon.pos, (20, 30))
    self.assert_point(observer.pos, (40, 30))
    self.assertLess(archon.pos.z, observer.pos.z)  # The observer flies.
    self.assertGreater(archon.radius, observer.radius)

    # Move them towards the center, make sure they move.
    self.raw_unit_command(0, "Move_screen", (archon.tag, observer.tag),
                          (30, 25))

    self.step(40)
    obs2 = self.observe()

    archon2 = utils.get_unit(obs2[0], unit_type=units.Protoss.Archon)
    observer2 = utils.get_unit(obs2[0], unit_type=units.Protoss.Observer)

    self.assertGreater(archon2.pos.x, 20)
    self.assertLess(observer2.pos.x, 40)
    self.assertLess(archon2.pos.z, observer2.pos.z)
  @utils.GameReplayTestCase.setup()
  def test_fog(self):
    """observe(disable_fog=True) reveals enemy units as hidden snapshots."""
    obs = self.observe()

    def assert_visible(unit, display_type, alliance, cloak):
      # Shorthand for the repeated visibility checks below.
      self.assert_unit(unit, display_type=display_type, alliance=alliance,
                       cloak=cloak)

    self.create_unit(unit_type=units.Protoss.Sentry, owner=1, pos=(30, 32))
    self.create_unit(unit_type=units.Protoss.DarkTemplar, owner=1, pos=(32, 32))

    self.step()
    obs = self.observe()

    # With fog, only the owner sees the new units.
    assert_visible(utils.get_unit(obs[0], unit_type=units.Protoss.Sentry),
                   sc_raw.Visible, sc_raw.Self, sc_raw.NotCloaked)
    assert_visible(utils.get_unit(obs[0], unit_type=units.Protoss.DarkTemplar),
                   sc_raw.Visible, sc_raw.Self, sc_raw.CloakedAllied)
    self.assertIsNone(utils.get_unit(obs[1], unit_type=units.Protoss.Sentry))
    self.assertIsNone(utils.get_unit(obs[1],
                                     unit_type=units.Protoss.DarkTemplar))

    obs = self.observe(disable_fog=True)

    # Without fog, the opponent sees them too, as hidden enemy units.
    assert_visible(utils.get_unit(obs[0], unit_type=units.Protoss.Sentry),
                   sc_raw.Visible, sc_raw.Self, sc_raw.NotCloaked)
    assert_visible(utils.get_unit(obs[0], unit_type=units.Protoss.DarkTemplar),
                   sc_raw.Visible, sc_raw.Self, sc_raw.CloakedAllied)
    assert_visible(utils.get_unit(obs[1], unit_type=units.Protoss.Sentry),
                   sc_raw.Hidden, sc_raw.Enemy, sc_raw.CloakedUnknown)
    assert_visible(utils.get_unit(obs[1], unit_type=units.Protoss.DarkTemplar),
                   sc_raw.Hidden, sc_raw.Enemy, sc_raw.CloakedUnknown)
  @utils.GameReplayTestCase.setup()
  def test_effects(self):
    """Guardian shield and graviton beam appear in both players' obs."""
    def get_effect_proto(obs, effect_id):
      # Find an effect by id in the raw proto observation, or None.
      for e in obs.observation.raw_data.effects:
        if e.effect_id == effect_id:
          return e
      return None

    def get_effect_obs(obs, effect_id):
      # Find an effect by id in the transformed "raw_effects" list, or None.
      for e in obs:
        if e.effect == effect_id:
          return e
      return None

    self.god()
    self.move_camera(32, 32)

    # Create some sentries.
    self.create_unit(unit_type=units.Protoss.Sentry, owner=1, pos=(30, 30))
    self.create_unit(unit_type=units.Protoss.Stalker, owner=1, pos=(28, 30))
    self.create_unit(unit_type=units.Protoss.Phoenix, owner=2, pos=(30, 28))

    self.step()
    obs = self.observe()

    # Give enough energy.
    sentry = utils.get_unit(obs[0], unit_type=units.Protoss.Sentry)
    stalker = utils.get_unit(obs[0], unit_type=units.Protoss.Stalker)
    pheonix = utils.get_unit(obs[0], unit_type=units.Protoss.Phoenix)
    self.set_energy(sentry.tag, 200)
    self.set_energy(pheonix.tag, 200)

    self.step()
    obs = self.observe()

    self.raw_unit_command(0, "Effect_GuardianShield_quick", sentry.tag)

    self.step(16)
    obs = self.observe()

    # The shield buff covers the sentry and nearby stalker, not the phoenix.
    self.assertIn(buffs.Buffs.GuardianShield,
                  utils.get_unit(obs[0], tag=sentry.tag).buff_ids)
    self.assertIn(buffs.Buffs.GuardianShield,
                  utils.get_unit(obs[1], tag=sentry.tag).buff_ids)
    self.assertIn(buffs.Buffs.GuardianShield,
                  utils.get_unit(obs[0], tag=stalker.tag).buff_ids)
    self.assertIn(buffs.Buffs.GuardianShield,
                  utils.get_unit(obs[1], tag=stalker.tag).buff_ids)
    self.assertNotIn(buffs.Buffs.GuardianShield,
                     utils.get_unit(obs[0], tag=pheonix.tag).buff_ids)
    self.assertNotIn(buffs.Buffs.GuardianShield,
                     utils.get_unit(obs[1], tag=pheonix.tag).buff_ids)

    # Both players should see the shield.
    e = get_effect_proto(obs[0], features.Effects.GuardianShield)
    self.assertIsNotNone(e)
    self.assert_point(e.pos[0], (30, 30))
    self.assertEqual(e.alliance, sc_raw.Self)
    self.assertEqual(e.owner, 1)
    self.assertGreater(e.radius, 3)

    e = get_effect_proto(obs[1], features.Effects.GuardianShield)
    self.assertIsNotNone(e)
    self.assert_point(e.pos[0], (30, 30))
    self.assertEqual(e.alliance, sc_raw.Enemy)
    self.assertEqual(e.owner, 1)
    self.assertGreater(e.radius, 3)

    # Should show up on the feature layers too.
    transformed_obs1 = self._features.transform_obs(obs[0])
    transformed_obs2 = self._features.transform_obs(obs[1])
    screen1 = transformed_obs1["feature_screen"]
    screen2 = transformed_obs2["feature_screen"]
    sentry_pos = utils.xy_locs(screen1.unit_type == units.Protoss.Sentry)[0]
    self.assert_layers(screen1, sentry_pos, unit_type=units.Protoss.Sentry,
                       effects=features.Effects.GuardianShield,
                       buffs=buffs.Buffs.GuardianShield)
    self.assert_layers(screen2, sentry_pos, unit_type=units.Protoss.Sentry,
                       effects=features.Effects.GuardianShield,
                       buffs=buffs.Buffs.GuardianShield)
    phoenix_pos = utils.xy_locs(screen1.unit_type == units.Protoss.Phoenix)[0]
    self.assert_layers(screen1, phoenix_pos, unit_type=units.Protoss.Phoenix,
                       effects=features.Effects.GuardianShield, buffs=0)
    self.assert_layers(screen2, phoenix_pos, unit_type=units.Protoss.Phoenix,
                       effects=features.Effects.GuardianShield, buffs=0)

    # Also in the raw_effects.
    raw1 = transformed_obs1["raw_effects"]
    e = get_effect_obs(raw1, features.Effects.GuardianShield)
    self.assertIsNotNone(e)
    # Not located at (30, 30) due to map shape and minimap coords.
    self.assertGreater(e.x, 20)
    self.assertGreater(e.y, 20)
    self.assertEqual(e.alliance, sc_raw.Self)
    self.assertEqual(e.owner, 1)
    self.assertGreater(e.radius, 3)

    self.raw_unit_command(1, "Effect_GravitonBeam_screen", pheonix.tag,
                          target=stalker.tag)

    self.step(32)
    obs = self.observe()

    # Graviton beam buffs only the targeted stalker.
    self.assertIn(buffs.Buffs.GravitonBeam,
                  utils.get_unit(obs[0], tag=stalker.tag).buff_ids)
    self.assertIn(buffs.Buffs.GravitonBeam,
                  utils.get_unit(obs[1], tag=stalker.tag).buff_ids)
    self.assertNotIn(buffs.Buffs.GravitonBeam,
                     utils.get_unit(obs[0], tag=sentry.tag).buff_ids)
    self.assertNotIn(buffs.Buffs.GravitonBeam,
                     utils.get_unit(obs[1], tag=sentry.tag).buff_ids)
    self.assertNotIn(buffs.Buffs.GravitonBeam,
                     utils.get_unit(obs[0], tag=pheonix.tag).buff_ids)
    self.assertNotIn(buffs.Buffs.GravitonBeam,
                     utils.get_unit(obs[1], tag=pheonix.tag).buff_ids)
  @utils.GameReplayTestCase.setup()
  def test_active(self):
    """is_active and order visibility for own, enemy and snapshot units."""
    obs = self.observe()

    # P1 can see P2.
    self.create_unit(
        unit_type=units.Protoss.Observer, owner=1,
        pos=utils.get_unit(obs[1], unit_type=units.Protoss.Nexus).pos)

    self.step(32)  # Make sure visibility updates.
    obs = self.observe()

    for i, o in enumerate(obs):
      # Probes are active gathering.
      for u in utils.get_units(o, unit_type=units.Protoss.Probe).values():
        self.assert_unit(u, display_type=sc_raw.Visible, is_active=True)

      # Own Nexus is idle.
      nexus = utils.get_unit(o, unit_type=units.Protoss.Nexus, owner=i+1)
      self.assert_unit(nexus, display_type=sc_raw.Visible, is_active=False)
      self.assertEmpty(nexus.orders)

      # Give it an action.
      self.raw_unit_command(i, "Train_Probe_quick", nexus.tag)

    # P1 can tell P2's Nexus is idle.
    nexus = utils.get_unit(obs[0], unit_type=units.Protoss.Nexus, owner=2)
    self.assert_unit(nexus, display_type=sc_raw.Visible, is_active=False)

    # Observer is idle.
    self.assert_unit(utils.get_unit(obs[0], unit_type=units.Protoss.Observer),
                     display_type=sc_raw.Visible, is_active=False)
    self.assert_unit(utils.get_unit(obs[1], unit_type=units.Protoss.Observer),
                     display_type=sc_raw.Hidden, is_active=False)

    self.step(32)
    obs = self.observe()

    # All Nexus are now active.
    nexus0 = utils.get_unit(obs[0], unit_type=units.Protoss.Nexus, owner=1)
    nexus1 = utils.get_unit(obs[0], unit_type=units.Protoss.Nexus, owner=2)
    nexus2 = utils.get_unit(obs[1], unit_type=units.Protoss.Nexus)
    self.assert_unit(nexus0, display_type=sc_raw.Visible, is_active=True)
    self.assert_unit(nexus1, display_type=sc_raw.Visible, is_active=True)
    self.assert_unit(nexus2, display_type=sc_raw.Visible, is_active=True)
    self.assertLen(nexus0.orders, 1)
    self.assertLen(nexus2.orders, 1)
    self.assertEmpty(nexus1.orders)  # Can't see opponent's orders

    # Go back to a snapshot
    self.kill_unit(utils.get_unit(obs[0], unit_type=units.Protoss.Observer).tag)

    self.step(100)  # Make sure visibility updates.
    obs = self.observe()

    self.assertIsNone(utils.get_unit(obs[0], unit_type=units.Protoss.Observer))

    # Own Nexus is now active, snapshot isn't.
    nexus0 = utils.get_unit(obs[0], unit_type=units.Protoss.Nexus, owner=1)
    nexus1 = utils.get_unit(obs[0], unit_type=units.Protoss.Nexus, owner=2)
    nexus2 = utils.get_unit(obs[1], unit_type=units.Protoss.Nexus)
    self.assert_unit(nexus0, display_type=sc_raw.Visible, is_active=True)
    self.assert_unit(nexus1, display_type=sc_raw.Snapshot, is_active=False)
    self.assert_unit(nexus2, display_type=sc_raw.Visible, is_active=True)
    self.assertLen(nexus0.orders, 1)
    self.assertLen(nexus2.orders, 1)
    self.assertEmpty(nexus1.orders)  # Can't see opponent's orders
  @utils.GameReplayTestCase.setup(disable_fog=True)
  def test_disable_fog(self):
    """With fog disabled both players see all units, but orders stay private."""
    obs = self.observe()

    for i, o in enumerate(obs):
      # Probes are active gathering.
      for u in utils.get_units(o, unit_type=units.Protoss.Probe).values():
        self.assert_unit(u, display_type=sc_raw.Visible, is_active=True)

      # All Nexus are idle.
      own = utils.get_unit(o, unit_type=units.Protoss.Nexus, owner=i+1)
      other = utils.get_unit(o, unit_type=units.Protoss.Nexus, owner=2-i)
      self.assert_unit(own, display_type=sc_raw.Visible, is_active=False)
      self.assert_unit(other, display_type=sc_raw.Visible, is_active=False)
      self.assertEmpty(own.orders)
      self.assertEmpty(other.orders)

      # Give it an action.
      self.raw_unit_command(i, "Train_Probe_quick", own.tag)

    self.step(32)
    obs = self.observe()

    # All Nexus are active.
    for i, o in enumerate(obs):
      own = utils.get_unit(o, unit_type=units.Protoss.Nexus, owner=i+1)
      other = utils.get_unit(o, unit_type=units.Protoss.Nexus, owner=2-i)
      self.assert_unit(own, display_type=sc_raw.Visible, is_active=True)
      self.assert_unit(other, display_type=sc_raw.Visible, is_active=True)
      self.assertLen(own.orders, 1)
      self.assertEmpty(other.orders)  # Orders remain private to the owner.
  @utils.GameReplayTestCase.setup()
  def test_action_delay(self):
    """Issued actions show up in obs after EXPECTED_ACTION_DELAY game loops."""
    self.observe()
    self.create_unit(unit_type=units.Protoss.Zealot, owner=1, pos=(32, 32))
    self.step(16)
    obs1 = self.observe()

    self.assertLen(obs1[0].actions, 0)
    zealot1 = utils.get_unit(obs1[0], unit_type=units.Protoss.Zealot, owner=1)
    self.assertLen(zealot1.orders, 0)

    self.raw_unit_command(0, "Move_screen", zealot1.tag, (30, 30))

    # If the delay is taken down to 1, remove this first step of verifying the
    # actions length is 0.
    self.assertEqual(EXPECTED_ACTION_DELAY, 2)
    self.step(1)
    obs2 = self.observe()
    self.assertLen(obs2[0].action_errors, 0)
    self.assertLen(obs2[0].actions, 0)

    self.step(1)
    obs2 = self.observe()
    self.assertLen(obs2[0].action_errors, 0)
    self.assertGreaterEqual(len(obs2[0].actions), 1)
    # Find the first action carrying a raw payload; fail if none exists.
    for action in obs2[0].actions:
      if action.HasField("action_raw"):
        break
    else:
      self.assertFalse("No raw action found")

    self.assertEqual(action.game_loop, obs1[0].observation.game_loop+1)  # pylint: disable=undefined-loop-variable

    unit_command = action.action_raw.unit_command  # pylint: disable=undefined-loop-variable
    self.assertEqual(unit_command.ability_id,
                     actions.FUNCTIONS.Move_Move_screen.ability_id)
    self.assert_point(unit_command.target_world_space_pos, (30, 30))
    self.assertEqual(unit_command.unit_tags[0], zealot1.tag)

    # The zealot's own order list reflects the issued move as well.
    zealot2 = utils.get_unit(obs2[0], unit_type=units.Protoss.Zealot, owner=1)
    self.assertLen(zealot2.orders, 1)
    self.assertEqual(zealot2.orders[0].ability_id,
                     actions.FUNCTIONS.Move_Move_screen.ability_id)
    self.assert_point(zealot2.orders[0].target_world_space_pos, (30, 30))
@utils.GameReplayTestCase.setup()
def test_camera_movement_delay(self):
obs1 = self.observe()
screen1 = self._features.transform_obs(obs1[0])["feature_screen"]
nexus1 = utils.xy_locs(screen1.unit_type == units.Protoss.Nexus)
self.step(1)
obs2 = self.observe()
screen2 = self._features.transform_obs(obs2[0])["feature_screen"]
nexus2 = utils.xy_locs(screen2.unit_type == units.Protoss.Nexus)
self.assertEqual(nexus1, nexus2) # Same place.
loc = obs1[0].observation.raw_data.player.camera
self.move_camera(loc.x + 3, loc.y + 3)
self.step(EXPECTED_ACTION_DELAY + 1)
obs3 = self.observe()
screen3 = self._features.transform_obs(obs3[0])["feature_screen"]
nexus3 = utils.xy_locs(screen3.unit_type == units.Protoss.Nexus)
self.assertNotEqual(nexus1, nexus3) # Different location due to camera.
if __name__ == "__main__":
absltest.main()
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/tests/multi_player_test.py | pysc2/tests/multi_player_test.py | #!/usr/bin/python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test that multiplayer works independently of the SC2Env."""
import os
from absl import logging
from absl.testing import absltest
from pysc2 import maps
from pysc2 import run_configs
from pysc2.lib import point
from pysc2.lib import portspicker
from pysc2.lib import run_parallel
from pysc2.tests import utils
from s2clientprotocol import common_pb2 as sc_common
from s2clientprotocol import sc2api_pb2 as sc_pb
def print_stage(stage):
logging.info((" %s " % stage).center(80, "-"))
class TestMultiplayer(utils.TestCase):
def test_multi_player(self):
players = 2
run_config = run_configs.get()
parallel = run_parallel.RunParallel()
map_inst = maps.get("Simple64")
screen_size_px = point.Point(64, 64)
minimap_size_px = point.Point(32, 32)
interface = sc_pb.InterfaceOptions()
screen_size_px.assign_to(interface.feature_layer.resolution)
minimap_size_px.assign_to(interface.feature_layer.minimap_resolution)
# Reserve a whole bunch of ports for the weird multiplayer implementation.
ports = portspicker.pick_unused_ports(players * 2)
logging.info("Valid Ports: %s", ports)
# Actually launch the game processes.
print_stage("start")
sc2_procs = [run_config.start(extra_ports=ports, want_rgb=False)
for _ in range(players)]
controllers = [p.controller for p in sc2_procs]
try:
# Save the maps so they can access it.
map_path = os.path.basename(map_inst.path)
print_stage("save_map")
for c in controllers: # Skip parallel due to a race condition on Windows.
c.save_map(map_path, map_inst.data(run_config))
# Create the create request.
create = sc_pb.RequestCreateGame(
local_map=sc_pb.LocalMap(map_path=map_path))
for _ in range(players):
create.player_setup.add(type=sc_pb.Participant)
# Create the join request.
join = sc_pb.RequestJoinGame(race=sc_common.Random, options=interface)
join.shared_port = 0 # unused
join.server_ports.game_port = ports[0]
join.server_ports.base_port = ports[1]
join.client_ports.add(game_port=ports[2], base_port=ports[3])
# Play a few short games.
for _ in range(2): # 2 episodes
# Create and Join
print_stage("create")
controllers[0].create_game(create)
print_stage("join")
parallel.run((c.join_game, join) for c in controllers)
print_stage("run")
for game_loop in range(1, 10): # steps per episode
# Step the game
parallel.run(c.step for c in controllers)
# Observe
obs = parallel.run(c.observe for c in controllers)
for p_id, o in enumerate(obs):
self.assertEqual(o.observation.game_loop, game_loop)
self.assertEqual(o.observation.player_common.player_id, p_id + 1)
# Act
actions = [sc_pb.Action() for _ in range(players)]
for action in actions:
pt = (point.Point.unit_rand() * minimap_size_px).floor()
pt.assign_to(action.action_feature_layer.camera_move.center_minimap)
parallel.run((c.act, a) for c, a in zip(controllers, actions))
# Done this game.
print_stage("leave")
parallel.run(c.leave for c in controllers)
finally:
print_stage("quit")
# Done, shut down. Don't depend on parallel since it might be broken.
for c in controllers:
c.quit()
for p in sc2_procs:
p.close()
portspicker.return_ports(ports)
parallel.shutdown()
if __name__ == "__main__":
absltest.main()
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/tests/protocol_error_test.py | pysc2/tests/protocol_error_test.py | #!/usr/bin/python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Verify that we blow up if SC2 thinks we did something wrong."""
from absl.testing import absltest
from pysc2 import maps
from pysc2 import run_configs
from pysc2.lib import protocol
from pysc2.lib import remote_controller
from pysc2.tests import utils
from s2clientprotocol import common_pb2 as sc_common
from s2clientprotocol import sc2api_pb2 as sc_pb
class TestProtocolError(utils.TestCase):
"""Verify that we blow up if SC2 thinks we did something wrong."""
def test_error(self):
with run_configs.get().start(want_rgb=False) as controller:
with self.assertRaises(remote_controller.RequestError):
controller.create_game(sc_pb.RequestCreateGame()) # Missing map, etc.
with self.assertRaises(protocol.ProtocolError):
controller.join_game(sc_pb.RequestJoinGame()) # No game to join.
def test_replay_a_replay(self):
run_config = run_configs.get()
with run_config.start(want_rgb=False) as controller:
map_inst = maps.get("Flat64")
map_data = map_inst.data(run_config)
interface = sc_pb.InterfaceOptions(raw=True)
# Play a quick game to generate a replay.
create = sc_pb.RequestCreateGame(
local_map=sc_pb.LocalMap(
map_path=map_inst.path, map_data=map_data))
create.player_setup.add(type=sc_pb.Participant)
create.player_setup.add(type=sc_pb.Computer, race=sc_common.Terran,
difficulty=sc_pb.VeryEasy)
join = sc_pb.RequestJoinGame(race=sc_common.Terran, options=interface)
controller.create_game(create)
controller.join_game(join)
controller.step(100)
obs = controller.observe()
replay_data = controller.save_replay()
# Run through the replay verifying that it finishes but wasn't recording
# a replay.
start_replay = sc_pb.RequestStartReplay(
replay_data=replay_data,
map_data=map_data,
options=interface,
observed_player_id=1)
controller.start_replay(start_replay)
controller.step(1000)
obs2 = controller.observe()
self.assertEqual(obs.observation.game_loop, obs2.observation.game_loop)
with self.assertRaises(protocol.ProtocolError):
controller.save_replay()
if __name__ == "__main__":
absltest.main()
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/tests/ping_test.py | pysc2/tests/ping_test.py | #!/usr/bin/python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Benchmark the ping rate of SC2."""
from absl.testing import absltest
from pysc2 import run_configs
from pysc2.lib import stopwatch
from pysc2.tests import utils
class TestPing(utils.TestCase):
def test_ping(self):
count = 100
with run_configs.get().start(want_rgb=False) as controller:
with stopwatch.sw("first"):
controller.ping()
for _ in range(count):
controller.ping()
self.assertEqual(stopwatch.sw["ping"].num, count)
if __name__ == "__main__":
absltest.main()
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/tests/utils.py | pysc2/tests/utils.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit test tools."""
import functools
from absl import logging
from absl.testing import absltest
from pysc2 import maps
from pysc2 import run_configs
from pysc2.lib import actions
from pysc2.lib import features
from pysc2.lib import point
from pysc2.lib import portspicker
from pysc2.lib import run_parallel
from pysc2.lib import stopwatch
from s2clientprotocol import common_pb2 as sc_common
from s2clientprotocol import debug_pb2 as sc_debug
from s2clientprotocol import error_pb2 as sc_error
from s2clientprotocol import raw_pb2 as sc_raw
from s2clientprotocol import sc2api_pb2 as sc_pb
class TestCase(absltest.TestCase):
"""A test base class that enables stopwatch profiling."""
def setUp(self):
super(TestCase, self).setUp()
stopwatch.sw.clear()
stopwatch.sw.enable()
def tearDown(self):
super(TestCase, self).tearDown()
s = str(stopwatch.sw)
if s:
logging.info("Stop watch profile:\n%s", s)
stopwatch.sw.disable()
def get_units(obs, filter_fn=None, owner=None, unit_type=None, tag=None):
"""Return a dict of units that match the filter."""
if unit_type and not isinstance(unit_type, (list, tuple)):
unit_type = (unit_type,)
out = {}
for u in obs.observation.raw_data.units:
if ((filter_fn is None or filter_fn(u)) and
(owner is None or u.owner == owner) and
(unit_type is None or u.unit_type in unit_type) and
(tag is None or u.tag == tag)):
out[u.tag] = u
return out
def get_unit(*args, **kwargs):
"""Return the first unit that matches, or None."""
try:
return next(iter(get_units(*args, **kwargs).values()))
except StopIteration:
return None
def xy_locs(mask):
"""Mask should be a set of bools from comparison with a feature layer."""
ys, xs = mask.nonzero()
return [point.Point(x, y) for x, y in zip(xs, ys)]
def only_in_game(func):
@functools.wraps(func)
def decorator(self, *args, **kwargs):
if self.in_game: # pytype: disable=attribute-error
return func(self, *args, **kwargs)
return decorator
class GameReplayTestCase(TestCase):
"""Tests that run through a game, then verify it still works in a replay."""
@staticmethod
def setup(**kwargs):
"""A decorator to replace unittest.setUp so it can take args."""
def decorator(func): # pylint: disable=missing-docstring
@functools.wraps(func)
def _setup(self): # pylint: disable=missing-docstring
def test_in_game():
print((" %s: Starting game " % func.__name__).center(80, "-"))
self.start_game(**kwargs)
func(self)
def test_in_replay():
self.start_replay()
print((" %s: Starting replay " % func.__name__).center(80, "-"))
func(self)
try:
test_in_game()
test_in_replay()
finally:
self.close()
return _setup
return decorator
def start_game(self, show_cloaked=True, disable_fog=False, players=2):
"""Start a multiplayer game with options."""
self._disable_fog = disable_fog
run_config = run_configs.get()
self._parallel = run_parallel.RunParallel() # Needed for multiplayer.
map_inst = maps.get("Flat64")
self._map_data = map_inst.data(run_config)
self._ports = portspicker.pick_unused_ports(4) if players == 2 else []
self._sc2_procs = [run_config.start(extra_ports=self._ports, want_rgb=False)
for _ in range(players)]
self._controllers = [p.controller for p in self._sc2_procs]
if players == 2:
for c in self._controllers: # Serial due to a race condition on Windows.
c.save_map(map_inst.path, self._map_data)
self._interface = sc_pb.InterfaceOptions()
self._interface.raw = True
self._interface.raw_crop_to_playable_area = True
self._interface.show_cloaked = show_cloaked
self._interface.score = False
self._interface.feature_layer.width = 24
self._interface.feature_layer.resolution.x = 64
self._interface.feature_layer.resolution.y = 64
self._interface.feature_layer.minimap_resolution.x = 64
self._interface.feature_layer.minimap_resolution.y = 64
create = sc_pb.RequestCreateGame(
random_seed=1, disable_fog=self._disable_fog,
local_map=sc_pb.LocalMap(map_path=map_inst.path))
for _ in range(players):
create.player_setup.add(type=sc_pb.Participant)
if players == 1:
create.local_map.map_data = self._map_data
create.player_setup.add(type=sc_pb.Computer, race=sc_common.Random,
difficulty=sc_pb.VeryEasy)
join = sc_pb.RequestJoinGame(race=sc_common.Protoss,
options=self._interface)
if players == 2:
join.shared_port = 0 # unused
join.server_ports.game_port = self._ports[0]
join.server_ports.base_port = self._ports[1]
join.client_ports.add(game_port=self._ports[2],
base_port=self._ports[3])
self._controllers[0].create_game(create)
self._parallel.run((c.join_game, join) for c in self._controllers)
self._info = self._controllers[0].game_info()
self._features = features.features_from_game_info(
self._info, use_raw_units=True)
self._map_size = point.Point.build(self._info.start_raw.map_size)
print("Map size:", self._map_size)
self.in_game = True
self.step() # Get into the game properly.
def start_replay(self):
"""Switch from the game to a replay."""
self.step(300)
replay_data = self._controllers[0].save_replay()
self._parallel.run(c.leave for c in self._controllers)
for player_id, controller in enumerate(self._controllers):
controller.start_replay(sc_pb.RequestStartReplay(
replay_data=replay_data,
map_data=self._map_data,
options=self._interface,
disable_fog=self._disable_fog,
observed_player_id=player_id+1))
self.in_game = False
self.step() # Get into the game properly.
def close(self): # Instead of tearDown.
"""Shut down the SC2 instances."""
# Don't use parallel since it might be broken by an exception.
if hasattr(self, "_controllers") and self._controllers:
for c in self._controllers:
c.quit()
self._controllers = None
if hasattr(self, "_sc2_procs") and self._sc2_procs:
for p in self._sc2_procs:
p.close()
self._sc2_procs = None
if hasattr(self, "_ports") and self._ports:
portspicker.return_ports(self._ports)
self._ports = None
if hasattr(self, "_parallel") and self._parallel is not None:
self._parallel.shutdown()
self._parallel = None
def step(self, count=4):
return self._parallel.run((c.step, count) for c in self._controllers)
def observe(self, disable_fog=False):
return self._parallel.run((c.observe, disable_fog) # pytype: disable=attribute-error
for c in self._controllers) # pytype: disable=attribute-error
@only_in_game
def move_camera(self, x, y):
action = sc_pb.Action()
action.action_raw.camera_move.center_world_space.x = x
action.action_raw.camera_move.center_world_space.y = y
return self._parallel.run((c.act, action) for c in self._controllers) # pytype: disable=attribute-error
@only_in_game
def raw_unit_command(self, player, ability_id, unit_tags, pos=None,
target=None):
"""Issue a raw unit command."""
if isinstance(ability_id, str):
ability_id = actions.FUNCTIONS[ability_id].ability_id
action = sc_pb.Action()
cmd = action.action_raw.unit_command
cmd.ability_id = ability_id
if isinstance(unit_tags, (list, tuple)):
cmd.unit_tags.extend(unit_tags)
else:
cmd.unit_tags.append(unit_tags)
if pos:
cmd.target_world_space_pos.x = pos[0]
cmd.target_world_space_pos.y = pos[1]
elif target:
cmd.target_unit_tag = target
response = self._controllers[player].act(action) # pytype: disable=attribute-error
for result in response.result:
self.assertEqual(result, sc_error.Success)
@only_in_game
def debug(self, player=0, **kwargs):
self._controllers[player].debug([sc_debug.DebugCommand(**kwargs)]) # pytype: disable=attribute-error
def god(self):
"""Stop the units from killing each other so we can observe them."""
self.debug(0, game_state=sc_debug.god)
self.debug(1, game_state=sc_debug.god)
def create_unit(self, unit_type, owner, pos, quantity=1):
if isinstance(pos, tuple):
pos = sc_common.Point2D(x=pos[0], y=pos[1])
elif isinstance(pos, sc_common.Point):
pos = sc_common.Point2D(x=pos.x, y=pos.y)
return self.debug(create_unit=sc_debug.DebugCreateUnit(
unit_type=unit_type, owner=owner, pos=pos, quantity=quantity))
def kill_unit(self, unit_tags):
if not isinstance(unit_tags, (list, tuple)):
unit_tags = [unit_tags]
return self.debug(kill_unit=sc_debug.DebugKillUnit(tag=unit_tags))
def set_energy(self, tag, energy):
self.debug(unit_value=sc_debug.DebugSetUnitValue(
unit_value=sc_debug.DebugSetUnitValue.Energy, value=energy,
unit_tag=tag))
def assert_point(self, proto_pos, pos):
self.assertAlmostEqual(proto_pos.x, pos[0])
self.assertAlmostEqual(proto_pos.y, pos[1])
def assert_layers(self, layers, pos, **kwargs):
for k, v in sorted(kwargs.items()):
self.assertEqual(layers[k, pos.y, pos.x], v,
msg="%s[%s, %s]: expected: %s, got: %s" % (
k, pos.y, pos.x, v, layers[k, pos.y, pos.x]))
def assert_unit(self, unit, **kwargs):
self.assertTrue(unit)
self.assertIsInstance(unit, sc_raw.Unit)
for k, v in sorted(kwargs.items()):
if k == "pos":
self.assert_point(unit.pos, v)
else:
self.assertEqual(getattr(unit, k), v,
msg="%s: expected: %s, got: %s\n%s" % (
k, v, getattr(unit, k), unit))
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/tests/observer_test.py | pysc2/tests/observer_test.py | #!/usr/bin/python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test that two built in bots can be watched by an observer."""
from absl.testing import absltest
from pysc2 import maps
from pysc2 import run_configs
from pysc2.tests import utils
from s2clientprotocol import common_pb2 as sc_common
from s2clientprotocol import sc2api_pb2 as sc_pb
class TestObserver(utils.TestCase):
def test_observer(self):
run_config = run_configs.get()
map_inst = maps.get("Simple64")
with run_config.start(want_rgb=False) as controller:
create = sc_pb.RequestCreateGame(local_map=sc_pb.LocalMap(
map_path=map_inst.path, map_data=map_inst.data(run_config)))
create.player_setup.add(
type=sc_pb.Computer, race=sc_common.Random, difficulty=sc_pb.VeryEasy)
create.player_setup.add(
type=sc_pb.Computer, race=sc_common.Random, difficulty=sc_pb.VeryHard)
create.player_setup.add(type=sc_pb.Observer)
controller.create_game(create)
join = sc_pb.RequestJoinGame(
options=sc_pb.InterfaceOptions(), # cheap observations
observed_player_id=0)
controller.join_game(join)
outcome = False
for _ in range(60 * 60): # 60 minutes should be plenty.
controller.step(16)
obs = controller.observe()
if obs.player_result:
print("Outcome after %s steps (%0.1f game minutes):" % (
obs.observation.game_loop, obs.observation.game_loop / (16 * 60)))
for r in obs.player_result:
print("Player %s: %s" % (r.player_id, sc_pb.Result.Name(r.result)))
outcome = True
break
self.assertTrue(outcome)
if __name__ == "__main__":
absltest.main()
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/tests/general_actions_test.py | pysc2/tests/general_actions_test.py | #!/usr/bin/python
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Verify that the general ids in stable ids match what we expect."""
from absl.testing import absltest
from pysc2 import maps
from pysc2 import run_configs
from pysc2.lib import actions
from pysc2.tests import utils
from s2clientprotocol import common_pb2 as sc_common
from s2clientprotocol import sc2api_pb2 as sc_pb
class TestGeneralActions(utils.TestCase):
"""Verify that the general ids in stable ids match what we expect."""
def test_general_actions(self):
run_config = run_configs.get()
with run_config.start(want_rgb=False) as controller:
map_inst = maps.get("Simple64")
create = sc_pb.RequestCreateGame(
realtime=False, disable_fog=False,
local_map=sc_pb.LocalMap(map_path=map_inst.path,
map_data=map_inst.data(run_config)))
create.player_setup.add(type=sc_pb.Participant)
create.player_setup.add(
type=sc_pb.Computer, race=sc_common.Random, difficulty=sc_pb.VeryEasy)
join = sc_pb.RequestJoinGame(race=sc_common.Random,
options=sc_pb.InterfaceOptions(raw=True))
controller.create_game(create)
controller.join_game(join)
abilities = controller.data().abilities
errors = []
for f in actions.FUNCTIONS:
if abilities[f.ability_id].remaps_to_ability_id != f.general_id:
errors.append("FUNCTIONS %s/%s has abilitiy %s, general %s, expected "
"general %s" % (
f.id, f.name, f.ability_id, f.general_id,
abilities[f.ability_id].remaps_to_ability_id))
for f in actions.RAW_FUNCTIONS:
if abilities[f.ability_id].remaps_to_ability_id != f.general_id:
errors.append(
"RAW_FUNCTIONS %s/%s has abilitiy %s, general %s, expected "
"general %s" % (
f.id, f.name, f.ability_id, f.general_id,
abilities[f.ability_id].remaps_to_ability_id))
print("\n".join(errors))
self.assertFalse(errors)
if __name__ == "__main__":
absltest.main()
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/tests/random_agent_test.py | pysc2/tests/random_agent_test.py | #!/usr/bin/python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run a random agent for a few steps."""
from absl.testing import absltest
from absl.testing import parameterized
from pysc2.agents import random_agent
from pysc2.env import run_loop
from pysc2.env import sc2_env
from pysc2.tests import utils
class TestRandomAgent(parameterized.TestCase, utils.TestCase):
@parameterized.named_parameters(
("features", sc2_env.AgentInterfaceFormat(
feature_dimensions=sc2_env.Dimensions(screen=84, minimap=64))),
("rgb", sc2_env.AgentInterfaceFormat(
rgb_dimensions=sc2_env.Dimensions(screen=128, minimap=64))),
("all", sc2_env.AgentInterfaceFormat(
feature_dimensions=sc2_env.Dimensions(screen=84, minimap=64),
rgb_dimensions=sc2_env.Dimensions(screen=128, minimap=64),
action_space=sc2_env.ActionSpace.FEATURES,
use_unit_counts=True,
use_feature_units=True)),
)
def test_random_agent(self, agent_interface_format):
steps = 250
step_mul = 8
with sc2_env.SC2Env(
map_name=["Simple64", "Simple96"],
players=[sc2_env.Agent([sc2_env.Race.random, sc2_env.Race.terran]),
sc2_env.Bot([sc2_env.Race.zerg, sc2_env.Race.protoss],
sc2_env.Difficulty.easy,
[sc2_env.BotBuild.rush, sc2_env.BotBuild.timing])],
agent_interface_format=agent_interface_format,
step_mul=step_mul,
game_steps_per_episode=steps * step_mul//3) as env:
agent = random_agent.RandomAgent()
run_loop.run_loop([agent], env, steps)
self.assertEqual(agent.steps, steps)
if __name__ == "__main__":
absltest.main()
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/tests/__init__.py | pysc2/tests/__init__.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
NKAI-Decision-Team/LLM-PySC2 | https://github.com/NKAI-Decision-Team/LLM-PySC2/blob/551c863475c0c4a96a181080974d24b59589e9f3/pysc2/tests/debug_test.py | pysc2/tests/debug_test.py | #!/usr/bin/python
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test that the debug commands work."""
from absl.testing import absltest
from pysc2 import maps
from pysc2 import run_configs
from pysc2.lib import units
from s2clientprotocol import common_pb2 as sc_common
from s2clientprotocol import debug_pb2 as sc_debug
from s2clientprotocol import sc2api_pb2 as sc_pb
class DebugTest(absltest.TestCase):
def test_multi_player(self):
run_config = run_configs.get()
map_inst = maps.get("Simple64")
with run_config.start(want_rgb=False) as controller:
create = sc_pb.RequestCreateGame(
local_map=sc_pb.LocalMap(
map_path=map_inst.path, map_data=map_inst.data(run_config)))
create.player_setup.add(type=sc_pb.Participant)
create.player_setup.add(
type=sc_pb.Computer,
race=sc_common.Terran,
difficulty=sc_pb.VeryEasy)
join = sc_pb.RequestJoinGame(race=sc_common.Terran,
options=sc_pb.InterfaceOptions(raw=True))
controller.create_game(create)
controller.join_game(join)
info = controller.game_info()
map_size = info.start_raw.map_size
controller.step(2)
obs = controller.observe()
def get_marines(obs):
return {u.tag: u for u in obs.observation.raw_data.units
if u.unit_type == units.Terran.Marine}
self.assertEmpty(get_marines(obs))
controller.debug(sc_debug.DebugCommand(
create_unit=sc_debug.DebugCreateUnit(
unit_type=units.Terran.Marine,
owner=1,
pos=sc_common.Point2D(x=map_size.x // 2, y=map_size.y // 2),
quantity=5)))
controller.step(2)
obs = controller.observe()
marines = get_marines(obs)
self.assertLen(marines, 5)
tags = sorted(marines.keys())
controller.debug([
sc_debug.DebugCommand(kill_unit=sc_debug.DebugKillUnit(
tag=[tags[0]])),
sc_debug.DebugCommand(unit_value=sc_debug.DebugSetUnitValue(
unit_value=sc_debug.DebugSetUnitValue.Life, value=5,
unit_tag=tags[1])),
])
controller.step(2)
obs = controller.observe()
marines = get_marines(obs)
self.assertLen(marines, 4)
self.assertNotIn(tags[0], marines)
self.assertEqual(marines[tags[1]].health, 5)
if __name__ == "__main__":
absltest.main()
| python | Apache-2.0 | 551c863475c0c4a96a181080974d24b59589e9f3 | 2026-01-05T07:14:50.369338Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.