repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
taomanwai/tensorboardcolab | https://github.com/taomanwai/tensorboardcolab/blob/337dce9890dadcb977b60322c30c76dee7fb42b1/tensorboardcolab/callbacks.py | tensorboardcolab/callbacks.py | import tensorflow as tf
from keras.callbacks import TensorBoard
import time
import os
import io
from tensorboardcolab.core import TensorBoardColab
class TensorBoardColab:
def __init__(self, port=6006, graph_path='./Graph', startup_waiting_time=8):
self.port = port
self.graph_path = graph_path
self.writer = None
self.deep_writers = {}
self.eager_execution = None
get_ipython().system_raw('npm i -s -q --unsafe-perm -g ngrok') # sudo npm i -s -q --unsafe-perm -g ngrok
setup_passed = False
retry_count = 0
sleep_time = startup_waiting_time / 3.0
while not setup_passed:
get_ipython().system_raw('kill -9 $(sudo lsof -t -i:%d)' % port)
get_ipython().system_raw('rm -Rf ' + graph_path)
print('Wait for %d seconds...' % startup_waiting_time)
time.sleep(sleep_time)
get_ipython().system_raw('tensorboard --logdir %s --host 0.0.0.0 --port %d &' % (graph_path, port))
time.sleep(sleep_time)
get_ipython().system_raw('ngrok http %d &' % port)
time.sleep(sleep_time)
try:
tensorboard_link = get_ipython().getoutput(
'curl -s http://localhost:4040/api/tunnels | python3 -c "import sys, json; print(json.load(sys.stdin))"')[
0]
tensorboard_link = eval(tensorboard_link)['tunnels'][0]['public_url']
setup_passed = True
except:
setup_passed = False
retry_count += 1
print('Initialization failed, retry again (%d)' % retry_count)
print('\n')
print("TensorBoard link:")
print(tensorboard_link)
def get_graph_path(self):
return self.graph_path
def is_eager_execution(self):
if self.eager_execution is None:
try:
tf.summary.FileWriter(self.graph_path)
self.eager_execution = False
except Exception as err:
self.eager_execution = str(
err) == 'tf.summary.FileWriter is not compatible with eager execution. Use tf.contrib.summary instead.'
return self.eager_execution
def get_writer(self):
if self.writer is None:
if self.is_eager_execution():
self.writer = tf.contrib.summary.create_file_writer(self.graph_path)
else:
self.writer = tf.summary.FileWriter(self.graph_path)
return self.writer
def get_deep_writers(self, name):
if not (name in self.deep_writers):
log_path = os.path.join(self.graph_path, name)
if self.is_eager_execution():
self.deep_writers[name] = tf.contrib.summary.create_file_writer(log_path)
else:
self.deep_writers[name] = tf.summary.FileWriter(log_path)
return self.deep_writers[name]
def save_image(self, title, image):
image_path = os.path.join(self.graph_path, 'images')
if self.is_eager_execution():
print('Warning: save_image() is not supported in eager execution mode')
# writer = tf.contrib.summary.create_file_writer(image_path)
# writer.set_as_default()
# with tf.contrib.summary.always_record_summaries():
# tf.contrib.summary.image(
# title,
# image_tensor
# )
else:
summary_op = tf.summary.image(title, image)
with tf.Session() as sess:
summary = sess.run(summary_op)
writer = tf.summary.FileWriter(image_path)
writer.add_summary(summary)
writer.close()
def save_value(self, graph_name, line_name, epoch, value):
if self.is_eager_execution():
self.get_deep_writers(line_name).set_as_default()
global_step = tf.train.get_or_create_global_step()
global_step.assign(epoch)
with tf.contrib.summary.always_record_summaries():
tf.contrib.summary.scalar(graph_name, value)
else:
summary = tf.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value
summary_value.tag = graph_name
self.get_deep_writers(line_name).add_summary(summary, epoch)
def flush_line(self, line_name):
self.get_deep_writers(line_name).flush()
def close(self):
if self.writer is not None:
self.writer.close()
self.writer = None
for key in self.deep_writers:
self.deep_writers[key].close()
self.deep_writers = {}
class TensorBoardColabCallback(TensorBoard):
def __init__(self, tbc=None, write_graph=True, **kwargs):
# Make the original `TensorBoard` log to a subdirectory 'training'
if tbc is None:
return
log_dir = tbc.get_graph_path()
training_log_dir = os.path.join(log_dir, 'training')
super(TensorBoardColabCallback, self).__init__(training_log_dir, **kwargs)
# Log the validation metrics to a separate subdirectory
self.val_log_dir = os.path.join(log_dir, 'validation')
def set_model(self, model):
# Setup writer for validation metrics
if self.is_eager_execution():
self.val_writer = tf.contrib.summary.create_file_writer(self.val_log_dir)
else:
self.val_writer = tf.summary.FileWriter(self.val_log_dir)
super(TensorBoardColabCallback, self).set_model(model)
def on_epoch_end(self, epoch, logs=None):
# Pop the validation logs and handle them separately with
# `self.val_writer`. Also rename the keys so that they can
# be plotted on the same figure with the training metrics
logs = logs or {}
val_logs = {k.replace('val_', ''): v for k, v in logs.items() if k.startswith('val_')}
for name, value in val_logs.items():
if self.is_eager_execution():
self.val_writer.set_as_default()
global_step = tf.train.get_or_create_global_step()
global_step.assign(epoch)
with tf.contrib.summary.always_record_summaries():
tf.contrib.summary.scalar(name, value.item())
else:
summary = tf.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value.item()
summary_value.tag = name
self.val_writer.add_summary(summary, epoch)
self.val_writer.flush()
# Pass the remaining logs to `TensorBoard.on_epoch_end`
logs = {k: v for k, v in logs.items() if not k.startswith('val_')}
super(TensorBoardColabCallback, self).on_epoch_end(epoch, logs)
def on_train_end(self, logs=None):
super(TensorBoardColabCallback, self).on_train_end(logs)
self.val_writer.close()
| python | MIT | 337dce9890dadcb977b60322c30c76dee7fb42b1 | 2026-01-05T07:13:48.796857Z | false |
taomanwai/tensorboardcolab | https://github.com/taomanwai/tensorboardcolab/blob/337dce9890dadcb977b60322c30c76dee7fb42b1/tensorboardcolab/core.py | tensorboardcolab/core.py | import tensorflow as tf
from keras.callbacks import TensorBoard
import time
import os
import io
class TensorBoardColab:
def __init__(self, port=6006, graph_path='./Graph', startup_waiting_time=8):
self.port = port
self.graph_path = graph_path
self.writer = None
self.deep_writers = {}
self.eager_execution = None
get_ipython().system_raw('npm i -s -q --unsafe-perm -g ngrok') # sudo npm i -s -q --unsafe-perm -g ngrok
setup_passed = False
retry_count = 0
sleep_time = startup_waiting_time / 3.0
while not setup_passed:
get_ipython().system_raw('kill -9 $(sudo lsof -t -i:%d)' % port)
get_ipython().system_raw('rm -Rf ' + graph_path)
print('Wait for %d seconds...' % startup_waiting_time)
time.sleep(sleep_time)
get_ipython().system_raw('tensorboard --logdir %s --host 0.0.0.0 --port %d &' % (graph_path, port))
time.sleep(sleep_time)
get_ipython().system_raw('ngrok http %d &' % port)
time.sleep(sleep_time)
try:
tensorboard_link = get_ipython().getoutput(
'curl -s http://localhost:4040/api/tunnels | python3 -c "import sys, json; print(json.load(sys.stdin))"')[
0]
tensorboard_link = eval(tensorboard_link)['tunnels'][0]['public_url']
setup_passed = True
except:
setup_passed = False
retry_count += 1
print('Initialization failed, retry again (%d)' % retry_count)
print('\n')
print("TensorBoard link:")
print(tensorboard_link)
def get_graph_path(self):
return self.graph_path
def is_eager_execution(self):
if self.eager_execution is None:
try:
tf.summary.FileWriter(self.graph_path)
self.eager_execution = False
except Exception as err:
self.eager_execution = str(
err) == 'tf.summary.FileWriter is not compatible with eager execution. Use tf.contrib.summary instead.'
return self.eager_execution
def get_writer(self):
if self.writer is None:
if self.is_eager_execution():
self.writer = tf.contrib.summary.create_file_writer(self.graph_path)
else:
self.writer = tf.summary.FileWriter(self.graph_path)
return self.writer
def get_deep_writers(self, name):
if not (name in self.deep_writers):
log_path = os.path.join(self.graph_path, name)
if self.is_eager_execution():
self.deep_writers[name] = tf.contrib.summary.create_file_writer(log_path)
else:
self.deep_writers[name] = tf.summary.FileWriter(log_path)
return self.deep_writers[name]
def save_image(self, title, image):
image_path = os.path.join(self.graph_path, 'images')
if self.is_eager_execution():
print('Warning: save_image() is not supported in eager execution mode')
# writer = tf.contrib.summary.create_file_writer(image_path)
# writer.set_as_default()
# with tf.contrib.summary.always_record_summaries():
# tf.contrib.summary.image(
# title,
# image_tensor
# )
else:
summary_op = tf.summary.image(title, image)
with tf.Session() as sess:
summary = sess.run(summary_op)
writer = tf.summary.FileWriter(image_path)
writer.add_summary(summary)
writer.close()
def save_value(self, graph_name, line_name, epoch, value):
if self.is_eager_execution():
self.get_deep_writers(line_name).set_as_default()
global_step = tf.train.get_or_create_global_step()
global_step.assign(epoch)
with tf.contrib.summary.always_record_summaries():
tf.contrib.summary.scalar(graph_name, value)
else:
summary = tf.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value
summary_value.tag = graph_name
self.get_deep_writers(line_name).add_summary(summary, epoch)
def flush_line(self, line_name):
self.get_deep_writers(line_name).flush()
def close(self):
if self.writer is not None:
self.writer.close()
self.writer = None
for key in self.deep_writers:
self.deep_writers[key].close()
self.deep_writers = {}
class TensorBoardColabCallback(TensorBoard):
def __init__(self, tbc=None, write_graph=True, **kwargs):
# Make the original `TensorBoard` log to a subdirectory 'training'
if tbc is None:
return
self.tbc = tbc
log_dir = tbc.get_graph_path()
training_log_dir = os.path.join(log_dir, 'training')
super(TensorBoardColabCallback, self).__init__(training_log_dir, **kwargs)
# Log the validation metrics to a separate subdirectory
self.val_log_dir = os.path.join(log_dir, 'validation')
def set_model(self, model):
# Setup writer for validation metrics
if self.tbc.is_eager_execution():
self.val_writer = tf.contrib.summary.create_file_writer(self.val_log_dir)
else:
self.val_writer = tf.summary.FileWriter(self.val_log_dir)
super(TensorBoardColabCallback, self).set_model(model)
def on_epoch_end(self, epoch, logs=None):
# Pop the validation logs and handle them separately with
# `self.val_writer`. Also rename the keys so that they can
# be plotted on the same figure with the training metrics
logs = logs or {}
val_logs = {k.replace('val_', ''): v for k, v in logs.items() if k.startswith('val_')}
for name, value in val_logs.items():
if self.tbc.is_eager_execution():
self.val_writer.set_as_default()
global_step = tf.train.get_or_create_global_step()
global_step.assign(epoch)
with tf.contrib.summary.always_record_summaries():
tf.contrib.summary.scalar(name, value.item())
else:
summary = tf.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value.item()
summary_value.tag = name
self.val_writer.add_summary(summary, epoch)
self.val_writer.flush()
# Pass the remaining logs to `TensorBoard.on_epoch_end`
logs = {k: v for k, v in logs.items() if not k.startswith('val_')}
super(TensorBoardColabCallback, self).on_epoch_end(epoch, logs)
def on_train_end(self, logs=None):
super(TensorBoardColabCallback, self).on_train_end(logs)
self.val_writer.close() | python | MIT | 337dce9890dadcb977b60322c30c76dee7fb42b1 | 2026-01-05T07:13:48.796857Z | false |
taomanwai/tensorboardcolab | https://github.com/taomanwai/tensorboardcolab/blob/337dce9890dadcb977b60322c30c76dee7fb42b1/tensorboardcolab/__init__.py | tensorboardcolab/__init__.py | from tensorboardcolab.core import *
from tensorboardcolab.callbacks import * | python | MIT | 337dce9890dadcb977b60322c30c76dee7fb42b1 | 2026-01-05T07:13:48.796857Z | false |
paulovcmedeiros/pyRobBot | https://github.com/paulovcmedeiros/pyRobBot/blob/7e77d3b1aee052cfa350a806371de75b9b713ad6/tests/conftest.py | tests/conftest.py | import io
import lorem
import numpy as np
import pydub
import pytest
from _pytest.logging import LogCaptureFixture
from loguru import logger
import pyrobbot
from pyrobbot.chat import Chat
from pyrobbot.chat_configs import ChatOptions, VoiceChatConfigs
from pyrobbot.voice_chat import VoiceChat
@pytest.fixture()
def caplog(caplog: LogCaptureFixture):
"""Override the default `caplog` fixture to propagate Loguru to the caplog handler."""
# Source: <https://loguru.readthedocs.io/en/stable/resources/migration.html
# #replacing-caplog-fixture-from-pytest-library>
handler_id = logger.add(
caplog.handler,
format="{message}",
level=0,
filter=lambda record: record["level"].no >= caplog.handler.level,
enqueue=False, # Set to 'True' if your test is spawning child processes.
)
yield caplog
logger.remove(handler_id)
# Register markers and constants
def pytest_configure(config):
config.addinivalue_line(
"markers",
"no_chat_completion_create_mocking: do not mock openai.ChatCompletion.create",
)
config.addinivalue_line(
"markers",
"no_embedding_create_mocking: mark test to not mock openai.Embedding.create",
)
pytest.original_package_cache_directory = (
pyrobbot.GeneralDefinitions.PACKAGE_CACHE_DIRECTORY
)
@pytest.fixture(autouse=True)
def _set_env(monkeypatch):
# Make sure we don't consume our tokens in tests
monkeypatch.setenv("OPENAI_API_KEY", "INVALID_API_KEY")
monkeypatch.setenv("STREAMLIT_SERVER_HEADLESS", "true")
@pytest.fixture(autouse=True)
def _mocked_general_constants(tmp_path, mocker):
mocker.patch(
"pyrobbot.GeneralDefinitions.PACKAGE_CACHE_DIRECTORY", tmp_path / "cache"
)
@pytest.fixture()
def mock_wav_bytes_string():
"""Mock a WAV file as a bytes string."""
return (
b"RIFF$\x00\x00\x00WAVEfmt \x10\x00\x00\x00\x01\x00\x01\x00\x00\x04\x00"
b"\x00\x00\x04\x00\x00\x01\x00\x08\x00data\x00\x00\x00\x00"
)
@pytest.fixture(autouse=True)
def _openai_api_request_mockers(request, mocker):
"""Mockers for OpenAI API requests. We don't want to consume our tokens in tests."""
def _mock_openai_chat_completion_create(*args, **kwargs): # noqa: ARG001
"""Mock `openai.ChatCompletion.create`. Yield from lorem ipsum instead."""
completion_chunk = type("CompletionChunk", (), {})
completion_chunk_choice = type("CompletionChunkChoice", (), {})
completion_chunk_choice_delta = type("CompletionChunkChoiceDelta", (), {})
for word in lorem.get_paragraph().split():
completion_chunk_choice_delta.content = word + " "
completion_chunk_choice.delta = completion_chunk_choice_delta
completion_chunk.choices = [completion_chunk_choice]
yield completion_chunk
# Yield some code as well, to test the code filtering
code_path = pyrobbot.GeneralDefinitions.PACKAGE_DIRECTORY / "__init__.py"
for word in [
"```python\n",
*code_path.read_text().splitlines(keepends=True)[:5],
"```\n",
]:
completion_chunk_choice_delta.content = word + " "
completion_chunk_choice.delta = completion_chunk_choice_delta
completion_chunk.choices = [completion_chunk_choice]
yield completion_chunk
def _mock_openai_embedding_create(*args, **kwargs): # noqa: ARG001
"""Mock `openai.Embedding.create`. Yield from lorem ipsum instead."""
embedding_request_mock_type = type("EmbeddingRequest", (), {})
embedding_mock_type = type("Embedding", (), {})
usage_mock_type = type("Usage", (), {})
embedding = embedding_mock_type()
embedding.embedding = np.random.rand(512).tolist()
embedding_request = embedding_request_mock_type()
embedding_request.data = [embedding]
usage = usage_mock_type()
usage.prompt_tokens = 0
usage.total_tokens = 0
embedding_request.usage = usage
return embedding_request
if "no_chat_completion_create_mocking" not in request.keywords:
mocker.patch(
"openai.resources.chat.completions.Completions.create",
new=_mock_openai_chat_completion_create,
)
if "no_embedding_create_mocking" not in request.keywords:
mocker.patch(
"openai.resources.embeddings.Embeddings.create",
new=_mock_openai_embedding_create,
)
@pytest.fixture(autouse=True)
def _internet_search_mockers(mocker):
"""Mockers for the internet search module."""
mocker.patch("duckduckgo_search.DDGS.text", return_value=lorem.get_paragraph())
@pytest.fixture()
def _input_builtin_mocker(mocker, user_input):
"""Mock the `input` builtin. Raise `KeyboardInterrupt` after the second call."""
# We allow two calls in order to allow for the chat context handler to kick in
def _mock_input(*args, **kwargs): # noqa: ARG001
try:
_mock_input.execution_counter += 1
except AttributeError:
_mock_input.execution_counter = 0
if _mock_input.execution_counter > 1:
raise KeyboardInterrupt
return user_input
mocker.patch( # noqa: PT008
"builtins.input", new=lambda _: _mock_input(user_input=user_input)
)
@pytest.fixture(params=ChatOptions.get_allowed_values("model")[:2])
def llm_model(request):
return request.param
context_model_values = ChatOptions.get_allowed_values("context_model")
@pytest.fixture(params=[context_model_values[0], context_model_values[2]])
def context_model(request):
return request.param
@pytest.fixture()
def default_chat_configs(llm_model, context_model):
return ChatOptions(model=llm_model, context_model=context_model)
@pytest.fixture()
def default_voice_chat_configs(llm_model, context_model):
return VoiceChatConfigs(model=llm_model, context_model=context_model)
@pytest.fixture()
def cli_args_overrides(default_chat_configs):
args = []
for field, value in default_chat_configs.model_dump().items():
if value not in [None, True, False]:
args = [*args, *[f"--{field.replace('_', '-')}", str(value)]]
return args
@pytest.fixture()
def default_chat(default_chat_configs):
return Chat(configs=default_chat_configs)
@pytest.fixture()
def default_voice_chat(default_voice_chat_configs):
chat = VoiceChat(configs=default_voice_chat_configs)
chat.inactivity_timeout_seconds = 1e-5
chat.tts_engine = "google"
return chat
@pytest.fixture(autouse=True)
def _voice_chat_mockers(mocker, mock_wav_bytes_string):
"""Mockers for the text-to-speech module."""
mocker.patch(
"pyrobbot.voice_chat.VoiceChat._assistant_still_replying", return_value=False
)
mock_google_tts_obj = type("mock_gTTS", (), {})
mock_openai_tts_response = type("mock_openai_tts_response", (), {})
def _mock_iter_bytes(*args, **kwargs): # noqa: ARG001
return [mock_wav_bytes_string]
mock_openai_tts_response.iter_bytes = _mock_iter_bytes
mocker.patch(
"pydub.AudioSegment.from_mp3",
return_value=pydub.AudioSegment.from_wav(io.BytesIO(mock_wav_bytes_string)),
)
mocker.patch("gtts.gTTS", return_value=mock_google_tts_obj)
mocker.patch(
"openai.resources.audio.speech.Speech.create",
return_value=mock_openai_tts_response,
)
mock_transcription = type("MockTranscription", (), {})
mock_transcription.text = "patched"
mocker.patch(
"openai.resources.audio.transcriptions.Transcriptions.create",
return_value=mock_transcription,
)
mocker.patch(
"speech_recognition.Recognizer.recognize_google",
return_value=mock_transcription.text,
)
mocker.patch("webrtcvad.Vad.is_speech", return_value=False)
mocker.patch("pygame.mixer.init")
mocker.patch("chime.play_wav")
mocker.patch("chime.play_wav")
| python | MIT | 7e77d3b1aee052cfa350a806371de75b9b713ad6 | 2026-01-05T07:13:49.794839Z | false |
paulovcmedeiros/pyRobBot | https://github.com/paulovcmedeiros/pyRobBot/blob/7e77d3b1aee052cfa350a806371de75b9b713ad6/tests/smoke/test_app.py | tests/smoke/test_app.py | import contextlib
import streamlit
import streamlit_webrtc.component
from pyrobbot.app import app
def test_app(mocker, default_voice_chat_configs):
class MockAttrDict(streamlit.runtime.state.session_state_proxy.SessionStateProxy):
def __getattr__(self, attr):
return self.get(attr, mocker.MagicMock())
def __getitem__(self, key):
with contextlib.suppress(KeyError):
return super().__getitem__(key)
return mocker.MagicMock()
mocker.patch.object(streamlit, "session_state", new=MockAttrDict())
mocker.patch.object(
streamlit.runtime.state.session_state_proxy,
"SessionStateProxy",
new=MockAttrDict,
)
mocker.patch("streamlit.chat_input", return_value="foobar")
mocker.patch(
"pyrobbot.chat_configs.VoiceChatConfigs.from_file",
return_value=default_voice_chat_configs,
)
mocker.patch.object(
streamlit_webrtc.component,
"webrtc_streamer",
mocker.MagicMock(return_value=mocker.MagicMock()),
)
mocker.patch("streamlit.number_input", return_value=0)
mocker.patch(
"pyrobbot.chat_configs.VoiceChatConfigs.model_validate",
return_value=default_voice_chat_configs,
)
app.run_app()
| python | MIT | 7e77d3b1aee052cfa350a806371de75b9b713ad6 | 2026-01-05T07:13:49.794839Z | false |
paulovcmedeiros/pyRobBot | https://github.com/paulovcmedeiros/pyRobBot/blob/7e77d3b1aee052cfa350a806371de75b9b713ad6/tests/smoke/test_commands.py | tests/smoke/test_commands.py | import io
import subprocess
import pytest
from pydub import AudioSegment
from pyrobbot.__main__ import main
from pyrobbot.argparse_wrapper import get_parsed_args
def test_default_command():
args = get_parsed_args(argv=[])
assert args.command == "ui"
@pytest.mark.usefixtures("_input_builtin_mocker")
@pytest.mark.parametrize("user_input", ["Hi!", ""], ids=["regular-input", "empty-input"])
def test_terminal_command(cli_args_overrides):
args = ["terminal", "--report-accounting-when-done", *cli_args_overrides]
args = list(dict.fromkeys(args))
main(args)
def test_accounting_command():
main(["accounting"])
def test_ui_command(mocker, caplog):
original_run = subprocess.run
def new_run(*args, **kwargs):
kwargs.pop("timeout", None)
try:
original_run(*args, **kwargs, timeout=0.5)
except subprocess.TimeoutExpired as error:
raise KeyboardInterrupt from error
mocker.patch("subprocess.run", new=new_run)
main(["ui"])
assert "Exiting." in caplog.text
@pytest.mark.parametrize("stt", ["google", "openai"])
@pytest.mark.parametrize("tts", ["google", "openai"])
def test_voice_chat(mocker, mock_wav_bytes_string, tts, stt):
# We allow even number of calls in order to let the function be tested first and
# then terminate the chat
def _mock_listen(*args, **kwargs): # noqa: ARG001
try:
_mock_listen.execution_counter += 1
except AttributeError:
_mock_listen.execution_counter = 0
if _mock_listen.execution_counter % 2:
return None
return AudioSegment.from_wav(io.BytesIO(mock_wav_bytes_string))
mocker.patch("pyrobbot.voice_chat.VoiceChat.listen", _mock_listen)
main(["voice", "--tts", tts, "--stt", stt])
| python | MIT | 7e77d3b1aee052cfa350a806371de75b9b713ad6 | 2026-01-05T07:13:49.794839Z | false |
paulovcmedeiros/pyRobBot | https://github.com/paulovcmedeiros/pyRobBot/blob/7e77d3b1aee052cfa350a806371de75b9b713ad6/tests/unit/test_internet_utils.py | tests/unit/test_internet_utils.py | import contextlib
import duckduckgo_search
from pyrobbot.internet_utils import websearch
# if called inside tests or fixtures. Leave it like this for now.
search_results = []
with contextlib.suppress(duckduckgo_search.exceptions.DuckDuckGoSearchException):
search_results = list(websearch("foobar"))
def test_websearch():
for i_result, result in enumerate(search_results):
assert isinstance(result, dict)
assert ("detailed" in result) == (i_result == 0)
for key in ["summary", "relevance", "href"]:
assert key in result
| python | MIT | 7e77d3b1aee052cfa350a806371de75b9b713ad6 | 2026-01-05T07:13:49.794839Z | false |
paulovcmedeiros/pyRobBot | https://github.com/paulovcmedeiros/pyRobBot/blob/7e77d3b1aee052cfa350a806371de75b9b713ad6/tests/unit/test_chat.py | tests/unit/test_chat.py | import openai
import pytest
from pyrobbot import GeneralDefinitions
from pyrobbot.chat import Chat
from pyrobbot.chat_configs import ChatOptions
@pytest.mark.order(1)
@pytest.mark.usefixtures("_input_builtin_mocker")
@pytest.mark.no_chat_completion_create_mocking()
@pytest.mark.parametrize("user_input", ["regular-input"])
def testbed_doesnt_actually_connect_to_openai(caplog):
llm = ChatOptions.get_allowed_values("model")[0]
context_model = ChatOptions.get_allowed_values("context_model")[0]
chat_configs = ChatOptions(model=llm, context_model=context_model)
chat = Chat(configs=chat_configs)
chat.start()
success = chat.response_failure_message().content in caplog.text
err_msg = "Refuse to continue: Testbed is trying to connect to OpenAI API!"
err_msg += f"\nThis is what the logger says:\n{caplog.text}"
if not success:
pytest.exit(err_msg)
@pytest.mark.order(2)
def test_we_are_using_tmp_cachedir():
try:
assert (
pytest.original_package_cache_directory
!= GeneralDefinitions.PACKAGE_CACHE_DIRECTORY
)
except AssertionError:
pytest.exit(
"Refuse to continue: Tests attempted to use the package's real cache dir "
+ f"({GeneralDefinitions.PACKAGE_CACHE_DIRECTORY})!"
)
@pytest.mark.usefixtures("_input_builtin_mocker")
@pytest.mark.parametrize("user_input", ["Hi!", ""], ids=["regular-input", "empty-input"])
def test_terminal_chat(default_chat):
default_chat.start()
default_chat.__del__() # Just to trigger testing the custom del method
def test_chat_configs(default_chat, default_chat_configs):
assert default_chat._passed_configs == default_chat_configs
@pytest.mark.no_chat_completion_create_mocking()
@pytest.mark.usefixtures("_input_builtin_mocker")
@pytest.mark.parametrize("user_input", ["regular-input"])
def test_request_timeout_retry(mocker, default_chat, caplog):
def _mock_openai_chat_completion_create(*args, **kwargs): # noqa: ARG001
raise openai.APITimeoutError("Mocked timeout error was not caught!")
mocker.patch(
"openai.resources.chat.completions.Completions.create",
new=_mock_openai_chat_completion_create,
)
mocker.patch("time.sleep") # Don't waste time sleeping in tests
default_chat.start()
assert "APITimeoutError" in caplog.text
def test_can_read_chat_from_cache(default_chat):
default_chat.save_cache()
new_chat = Chat.from_cache(default_chat.cache_dir)
assert new_chat.configs == default_chat.configs
def test_create_from_cache_returns_default_chat_if_invalid_cachedir(default_chat, caplog):
_ = Chat.from_cache(default_chat.cache_dir / "foobar")
assert "Creating Chat with default configs" in caplog.text
@pytest.mark.usefixtures("_input_builtin_mocker")
@pytest.mark.parametrize("user_input", ["regular-input"])
def test_internet_search_can_be_triggered(default_chat, mocker):
mocker.patch(
"pyrobbot.openai_utils.make_api_chat_completion_call", return_value=iter(["yes"])
)
mocker.patch("pyrobbot.chat.Chat.respond_system_prompt", return_value=iter(["yes"]))
mocker.patch(
"pyrobbot.internet_utils.raw_websearch",
return_value=iter(
[
{
"href": "foo/bar",
"summary": 50 * "foo ",
"detailed": 50 * "foo ",
"relevance": 1.0,
}
]
),
)
default_chat.start()
| python | MIT | 7e77d3b1aee052cfa350a806371de75b9b713ad6 | 2026-01-05T07:13:49.794839Z | false |
paulovcmedeiros/pyRobBot | https://github.com/paulovcmedeiros/pyRobBot/blob/7e77d3b1aee052cfa350a806371de75b9b713ad6/tests/unit/test_text_to_speech.py | tests/unit/test_text_to_speech.py | import pytest
from pydub import AudioSegment
from pyrobbot.sst_and_tts import SpeechToText
@pytest.mark.parametrize("stt_engine", ["google", "openai"])
def test_stt(default_voice_chat, stt_engine):
"""Test the speech-to-text method."""
default_voice_chat.stt_engine = stt_engine
stt = SpeechToText(
openai_client=default_voice_chat.openai_client,
speech=AudioSegment.silent(duration=100),
engine=stt_engine,
general_token_usage_db=default_voice_chat.general_token_usage_db,
token_usage_db=default_voice_chat.token_usage_db,
)
assert stt.text == "patched"
| python | MIT | 7e77d3b1aee052cfa350a806371de75b9b713ad6 | 2026-01-05T07:13:49.794839Z | false |
paulovcmedeiros/pyRobBot | https://github.com/paulovcmedeiros/pyRobBot/blob/7e77d3b1aee052cfa350a806371de75b9b713ad6/tests/unit/test_voice_chat.py | tests/unit/test_voice_chat.py | import contextlib
import pytest
from pydantic import ValidationError
from pydub import AudioSegment
from sounddevice import PortAudioError
from pyrobbot.chat_configs import VoiceChatConfigs
from pyrobbot.sst_and_tts import TextToSpeech
from pyrobbot.voice_chat import VoiceChat
def test_soundcard_import_check(mocker, caplog):
"""Test that the voice chat cannot be instantiated if soundcard is not imported."""
mocker.patch("pyrobbot.voice_chat._sounddevice_imported", False)
_ = VoiceChat(configs=VoiceChatConfigs())
msg = "Module `sounddevice`, needed for local audio recording, is not available."
assert msg in caplog.text
@pytest.mark.parametrize("param_name", ["sample_rate", "frame_duration"])
def test_cannot_instanciate_assistant_with_invalid_webrtcvad_params(param_name):
"""Test that the voice chat cannot be instantiated with invalid webrtcvad params."""
with pytest.raises(ValidationError, match="Input should be"):
VoiceChat(configs=VoiceChatConfigs(**{param_name: 1}))
def test_listen(default_voice_chat):
"""Test the listen method."""
with contextlib.suppress(PortAudioError, pytest.PytestUnraisableExceptionWarning):
default_voice_chat.listen()
def test_speak(default_voice_chat, mocker):
tts = TextToSpeech(
openai_client=default_voice_chat.openai_client,
text="foo",
general_token_usage_db=default_voice_chat.general_token_usage_db,
token_usage_db=default_voice_chat.token_usage_db,
)
mocker.patch("pygame.mixer.Sound")
mocker.patch("pyrobbot.voice_chat._get_lower_alphanumeric", return_value="ok cancel")
mocker.patch(
"pyrobbot.voice_chat.VoiceChat.listen",
return_value=AudioSegment.silent(duration=150),
)
default_voice_chat.speak(tts)
def test_answer_question(default_voice_chat):
default_voice_chat.answer_question("foo")
def test_interrupt_reply(default_voice_chat):
default_voice_chat.interrupt_reply.set()
default_voice_chat.questions_queue.get = lambda: None
default_voice_chat.questions_queue.task_done = lambda: None
default_voice_chat.start()
def test_handle_interrupt_expressions(default_voice_chat, mocker):
mocker.patch("pyrobbot.general_utils.str2_minus_str1", return_value="cancel")
default_voice_chat.questions_queue.get = lambda: None
default_voice_chat.questions_queue.task_done = lambda: None
default_voice_chat.questions_queue.answer_question = lambda _question: None
msgs_to_compare = {
"assistant_txt": "foo",
"user_audio": AudioSegment.silent(duration=150),
}
default_voice_chat.check_for_interrupt_expressions_queue.put(msgs_to_compare)
default_voice_chat.start()
| python | MIT | 7e77d3b1aee052cfa350a806371de75b9b713ad6 | 2026-01-05T07:13:49.794839Z | false |
paulovcmedeiros/pyRobBot | https://github.com/paulovcmedeiros/pyRobBot/blob/7e77d3b1aee052cfa350a806371de75b9b713ad6/pyrobbot/chat_configs.py | pyrobbot/chat_configs.py | #!/usr/bin/env python3
"""Registration and validation of options."""
import argparse
import json
import types
import typing
from getpass import getuser
from pathlib import Path
from typing import Literal, Optional, get_args, get_origin
from pydantic import BaseModel, Field
from . import GeneralDefinitions
from .tokens import PRICE_PER_K_TOKENS_EMBEDDINGS, PRICE_PER_K_TOKENS_LLM
class BaseConfigModel(BaseModel, extra="forbid"):
"""Base model for configuring options."""
@classmethod
def get_allowed_values(cls, field: str):
"""Return a tuple of allowed values for `field`."""
annotation = cls._get_field_param(field=field, param="annotation")
if isinstance(annotation, type(Literal[""])):
return get_args(annotation)
return None
@classmethod
def get_type(cls, field: str):
"""Return type of `field`."""
type_hint = typing.get_type_hints(cls)[field]
if isinstance(type_hint, type):
if isinstance(type_hint, types.GenericAlias):
return get_origin(type_hint)
return type_hint
type_hint_first_arg = get_args(type_hint)[0]
if isinstance(type_hint_first_arg, type):
return type_hint_first_arg
return None
@classmethod
def get_default(cls, field: str):
"""Return allowed value(s) for `field`."""
return cls.model_fields[field].get_default()
@classmethod
def get_description(cls, field: str):
"""Return description of `field`."""
return cls._get_field_param(field=field, param="description")
@classmethod
def from_cli_args(cls, cli_args: argparse.Namespace):
"""Return an instance of the class from CLI args."""
relevant_args = {
k: v
for k, v in vars(cli_args).items()
if k in cls.model_fields and v is not None
}
return cls.model_validate(relevant_args)
@classmethod
def _get_field_param(cls, field: str, param: str):
"""Return param `param` of field `field`."""
return getattr(cls.model_fields[field], param, None)
def __getitem__(self, item):
"""Make possible to retrieve values as in a dict."""
try:
return getattr(self, item)
except AttributeError as error:
raise KeyError(item) from error
def export(self, fpath: Path):
"""Export the model's data to a file."""
with open(fpath, "w") as configs_file:
configs_file.write(self.model_dump_json(indent=2, exclude_unset=True))
@classmethod
def from_file(cls, fpath: Path):
"""Return an instance of the class given configs stored in a json file."""
with open(fpath, "r") as configs_file:
return cls.model_validate(json.load(configs_file))
class OpenAiApiCallOptions(BaseConfigModel):
"""Model for configuring options for OpenAI API calls."""
_openai_url = "https://platform.openai.com/docs/api-reference/chat/create#chat-create"
_models_url = "https://platform.openai.com/docs/models"
model: Literal[tuple(PRICE_PER_K_TOKENS_LLM)] = Field(
default=next(iter(PRICE_PER_K_TOKENS_LLM)),
description=f"OpenAI LLM model to use. See {_openai_url}-model and {_models_url}",
)
max_tokens: Optional[int] = Field(
default=None, gt=0, description=f"See <{_openai_url}-max_tokens>"
)
presence_penalty: Optional[float] = Field(
default=None, ge=-2.0, le=2.0, description=f"See <{_openai_url}-presence_penalty>"
)
frequency_penalty: Optional[float] = Field(
default=None,
ge=-2.0,
le=2.0,
description=f"See <{_openai_url}-frequency_penalty>",
)
temperature: Optional[float] = Field(
default=None, ge=0.0, le=2.0, description=f"See <{_openai_url}-temperature>"
)
top_p: Optional[float] = Field(
default=None, ge=0.0, le=1.0, description=f"See <{_openai_url}-top_p>"
)
timeout: Optional[float] = Field(
default=10.0, gt=0.0, description="Timeout for API requests in seconds"
)
class ChatOptions(OpenAiApiCallOptions):
"""Model for the chat's configuration options."""
username: str = Field(default=getuser(), description="Name of the chat's user")
assistant_name: str = Field(default="Rob", description="Name of the chat's assistant")
system_name: str = Field(
default=f"{GeneralDefinitions.PACKAGE_NAME}_system",
description="Name of the chat's system",
)
ai_instructions: tuple[str, ...] = Field(
default=(
"You answer correctly.",
"You do not lie or make up information unless explicitly asked to do so.",
),
description="Initial instructions for the AI",
)
context_model: Literal[tuple(PRICE_PER_K_TOKENS_EMBEDDINGS)] = Field(
default=next(iter(PRICE_PER_K_TOKENS_EMBEDDINGS)),
description=(
"Model to use for chat context (~memory). "
+ "Once picked, it cannot be changed."
),
json_schema_extra={"frozen": True},
)
initial_greeting: Optional[str] = Field(
default="", description="Initial greeting given by the assistant"
)
private_mode: Optional[bool] = Field(
default=False,
description="Toggle private mode. If this flag is used, the chat will not "
+ "be logged and the chat history will not be saved.",
)
api_connection_max_n_attempts: int = Field(
default=5,
gt=0,
description="Maximum number of attempts to connect to the OpenAI API",
)
language: str = Field(
default="en",
description="Initial language adopted by the assistant. Use either the ISO-639-1 "
"format (e.g. 'pt'), or an RFC5646 language tag (e.g. 'pt-br').",
)
tts_engine: Literal["openai", "google"] = Field(
default="openai",
description="The text-to-speech engine to use. The `google` engine is free "
"(for now, at least), but the `openai` engine (which will charge from your "
"API credits) sounds more natural.",
)
stt_engine: Literal["openai", "google"] = Field(
default="google",
description="The preferred speech-to-text engine to use. The `google` engine is "
"free (for now, at least); the `openai` engine is less succeptible to outages.",
)
openai_tts_voice: Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"] = (
Field(default="onyx", description="Voice to use for OpenAI's TTS")
)
class VoiceAssistantConfigs(BaseConfigModel):
"""Model for the text-to-speech assistant's configuration options."""
exit_expressions: list[str] = Field(
default=["bye-bye", "ok bye-bye", "okay bye-bye"],
description="Expression(s) to use in order to exit the chat",
json_schema_extra={"changeable": False},
)
cancel_expressions: list[str] = Field(
default=["ok", "okay", "cancel", "stop", "listen"],
description="Word(s) to use in order to cancel the current reply",
json_schema_extra={"changeable": False},
)
min_speech_duration_seconds: float = Field(
default=0.1,
gt=0,
description="Minimum duration of speech (in seconds) for the assistant to listen",
json_schema_extra={"changeable": False},
)
inactivity_timeout_seconds: int = Field(
default=1,
gt=0,
description="How much time user should be inactive "
"for the assistant to stop listening",
)
speech_likelihood_threshold: float = Field(
default=0.5,
ge=0.0,
le=1.0,
description="Accept audio as speech if the likelihood is above this threshold",
json_schema_extra={"changeable": False},
)
# sample_rate and frame_duration have to be consistent with the values uaccepted by
# the webrtcvad package
sample_rate: Literal[8000, 16000, 32000, 48000] = Field(
default=48000,
description="Sample rate for audio recording, in Hz.",
json_schema_extra={"changeable": False},
)
frame_duration: Literal[10, 20, 30] = Field(
default=20,
description="Frame duration for audio recording, in milliseconds.",
json_schema_extra={"changeable": False},
)
reply_only_as_text: Optional[bool] = Field(
default=None, description="Reply only as text. The assistant will not speak."
)
skip_initial_greeting: Optional[bool] = Field(
default=None, description="Skip initial greeting."
)
class VoiceChatConfigs(ChatOptions, VoiceAssistantConfigs):
"""Model for the voice chat's configuration options."""
| python | MIT | 7e77d3b1aee052cfa350a806371de75b9b713ad6 | 2026-01-05T07:13:49.794839Z | false |
paulovcmedeiros/pyRobBot | https://github.com/paulovcmedeiros/pyRobBot/blob/7e77d3b1aee052cfa350a806371de75b9b713ad6/pyrobbot/argparse_wrapper.py | pyrobbot/argparse_wrapper.py | #!/usr/bin/env python3
"""Wrappers for argparse functionality."""
import argparse
import contextlib
import sys
from pydantic import BaseModel
from . import GeneralDefinitions
from .chat_configs import ChatOptions, VoiceChatConfigs
from .command_definitions import (
accounting_report,
browser_chat,
terminal_chat,
voice_chat,
)
def _populate_parser_from_pydantic_model(parser, model: BaseModel):
_argarse2pydantic = {
"type": model.get_type,
"default": model.get_default,
"choices": model.get_allowed_values,
"help": model.get_description,
}
for field_name, field in model.model_fields.items():
with contextlib.suppress(AttributeError):
if not field.json_schema_extra.get("changeable", True):
continue
args_opts = {
key: _argarse2pydantic[key](field_name)
for key in _argarse2pydantic
if _argarse2pydantic[key](field_name) is not None
}
if args_opts.get("type") == bool:
if args_opts.get("default") is True:
args_opts["action"] = "store_false"
else:
args_opts["action"] = "store_true"
args_opts.pop("default", None)
args_opts.pop("type", None)
args_opts["required"] = field.is_required()
if "help" in args_opts:
args_opts["help"] = f"{args_opts['help']} (default: %(default)s)"
if "default" in args_opts and isinstance(args_opts["default"], (list, tuple)):
args_opts.pop("type", None)
args_opts["nargs"] = "*"
parser.add_argument(f"--{field_name.replace('_', '-')}", **args_opts)
return parser
def get_parsed_args(argv=None, default_command="ui"):
"""Get parsed command line arguments.
Args:
argv (list): A list of passed command line args.
default_command (str, optional): The default command to run.
Returns:
argparse.Namespace: Parsed command line arguments.
"""
if argv is None:
argv = sys.argv[1:]
first_argv = next(iter(argv), "'")
info_flags = ["--version", "-v", "-h", "--help"]
if not argv or (first_argv.startswith("-") and first_argv not in info_flags):
argv = [default_command, *argv]
# Main parser that will handle the script's commands
main_parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
main_parser.add_argument(
"--version",
"-v",
action="version",
version=f"{GeneralDefinitions.PACKAGE_NAME} v" + GeneralDefinitions.VERSION,
)
subparsers = main_parser.add_subparsers(
title="commands",
dest="command",
required=True,
description=(
"Valid commands (note that commands also accept their "
+ "own arguments, in particular [-h]):"
),
help="command description",
)
# Common options to most commands
chat_options_parser = _populate_parser_from_pydantic_model(
parser=argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter, add_help=False
),
model=ChatOptions,
)
chat_options_parser.add_argument(
"--report-accounting-when-done",
action="store_true",
help="Report estimated costs when done with the chat.",
)
# Web app chat
parser_ui = subparsers.add_parser(
"ui",
aliases=["app", "webapp", "browser"],
parents=[chat_options_parser],
help="Run the chat UI on the browser.",
)
parser_ui.set_defaults(run_command=browser_chat)
# Voice chat
voice_options_parser = _populate_parser_from_pydantic_model(
parser=argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter, add_help=False
),
model=VoiceChatConfigs,
)
parser_voice_chat = subparsers.add_parser(
"voice",
aliases=["v", "speech", "talk"],
parents=[voice_options_parser],
help="Run the chat over voice only.",
)
parser_voice_chat.set_defaults(run_command=voice_chat)
# Terminal chat
parser_terminal = subparsers.add_parser(
"terminal",
aliases=["."],
parents=[chat_options_parser],
help="Run the chat on the terminal.",
)
parser_terminal.set_defaults(run_command=terminal_chat)
# Accounting report
parser_accounting = subparsers.add_parser(
"accounting",
aliases=["acc"],
help="Show the estimated number of used tokens and associated costs, and exit.",
)
parser_accounting.set_defaults(run_command=accounting_report)
return main_parser.parse_args(argv)
| python | MIT | 7e77d3b1aee052cfa350a806371de75b9b713ad6 | 2026-01-05T07:13:49.794839Z | false |
paulovcmedeiros/pyRobBot | https://github.com/paulovcmedeiros/pyRobBot/blob/7e77d3b1aee052cfa350a806371de75b9b713ad6/pyrobbot/chat_context.py | pyrobbot/chat_context.py | """Chat context/history management."""
import ast
import itertools
from abc import ABC, abstractmethod
from datetime import datetime, timezone
from typing import TYPE_CHECKING
import numpy as np
import openai
import pandas as pd
from scipy.spatial.distance import cosine as cosine_similarity
from .embeddings_database import EmbeddingsDatabase
from .general_utils import retry
if TYPE_CHECKING:
from .chat import Chat
class ChatContext(ABC):
"""Abstract base class for representing the context of a chat."""
def __init__(self, parent_chat: "Chat"):
"""Initialise the instance given a parent `Chat` object."""
self.parent_chat = parent_chat
self.database = EmbeddingsDatabase(
db_path=self.context_file_path, embedding_model=self.embedding_model
)
self._msg_fields_for_context = ["role", "content"]
@property
def embedding_model(self):
"""Return the embedding model used for context management."""
return self.parent_chat.context_model
@property
def context_file_path(self):
"""Return the path to the context file."""
return self.parent_chat.context_file_path
def add_to_history(self, exchange_id: str, msg_list: list[dict]):
"""Add message exchange to history."""
self.database.insert_message_exchange(
exchange_id=exchange_id,
chat_model=self.parent_chat.model,
message_exchange=msg_list,
embedding=self.request_embedding(msg_list=msg_list),
)
def load_history(self) -> list[dict]:
"""Load the chat history."""
db_history_df = self.database.retrieve_history()
# Convert unix timestamps to datetime objs at the local timezone
db_history_df["timestamp"] = db_history_df["timestamp"].apply(
lambda ts: datetime.fromtimestamp(ts)
.replace(microsecond=0, tzinfo=timezone.utc)
.astimezone(tz=None)
.replace(tzinfo=None)
)
msg_exchanges = db_history_df["message_exchange"].apply(ast.literal_eval).tolist()
# Add timestamps and path to eventual audio files to messages
for i_msg_exchange, timestamp in enumerate(db_history_df["timestamp"]):
# Index 0 is for the user's message, index 1 is for the assistant's reply
msg_exchanges[i_msg_exchange][0]["timestamp"] = timestamp
msg_exchanges[i_msg_exchange][1]["reply_audio_file_path"] = db_history_df[
"reply_audio_file_path"
].iloc[i_msg_exchange]
msg_exchanges[i_msg_exchange][1]["chat_model"] = db_history_df[
"chat_model"
].iloc[i_msg_exchange]
return list(itertools.chain.from_iterable(msg_exchanges))
def get_context(self, msg: dict):
"""Return messages to serve as context for `msg` when requesting a completion."""
return _make_list_of_context_msgs(
history=self.select_relevant_history(msg=msg),
system_name=self.parent_chat.system_name,
)
@abstractmethod
def request_embedding(self, msg_list: list[dict]):
"""Request embedding from OpenAI API."""
@abstractmethod
def select_relevant_history(self, msg: dict):
"""Select chat history msgs to use as context for `msg`."""
class FullHistoryChatContext(ChatContext):
"""Context class using full chat history."""
# Implement abstract methods
def request_embedding(self, msg_list: list[dict]): # noqa: ARG002
"""Return a placeholder embedding."""
return
def select_relevant_history(self, msg: dict): # noqa: ARG002
"""Select chat history msgs to use as context for `msg`."""
history = []
for full_history_msg in self.load_history():
history_msg = {
k: v
for k, v in full_history_msg.items()
if k in self._msg_fields_for_context
}
history.append(history_msg)
return history
class EmbeddingBasedChatContext(ChatContext):
"""Chat context using embedding models."""
def request_embedding_for_text(self, text: str):
"""Request embedding for `text` from OpenAI according to used embedding model."""
embedding_request = request_embedding_from_openai(
text=text,
model=self.embedding_model,
openai_client=self.parent_chat.openai_client,
)
# Update parent chat's token usage db with tokens used in embedding request
for db in [
self.parent_chat.general_token_usage_db,
self.parent_chat.token_usage_db,
]:
for comm_type, n_tokens in embedding_request["tokens_usage"].items():
input_or_output_kwargs = {f"n_{comm_type}_tokens": n_tokens}
db.insert_data(model=self.embedding_model, **input_or_output_kwargs)
return embedding_request["embedding"]
# Implement abstract methods
def request_embedding(self, msg_list: list[dict]):
"""Convert `msg_list` into a paragraph and get embedding from OpenAI API call."""
text = "\n".join(
[f"{msg['role'].strip()}: {msg['content'].strip()}" for msg in msg_list]
)
return self.request_embedding_for_text(text=text)
def select_relevant_history(self, msg: dict):
"""Select chat history msgs to use as context for `msg`."""
relevant_history = []
for full_context_msg in _select_relevant_history(
history_df=self.database.retrieve_history(),
embedding=self.request_embedding_for_text(text=msg["content"]),
):
context_msg = {
k: v
for k, v in full_context_msg.items()
if k in self._msg_fields_for_context
}
relevant_history.append(context_msg)
return relevant_history
@retry()
def request_embedding_from_openai(text: str, model: str, openai_client: openai.OpenAI):
"""Request embedding for `text` according to context model `model` from OpenAI."""
text = text.strip()
embedding_request = openai_client.embeddings.create(input=[text], model=model)
embedding = embedding_request.data[0].embedding
input_tokens = embedding_request.usage.prompt_tokens
output_tokens = embedding_request.usage.total_tokens - input_tokens
tokens_usage = {"input": input_tokens, "output": output_tokens}
return {"embedding": embedding, "tokens_usage": tokens_usage}
def _make_list_of_context_msgs(history: list[dict], system_name: str):
sys_directives = "Considering the previous messages, answer the next message:"
sys_msg = {"role": "system", "name": system_name, "content": sys_directives}
return [*history, sys_msg]
def _select_relevant_history(
history_df: pd.DataFrame,
embedding: np.ndarray,
max_n_prompt_reply_pairs: int = 5,
max_n_tailing_prompt_reply_pairs: int = 2,
):
history_df["embedding"] = (
history_df["embedding"].apply(ast.literal_eval).apply(np.array)
)
history_df["similarity"] = history_df["embedding"].apply(
lambda x: cosine_similarity(x, embedding)
)
# Get the last messages added to the history
df_last_n_chats = history_df.tail(max_n_tailing_prompt_reply_pairs)
# Get the most similar messages
df_similar_chats = (
history_df.sort_values("similarity", ascending=False)
.head(max_n_prompt_reply_pairs)
.sort_values("timestamp")
)
df_context = pd.concat([df_similar_chats, df_last_n_chats])
selected_history = (
df_context["message_exchange"].apply(ast.literal_eval).drop_duplicates()
).tolist()
return list(itertools.chain.from_iterable(selected_history))
| python | MIT | 7e77d3b1aee052cfa350a806371de75b9b713ad6 | 2026-01-05T07:13:49.794839Z | false |
paulovcmedeiros/pyRobBot | https://github.com/paulovcmedeiros/pyRobBot/blob/7e77d3b1aee052cfa350a806371de75b9b713ad6/pyrobbot/internet_utils.py | pyrobbot/internet_utils.py | """Internet search module for the package."""
import asyncio
import re
import numpy as np
import requests
from bs4 import BeautifulSoup
from bs4.element import Comment
from duckduckgo_search import AsyncDDGS
from loguru import logger
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from unidecode import unidecode
from . import GeneralDefinitions
from .general_utils import retry
def cosine_similarity_sentences(sentence1, sentence2):
"""Compute the cosine similarity between two sentences."""
vectorizer = TfidfVectorizer()
vectors = vectorizer.fit_transform([sentence1, sentence2])
similarity = cosine_similarity(vectors[0], vectors[1])
return similarity[0][0]
def element_is_visible(element):
"""Return True if the element is visible."""
tags_to_exclude = [
"[document]",
"head",
"header",
"html",
"input",
"meta",
"noscript",
"script",
"style",
"style",
"title",
]
if element.parent.name in tags_to_exclude or isinstance(element, Comment):
return False
return True
def extract_text_from_html(body):
"""Extract the text from an HTML document."""
soup = BeautifulSoup(body, "html.parser")
page_has_captcha = soup.find("div", id="recaptcha") is not None
if page_has_captcha:
return ""
texts = soup.find_all(string=True)
visible_texts = filter(element_is_visible, texts)
return " ".join(t.strip() for t in visible_texts if t.strip())
def find_whole_word_index(my_string, my_substring):
"""Find the index of a substring in a string, but only if it is a whole word match."""
pattern = re.compile(r"\b{}\b".format(re.escape(my_substring)))
match = pattern.search(my_string)
if match:
return match.start()
return -1 # Substring not found
async def async_raw_websearch(
query: str,
max_results: int = 5,
region: str = GeneralDefinitions.IPINFO["country_name"],
):
"""Search the web using DuckDuckGo Search API."""
async with AsyncDDGS(proxies=None) as addgs:
results = await addgs.text(
keywords=query,
region=region,
max_results=max_results,
backend="html",
)
return results
def raw_websearch(
query: str,
max_results: int = 5,
region: str = GeneralDefinitions.IPINFO["country_name"],
):
"""Search the web using DuckDuckGo Search API."""
raw_results = asyncio.run(
async_raw_websearch(query=query, max_results=max_results, region=region)
)
raw_results = raw_results or []
results = []
for result in raw_results:
if not isinstance(result, dict):
logger.error("Expected a `dict`, got type {}: {}", type(result), result)
results.append({})
continue
if result.get("body") is None:
continue
try:
response = requests.get(result["href"], allow_redirects=False, timeout=10)
except (requests.exceptions.ConnectionError, requests.exceptions.ReadTimeout):
continue
else:
content_type = response.headers.get("content-type")
if (not content_type) or ("text/html" not in content_type):
continue
html = unidecode(extract_text_from_html(response.text))
summary = unidecode(result["body"])
relevance = cosine_similarity_sentences(query.lower(), summary.lower())
relevance_threshold = 1e-2
if relevance < relevance_threshold:
continue
new_results = {
"href": result["href"],
"summary": summary,
"detailed": html,
"relevance": relevance,
}
results.append(new_results)
return results
@retry(error_msg="Error performing web search")
def websearch(query, **kwargs):
"""Search the web using DuckDuckGo Search API."""
raw_results = raw_websearch(query, **kwargs)
raw_results = iter(
sorted(raw_results, key=lambda x: x.get("relevance", 0.0), reverse=True)
)
min_relevant_keyword_length = 4
min_n_words = 40
for result in raw_results:
html = result.get("detailed", "")
index_first_query_word_to_appear = np.inf
for word in unidecode(query).split():
if len(word) < min_relevant_keyword_length:
continue
index = find_whole_word_index(html.lower(), word.lower())
if -1 < index < index_first_query_word_to_appear:
index_first_query_word_to_appear = index
if -1 < index_first_query_word_to_appear < np.inf:
html = html[index_first_query_word_to_appear:]
selected_words = html.split()[:500]
if len(selected_words) < min_n_words:
# Don't return results with less than approx one paragraph
continue
html = " ".join(selected_words)
yield {
"href": result.get("href", ""),
"summary": result.get("summary", ""),
"detailed": html,
"relevance": result.get("relevance", ""),
}
break
for result in raw_results:
yield {
"href": result.get("href", ""),
"summary": result.get("summary", ""),
"relevance": result.get("relevance", ""),
}
| python | MIT | 7e77d3b1aee052cfa350a806371de75b9b713ad6 | 2026-01-05T07:13:49.794839Z | false |
paulovcmedeiros/pyRobBot | https://github.com/paulovcmedeiros/pyRobBot/blob/7e77d3b1aee052cfa350a806371de75b9b713ad6/pyrobbot/general_utils.py | pyrobbot/general_utils.py | """General utility functions and classes."""
import difflib
import inspect
import json
import re
import time
from functools import wraps
from pathlib import Path
from typing import Optional
import httpx
import openai
from loguru import logger
from pydub import AudioSegment
from pydub.silence import detect_leading_silence
class ReachedMaxNumberOfAttemptsError(Exception):
"""Error raised when the max number of attempts has been reached."""
def _get_lower_alphanumeric(string: str):
"""Return a string with only lowercase alphanumeric characters."""
return re.sub("[^0-9a-zA-Z]+", " ", string.strip().lower())
def str2_minus_str1(str1: str, str2: str):
"""Return the words in str2 that are not in str1."""
output_list = [diff for diff in difflib.ndiff(str1, str2) if diff[0] == "+"]
str_diff = "".join(el.replace("+ ", "") for el in output_list if el.startswith("+"))
return str_diff
def get_call_traceback(depth=5):
"""Get the traceback of the call to the function."""
curframe = inspect.currentframe()
callframe = inspect.getouterframes(curframe)
call_path = []
for iframe, frame in enumerate(callframe):
fpath = frame.filename
lineno = frame.lineno
function = frame.function
code_context = frame.code_context[0].strip()
call_path.append(
{
"fpath": fpath,
"lineno": lineno,
"function": function,
"code_context": code_context,
}
)
if iframe == depth:
break
return call_path
def trim_beginning(audio: AudioSegment, **kwargs):
"""Trim the beginning of the audio to remove silence."""
beginning = detect_leading_silence(audio, **kwargs)
return audio[beginning:]
def trim_ending(audio: AudioSegment, **kwargs):
"""Trim the ending of the audio to remove silence."""
audio = trim_beginning(audio.reverse(), **kwargs)
return audio.reverse()
def trim_silence(audio: AudioSegment, **kwargs):
"""Trim the silence from the beginning and ending of the audio."""
kwargs["silence_threshold"] = kwargs.get("silence_threshold", -40.0)
audio = trim_beginning(audio, **kwargs)
return trim_ending(audio, **kwargs)
def retry(
max_n_attempts: int = 5,
handled_errors: tuple[Exception, ...] = (
openai.APITimeoutError,
httpx.HTTPError,
RuntimeError,
),
error_msg: Optional[str] = None,
):
"""Retry executing the decorated function/generator."""
def retry_or_fail(error):
"""Decide whether to retry or fail based on the number of attempts."""
retry_or_fail.execution_count = getattr(retry_or_fail, "execution_count", 0) + 1
if retry_or_fail.execution_count < max_n_attempts:
logger.warning(
"{}. Making new attempt ({}/{})...",
error,
retry_or_fail.execution_count + 1,
max_n_attempts,
)
time.sleep(1)
else:
raise ReachedMaxNumberOfAttemptsError(error_msg) from error
def retry_decorator(function):
"""Wrap `function`."""
@wraps(function)
def wrapper_f(*args, **kwargs):
while True:
try:
return function(*args, **kwargs)
except handled_errors as error: # noqa: PERF203
retry_or_fail(error=error)
@wraps(function)
def wrapper_generator_f(*args, **kwargs):
success = False
while not success:
try:
yield from function(*args, **kwargs)
except handled_errors as error: # noqa: PERF203
retry_or_fail(error=error)
else:
success = True
return wrapper_generator_f if inspect.isgeneratorfunction(function) else wrapper_f
return retry_decorator
class AlternativeConstructors:
"""Mixin class for alternative constructors."""
@classmethod
def from_dict(cls, configs: dict, **kwargs):
"""Creates an instance from a configuration dictionary.
Converts the configuration dictionary into a instance of this class
and uses it to instantiate the Chat class.
Args:
configs (dict): The configuration options as a dictionary.
**kwargs: Additional keyword arguments to pass to the class constructor.
Returns:
cls: An instance of Chat initialized with the given configurations.
"""
return cls(configs=cls.default_configs.model_validate(configs), **kwargs)
@classmethod
def from_cli_args(cls, cli_args, **kwargs):
"""Creates an instance from CLI arguments.
Extracts relevant options from the CLI arguments and initializes a class instance
with them.
Args:
cli_args: The command line arguments.
**kwargs: Additional keyword arguments to pass to the class constructor.
Returns:
cls: An instance of the class initialized with CLI-specified configurations.
"""
chat_opts = {
k: v
for k, v in vars(cli_args).items()
if k in cls.default_configs.model_fields and v is not None
}
return cls.from_dict(chat_opts, **kwargs)
@classmethod
def from_cache(cls, cache_dir: Path, **kwargs):
"""Loads an instance from a cache directory.
Args:
cache_dir (Path): The path to the cache directory.
**kwargs: Additional keyword arguments to pass to the class constructor.
Returns:
cls: An instance of the class loaded with cached configurations and metadata.
"""
try:
with open(cache_dir / "configs.json", "r") as configs_f:
new_configs = json.load(configs_f)
except FileNotFoundError:
logger.warning(
"Could not find config file in cache directory <{}>. "
+ "Creating {} with default configs.",
cache_dir,
cls.__name__,
)
new_configs = cls.default_configs.model_dump()
try:
with open(cache_dir / "metadata.json", "r") as metadata_f:
new_metadata = json.load(metadata_f)
except FileNotFoundError:
logger.warning(
"Could not find metadata file in cache directory <{}>. "
+ "Creating {} with default metadata.",
cache_dir,
cls.__name__,
)
new_metadata = None
new = cls.from_dict(new_configs, **kwargs)
if new_metadata is not None:
new.metadata = new_metadata
logger.debug(
"Reseting chat_id from cache: {} --> {}.",
new.id,
new.metadata["chat_id"],
)
new.id = new.metadata["chat_id"]
return new
| python | MIT | 7e77d3b1aee052cfa350a806371de75b9b713ad6 | 2026-01-05T07:13:49.794839Z | false |
paulovcmedeiros/pyRobBot | https://github.com/paulovcmedeiros/pyRobBot/blob/7e77d3b1aee052cfa350a806371de75b9b713ad6/pyrobbot/chat.py | pyrobbot/chat.py | #!/usr/bin/env python3
"""Implementation of the Chat class."""
import contextlib
import json
import shutil
import uuid
from collections import defaultdict
from datetime import datetime
from pathlib import Path
from typing import Optional
import openai
from attr import dataclass
from loguru import logger
from pydub import AudioSegment
from tzlocal import get_localzone
from . import GeneralDefinitions
from .chat_configs import ChatOptions
from .chat_context import EmbeddingBasedChatContext, FullHistoryChatContext
from .general_utils import (
AlternativeConstructors,
ReachedMaxNumberOfAttemptsError,
get_call_traceback,
)
from .internet_utils import websearch
from .openai_utils import OpenAiClientWrapper, make_api_chat_completion_call
from .sst_and_tts import SpeechToText, TextToSpeech
from .tokens import PRICE_PER_K_TOKENS_EMBEDDINGS, TokenUsageDatabase
@dataclass
class AssistantResponseChunk:
"""A chunk of the assistant's response."""
exchange_id: str
content: str
chunk_type: str = "text"
class Chat(AlternativeConstructors):
"""Manages conversations with an AI chat model.
This class encapsulates the chat behavior, including handling the chat context,
managing cache directories, and interfacing with the OpenAI API for generating chat
responses.
"""
_translation_cache = defaultdict(dict)
default_configs = ChatOptions()
def __init__(
self,
openai_client: OpenAiClientWrapper = None,
configs: ChatOptions = default_configs,
):
"""Initializes a chat instance.
Args:
configs (ChatOptions, optional): The configurations for this chat session.
openai_client (openai.OpenAI, optional): An OpenAiClientWrapper instance.
Raises:
NotImplementedError: If the context model specified in configs is unknown.
"""
self.id = str(uuid.uuid4())
logger.trace(
"Init chat {}, as requested by from <{}>", self.id, get_call_traceback()
)
logger.debug("Init chat {}", self.id)
self._code_marker = "\uE001" # TEST
self._passed_configs = configs
for field in self._passed_configs.model_fields:
setattr(self, field, self._passed_configs[field])
try:
self.openai_client = (
openai_client
if openai_client is not None
else OpenAiClientWrapper(
timeout=self.timeout, private_mode=self.private_mode
)
)
except openai.OpenAIError as error:
logger.opt(exception=True).debug(error)
logger.error(
"Cannot connect to OpenAI API. Please verify your API key. {}.", error
)
self.openai_client = None
@property
def base_directive(self):
"""Return the base directive for the LLM."""
code_marker = self._code_marker
local_datetime = datetime.now(get_localzone()).isoformat(timespec="seconds")
msg_content = (
f"Your name is {self.assistant_name}. Your model is {self.model}\n"
f"You are a helpful assistant to {self.username}\n"
f"You have internet access\n"
f"You MUST ALWAYS write {code_marker} before AND after code blocks. Example: "
f"```foo ... ``` MUST become {code_marker}```foo ... ```{code_marker}\n"
f"The current city is {GeneralDefinitions.IPINFO['city']} in "
f"{GeneralDefinitions.IPINFO['country_name']}\n"
f"The local datetime is {local_datetime}\n"
f"\nYou must follow all directives by {self.system_name} "
f"unless otherwise instructed by {self.username}\n"
f"You must reply in {self.language} unless otherwise instructed\n"
"> If asked to look up online, web, internet etc, *you MUST ALWAYS AGREE*\n"
"\n*IMPORTANT*: If you can't find information neither in your training data "
"nor in the conversation context then, unless you have either already "
f"looked up online or not understood {self.username}'s prompt, you must "
"do as follows:\n"
" > Do *NOT* apologise nor say you are sorry nor give any excuses.\n"
" > Do *NOT* ask for permission to lookup online.\n"
" > STATE CLEARLY that you will look it up online.\n"
"\n".join([f"{instruct.strip(' .')}." for instruct in self.ai_instructions])
)
return {"role": "system", "name": self.system_name, "content": msg_content}
@property
def configs(self):
"""Return the chat's configs after initialisation."""
configs_dict = {}
for field_name in self._passed_configs.model_fields:
configs_dict[field_name] = getattr(self, field_name)
return self._passed_configs.model_validate(configs_dict)
    @property
    def tmp_dir(self):
        """Return the chat's temporary directory as a `Path`.

        `_tmp_dir` is presumably a `tempfile.TemporaryDirectory`-like object
        created at init (not visible here); its `.name` is the directory path.
        """
        return Path(self._tmp_dir.name)
@property
def cache_dir(self):
"""Return the cache directory for this chat."""
parent_dir = self.openai_client.get_cache_dir(private_mode=self.private_mode)
directory = parent_dir / f"chat_{self.id}"
directory.mkdir(parents=True, exist_ok=True)
return directory
    @property
    def configs_file(self):
        """Path (inside `cache_dir`) of the JSON file storing the chat's configs."""
        return self.cache_dir / "configs.json"
    @property
    def context_file_path(self):
        """Path of the sqlite file that stores the chat context and history."""
        return self.cache_dir / "embeddings.db"
@property
def context_handler(self):
"""Return the chat's context handler."""
if self.context_model == "full-history":
return FullHistoryChatContext(parent_chat=self)
if self.context_model in PRICE_PER_K_TOKENS_EMBEDDINGS:
return EmbeddingBasedChatContext(parent_chat=self)
raise NotImplementedError(f"Unknown context model: {self.context_model}")
    @property
    def token_usage_db(self):
        """Per-chat token usage database, stored in this chat's cache dir."""
        return TokenUsageDatabase(fpath=self.cache_dir / "chat_token_usage.db")
    @property
    def general_token_usage_db(self):
        """Return the general token usage database for all chats.

        Even private-mode chats will use this database to keep track of total
        token usage. Note that it lives in the *parent* of the non-private
        cache dir, so it is shared by all chats of the same user.
        """
        general_cache_dir = self.openai_client.get_cache_dir(private_mode=False)
        return TokenUsageDatabase(fpath=general_cache_dir.parent / "token_usage.db")
    @property
    def metadata_file(self):
        """Path (inside `cache_dir`) of the JSON file storing the chat metadata."""
        return self.cache_dir / "metadata.json"
@property
def metadata(self):
"""Keep metadata associated with the chat."""
try:
_ = self._metadata
except AttributeError:
try:
with open(self.metadata_file, "r") as f:
self._metadata = json.load(f)
except (FileNotFoundError, json.decoder.JSONDecodeError):
self._metadata = {}
return self._metadata
    @metadata.setter
    def metadata(self, value):
        # Copy into a plain dict so later external mutations of `value` don't leak in
        self._metadata = dict(value)
    def save_cache(self):
        """Store the chat's configs and metadata to the cache directory."""
        # Persist the validated configs as JSON next to the metadata
        self.configs.export(self.configs_file)

        metadata = self.metadata  # Trigger loading metadata if not yet done
        metadata["chat_id"] = self.id
        with open(self.metadata_file, "w") as metadata_f:
            json.dump(metadata, metadata_f, indent=2)
    def clear_cache(self):
        """Remove the chat's cache directory (configs, metadata, databases)."""
        logger.debug("Clearing cache for chat {}", self.id)
        shutil.rmtree(self.cache_dir, ignore_errors=True)
    def load_history(self):
        """Load chat history from cache via the configured context handler."""
        return self.context_handler.load_history()
    @property
    def initial_greeting(self):
        """Return the initial greeting for the chat.

        Falls back to a default greeting when the user has not set one, and
        translates the greeting when it is user-customised or when the chat
        language is not English.
        """
        default_greeting = f"Hi! I'm {self.assistant_name}. How can I assist you?"

        # `_initial_greeting` may not exist yet if the setter was never called
        user_set_greeting = False
        with contextlib.suppress(AttributeError):
            user_set_greeting = self._initial_greeting != ""

        if not user_set_greeting:
            self._initial_greeting = default_greeting

        # Translate only when needed: a custom greeting, or a non-English chat
        custom_greeting = user_set_greeting and self._initial_greeting != default_greeting
        if custom_greeting or self.language[:2] != "en":
            self._initial_greeting = self._translate(self._initial_greeting)

        return self._initial_greeting
    @initial_greeting.setter
    def initial_greeting(self, value: str):
        # Store a stripped string; an empty value means "use the default greeting"
        self._initial_greeting = str(value).strip()
    def respond_user_prompt(self, prompt: str, **kwargs):
        """Respond to a user prompt, yielding `AssistantResponseChunk` objects."""
        yield from self._respond_prompt(prompt=prompt, role="user", **kwargs)
def respond_system_prompt(
self, prompt: str, add_to_history=False, skip_check=True, **kwargs
):
"""Respond to a system prompt."""
for response_chunk in self._respond_prompt(
prompt=prompt,
role="system",
add_to_history=add_to_history,
skip_check=skip_check,
**kwargs,
):
yield response_chunk.content
    def yield_response_from_msg(
        self, prompt_msg: dict, add_to_history: bool = True, **kwargs
    ):
        """Yield response from a prompt message.

        Wraps the lower-level generator: tags each chunk as "code" or "text"
        based on the code markers the LLM was instructed to emit, strips those
        markers, and converts API errors into a user-facing failure message.
        """
        exchange_id = str(uuid.uuid4())
        code_marker = self._code_marker
        try:
            inside_code_block = False
            for answer_chunk in self._yield_response_from_msg(
                exchange_id=exchange_id,
                prompt_msg=prompt_msg,
                add_to_history=add_to_history,
                **kwargs,
            ):
                # XOR toggle: a chunk containing the marker flips the
                # inside/outside-code-block state; other chunks keep it.
                code_marker_detected = code_marker in answer_chunk
                inside_code_block = (code_marker_detected and not inside_code_block) or (
                    inside_code_block and not code_marker_detected
                )
                yield AssistantResponseChunk(
                    exchange_id=exchange_id,
                    content=answer_chunk.strip(code_marker),
                    chunk_type="code" if inside_code_block else "text",
                )
        except (ReachedMaxNumberOfAttemptsError, openai.OpenAIError) as error:
            yield self.response_failure_message(exchange_id=exchange_id, error=error)
    def start(self):
        """Start the chat as an interactive loop on the terminal.

        Runs until interrupted (Ctrl+C) or EOF (Ctrl+D).
        """
        # ruff: noqa: T201
        print(f"{self.assistant_name}> {self.initial_greeting}\n")
        try:
            while True:
                question = input(f"{self.username}> ").strip()
                if not question:
                    continue  # Ignore empty prompts
                print(f"{self.assistant_name}> ", end="", flush=True)
                # Stream the reply chunk by chunk as it arrives
                for chunk in self.respond_user_prompt(prompt=question):
                    print(chunk.content, end="", flush=True)
                print()
                print()
        except (KeyboardInterrupt, EOFError):
            print("", end="\r")  # Carriage return to tidy up the prompt line
            logger.info("Leaving chat")
def report_token_usage(self, report_current_chat=True, report_general: bool = False):
"""Report token usage and associated costs."""
dfs = {}
if report_general:
dfs["All Recorded Chats"] = (
self.general_token_usage_db.get_usage_balance_dataframe()
)
if report_current_chat:
dfs["Current Chat"] = self.token_usage_db.get_usage_balance_dataframe()
if dfs:
for category, df in dfs.items():
header = f"{df.attrs['description']}: {category}"
table_separator = "=" * (len(header) + 4)
print(table_separator)
print(f" {header} ")
print(table_separator)
print(df)
print()
print(df.attrs["disclaimer"])
def response_failure_message(
self, exchange_id: Optional[str] = "", error: Optional[Exception] = None
):
"""Return the error message errors getting a response."""
msg = "Could not get a response right now."
if error is not None:
msg += f" The reason seems to be: {error} "
msg += "Please check your connection or OpenAI API key."
logger.opt(exception=True).debug(error)
return AssistantResponseChunk(exchange_id=exchange_id, content=msg)
    def stt(self, speech: AudioSegment):
        """Return a `SpeechToText` helper configured to transcribe `speech`."""
        return SpeechToText(
            speech=speech,
            openai_client=self.openai_client,
            engine=self.stt_engine,
            language=self.language,
            timeout=self.timeout,
            general_token_usage_db=self.general_token_usage_db,
            token_usage_db=self.token_usage_db,
        )
    def tts(self, text: str):
        """Return a `TextToSpeech` helper configured to synthesise `text`."""
        return TextToSpeech(
            text=text,
            openai_client=self.openai_client,
            language=self.language,
            engine=self.tts_engine,
            openai_tts_voice=self.openai_tts_voice,
            timeout=self.timeout,
            general_token_usage_db=self.general_token_usage_db,
            token_usage_db=self.token_usage_db,
        )
    def _yield_response_from_msg(
        self,
        exchange_id,
        prompt_msg: dict,
        add_to_history: bool = True,
        skip_check: bool = False,
    ):
        """Yield response from a prompt message (lower level interface).

        Streams the LLM reply and, unless `skip_check` is set, asks the LLM
        whether the answer was satisfactory; when it wasn't, performs a web
        search and streams a summary of the results. Finally, optionally
        records the exchange in the chat history.
        """
        # Get appropriate context for prompt from the context handler
        context = self.context_handler.get_context(msg=prompt_msg)

        # Make API request and yield response chunks
        full_reply_content = ""
        for chunk in make_api_chat_completion_call(
            conversation=[self.base_directive, *context, prompt_msg], chat_obj=self
        ):
            full_reply_content += chunk.strip(self._code_marker)
            yield chunk

        if not skip_check:
            # Ask the model itself whether the reply answered the prompt or
            # implied the need for an online lookup
            last_msg_exchange = (
                f"`user` says: {prompt_msg['content']}\n"
                f"`you` replies: {full_reply_content}"
            )
            system_check_msg = (
                "Consider the following dialogue between `user` and `you` "
                "AND NOTHING MORE:\n\n"
                f"{last_msg_exchange}\n\n"
                "Now answer the following question using only 'yes' or 'no':\n"
                "Were `you` able to provide a good answer the `user`s prompt, without "
                "neither `you` nor `user` asking or implying the need or intention to "
                "perform a search or lookup online, on the web or the internet?\n"
            )
            reply = "".join(self.respond_system_prompt(prompt=system_check_msg))
            reply = reply.strip(".' ").lower()
            # Check both the English "no" and its translation in the chat language
            if ("no" in reply) or (self._translate("no") in reply):
                # Step 1: have the LLM write a short web-search query
                instructions_for_web_search = (
                    "You are a professional web searcher. You will be presented with a "
                    "dialogue between `user` and `you`. Considering the dialogue and "
                    "relevant previous messages, write "
                    "the best short web search query to look for an answer to the "
                    "`user`'s prompt. You MUST follow the rules below:\n"
                    "* Write *only the query* and nothing else\n"
                    "* DO NOT RESTRICT the search to any particular website "
                    "unless otherwise instructed\n"
                    "* You MUST reply in the `user`'s language unless otherwise asked\n\n"
                    "The `dialogue` is:"
                )
                instructions_for_web_search += f"\n\n{last_msg_exchange}"
                internet_query = "".join(
                    self.respond_system_prompt(prompt=instructions_for_web_search)
                )
                yield "\n\n" + self._translate(
                    "Searching the web now. My search is: "
                ) + f" '{internet_query}'..."

                # Step 2: run the search and serialise the results as json
                web_results_json_dumps = "\n\n".join(
                    json.dumps(result, indent=2) for result in websearch(internet_query)
                )
                if web_results_json_dumps:
                    logger.opt(colors=True).debug(
                        "Web search rtn: <yellow>{}</yellow>...", web_results_json_dumps
                    )
                    # Step 3: have the LLM summarise the results for the user
                    original_prompt = prompt_msg["content"]
                    prompt = (
                        "You are a talented data analyst, "
                        "capable of summarising any information, even complex `json`. "
                        "You will be shown a `json` and a `prompt`. Your task is to "
                        "summarise the `json` to answer the `prompt`. "
                        "You MUST follow the rules below:\n\n"
                        "* *ALWAYS* provide a meaningful summary to the the `json`\n"
                        "* *Do NOT include links* or anything a human can't pronounce, "
                        "unless otherwise instructed\n"
                        "* Prefer searches without quotes but use them if needed\n"
                        "* Answer in human language (i.e., no json, etc)\n"
                        "* Answer in the `user`'s language unless otherwise asked\n"
                        "* Make sure to point out that the information is from a quick "
                        "web search and may be innacurate\n"
                        "* Mention the sources shortly WITHOUT MENTIONING WEB LINKS\n\n"
                        "The `json` and the `prompt` are presented below:\n"
                    )
                    prompt += f"\n```json\n{web_results_json_dumps}\n```\n"
                    prompt += f"\n`prompt`: '{original_prompt}'"
                    yield "\n\n" + self._translate(
                        " I've got some results. Let me summarise them for you..."
                    )
                    full_reply_content += " "
                    yield "\n\n"
                    # The summary also becomes part of the recorded reply
                    for chunk in self.respond_system_prompt(prompt=prompt):
                        full_reply_content += chunk.strip(self._code_marker)
                        yield chunk
                else:
                    yield self._translate(
                        "Sorry, but I couldn't find anything on the web this time."
                    )

        if add_to_history:
            # Put current chat exchange in context handler's history
            self.context_handler.add_to_history(
                exchange_id=exchange_id,
                msg_list=[
                    prompt_msg,
                    {"role": "assistant", "content": full_reply_content},
                ],
            )
    def _respond_prompt(self, prompt: str, role: str, **kwargs):
        """Normalise `prompt`/`role` into a message dict and yield the response."""
        prompt_as_msg = {"role": role.lower().strip(), "content": prompt.strip()}
        yield from self.yield_response_from_msg(prompt_as_msg, **kwargs)
    def _translate(self, text):
        """Translate `text` into the chat's language, using a class-level cache."""
        lang = self.language

        # The cache is shared by all instances (keyed by text, then language)
        cached_translation = type(self)._translation_cache[text].get(lang)  # noqa SLF001
        if cached_translation:
            return cached_translation

        logger.debug("Processing translation of '{}' to '{}'...", text, lang)
        translation_prompt = (
            f"Translate the text between triple quotes below to {lang}. "
            "DO NOT WRITE ANYTHING ELSE. Only the translation. "
            f"If the text is already in {lang}, then don't translate. Just return ''.\n"
            f"'''{text}'''"
        )
        translation = "".join(self.respond_system_prompt(prompt=translation_prompt))
        translation = translation.strip(" '\"")
        if not translation.strip():
            # Empty reply means the text was already in the target language
            translation = text.strip()
        logger.debug("Translated '{}' to '{}' as '{}'", text, lang, translation)

        # Cache both directions so translating an already-translated text is a no-op
        type(self)._translation_cache[text][lang] = translation  # noqa: SLF001
        type(self)._translation_cache[translation][lang] = translation  # noqa: SLF001

        return translation
def __del__(self):
"""Delete the chat instance."""
logger.debug("Deleting chat {}", self.id)
chat_started = self.context_handler.database.n_entries > 0
if self.private_mode or not chat_started:
self.clear_cache()
else:
self.save_cache()
self.clear_cache()
| python | MIT | 7e77d3b1aee052cfa350a806371de75b9b713ad6 | 2026-01-05T07:13:49.794839Z | false |
paulovcmedeiros/pyRobBot | https://github.com/paulovcmedeiros/pyRobBot/blob/7e77d3b1aee052cfa350a806371de75b9b713ad6/pyrobbot/embeddings_database.py | pyrobbot/embeddings_database.py | """Management of embeddings/chat history storage and retrieval."""
import datetime
import json
import sqlite3
from pathlib import Path
from typing import Union
import pandas as pd
from loguru import logger
class EmbeddingsDatabase:
    """Class for managing an SQLite database storing embeddings and associated data."""

    def __init__(self, db_path: Path, embedding_model: str):
        """Initialise the EmbeddingsDatabase object.

        Args:
            db_path (Path): The path to the SQLite database file.
            embedding_model (str): The embedding model associated with this database.
        """
        self.db_path = db_path
        self.embedding_model = embedding_model
        self.create()

    def create(self):
        """Create the necessary tables and triggers in the SQLite database."""
        self.db_path.parent.mkdir(parents=True, exist_ok=True)
        conn = sqlite3.connect(self.db_path)
        # SQL to create the needed tables
        create_table_sqls = {
            "embedding_model": """
                CREATE TABLE IF NOT EXISTS embedding_model (
                    created_timestamp INTEGER NOT NULL,
                    embedding_model TEXT NOT NULL,
                    PRIMARY KEY (embedding_model)
                )
            """,
            "messages": """
                CREATE TABLE IF NOT EXISTS messages (
                    id TEXT PRIMARY KEY NOT NULL,
                    timestamp INTEGER NOT NULL,
                    chat_model TEXT NOT NULL,
                    message_exchange TEXT NOT NULL,
                    embedding TEXT
                )
            """,
            "reply_audio_files": """
                CREATE TABLE IF NOT EXISTS reply_audio_files (
                    id TEXT PRIMARY KEY NOT NULL,
                    file_path TEXT NOT NULL,
                    FOREIGN KEY (id) REFERENCES messages(id) ON DELETE CASCADE
                )
            """,
        }
        with conn:
            for table_name, table_create_sql in create_table_sqls.items():
                # Create tables
                conn.execute(table_create_sql)
                # Create triggers to prevent modification after insertion
                conn.execute(
                    f"""
                    CREATE TRIGGER IF NOT EXISTS prevent_{table_name}_modification
                    BEFORE UPDATE ON {table_name}
                    BEGIN
                        SELECT RAISE(FAIL, 'Table "{table_name}": modification not allowed');
                    END;
                    """
                )
        # Close the connection to the database
        conn.close()

    def get_embedding_model(self):
        """Retrieve the database's embedding model.

        Returns:
            str: The embedding model or None if the database is not yet initialised.
        """
        conn = sqlite3.connect(self.db_path)
        query = "SELECT embedding_model FROM embedding_model;"
        # Execute the query and fetch the result
        embedding_model = None
        with conn:
            cur = conn.cursor()
            cur.execute(query)
            result = cur.fetchone()
            embedding_model = result[0] if result else None
        conn.close()
        return embedding_model

    def insert_message_exchange(
        self, exchange_id, chat_model, message_exchange, embedding
    ):
        """Insert a message exchange into the database's 'messages' table.

        Args:
            exchange_id (str): The id of the message exchange.
            chat_model (str): The chat model.
            message_exchange: The message exchange.
            embedding: The embedding associated with the message exchange.

        Raises:
            ValueError: If the database already contains a different embedding model.
        """
        stored_embedding_model = self.get_embedding_model()
        if stored_embedding_model is None:
            self._init_database()
        elif stored_embedding_model != self.embedding_model:
            raise ValueError(
                "Database already contains a different embedding model: "
                f"{self.get_embedding_model()}.\n"
                "Cannot continue."
            )

        # BUG FIX: `utcnow().timestamp()` interprets the naive datetime in the
        # *local* timezone, producing an epoch off by the local UTC offset.
        # Use an aware UTC datetime instead.
        timestamp = int(datetime.datetime.now(datetime.timezone.utc).timestamp())
        message_exchange = json.dumps(message_exchange)
        embedding = json.dumps(embedding)

        conn = sqlite3.connect(self.db_path)
        sql = """
            INSERT INTO messages (id, timestamp, chat_model, message_exchange, embedding)
            VALUES (?, ?, ?, ?, ?)"""
        with conn:
            conn.execute(
                sql, (exchange_id, timestamp, chat_model, message_exchange, embedding)
            )
        conn.close()

    def insert_assistant_audio_file_path(
        self, exchange_id: str, file_path: Union[str, Path]
    ):
        """Insert the path to the assistant's reply audio file into the database.

        Args:
            exchange_id: The id of the message exchange.
            file_path: Path to the assistant's reply audio file.
        """
        # BUG FIX: the annotation allows plain strings, which have no
        # `.as_posix()`; normalise through `Path` first.
        file_path = Path(file_path).as_posix()
        conn = sqlite3.connect(self.db_path)
        with conn:
            # Check if the corresponding id exists in the messages table
            cursor = conn.cursor()
            cursor.execute("SELECT 1 FROM messages WHERE id=?", (exchange_id,))
            exists = cursor.fetchone() is not None
            if exists:
                # Insert into reply_audio_files
                cursor.execute(
                    "INSERT INTO reply_audio_files (id, file_path) VALUES (?, ?)",
                    (exchange_id, file_path),
                )
            else:
                logger.error("The corresponding id does not exist in the messages table")
        conn.close()

    def retrieve_history(self, exchange_id=None):
        """Retrieve data from all tables in the db combined in a single dataframe."""
        query = """
            SELECT messages.id,
                   messages.timestamp,
                   messages.chat_model,
                   messages.message_exchange,
                   reply_audio_files.file_path AS reply_audio_file_path,
                   embedding
            FROM messages
            LEFT JOIN reply_audio_files
            ON messages.id = reply_audio_files.id
        """
        params = ()
        if exchange_id:
            # SECURITY FIX: the filter used to be interpolated with an f-string,
            # allowing SQL injection via a crafted exchange_id. Parameterise it.
            query += " WHERE messages.id = ?"
            params = (exchange_id,)
        conn = sqlite3.connect(self.db_path)
        with conn:
            data_df = pd.read_sql_query(query, conn, params=params)
        conn.close()
        return data_df

    @property
    def n_entries(self):
        """Return the number of entries in the `messages` table."""
        conn = sqlite3.connect(self.db_path)
        query = "SELECT COUNT(*) FROM messages;"
        with conn:
            cur = conn.cursor()
            cur.execute(query)
            result = cur.fetchone()
        conn.close()
        return result[0]

    def _init_database(self):
        """Initialise the 'embedding_model' table in the database."""
        conn = sqlite3.connect(self.db_path)
        # Aware UTC datetime for the same reason as in insert_message_exchange
        create_time = int(datetime.datetime.now(datetime.timezone.utc).timestamp())
        sql = "INSERT INTO embedding_model "
        sql += "(created_timestamp, embedding_model) VALUES (?, ?);"
        with conn:
            conn.execute(sql, (create_time, self.embedding_model))
        conn.close()
| python | MIT | 7e77d3b1aee052cfa350a806371de75b9b713ad6 | 2026-01-05T07:13:49.794839Z | false |
paulovcmedeiros/pyRobBot | https://github.com/paulovcmedeiros/pyRobBot/blob/7e77d3b1aee052cfa350a806371de75b9b713ad6/pyrobbot/__main__.py | pyrobbot/__main__.py | #!/usr/bin/env python3
"""Program's entry point."""
from .argparse_wrapper import get_parsed_args
def main(argv=None):
    """Program's main routine: parse the CLI args and dispatch the subcommand."""
    parsed_args = get_parsed_args(argv=argv)
    parsed_args.run_command(args=parsed_args)
| python | MIT | 7e77d3b1aee052cfa350a806371de75b9b713ad6 | 2026-01-05T07:13:49.794839Z | false |
paulovcmedeiros/pyRobBot | https://github.com/paulovcmedeiros/pyRobBot/blob/7e77d3b1aee052cfa350a806371de75b9b713ad6/pyrobbot/command_definitions.py | pyrobbot/command_definitions.py | #!/usr/bin/env python3
"""Commands supported by the package's script."""
import subprocess
from loguru import logger
from . import GeneralDefinitions
from .chat import Chat
from .chat_configs import ChatOptions
from .voice_chat import VoiceChat
def voice_chat(args):
    """Start a voice-based chat built from the parsed CLI arguments."""
    VoiceChat.from_cli_args(cli_args=args).start()
def browser_chat(args):
    """Run the chat on the browser via a `streamlit` subprocess.

    The parsed options are exported to a pickle file that the app reads back.
    """
    # Hand the parsed chat options over to the streamlit app through a file
    ChatOptions.from_cli_args(args).export(fpath=GeneralDefinitions.PARSED_ARGS_FILE)

    try:
        subprocess.run(
            [  # noqa: S603, S607
                "streamlit",
                "run",
                GeneralDefinitions.APP_PATH.as_posix(),
                "--",
                GeneralDefinitions.PARSED_ARGS_FILE.as_posix(),
            ],
            cwd=GeneralDefinitions.APP_DIR.as_posix(),
            check=True,
        )
    except (KeyboardInterrupt, EOFError):
        logger.info("Exiting.")
def terminal_chat(args):
    """Run the chat on the terminal.

    Optionally prints a token-usage/cost report when the chat ends.
    """
    chat = Chat.from_cli_args(cli_args=args)
    chat.start()
    if args.report_accounting_when_done:
        chat.report_token_usage(report_general=True)
def accounting_report(args):
    """Show the accumulated costs of the chat and exit."""
    chat = Chat.from_cli_args(cli_args=args)
    # Prevent chat from creating entry in the cache directory
    chat.private_mode = True
    chat.report_token_usage(report_general=True, report_current_chat=False)
| python | MIT | 7e77d3b1aee052cfa350a806371de75b9b713ad6 | 2026-01-05T07:13:49.794839Z | false |
paulovcmedeiros/pyRobBot | https://github.com/paulovcmedeiros/pyRobBot/blob/7e77d3b1aee052cfa350a806371de75b9b713ad6/pyrobbot/sst_and_tts.py | pyrobbot/sst_and_tts.py | """Code related to speech-to-text and text-to-speech conversions."""
import io
import socket
import uuid
from dataclasses import dataclass, field
from typing import Literal
import numpy as np
import speech_recognition as sr
from gtts import gTTS
from loguru import logger
from openai import OpenAI
from pydub import AudioSegment
from .general_utils import retry
from .tokens import TokenUsageDatabase
@dataclass
class SpeechAndTextConfigs:
    """Configs shared by the speech-to-text and text-to-speech helpers."""

    openai_client: OpenAI  # client used when engine == "openai"
    general_token_usage_db: TokenUsageDatabase  # global usage accounting
    token_usage_db: TokenUsageDatabase  # per-chat usage accounting
    engine: Literal["openai", "google"] = "google"  # backend selection
    language: str = "en"  # language code, e.g. "en" or "pt-br"
    timeout: int = 10  # network timeout in seconds
@dataclass
class SpeechToText(SpeechAndTextConfigs):
    """Class for converting speech to text."""

    speech: AudioSegment = None  # input audio; normalised to silence when falsy
    _text: str = field(init=False, default="")  # cached transcription

    def __post_init__(self):
        if not self.speech:
            # Normalise "no audio" to an empty segment so export() below works
            self.speech = AudioSegment.silent(duration=0)
        self.recogniser = sr.Recognizer()
        self.recogniser.operation_timeout = self.timeout

        # Re-encode the input as in-memory WAV and wrap it for SpeechRecognition
        wav_buffer = io.BytesIO()
        self.speech.export(wav_buffer, format="wav")
        wav_buffer.seek(0)
        with sr.AudioFile(wav_buffer) as source:
            self.audio_data = self.recogniser.listen(source)

    @property
    def text(self) -> str:
        """Return the text from the speech (transcribing on first access)."""
        if not self._text:
            self._text = self._stt()
        return self._text

    def _stt(self) -> str:
        """Perform speech-to-text, falling back to the other engine on failure."""
        if not self.speech:
            logger.debug("No speech detected")
            return ""

        # Pick the primary and fallback engines according to the configs
        if self.engine == "openai":
            stt_function = self._stt_openai
            fallback_stt_function = self._stt_google
            fallback_name = "google"
        else:
            stt_function = self._stt_google
            fallback_stt_function = self._stt_openai
            fallback_name = "openai"

        conversion_id = uuid.uuid4()  # used only to correlate log messages
        logger.debug(
            "Converting audio to text ({} STT). Process {}.", self.engine, conversion_id
        )
        try:
            rtn = stt_function()
        except (
            ConnectionResetError,
            socket.timeout,
            sr.exceptions.RequestError,
        ) as error:
            # Network/API trouble with the primary engine: try the other one
            logger.error(error)
            logger.error(
                "{}: Can't communicate with `{}` speech-to-text API right now",
                conversion_id,
                self.engine,
            )
            logger.warning(
                "{}: Trying to use `{}` STT instead", conversion_id, fallback_name
            )
            rtn = fallback_stt_function()
        except sr.exceptions.UnknownValueError:
            # The recogniser could not make out any words
            logger.opt(colors=True).debug(
                "<yellow>{}: Can't understand audio</yellow>", conversion_id
            )
            rtn = ""

        self._text = rtn.strip()
        logger.opt(colors=True).debug(
            "<yellow>{}: Done with STT: {}</yellow>", conversion_id, self._text
        )
        return self._text

    @retry()
    def _stt_openai(self):
        """Perform speech-to-text using OpenAI's API."""
        wav_buffer = io.BytesIO(self.audio_data.get_wav_data())
        wav_buffer.name = "audio.wav"
        with wav_buffer as audio_file_buffer:
            transcript = self.openai_client.audio.transcriptions.create(
                model="whisper-1",
                file=audio_file_buffer,
                language=self.language.split("-")[0],  # put in ISO-639-1 format
                prompt=f"The language is {self.language}. "
                "Do not transcribe if you think the audio is noise.",
            )
        # Record usage: the audio duration (in whole seconds) is logged as tokens
        for db in [
            self.general_token_usage_db,
            self.token_usage_db,
        ]:
            db.insert_data(
                model="whisper-1",
                n_input_tokens=int(np.ceil(self.speech.duration_seconds)),
            )
        return transcript.text

    def _stt_google(self):
        """Perform speech-to-text using Google's API."""
        return self.recogniser.recognize_google(
            audio_data=self.audio_data, language=self.language
        )
@dataclass
class TextToSpeech(SpeechAndTextConfigs):
    """Class for converting text to speech."""

    text: str = ""  # input text; stripped in __post_init__
    openai_tts_voice: str = ""  # voice name used by the OpenAI engine
    _speech: AudioSegment = field(init=False, default=None)  # cached synthesis result

    def __post_init__(self):
        self.text = self.text.strip()

    @property
    def speech(self) -> AudioSegment:
        """Return the speech from the text (synthesising on first access)."""
        if not self._speech:
            self._speech = self._tts()
        return self._speech

    def set_sample_rate(self, sample_rate: int):
        """Set the sample rate of the speech."""
        self._speech = self.speech.set_frame_rate(sample_rate)

    def _tts(self):
        """Dispatch to the configured TTS engine and return its AudioSegment."""
        logger.debug("Running {} TTS on text '{}'", self.engine, self.text)
        rtn = self._tts_openai() if self.engine == "openai" else self._tts_google()
        logger.debug("Done with TTS for '{}'", self.text)
        return rtn

    def _tts_openai(self) -> AudioSegment:
        """Convert text to speech using OpenAI's TTS. Return an AudioSegment object."""
        openai_tts_model = "tts-1"

        @retry()
        def _create_speech(*args, **kwargs):
            # Record usage before every attempt (the character count is logged
            # as input tokens), since failed attempts consume quota too
            for db in [
                self.general_token_usage_db,
                self.token_usage_db,
            ]:
                db.insert_data(model=openai_tts_model, n_input_tokens=len(self.text))
            return self.openai_client.audio.speech.create(*args, **kwargs)

        response = _create_speech(
            input=self.text,
            model=openai_tts_model,
            voice=self.openai_tts_voice,
            response_format="mp3",
            timeout=self.timeout,
        )

        # Collect the streamed mp3 bytes in memory, then decode with pydub
        mp3_buffer = io.BytesIO()
        for mp3_stream_chunk in response.iter_bytes(chunk_size=4096):
            mp3_buffer.write(mp3_stream_chunk)
        mp3_buffer.seek(0)

        audio = AudioSegment.from_mp3(mp3_buffer)
        audio += 8  # Increase volume a bit (pydub: adds 8 dB of gain)
        return audio

    def _tts_google(self) -> AudioSegment:
        """Convert text to speech using Google's TTS. Return an AudioSegment object."""
        tts = gTTS(self.text, lang=self.language)
        mp3_buffer = io.BytesIO()
        tts.write_to_fp(mp3_buffer)
        mp3_buffer.seek(0)
        return AudioSegment.from_mp3(mp3_buffer)
| python | MIT | 7e77d3b1aee052cfa350a806371de75b9b713ad6 | 2026-01-05T07:13:49.794839Z | false |
paulovcmedeiros/pyRobBot | https://github.com/paulovcmedeiros/pyRobBot/blob/7e77d3b1aee052cfa350a806371de75b9b713ad6/pyrobbot/openai_utils.py | pyrobbot/openai_utils.py | """Utils for using the OpenAI API."""
import hashlib
import shutil
from typing import TYPE_CHECKING, Optional
import openai
from loguru import logger
from . import GeneralDefinitions
from .chat_configs import OpenAiApiCallOptions
from .general_utils import retry
from .tokens import get_n_tokens_from_msgs
if TYPE_CHECKING:
from .chat import Chat
class OpenAiClientWrapper(openai.OpenAI):
    """Wrapper for OpenAI API client that also manages the package's chat caches."""

    def __init__(self, *args, private_mode: bool = False, **kwargs):
        """Initialize the OpenAI API client wrapper.

        Args:
            private_mode (bool): When True, chat caches go to a temporary
                directory instead of the persistent per-user cache.
        """
        super().__init__(*args, **kwargs)
        self.private_mode = private_mode
        # Files every valid chat cache dir must contain; see clear_invalid_cache_dirs
        self.required_cache_files = [
            "chat_token_usage.db",
            "configs.json",
            "embeddings.db",
            "metadata.json",
        ]
        self.clear_invalid_cache_dirs()

    @property
    def cache_dir(self):
        """Return client's cache dir according to the privacy configs."""
        return self.get_cache_dir(private_mode=self.private_mode)

    @property
    def saved_chat_cache_paths(self):
        """Yield the saved chat cache dirs, sorted by st_ctime (oldest first)."""
        yield from sorted(
            (direc for direc in self.cache_dir.glob("chat_*/")),
            key=lambda fpath: fpath.stat().st_ctime,
        )

    def clear_invalid_cache_dirs(self):
        """Remove cache directories that are missing required files."""
        for directory in self.cache_dir.glob("chat_*/"):
            if not all(
                (directory / fname).exists() for fname in self.required_cache_files
            ):
                logger.debug(f"Removing invalid cache directory: {directory}")
                shutil.rmtree(directory, ignore_errors=True)

    def get_cache_dir(self, private_mode: Optional[bool] = None):
        """Return the directory where the chats using the client will be stored.

        Args:
            private_mode: Overrides the instance's `private_mode` when not None.
        """
        if private_mode is None:
            private_mode = self.private_mode

        if private_mode:
            # Private chats share an anonymous id inside the package tmp dir
            client_id = "demo"
            parent_dir = GeneralDefinitions.PACKAGE_TMPDIR
        else:
            # Persistent cache keyed by a hash of the API key (never the key itself)
            client_id = hashlib.sha256(self.api_key.encode("utf-8")).hexdigest()
            parent_dir = GeneralDefinitions.PACKAGE_CACHE_DIRECTORY

        directory = parent_dir / f"user_{client_id}"
        directory.mkdir(parents=True, exist_ok=True)
        return directory
def make_api_chat_completion_call(conversation: list, chat_obj: "Chat"):
    """Stream a chat completion from OpenAI API given a conversation and a chat object.

    Args:
        conversation (list): A list of messages passed as input for the completion.
        chat_obj (Chat): Chat object containing the configurations for the chat.

    Yields:
        str: Chunks of text generated by the API in response to the conversation.
    """
    # Forward only the API-call options that are actually set on the chat
    api_call_args = {}
    for field in OpenAiApiCallOptions.model_fields:
        if getattr(chat_obj, field) is not None:
            api_call_args[field] = getattr(chat_obj, field)

    logger.trace(
        "Making OpenAI API call with chat=<{}>, args {} and messages {}",
        chat_obj.id,
        api_call_args,
        conversation,
    )

    @retry(error_msg="Problems connecting to OpenAI API")
    def stream_reply(conversation, **api_call_args):
        # Update the chat's token usage database with tokens used in chat input
        # Do this here because every attempt consumes tokens, even if it fails
        n_tokens = get_n_tokens_from_msgs(messages=conversation, model=chat_obj.model)
        for db in [chat_obj.general_token_usage_db, chat_obj.token_usage_db]:
            db.insert_data(model=chat_obj.model, n_input_tokens=n_tokens)

        full_reply_content = ""
        for completion_chunk in chat_obj.openai_client.chat.completions.create(
            messages=conversation, stream=True, **api_call_args
        ):
            reply_chunk = getattr(completion_chunk.choices[0].delta, "content", "")
            if reply_chunk is None:
                # Stop streaming when the chunk's delta content is None
                break
            full_reply_content += reply_chunk
            yield reply_chunk

        # Update the chat's token usage database with tokens used in chat output
        reply_as_msg = {"role": "assistant", "content": full_reply_content}
        n_tokens = get_n_tokens_from_msgs(messages=[reply_as_msg], model=chat_obj.model)
        for db in [chat_obj.general_token_usage_db, chat_obj.token_usage_db]:
            db.insert_data(model=chat_obj.model, n_output_tokens=n_tokens)
        logger.trace("Done with OpenAI API call")

    yield from stream_reply(conversation, **api_call_args)
| python | MIT | 7e77d3b1aee052cfa350a806371de75b9b713ad6 | 2026-01-05T07:13:49.794839Z | false |
paulovcmedeiros/pyRobBot | https://github.com/paulovcmedeiros/pyRobBot/blob/7e77d3b1aee052cfa350a806371de75b9b713ad6/pyrobbot/__init__.py | pyrobbot/__init__.py | #!/usr/bin/env python3
"""Unnoficial OpenAI API UI and CLI tool."""
import os
import sys
import tempfile
import uuid
from collections import defaultdict
from dataclasses import dataclass
from importlib.metadata import metadata, version
from pathlib import Path
import ipinfo
import requests
from loguru import logger
# Replace loguru's default sink: log to stderr at the level given by the
# LOGLEVEL (or LOGURU_LEVEL) environment variable, defaulting to INFO.
logger.remove()
logger.add(
    sys.stderr,
    level=os.environ.get("LOGLEVEL", os.environ.get("LOGURU_LEVEL", "INFO")),
)

# Keep pygame from printing its support banner when it gets imported
os.environ["PYGAME_HIDE_SUPPORT_PROMPT"] = "hide"
@dataclass
class GeneralDefinitions:
    """General definitions for the package."""

    # Main package info
    RUN_ID = uuid.uuid4().hex  # unique id for this interpreter session
    PACKAGE_NAME = __name__
    VERSION = version(__name__)
    PACKAGE_DESCRIPTION = metadata(__name__)["Summary"]

    # Main package directories
    PACKAGE_DIRECTORY = Path(__file__).parent
    PACKAGE_CACHE_DIRECTORY = Path.home() / ".cache" / PACKAGE_NAME
    # Keep a reference to the TemporaryDirectory object so it is not
    # garbage-collected (which would delete the directory on disk)
    _PACKAGE_TMPDIR = tempfile.TemporaryDirectory()
    PACKAGE_TMPDIR = Path(_PACKAGE_TMPDIR.name)

    # Constants related to the app
    APP_NAME = "pyRobBot"
    APP_DIR = PACKAGE_DIRECTORY / "app"
    APP_PATH = APP_DIR / "app.py"
    PARSED_ARGS_FILE = PACKAGE_TMPDIR / f"parsed_args_{RUN_ID}.pkl"

    # Location info. Every key falls back to "unknown" when the ipinfo
    # lookup fails (no network, timeout, quota exceeded, ...).
    IPINFO = defaultdict(lambda: "unknown")
    try:
        IPINFO = ipinfo.getHandler().getDetails().all
    except (
        requests.exceptions.ReadTimeout,
        requests.exceptions.ConnectionError,
        ipinfo.exceptions.RequestQuotaExceededError,
    ) as error:
        logger.warning("Cannot get current location info. {}", error)
| python | MIT | 7e77d3b1aee052cfa350a806371de75b9b713ad6 | 2026-01-05T07:13:49.794839Z | false |
paulovcmedeiros/pyRobBot | https://github.com/paulovcmedeiros/pyRobBot/blob/7e77d3b1aee052cfa350a806371de75b9b713ad6/pyrobbot/tokens.py | pyrobbot/tokens.py | """Management of token usage and costs for OpenAI API."""
import contextlib
import datetime
import sqlite3
from pathlib import Path
from typing import Optional
import pandas as pd
import tiktoken
# See <https://openai.com/pricing> for the latest prices.
# All values below are prices per 1000 tokens, split into input (prompt) and
# output (completion) rates, as published on the OpenAI pricing page.
PRICE_PER_K_TOKENS_LLM = {
    # Continuous model upgrades (models that point to the latest versions)
    "gpt-3.5-turbo": {"input": 0.0005, "output": 0.0015},
    "gpt-4-turbo-preview": {"input": 0.01, "output": 0.03},
    "gpt-4": {"input": 0.03, "output": 0.06},
    "gpt-3.5-turbo-16k": {"input": 0.001, "output": 0.002},  # -> gpt-3.5-turbo-16k-0613
    "gpt-4-32k": {"input": 0.06, "output": 0.12},
    # Static model versions
    # GPT 3
    "gpt-3.5-turbo-0125": {"input": 0.0015, "output": 0.002},
    "gpt-3.5-turbo-1106": {"input": 0.001, "output": 0.002},
    "gpt-3.5-turbo-0613": {"input": 0.0015, "output": 0.002},  # Deprecated, 2024-06-13
    "gpt-3.5-turbo-16k-0613": {"input": 0.001, "output": 0.002},  # Deprecated, 2024-06-13
    # GPT 4
    "gpt-4-0125-preview": {"input": 0.01, "output": 0.03},
    "gpt-4-1106-preview": {"input": 0.01, "output": 0.03},
    "gpt-4-0613": {"input": 0.03, "output": 0.06},
    "gpt-4-32k-0613": {"input": 0.06, "output": 0.12},
}

PRICE_PER_K_TOKENS_EMBEDDINGS = {
    "text-embedding-3-small": {"input": 0.00002, "output": 0.0},
    "text-embedding-3-large": {"input": 0.00013, "output": 0.0},
    "text-embedding-ada-002": {"input": 0.0001, "output": 0.0},
    "text-embedding-ada-002-v2": {"input": 0.0001, "output": 0.0},
    "text-davinci:002": {"input": 0.0020, "output": 0.020},
    # NOTE(review): zero-cost sentinel; presumably an internal pseudo-model
    # used when the full history is handled locally — confirm against callers
    "full-history": {"input": 0.0, "output": 0.0},
}

# Text-to-speech and speech-to-text models
PRICE_PER_K_TOKENS_TTS_AND_STT = {
    "tts-1": {"input": 0.015, "output": 0.0},
    "tts-1-hd": {"input": 0.03, "output": 0.0},
    "whisper-1": {"input": 0.006, "output": 0.0},
}

# Single lookup table combining all model families (dict union: python >= 3.9)
PRICE_PER_K_TOKENS = (
    PRICE_PER_K_TOKENS_LLM
    | PRICE_PER_K_TOKENS_EMBEDDINGS
    | PRICE_PER_K_TOKENS_TTS_AND_STT
)
class TokenUsageDatabase:
    """Manages a database to store estimated token usage and costs for OpenAI API."""

    def __init__(self, fpath: Path):
        """Initialize a TokenUsageDatabase instance.

        Args:
            fpath: Location of the sqlite database file. Parent directories
                are created on demand; the table is created if missing.
        """
        self.fpath = fpath
        # Convert the published per-1000-token prices to per-token prices so
        # costs can be computed by plain multiplication later on
        self.token_price = {}
        for model, price_per_k_tokens in PRICE_PER_K_TOKENS.items():
            self.token_price[model] = {
                k: v / 1000.0 for k, v in price_per_k_tokens.items()
            }

        self.create()

    def create(self):
        """Create the database if it doesn't exist."""
        self.fpath.parent.mkdir(parents=True, exist_ok=True)
        # `contextlib.closing` guarantees the connection is released even if
        # an sqlite call raises (previously it leaked on the error path)
        with contextlib.closing(sqlite3.connect(self.fpath)) as conn:
            cursor = conn.cursor()
            # Create the table used to store the accumulated usage records
            cursor.execute(
                """
                CREATE TABLE IF NOT EXISTS token_costs (
                    timestamp INTEGER NOT NULL,
                    model TEXT NOT NULL,
                    n_input_tokens INTEGER NOT NULL,
                    n_output_tokens INTEGER NOT NULL,
                    cost_input_tokens REAL NOT NULL,
                    cost_output_tokens REAL NOT NULL
                )
                """
            )
            conn.commit()

    def insert_data(
        self,
        model: str,
        n_input_tokens: int = 0,
        n_output_tokens: int = 0,
        timestamp: Optional[int] = None,
    ):
        """Insert the data into the token_costs table.

        Args:
            model: Model name (must be a key of `PRICE_PER_K_TOKENS`).
            n_input_tokens: Number of input (prompt) tokens used.
            n_output_tokens: Number of output (completion) tokens used.
            timestamp: Unix timestamp of the record. Defaults to "now" in UTC.
        """
        if model is None:
            return

        if timestamp is None:
            # The previous `utcnow().timestamp()` was both deprecated and
            # incorrect (the naive datetime got interpreted as *local* time).
            # Use an aware UTC datetime instead. Also test against `None`
            # explicitly so an explicit timestamp of 0 (the epoch) is honoured.
            timestamp = int(
                datetime.datetime.now(datetime.timezone.utc).timestamp()
            )

        with contextlib.closing(sqlite3.connect(self.fpath)) as conn:
            cursor = conn.cursor()
            # Insert the data into the table
            cursor.execute(
                """
                INSERT INTO token_costs (
                    timestamp,
                    model,
                    n_input_tokens,
                    n_output_tokens,
                    cost_input_tokens,
                    cost_output_tokens
                )
                VALUES (?, ?, ?, ?, ?, ?)
                """,
                (
                    timestamp,
                    model,
                    n_input_tokens,
                    n_output_tokens,
                    n_input_tokens * self.token_price[model]["input"],
                    n_output_tokens * self.token_price[model]["output"],
                ),
            )
            conn.commit()

    def get_usage_balance_dataframe(self):
        """Get a dataframe with the accumulated token usage and costs.

        Returns:
            pd.DataFrame: One row per model plus a "Total" row, with columns
            grouped by prefix ("Tokens", "Cost ($)", ...).
        """
        query = """
        SELECT
            model as Model,
            MIN(timestamp) AS "First Used",
            SUM(n_input_tokens) AS "Tokens: In",
            SUM(n_output_tokens) AS "Tokens: Out",
            SUM(n_input_tokens + n_output_tokens) AS "Tokens: Tot.",
            SUM(cost_input_tokens) AS "Cost ($): In",
            SUM(cost_output_tokens) AS "Cost ($): Out",
            SUM(cost_input_tokens + cost_output_tokens) AS "Cost ($): Tot."
        FROM token_costs
        GROUP BY model
        ORDER BY "Cost ($): Tot." DESC
        """
        with contextlib.closing(sqlite3.connect(self.fpath)) as conn:
            usage_df = pd.read_sql_query(query, con=conn)

        usage_df["First Used"] = pd.to_datetime(usage_df["First Used"], unit="s")
        usage_df = _group_columns_by_prefix(_add_totals_row(usage_df))

        # Add metadata to returned dataframe
        usage_df.attrs["description"] = "Estimated token usage and associated costs"
        link = "https://platform.openai.com/account/usage"
        disclaimers = [
            "Note: These are only estimates. Actual costs may vary.",
            f"Please visit <{link}> to follow your actual usage and costs.",
        ]
        usage_df.attrs["disclaimer"] = "\n".join(disclaimers)

        return usage_df
def get_n_tokens_from_msgs(messages: list[dict], model: str):
    """Returns the number of tokens used by a list of messages."""
    # Adapted from
    # <https://platform.openai.com/docs/guides/text-generation/managing-tokens>
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        # Unknown model: fall back to the generic cl100k_base encoding
        encoding = tiktoken.get_encoding("cl100k_base")

    # OpenAI's original function was implemented for gpt-3.5-turbo-0613, but we'll use
    # it for all models for now. We are only interested in estimates, after all.
    n_tokens = 0
    for message in messages:
        # every message follows <im_start>{role/name}\n{content}<im_end>\n
        n_tokens += 4
        for key, value in message.items():
            if not isinstance(value, str):
                raise TypeError(
                    f"Value for key '{key}' has type {type(value)}. Expected str: {value}"
                )
            n_tokens += len(encoding.encode(value))
            if key == "name":  # if there's a name, the role is omitted
                n_tokens -= 1  # role is always required and always 1 token
    n_tokens += 2  # every reply is primed with <im_start>assistant
    return n_tokens
def _group_columns_by_prefix(dataframe: pd.DataFrame):
dataframe = dataframe.copy()
col_tuples_for_multiindex = dataframe.columns.str.split(": ", expand=True).to_numpy()
dataframe.columns = pd.MultiIndex.from_tuples(
[("", x[0]) if pd.isna(x[1]) else x for x in col_tuples_for_multiindex]
)
return dataframe
def _add_totals_row(accounting_df: pd.DataFrame):
dtypes = accounting_df.dtypes
sums_df = accounting_df.sum(numeric_only=True).rename("Total").to_frame().T
return pd.concat([accounting_df, sums_df]).astype(dtypes).fillna(" ")
| python | MIT | 7e77d3b1aee052cfa350a806371de75b9b713ad6 | 2026-01-05T07:13:49.794839Z | false |
paulovcmedeiros/pyRobBot | https://github.com/paulovcmedeiros/pyRobBot/blob/7e77d3b1aee052cfa350a806371de75b9b713ad6/pyrobbot/voice_chat.py | pyrobbot/voice_chat.py | """Code related to the voice chat feature."""
import contextlib
import io
import queue
import threading
import time
from collections import defaultdict, deque
from datetime import datetime
import chime
import numpy as np
import pydub
import pygame
import soundfile as sf
import webrtcvad
from loguru import logger
from pydub import AudioSegment
from .chat import Chat
from .chat_configs import VoiceChatConfigs
from .general_utils import _get_lower_alphanumeric, str2_minus_str1
from .sst_and_tts import TextToSpeech
# Import-time feature detection. The flags set below are consulted by
# `_check_needed_imports` to warn users about missing audio capabilities.
try:
    import sounddevice as sd
except OSError as error:
    # PortAudio missing or broken: local microphone recording won't work
    logger.exception(error)
    logger.error(
        "Can't use module `sounddevice`. Please check your system's PortAudio install."
    )
    _sounddevice_imported = False
else:
    _sounddevice_imported = True

try:
    # Test if pydub's AudioSegment can be used
    # Decoding an empty buffer is *expected* to fail with CouldntDecodeError;
    # we only care whether the underlying ffmpeg tooling can be invoked at all
    with contextlib.suppress(pydub.exceptions.CouldntDecodeError):
        AudioSegment.from_mp3(io.BytesIO())
except (ImportError, OSError, FileNotFoundError) as error:
    logger.exception(error)
    logger.error("Can't use module `pydub`. Please check your system's ffmpeg install.")
    _pydub_usable = False
else:
    _pydub_usable = True
class VoiceChat(Chat):
    """Class for converting text to speech and speech to text."""

    default_configs = VoiceChatConfigs()

    def __init__(self, configs: VoiceChatConfigs = default_configs, **kwargs):
        """Initializes a chat instance."""
        super().__init__(configs=configs, **kwargs)
        _check_needed_imports()

        # Number of samples per VAD frame (frame_duration is in milliseconds)
        self.block_size = int((self.sample_rate * self.frame_duration) / 1000)
        self.vad = webrtcvad.Vad(2)

        self.default_chime_theme = "big-sur"
        chime.theme(self.default_chime_theme)

        # Create queues and threads for handling the chat
        # 1. Watching for questions from the user
        self.questions_queue = queue.Queue()
        self.questions_listening_watcher_thread = threading.Thread(
            target=self.handle_question_listening,
            args=(self.questions_queue,),
            daemon=True,
        )
        # 2. Converting assistant's text reply to speech and playing it
        self.tts_conversion_queue = queue.Queue()
        self.play_speech_queue = queue.Queue()
        self.tts_conversion_watcher_thread = threading.Thread(
            target=self.handle_tts_conversion_queue,
            args=(self.tts_conversion_queue,),
            daemon=True,
        )
        self.play_speech_thread = threading.Thread(
            target=self.handle_play_speech_queue,
            args=(self.play_speech_queue,),
            daemon=True,
        )  # TODO: Do not start this in webchat
        # 3. Watching for expressions that cancel the reply or exit the chat
        self.check_for_interrupt_expressions_queue = queue.Queue()
        self.check_for_interrupt_expressions_thread = threading.Thread(
            target=self.check_for_interrupt_expressions_handler,
            args=(self.check_for_interrupt_expressions_queue,),
            daemon=True,
        )
        self.interrupt_reply = threading.Event()
        self.exit_chat = threading.Event()

        # Keep track of played audios to update the history db
        self.current_answer_audios_queue = queue.Queue()
        self.handle_update_audio_history_thread = threading.Thread(
            target=self.handle_update_audio_history,
            args=(self.current_answer_audios_queue,),
            daemon=True,
        )

    @property
    def mixer(self):
        """Return the mixer object, initialising pygame's mixer lazily."""
        mixer = getattr(self, "_mixer", None)
        if mixer is not None:
            return mixer

        self._mixer = pygame.mixer
        try:
            self.mixer.init(
                frequency=self.sample_rate, channels=1, buffer=self.block_size
            )
        except pygame.error as error:
            logger.exception(error)
            logger.error(
                "Can't initialize the mixer. Please check your system's audio settings."
            )
            logger.warning("Voice chat may not be available or may not work as expected.")
        return self._mixer

    def start(self):
        """Start the chat."""
        # ruff: noqa: T201
        self.tts_conversion_watcher_thread.start()
        self.play_speech_thread.start()
        if not self.skip_initial_greeting:
            tts_entry = {"exchange_id": self.id, "text": self.initial_greeting}
            self.tts_conversion_queue.put(tts_entry)
            # Wait until the greeting has been fully spoken before listening
            while self._assistant_still_replying():
                pygame.time.wait(50)
        self.questions_listening_watcher_thread.start()
        self.check_for_interrupt_expressions_thread.start()
        self.handle_update_audio_history_thread.start()

        with contextlib.suppress(KeyboardInterrupt, EOFError):
            while not self.exit_chat.is_set():
                # Block until the previous reply is fully converted/played/saved
                self.tts_conversion_queue.join()
                self.play_speech_queue.join()
                self.current_answer_audios_queue.join()

                if self.interrupt_reply.is_set():
                    logger.opt(colors=True).debug(
                        "<yellow>Interrupting the reply</yellow>"
                    )
                    with self.check_for_interrupt_expressions_queue.mutex:
                        self.check_for_interrupt_expressions_queue.queue.clear()
                    with contextlib.suppress(pygame.error):
                        self.mixer.stop()
                    with self.questions_queue.mutex:
                        self.questions_queue.queue.clear()
                    chime.theme("material")
                    chime.error()
                    chime.theme(self.default_chime_theme)
                    time.sleep(0.25)

                chime.warning()
                self.interrupt_reply.clear()
                logger.debug(f"{self.assistant_name}> Waiting for user input...")
                question = self.questions_queue.get()
                self.questions_queue.task_done()
                if question is None:
                    # A `None` question is the signal to leave the chat
                    self.exit_chat.set()
                else:
                    chime.success()
                    for chunk in self.answer_question(question):
                        # Code is only displayed, never spoken
                        if chunk.chunk_type == "code":
                            print(chunk.content, end="", flush=True)

        self.exit_chat.set()
        chime.info()
        logger.debug("Leaving chat")

    def answer_question(self, question: str):
        """Answer a question, yielding reply chunks and queueing them for TTS."""
        logger.debug("{}> Getting response to '{}'...", self.assistant_name, question)
        sentence_for_tts = ""
        any_code_chunk_yet = False
        for answer_chunk in self.respond_user_prompt(prompt=question):
            if self.interrupt_reply.is_set() or self.exit_chat.is_set():
                logger.debug("Reply interrupted.")
                # BUGFIX: this used to `raise StopIteration`, which PEP 479
                # converts into a RuntimeError inside a generator. A plain
                # `return` is the correct way to end the generator early.
                return
            yield answer_chunk
            if not self.reply_only_as_text:
                if answer_chunk.chunk_type not in ("text", "code"):
                    raise NotImplementedError(
                        "Unexpected chunk type: {}".format(answer_chunk.chunk_type)
                    )
                if answer_chunk.chunk_type == "text":
                    # The answer chunk is to be spoken
                    sentence_for_tts += answer_chunk.content
                    stripd_chunk = answer_chunk.content.strip()
                    if stripd_chunk.endswith(("?", "!", ".")):
                        # Check if second last character is a number, to avoid splitting
                        if stripd_chunk.endswith("."):
                            with contextlib.suppress(IndexError):
                                previous_char = sentence_for_tts.strip()[-2]
                                if previous_char.isdigit():
                                    continue
                        # Send sentence for TTS even if the request hasn't finished
                        tts_entry = {
                            "exchange_id": answer_chunk.exchange_id,
                            "text": sentence_for_tts,
                        }
                        self.tts_conversion_queue.put(tts_entry)
                        sentence_for_tts = ""
                elif answer_chunk.chunk_type == "code" and not any_code_chunk_yet:
                    msg = self._translate("Code will be displayed in the text output.")
                    tts_entry = {"exchange_id": answer_chunk.exchange_id, "text": msg}
                    self.tts_conversion_queue.put(tts_entry)
                    any_code_chunk_yet = True

        # NOTE(review): assumes the loop above yielded at least one chunk;
        # `answer_chunk` would be unbound otherwise — confirm with
        # `respond_user_prompt`'s contract
        if sentence_for_tts and not self.reply_only_as_text:
            tts_entry = {
                "exchange_id": answer_chunk.exchange_id,
                "text": sentence_for_tts,
            }
            self.tts_conversion_queue.put(tts_entry)

        # Signal that the current answer is finished
        tts_entry = {"exchange_id": answer_chunk.exchange_id, "text": None}
        self.tts_conversion_queue.put(tts_entry)

    def handle_update_audio_history(self, current_answer_audios_queue: queue.Queue):
        """Handle updating the chat history with the replies' audio file paths."""
        # Merge all AudioSegments in self.current_answer_audios_queue into a single one
        merged_audios = defaultdict(AudioSegment.empty)
        while not self.exit_chat.is_set():
            try:
                logger.debug("Waiting for reply audio chunks to concatenate and save...")
                audio_chunk_queue_item = current_answer_audios_queue.get()
                try:
                    reply_audio_chunk = audio_chunk_queue_item["speech"]
                    exchange_id = audio_chunk_queue_item["exchange_id"]
                    logger.debug("Received audio chunk for response ID {}", exchange_id)
                    if reply_audio_chunk is not None:
                        # Reply not yet finished
                        merged_audios[exchange_id] += reply_audio_chunk
                        logger.debug(
                            "Response ID {} audio: {}s so far",
                            exchange_id,
                            merged_audios[exchange_id].duration_seconds,
                        )
                        continue

                    # Now the reply has finished
                    logger.debug(
                        "Creating a single audio file for response ID {}...", exchange_id
                    )
                    merged_audio = merged_audios[exchange_id]
                    # Update the chat history with the audio file path
                    fpath = self.audio_cache_dir() / f"{datetime.now().isoformat()}.mp3"
                    logger.debug("Updating chat history with audio file path {}", fpath)
                    self.context_handler.database.insert_assistant_audio_file_path(
                        exchange_id=exchange_id, file_path=fpath
                    )
                    # Save the combined audio as an mp3 file in the cache directory
                    merged_audio.export(fpath, format="mp3")
                    logger.debug("File {} stored", fpath)
                    del merged_audios[exchange_id]
                finally:
                    # BUGFIX: `task_done` now runs even when an exception is
                    # raised mid-processing; otherwise `queue.join()` (used in
                    # `start`) would block forever after the first error
                    current_answer_audios_queue.task_done()
            except Exception as error:  # noqa: BLE001
                logger.error(error)
                logger.opt(exception=True).debug(error)

    def speak(self, tts: TextToSpeech):
        """Reproduce audio from a pygame Sound object."""
        tts.set_sample_rate(self.sample_rate)
        self.mixer.Sound(tts.speech.raw_data).play()
        # Record while speaking, to detect interruption expressions afterwards
        audio_recorded_while_assistant_replies = self.listen(
            duration_seconds=tts.speech.duration_seconds
        )

        msgs_to_compare = {
            "assistant_txt": tts.text,
            "user_audio": audio_recorded_while_assistant_replies,
        }
        self.check_for_interrupt_expressions_queue.put(msgs_to_compare)

        while self.mixer.get_busy():
            pygame.time.wait(100)

    def check_for_interrupt_expressions_handler(
        self, check_for_interrupt_expressions_queue: queue.Queue
    ):
        """Check for expressions that interrupt the assistant's reply."""
        while not self.exit_chat.is_set():
            try:
                msgs_to_compare = check_for_interrupt_expressions_queue.get()
                recorded_prompt = self.stt(speech=msgs_to_compare["user_audio"]).text

                recorded_prompt = _get_lower_alphanumeric(recorded_prompt).strip()
                assistant_msg = _get_lower_alphanumeric(
                    msgs_to_compare.get("assistant_txt", "")
                ).strip()

                # Words the user said that are NOT part of the assistant's own
                # speech (which the mic also picks up)
                user_words = str2_minus_str1(
                    str1=assistant_msg, str2=recorded_prompt
                ).strip()
                if user_words:
                    logger.debug(
                        "Detected user words while assistant was replying: {}",
                        user_words,
                    )
                    if any(
                        cancel_cmd in user_words for cancel_cmd in self.cancel_expressions
                    ):
                        logger.debug(
                            "Heard '{}'. Signalling for reply to be cancelled...",
                            user_words,
                        )
                        self.interrupt_reply.set()
            except Exception as error:  # noqa: PERF203, BLE001
                logger.opt(exception=True).debug(error)
            finally:
                check_for_interrupt_expressions_queue.task_done()

    def listen(self, duration_seconds: float = np.inf) -> AudioSegment:
        """Record audio from the microphone until user stops."""
        # Adapted from
        # <https://python-sounddevice.readthedocs.io/en/0.4.6/examples.html#
        # recording-with-arbitrary-duration>
        debug_msg = "The assistant is listening"
        if duration_seconds < np.inf:
            debug_msg += f" for {duration_seconds} s"
        debug_msg += "..."

        inactivity_timeout_seconds = self.inactivity_timeout_seconds
        if duration_seconds < np.inf:
            inactivity_timeout_seconds = duration_seconds

        q = queue.Queue()

        def callback(indata, frames, time, status):  # noqa: ARG001
            """This is called (from a separate thread) for each audio block."""
            q.put(indata.copy())

        raw_buffer = io.BytesIO()
        start_time = datetime.now()
        with self.get_sound_file(raw_buffer, mode="x") as sound_file, sd.InputStream(
            samplerate=self.sample_rate,
            blocksize=self.block_size,
            channels=1,
            callback=callback,
            dtype="int16",  # int16, i.e., 2 bytes per sample
        ):
            logger.debug("{}", debug_msg)
            # Recording will stop after inactivity_timeout_seconds of silence
            voice_activity_detected = deque(
                maxlen=int((1000.0 * inactivity_timeout_seconds) / self.frame_duration)
            )
            last_inactivity_checked = datetime.now()
            continue_recording = True
            speech_detected = False
            elapsed_time = 0.0
            with contextlib.suppress(KeyboardInterrupt):
                while continue_recording and elapsed_time < duration_seconds:
                    new_data = q.get()
                    sound_file.write(new_data)

                    # Gather voice activity samples for the inactivity check
                    wav_buffer = _np_array_to_wav_in_memory(
                        sound_data=new_data,
                        sample_rate=self.sample_rate,
                        subtype="PCM_16",
                    )
                    vad_thinks_this_chunk_is_speech = self.vad.is_speech(
                        wav_buffer, self.sample_rate
                    )
                    voice_activity_detected.append(vad_thinks_this_chunk_is_speech)

                    # Decide if user has been inactive for too long
                    now = datetime.now()
                    if duration_seconds < np.inf:
                        # Fixed-duration recording: never stop for inactivity
                        continue_recording = True
                    elif (
                        now - last_inactivity_checked
                    ).seconds >= inactivity_timeout_seconds:
                        speech_likelihood = 0.0
                        if len(voice_activity_detected) > 0:
                            speech_likelihood = sum(voice_activity_detected) / len(
                                voice_activity_detected
                            )
                        continue_recording = (
                            speech_likelihood >= self.speech_likelihood_threshold
                        )
                        if continue_recording:
                            speech_detected = True
                        last_inactivity_checked = now

                    elapsed_time = (now - start_time).seconds

        if speech_detected or duration_seconds < np.inf:
            return AudioSegment.from_wav(raw_buffer)
        return AudioSegment.empty()

    def handle_question_listening(self, questions_queue: queue.Queue):
        """Handle the queue of questions to be answered."""
        minimum_prompt_duration_seconds = 0.05
        while not self.exit_chat.is_set():
            if self._assistant_still_replying():
                # Don't record while the assistant is still speaking/replying
                pygame.time.wait(100)
                continue
            try:
                audio = self.listen()
                if audio is None:
                    questions_queue.put(None)
                    continue

                # Too short to contain an actual prompt: discard
                if audio.duration_seconds < minimum_prompt_duration_seconds:
                    continue

                question = self.stt(speech=audio).text

                # Check for the exit expressions
                if any(
                    _get_lower_alphanumeric(question).startswith(
                        _get_lower_alphanumeric(expr)
                    )
                    for expr in self.exit_expressions
                ):
                    questions_queue.put(None)
                elif question:
                    questions_queue.put(question)
            except sd.PortAudioError as error:
                logger.opt(exception=True).debug(error)
            except Exception as error:  # noqa: BLE001
                logger.opt(exception=True).debug(error)
                logger.error(error)

    def handle_play_speech_queue(self, play_speech_queue: queue.Queue[TextToSpeech]):
        """Handle the queue of audio segments to be played."""
        while not self.exit_chat.is_set():
            try:
                play_speech_queue_item = play_speech_queue.get()
                if play_speech_queue_item["speech"] and not self.interrupt_reply.is_set():
                    self.speak(play_speech_queue_item["tts_obj"])
            except Exception as error:  # noqa: BLE001, PERF203
                logger.exception(error)
            finally:
                play_speech_queue.task_done()

    def handle_tts_conversion_queue(self, tts_conversion_queue: queue.Queue):
        """Handle the text-to-speech queue."""
        logger.debug("Chat {}: TTS conversion handler started.", self.id)
        while not self.exit_chat.is_set():
            try:
                tts_entry = tts_conversion_queue.get()
                try:
                    if tts_entry["text"] is None:
                        # Signal that the current answer is finished
                        play_speech_queue_item = {
                            "exchange_id": tts_entry["exchange_id"],
                            "speech": None,
                        }
                        self.play_speech_queue.put(play_speech_queue_item)
                        self.current_answer_audios_queue.put(play_speech_queue_item)
                        logger.debug(
                            "Reply ID {} notified that is has finished",
                            tts_entry["exchange_id"],
                        )
                        continue

                    text = tts_entry["text"].strip()
                    if text and not self.interrupt_reply.is_set():
                        logger.debug(
                            "Reply ID {}: received text '{}' for TTS",
                            tts_entry["exchange_id"],
                            text,
                        )
                        tts_obj = self.tts(text)
                        # Trigger the TTS conversion
                        _ = tts_obj.speech
                        logger.debug(
                            "Reply ID {}: Sending speech for '{}' to the playing queue",
                            tts_entry["exchange_id"],
                            text,
                        )
                        play_speech_queue_item = {
                            "exchange_id": tts_entry["exchange_id"],
                            "tts_obj": tts_obj,
                            "speech": tts_obj.speech,
                        }
                        self.play_speech_queue.put(play_speech_queue_item)
                        self.current_answer_audios_queue.put(play_speech_queue_item)
                finally:
                    # BUGFIX: always mark the entry done, even on errors;
                    # otherwise `tts_conversion_queue.join()` would block
                    # forever and `_assistant_still_replying` would stay True
                    tts_conversion_queue.task_done()
            except Exception as error:  # noqa: BLE001
                logger.opt(exception=True).debug(error)
                logger.error(error)
        logger.error("TTS conversion queue handler ended.")

    def get_sound_file(self, wav_buffer: io.BytesIO, mode: str = "r"):
        """Return a sound file object."""
        return sf.SoundFile(
            wav_buffer,
            mode=mode,
            samplerate=self.sample_rate,
            channels=1,
            format="wav",
            subtype="PCM_16",
        )

    def audio_cache_dir(self):
        """Return the audio cache directory, creating it if needed."""
        directory = self.cache_dir / "audio_files"
        directory.mkdir(parents=True, exist_ok=True)
        return directory

    def _assistant_still_replying(self):
        """Check if the assistant is still talking."""
        return (
            self.mixer.get_busy()
            or self.questions_queue.unfinished_tasks > 0
            or self.tts_conversion_queue.unfinished_tasks > 0
            or self.play_speech_queue.unfinished_tasks > 0
        )
def _check_needed_imports():
    """Emit log messages for unavailable optional audio dependencies."""
    if not _sounddevice_imported:
        msg = "Module `sounddevice`, needed for local audio recording, is not available."
        logger.warning(msg)
    if not _pydub_usable:
        msg = (
            "Module `pydub`, needed for audio conversion, doesn't seem to be working. "
            "Voice chat may not be available or may not work as expected."
        )
        logger.error(msg)
def _np_array_to_wav_in_memory(
    sound_data: np.ndarray, sample_rate: int, subtype="PCM_16"
):
    """Convert the recorded array to an in-memory wav file."""
    in_memory_file = io.BytesIO()
    in_memory_file.name = "audio.wav"
    sf.write(in_memory_file, sound_data, sample_rate, subtype=subtype)
    # Hand back only the raw PCM payload: skip the 44-byte canonical WAV header
    in_memory_file.seek(44)
    return in_memory_file.read()
| python | MIT | 7e77d3b1aee052cfa350a806371de75b9b713ad6 | 2026-01-05T07:13:49.794839Z | false |
paulovcmedeiros/pyRobBot | https://github.com/paulovcmedeiros/pyRobBot/blob/7e77d3b1aee052cfa350a806371de75b9b713ad6/pyrobbot/app/app_utils.py | pyrobbot/app/app_utils.py | """Utility functions and classes for the app."""
import contextlib
import datetime
import os
import queue
import threading
from typing import TYPE_CHECKING
import streamlit as st
from loguru import logger
from PIL import Image
from pydub import AudioSegment
from streamlit.runtime.scriptrunner import add_script_run_ctx
from twilio.rest import Client as TwilioClient
from pyrobbot import GeneralDefinitions
from pyrobbot.chat import AssistantResponseChunk
from pyrobbot.voice_chat import VoiceChat
if TYPE_CHECKING:
from .app_page_templates import AppPage
class WebAppChat(VoiceChat):
    """A chat object for web apps."""

    def __init__(self, **kwargs):
        """Initialize a new instance of the WebAppChat class.

        Starts the TTS-conversion and audio-history background threads right
        away, since the web app does not call `VoiceChat.start` (which is
        where the terminal chat starts them).
        """
        super().__init__(**kwargs)
        self.tts_conversion_watcher_thread.start()
        self.handle_update_audio_history_thread.start()
class AsyncReplier:
    """Asynchronously reply to a prompt and stream the text & audio reply."""

    def __init__(self, app_page: "AppPage", prompt: str):
        """Initialize a new instance of the AsyncReplier class.

        Args:
            app_page: The app page that requested the reply.
            prompt: The user prompt to be answered.
        """
        self.app_page = app_page
        self.prompt = prompt
        self.chat_obj = app_page.chat_obj

        self.question_answer_chunks_queue = queue.Queue()
        self.threads = [
            threading.Thread(name="queue_text_chunks", target=self.queue_text_chunks),
            threading.Thread(name="play_queued_audios", target=self.play_queued_audios),
        ]
        self.start()

    def start(self):
        """Start the threads."""
        for thread in self.threads:
            # Propagate streamlit's script-run context to the worker threads
            # so they can interact with streamlit elements
            add_script_run_ctx(thread)
            thread.start()

    def join(self):
        """Wait for all threads to finish."""
        logger.debug("Waiting for {} to finish...", type(self).__name__)
        for thread in self.threads:
            thread.join()
        logger.debug("All {} threads finished", type(self).__name__)

    def queue_text_chunks(self):
        """Get chunks of the text reply to the prompt and queue them for display."""
        exchange_id = None
        for chunk in self.chat_obj.answer_question(self.prompt):
            self.question_answer_chunks_queue.put(chunk)
            exchange_id = chunk.exchange_id
        # A chunk with `content=None` signals the end of the reply
        self.question_answer_chunks_queue.put(
            AssistantResponseChunk(exchange_id=exchange_id, content=None)
        )

    def play_queued_audios(self):
        """Play queued audio segments."""
        while True:
            try:
                logger.debug(
                    "Waiting for item from the audio reply chunk queue ({}) items so far",
                    self.chat_obj.play_speech_queue.qsize(),
                )
                speech_queue_item = self.chat_obj.play_speech_queue.get()
                audio = speech_queue_item["speech"]
                if audio is None:
                    logger.debug("Got `None`. No more audio reply chunks to play")
                    self.chat_obj.play_speech_queue.task_done()
                    break

                logger.debug("Playing audio reply chunk ({}s)", audio.duration_seconds)
                self.app_page.render_custom_audio_player(
                    audio,
                    parent_element=self.app_page.status_msg_container,
                    autoplay=True,
                    hidden=True,
                )
                logger.debug(
                    "Done playing audio reply chunk ({}s)", audio.duration_seconds
                )
                self.chat_obj.play_speech_queue.task_done()
            except Exception as error:  # noqa: BLE001
                # BUGFIX: the old message referenced `audio.duration_seconds`,
                # which raised NameError when the failure happened before
                # `audio` was bound, killing the thread without logging
                logger.opt(exception=True).debug("Error playing audio reply chunk")
                logger.error(error)
                break
            finally:
                self.app_page.status_msg_container.empty()

    def stream_text_and_audio_reply(self):
        """Stream the text and audio reply to the display.

        Returns:
            dict: The full reply text and the path to its full audio file
            (the latter may be `None` if not available).
        """
        text_reply_container = st.empty()
        audio_reply_container = st.empty()

        chunk = AssistantResponseChunk(exchange_id=None, content="")
        full_response = ""
        text_reply_container.markdown("▌")
        self.app_page.status_msg_container.empty()
        while chunk.content is not None:
            logger.trace("Waiting for text or audio chunks...")
            # Render text
            with contextlib.suppress(queue.Empty):
                chunk = self.question_answer_chunks_queue.get_nowait()
                if chunk.content is not None:
                    full_response += chunk.content
                    text_reply_container.markdown(full_response + "▌")
                self.question_answer_chunks_queue.task_done()

        text_reply_container.caption(datetime.datetime.now().replace(microsecond=0))
        text_reply_container.markdown(full_response)

        logger.debug("Waiting for the audio reply to finish...")
        self.chat_obj.play_speech_queue.join()

        logger.debug("Getting path to full audio file for the reply...")
        history_entry_for_this_reply = (
            self.chat_obj.context_handler.database.retrieve_history(
                exchange_id=chunk.exchange_id
            )
        )
        full_audio_fpath = history_entry_for_this_reply["reply_audio_file_path"].iloc[0]
        if full_audio_fpath is None:
            logger.warning("Path to full audio file not available")
        else:
            logger.debug("Got path to full audio file: {}", full_audio_fpath)
            self.app_page.render_custom_audio_player(
                full_audio_fpath, parent_element=audio_reply_container, autoplay=False
            )

        return {"text": full_response, "audio": full_audio_fpath}
@st.cache_data
def get_ice_servers():
    """Use Twilio's TURN server as recommended by the streamlit-webrtc developers."""
    account_sid = os.environ.get("TWILIO_ACCOUNT_SID")
    auth_token = os.environ.get("TWILIO_AUTH_TOKEN")
    if account_sid is None or auth_token is None:
        logger.warning(
            "Twilio credentials are not set. Cannot use their TURN servers. "
            "Falling back to a free STUN server from Google."
        )
        return [{"urls": ["stun:stun.l.google.com:19302"]}]

    twilio_token = TwilioClient(account_sid, auth_token).tokens.create()
    return twilio_token.ice_servers
def filter_page_info_from_queue(app_page: "AppPage", the_queue: queue.Queue):
    """Split `the_queue` in place, returning a queue with only `app_page`'s items.

    Entries belonging to other pages remain in `the_queue`; entries whose page
    matches `app_page` (by `page_id`) are moved to the returned queue.

    **Use with original_queue.mutex!!**

    Args:
        app_page: The page whose entries should be extracted.
        the_queue: The queue to be filtered in place.

    Returns:
        queue.Queue: A queue holding only the entries from `app_page`.

    Example:
        ```
        with the_queue.mutex:
            this_page_data = filter_page_info_from_queue(app_page, the_queue)
        ```
    """
    kept_entries = queue.Queue()
    extracted_entries = queue.Queue()
    while the_queue.queue:
        entry = the_queue.queue.popleft()
        if entry["page"].page_id == app_page.page_id:
            extracted_entries.put(entry)
        else:
            kept_entries.put(entry)
    the_queue.queue = kept_entries.queue
    return extracted_entries
@st.cache_data
def get_avatar_images():
    """Return the avatar images for the assistant and the user."""
    avatar_data_dir = GeneralDefinitions.APP_DIR / "data"
    return {
        "assistant": Image.open(avatar_data_dir / "assistant_avatar.png"),
        "user": Image.open(avatar_data_dir / "user_avatar.png"),
    }
@st.cache_data
def load_chime(chime_type: str) -> AudioSegment:
    """Load a chime sound from the data directory."""
    chime_fpath = GeneralDefinitions.APP_DIR / "data" / f"{chime_type}.wav"
    return AudioSegment.from_file(chime_fpath, format="wav")
| python | MIT | 7e77d3b1aee052cfa350a806371de75b9b713ad6 | 2026-01-05T07:13:49.794839Z | false |
paulovcmedeiros/pyRobBot | https://github.com/paulovcmedeiros/pyRobBot/blob/7e77d3b1aee052cfa350a806371de75b9b713ad6/pyrobbot/app/multipage.py | pyrobbot/app/multipage.py | """Code for the creation streamlit apps with dynamically created pages."""
import contextlib
import datetime
import hashlib
import os
import queue
import sys
import threading
import time
from abc import ABC, abstractmethod, abstractproperty
from collections import defaultdict, deque
from json.decoder import JSONDecodeError
import streamlit as st
import streamlit_webrtc
from loguru import logger
from pydantic import ValidationError
from pydub import AudioSegment
from streamlit.runtime.scriptrunner import add_script_run_ctx
from streamlit_webrtc import WebRtcMode
from pyrobbot import GeneralDefinitions
from pyrobbot.chat_configs import VoiceChatConfigs
from pyrobbot.general_utils import trim_beginning
from pyrobbot.openai_utils import OpenAiClientWrapper
from .app_page_templates import AppPage, ChatBotPage, _RecoveredChat
from .app_utils import (
WebAppChat,
filter_page_info_from_queue,
get_avatar_images,
get_ice_servers,
)
# Module-level queues/events shared with the cached background listener thread
# (`listen` below). Streamlit reruns the page script on every interaction, so
# this cross-run state lives at module level.
incoming_frame_queue = queue.Queue()  # raw audio frames received from the browser
# NOTE(review): the queues below are not consumed in this chunk; names suggest
# VAD-flagged speech chunks, currently-playing audio, continuous-mode prompts
# and transcribed text prompts respectively — confirm against the rest of file
possible_speech_chunks_queue = queue.Queue()
audio_playing_chunks_queue = queue.Queue()
continuous_user_prompt_queue = queue.Queue()
text_prompt_queue = queue.Queue()
reply_ongoing = threading.Event()  # presumably set while a reply is in progress — TODO confirm
@st.cache_resource(show_spinner="Initialising listening engine...")
def listen(): # noqa: PLR0912, PLR0915
"""Listen for speech from the browser."""
# This deque will be employed to keep a moving window of audio chunks to monitor
# voice activity. The length of the deque is calculated such that the concatenated
# audio chunks will produce an audio at most inactivity_timeout_seconds long
#
# Mind that none of Streamlit's APIs are safe to call from any thread other than
# the main one. See, e.g., <https://discuss.streamlit.io/t/
# changing-session-state-not-reflecting-in-active-python-thread/37683
logger.debug("Listener thread started")
all_users_audio_chunks_moving_windows = {}
user_has_been_speaking = defaultdict(lambda: False)
all_users_moving_window_speech_likelihood = defaultdict(lambda: 0.0)
while True:
try:
logger.trace("Waiting for audio frame from the stream...")
received_audio_frame_info = incoming_frame_queue.get()
received_audio_frame = received_audio_frame_info["frame"]
app_page = received_audio_frame_info["page"]
chat_obj = app_page.chat_obj
try:
audio_chunks_moving_window = all_users_audio_chunks_moving_windows[
app_page.page_id
]
except KeyError:
audio_chunks_moving_window = deque(
maxlen=int(
(1000.0 * chat_obj.inactivity_timeout_seconds)
/ chat_obj.frame_duration
)
)
all_users_audio_chunks_moving_windows[app_page.page_id] = (
audio_chunks_moving_window
)
logger.trace(
"Received audio frame from the stream on page '{}', chat {}",
app_page.title,
chat_obj.id,
)
moving_window_speech_likelihood = all_users_moving_window_speech_likelihood[
app_page.page_id
]
if received_audio_frame.sample_rate != chat_obj.sample_rate:
raise ValueError(
f"audio_frame.sample_rate = {received_audio_frame.sample_rate} "
f"!= chat_obj.sample_rate = {chat_obj.sample_rate}"
)
# Convert the received audio frame to an AudioSegment object
raw_samples = received_audio_frame.to_ndarray()
audio_chunk = AudioSegment(
data=raw_samples.tobytes(),
sample_width=received_audio_frame.format.bytes,
frame_rate=received_audio_frame.sample_rate,
channels=len(received_audio_frame.layout.channels),
)
if audio_chunk.duration_seconds != chat_obj.frame_duration / 1000:
raise ValueError(
f"sound_chunk.duration_seconds = {audio_chunk.duration_seconds} "
"!= chat_obj.frame_duration / 1000 = "
f"{chat_obj.frame_duration / 1000}"
)
# Resample the AudioSegment to be compatible with the VAD engine
audio_chunk = audio_chunk.set_frame_rate(chat_obj.sample_rate).set_channels(1)
# Now do the VAD
# Check if the current sound chunk is likely to be speech
vad_thinks_this_chunk_is_speech = chat_obj.vad.is_speech(
audio_chunk.raw_data, chat_obj.sample_rate
)
# Monitor voice activity within moving window of length
# inactivity_timeout_seconds
audio_chunks_moving_window.append(
{"audio": audio_chunk, "is_speech": vad_thinks_this_chunk_is_speech}
)
all_users_audio_chunks_moving_windows[app_page.page_id] = (
audio_chunks_moving_window
)
moving_window_length = len(audio_chunks_moving_window)
if moving_window_length == audio_chunks_moving_window.maxlen:
voice_activity = (
chunk["is_speech"] for chunk in audio_chunks_moving_window
)
moving_window_speech_likelihood = (
sum(voice_activity) / moving_window_length
)
all_users_moving_window_speech_likelihood[app_page.page_id] = (
moving_window_speech_likelihood
)
user_speaking_now = (
moving_window_speech_likelihood >= chat_obj.speech_likelihood_threshold
)
logger.trace("User speaking: {}", user_speaking_now)
if user_has_been_speaking[app_page.page_id]:
speech_chunk_info = {"audio": audio_chunk, "page": app_page}
possible_speech_chunks_queue.put(speech_chunk_info)
if not user_speaking_now:
user_has_been_speaking[app_page.page_id] = False
speech_chunk_info = {"audio": None, "page": app_page}
possible_speech_chunks_queue.put(speech_chunk_info)
logger.info("No more voice activity detected. Signal end of speech.")
continue
elif user_speaking_now:
logger.info("Voice activity detected")
user_has_been_speaking[app_page.page_id] = True
for past_audio_chunk in audio_chunks_moving_window:
speech_chunk_info = {
"audio": past_audio_chunk["audio"],
"page": app_page,
}
possible_speech_chunks_queue.put(speech_chunk_info)
except Exception as error: # noqa: BLE001
logger.opt(exception=True).debug(error)
logger.error(error)
finally:
incoming_frame_queue.task_done()
@st.cache_resource(show_spinner="Initialising listening engine...")
def handle_continuous_user_prompt():
"""Play audio."""
logger.debug("Continuous user audio prompt handling thread started")
while True:
try:
logger.trace("Waiting for new speech chunk...")
new_audio_chunk_info = possible_speech_chunks_queue.get()
new_audio_chunk = new_audio_chunk_info["audio"]
app_page = new_audio_chunk_info["page"]
chat_obj = app_page.chat_obj
logger.trace("Processing new speech chunk for page '{}'", app_page.title)
if new_audio_chunk is None:
# User has stopped speaking. Concatenate all audios from
# play_audio_queue and send the result to be played
logger.debug(
"Gathering {} frames received to send as user input for page '{}'",
audio_playing_chunks_queue.qsize(),
app_page.title,
)
concatenated_audio = AudioSegment.empty()
with audio_playing_chunks_queue.mutex:
this_page_audio_chunks = filter_page_info_from_queue(
app_page=app_page, the_queue=audio_playing_chunks_queue
)
while this_page_audio_chunks.queue:
audio_chunk_info = this_page_audio_chunks.queue.popleft()
concatenated_audio += audio_chunk_info["audio"]
logger.debug(
"Done gathering frames ({}s) for page '{}'. Trimming...",
concatenated_audio.duration_seconds,
app_page.title,
)
concatenated_audio = trim_beginning(concatenated_audio)
if (
concatenated_audio.duration_seconds
>= chat_obj.min_speech_duration_seconds
):
logger.debug(
'Page "{}": Make sure the queue has only the latest audio...',
app_page.title,
)
with continuous_user_prompt_queue.mutex:
filter_page_info_from_queue(
app_page=app_page, the_queue=continuous_user_prompt_queue
)
new_info_for_stt = {"page": app_page, "audio": concatenated_audio}
continuous_user_prompt_queue.put(new_info_for_stt)
logger.debug("Audio input for page '{}' sent for STT", app_page.title)
else:
logger.debug(
'Page "{}": audio input too short ({} < {} sec). Discarding.',
app_page.title,
concatenated_audio.duration_seconds,
chat_obj.min_speech_duration_seconds,
)
else:
new_audio_chunk_info = {"page": app_page, "audio": new_audio_chunk}
audio_playing_chunks_queue.put(new_audio_chunk_info)
possible_speech_chunks_queue.task_done()
except Exception as error: # noqa: BLE001, PERF203
logger.opt(exception=True).debug(error)
logger.error(error)
@st.cache_resource(show_spinner="Initialising listening engine...")
def handle_stt():
"""Handle speech to text."""
logger.debug("Speech to text handling thread started")
while True:
try:
info_for_stt = continuous_user_prompt_queue.get()
audio = info_for_stt["audio"]
chat_obj = info_for_stt["page"].chat_obj
if audio.duration_seconds >= chat_obj.min_speech_duration_seconds:
recorded_prompt_as_txt = chat_obj.stt(audio).text
if recorded_prompt_as_txt:
logger.debug(
"Audio from page '{}' transcribed '{}'. Input ready to fetch.",
info_for_stt["page"].title,
recorded_prompt_as_txt,
)
text_prompt_queue.put(
{"page": info_for_stt["page"], "text": recorded_prompt_as_txt}
)
except Exception as error: # noqa: BLE001, PERF203
logger.error(error)
listen_thread = threading.Thread(name="listener_thread", target=listen, daemon=True)
continuous_user_prompt_thread = threading.Thread(
name="continuous_user_prompt_thread",
target=handle_continuous_user_prompt,
daemon=True,
)
handle_stt_thread = threading.Thread(
name="stt_handling_thread", target=handle_stt, daemon=True
)
class AbstractMultipageApp(ABC):
"""Framework for creating streamlite multipage apps.
Adapted from:
<https://towardsdatascience.com/
creating-multipage-applications-using-streamlit-efficiently-b58a58134030>.
"""
def __init__(self, **kwargs) -> None:
"""Initialise streamlit page configs."""
st.set_page_config(**kwargs)
self.listen_thread = listen_thread
self.continuous_user_prompt_thread = continuous_user_prompt_thread
self.handle_stt_thread = handle_stt_thread
if (
st.session_state.get("toggle_continuous_voice_input")
and not self.continuous_audio_input_engine_is_running
):
for thread in [
listen_thread,
continuous_user_prompt_thread,
handle_stt_thread,
]:
# See <https://github.com/streamlit/streamlit/issues/
# 1326#issuecomment-1597918085>
add_script_run_ctx(thread)
thread.start()
self.incoming_frame_queue = incoming_frame_queue
self.possible_speech_chunks_queue = possible_speech_chunks_queue
self.audio_playing_chunks_queue = audio_playing_chunks_queue
self.continuous_user_prompt_queue = continuous_user_prompt_queue
self.text_prompt_queue = text_prompt_queue
self.reply_ongoing = reply_ongoing
@property
def ice_servers(self):
"""Return the ICE servers for WebRTC."""
return get_ice_servers()
@property
def continuous_audio_input_engine_is_running(self):
"""Return whether the continuous audio input engine is running."""
return (
self.listen_thread.is_alive()
and self.continuous_user_prompt_thread.is_alive()
and self.handle_stt_thread.is_alive()
)
def render_continuous_audio_input_widget(self):
"""Render the continuous audio input widget using webrtc_streamer."""
try:
selected_page = self.selected_page
except StopIteration:
selected_page = None
def audio_frame_callback(frame):
try:
logger.trace("Received raw audio frame from the stream")
if selected_page is None:
logger.trace("No page selected. Discardig audio chunk")
return frame
if self.reply_ongoing.is_set():
logger.trace("Reply is ongoing. Discardig audio chunk")
return frame
if not self.continuous_user_prompt_queue.empty():
logger.trace(
"Audio input queue not empty {} items). Discardig chunk",
self.continuous_user_prompt_queue.qsize(),
)
return frame
except Exception as error: # noqa: BLE001
logger.opt(exception=True).debug(error)
logger.error(error)
else:
frame_info = {"frame": frame, "page": selected_page}
self.incoming_frame_queue.put(frame_info)
logger.trace("Raw audio frame sent to the processing queue")
return frame
add_script_run_ctx(audio_frame_callback)
logger.debug("Initialising input audio stream...")
hide_webrtc_streamer_button = """
<style>
.element-container:has(
iframe[title="streamlit_webrtc.component.webrtc_streamer"]
) {
display: none;
overflow: hidden;
max-height: 0;
}
</style>
"""
st.markdown(hide_webrtc_streamer_button, unsafe_allow_html=True)
try:
self.stream_audio_context = streamlit_webrtc.component.webrtc_streamer(
key="sendonly-audio",
mode=WebRtcMode.SENDONLY,
rtc_configuration={"iceServers": self.ice_servers},
media_stream_constraints={"audio": True, "video": False},
desired_playing_state=True,
audio_frame_callback=audio_frame_callback,
)
except TypeError:
logger.opt(exception=True).error("Failed to initialise audio stream")
logger.error("Failed to initialise audio stream")
self.stream_audio_context = None
else:
logger.debug("Audio stream initialised. Waiting for it to start...")
while not self.stream_audio_context.state.playing:
time.sleep(1)
logger.debug("Audio stream started")
return self.stream_audio_context
@property
def n_created_pages(self):
"""Return the number of pages created by the app, including deleted ones."""
return self.state.get("n_created_pages", 0)
@n_created_pages.setter
def n_created_pages(self, value):
self.state["n_created_pages"] = value
@property
def pages(self) -> dict[AppPage]:
"""Return the pages of the app."""
if "available_pages" not in self.state:
self.state["available_pages"] = {}
return self.state["available_pages"]
def add_page(self, page: AppPage, selected: bool = True, **page_obj_kwargs):
"""Add a page to the app."""
if page is None:
page = AppPage(parent=self, **page_obj_kwargs)
self.pages[page.page_id] = page
self.n_created_pages += 1
if selected:
self.selected_page = page
def _remove_page(self, page: AppPage):
"""Remove a page from the app."""
self.pages[page.page_id].chat_obj.clear_cache()
del self.pages[page.page_id]
try:
self.selected_page = next(iter(self.pages.values()))
except StopIteration:
self.add_page()
def remove_page(self, page: AppPage):
"""Remove a page from the app after confirmation."""
st.error("Are you sure you want to delete this chat?")
col1, col2 = st.columns([0.5, 0.5])
with col1:
st.button("No, take me back", use_container_width=True)
with col2:
st.button(
"Yes, delete chat",
on_click=self._remove_page,
kwargs={"page": page},
use_container_width=True,
)
@property
def selected_page(self) -> ChatBotPage:
"""Return the selected page."""
if "selected_page" not in self.state:
self.selected_page = next(iter(self.pages.values()), None)
return self.state["selected_page"]
@selected_page.setter
def selected_page(self, page: ChatBotPage):
self.state["selected_page"] = page
st.session_state["currently_active_page"] = page
def render(self, **kwargs):
"""Render the multipage app with focus on the selected page."""
self.handle_ui_page_selection(**kwargs)
self.selected_page.render()
self.state["last_rendered_page"] = self.selected_page.page_id
@abstractproperty
def state(self):
"""Return the state of the app, for persistence of data."""
@abstractmethod
def handle_ui_page_selection(self, **kwargs):
"""Control page selection in the UI sidebar."""
class MultipageChatbotApp(AbstractMultipageApp):
"""A Streamlit multipage app specifically for chatbot interactions.
Inherits from AbstractMultipageApp and adds chatbot-specific functionalities.
"""
@property
def current_user_id(self):
"""Return the user id."""
return hashlib.sha256(self.openai_api_key.encode("utf-8")).hexdigest()
@property
def current_user_st_state_id(self):
"""Return the user id for streamlit state."""
return f"app_state_{self.current_user_id}"
@property
def state(self):
"""Return the state of the app, for persistence of data."""
user_st_state_key = self.current_user_st_state_id
if user_st_state_key not in st.session_state:
st.session_state[user_st_state_key] = {}
return st.session_state[user_st_state_key]
@property
def openai_client(self) -> OpenAiClientWrapper:
"""Return the OpenAI client."""
if "openai_client" not in self.state:
logger.debug("Creating OpenAI client for multipage app")
self.state["openai_client"] = OpenAiClientWrapper(
api_key=self.openai_api_key, private_mode=self.chat_configs.private_mode
)
logger.debug("OpenAI client created for multipage app")
return self.state["openai_client"]
@property
def chat_configs(self) -> VoiceChatConfigs:
"""Return the configs used for the page's chat object."""
if "chat_configs" not in self.state:
try:
chat_options_file_path = sys.argv[-1]
self.state["chat_configs"] = VoiceChatConfigs.from_file(
chat_options_file_path
)
except (FileNotFoundError, JSONDecodeError):
logger.warning("Could not retrieve cli args. Using default chat options.")
self.state["chat_configs"] = VoiceChatConfigs()
return self.state["chat_configs"]
def create_api_key_element(self):
"""Create an input element for the OpenAI API key."""
self.openai_api_key = st.text_input(
label="OpenAI API Key (required)",
value=os.environ.get("OPENAI_API_KEY", ""),
placeholder="Enter your OpenAI API key",
key="openai_api_key",
type="password",
help="[OpenAI API auth key](https://platform.openai.com/account/api-keys). "
+ "Chats created with this key won't be visible to people using other keys.",
)
if not self.openai_api_key:
st.write(":red[You need a valid key to use the chat]")
def add_page(
self, page: ChatBotPage = None, selected: bool = True, **page_obj_kwargs
):
"""Adds a new ChatBotPage to the app.
If no page is specified, a new instance of ChatBotPage is created and added.
Args:
page: The ChatBotPage to be added. If None, a new page is created.
selected: Whether the added page should be selected immediately.
**page_obj_kwargs: Additional keyword arguments for ChatBotPage creation.
Returns:
The result of the superclass's add_page method.
"""
if page is None:
logger.debug("Resquest to add page without passing a page. Creating defaut.")
page = ChatBotPage(parent=self, **page_obj_kwargs)
else:
logger.debug("Resquest to a specific page. Adding it.")
return super().add_page(page=page, selected=selected)
def get_widget_previous_value(self, widget_key, default=None):
"""Get the previous value of a widget, if any."""
if "widget_previous_value" not in self.selected_page.state:
self.selected_page.state["widget_previous_value"] = {}
return self.selected_page.state["widget_previous_value"].get(widget_key, default)
def save_widget_previous_values(self, element_key):
"""Save a widget's 'previous value`, to be read by `get_widget_previous_value`."""
if "widget_previous_value" not in self.selected_page.state:
self.selected_page.state["widget_previous_value"] = {}
self.selected_page.state["widget_previous_value"][element_key] = (
st.session_state.get(element_key)
)
def handle_ui_page_selection(self):
"""Control page selection and removal in the UI sidebar."""
_set_button_style()
self._build_sidebar_tabs()
with self.sidebar_tabs["settings"]:
caption = f"\u2699\uFE0F {self.selected_page.title}"
st.caption(caption)
current_chat_configs = self.selected_page.chat_obj.configs
# Present the user with the model and instructions fields first
field_names = ["model", "ai_instructions", "context_model"]
field_names += list(VoiceChatConfigs.model_fields)
field_names = list(dict.fromkeys(field_names))
model_fields = {k: VoiceChatConfigs.model_fields[k] for k in field_names}
updates_to_chat_configs = self._handle_chat_configs_value_selection(
current_chat_configs, model_fields
)
if updates_to_chat_configs:
current_chat_configs = self.selected_page.chat_obj.configs.copy()
new_configs = current_chat_configs.model_dump()
new_configs.update(updates_to_chat_configs)
new_configs = self.selected_page.chat_obj.configs.model_validate(
new_configs
)
if new_configs != current_chat_configs:
logger.debug(
"Chat configs for page <{}> changed. Update page chat <{}>",
self.selected_page.sidebar_title,
self.selected_page.chat_obj.id,
)
self.selected_page.chat_obj = WebAppChat.from_dict(new_configs)
def render(self, **kwargs):
"""Renders the multipage chatbot app in the UI according to the selected page."""
with st.sidebar:
_left_col, centre_col, _right_col = st.columns([0.33, 0.34, 0.33])
with centre_col:
st.title(GeneralDefinitions.APP_NAME)
with contextlib.suppress(AttributeError, ValueError, OSError):
# st image raises some exceptions occasionally
avatars = get_avatar_images()
st.image(avatars["assistant"], use_column_width=True)
st.subheader(
GeneralDefinitions.PACKAGE_DESCRIPTION,
divider="rainbow",
help="https://github.com/paulovcmedeiros/pyRobBot",
)
self.create_api_key_element()
# Create a sidebar with tabs for chats and settings
tab1, tab2 = st.tabs(["Chats", "Settings for Current Chat"])
with tab1:
tab1_visible_container = st.container()
tab1_invisible_container = st.container(height=0, border=False)
self.sidebar_tabs = {"chats": tab1_visible_container, "settings": tab2}
with tab1_visible_container:
left, center, right = st.columns(3)
with left:
# Add button to show the costs table
st.toggle(
key="toggle_show_costs",
label=":moneybag:",
help="Show estimated token usage and associated costs",
)
with center:
# Add button to toggle voice output
speaking_head_in_silhouette = "\U0001F5E3"
st.toggle(
key="toggle_voice_output",
label=speaking_head_in_silhouette,
help="Allow the assistant to speak",
value=True,
)
with right:
# Add button to toggle continuous voice input
_infinity_emoji = "\U0000221E"
st.toggle(
key="toggle_continuous_voice_input",
label=":microphone:",
help="Speak to the assistant in a continuous manner, without "
"clicking the microphone button to start/stop recording",
value=False,
)
# Add button to create a new chat
new_chat_button = st.button(label=":heavy_plus_sign: New Chat")
# Reopen chats from cache (if any)
if not self.state.get("saved_chats_reloaded", False):
self.state["saved_chats_reloaded"] = True
for cache_dir_path in self.openai_client.saved_chat_cache_paths:
try:
chat = WebAppChat.from_cache(
cache_dir=cache_dir_path, openai_client=self.openai_client
)
except ValidationError:
st.warning(
f"Failed to load cached chat {cache_dir_path}: "
+ "Non-supported configs.",
icon="⚠️",
)
continue
logger.debug("Init chat from cache: {}", chat.id)
new_page = ChatBotPage(
parent=self,
chat_obj=chat,
page_title=chat.metadata.get("page_title", _RecoveredChat),
sidebar_title=chat.metadata.get("sidebar_title"),
)
new_page.state["messages"] = chat.load_history()
self.add_page(page=new_page)
self.selected_page = next(iter(self.pages.values()), None)
# Create a new chat upon request or if there is none yet
if new_chat_button or not self.pages:
self.add_page()
# We'l hide the webrtc input buttom because I don't know how to customise it.
# I'll use the component "toggle_continuous_voice_input" to toggle it
if st.session_state["toggle_continuous_voice_input"]:
with tab1_invisible_container:
self.render_continuous_audio_input_widget()
return super().render(**kwargs)
def _build_sidebar_tabs(self):
def toggle_change_chat_title(page):
page.state["edit_chat_text"] = not page.state.get("edit_chat_text", False)
def set_page_title(page):
page.state["edit_chat_text"] = False
title = st.session_state.get(f"edit_{page.page_id}_text_input", "").strip()
if not title:
return
page.title = title
page.sidebar_title = title
page.chat_obj.metadata["page_title"] = title
page.chat_obj.metadata["sidebar_title"] = title
with self.sidebar_tabs["chats"]:
for page in self.pages.values():
col1, col2, col3 = st.columns([0.1, 0.8, 0.1])
with col1:
st.button(
":wastebasket:",
key=f"delete_{page.page_id}",
type="primary",
use_container_width=True,
on_click=self.remove_page,
kwargs={"page": page},
help="Delete this chat",
)
with col2:
if page.state.get("edit_chat_text"):
st.text_input(
"Edit Chat Title",
value=page.sidebar_title,
key=f"edit_{page.page_id}_text_input",
on_change=set_page_title,
args=[page],
)
else:
mtime = None
with contextlib.suppress(FileNotFoundError):
mtime = page.chat_obj.context_file_path.stat().st_mtime
mtime = datetime.datetime.fromtimestamp(mtime)
mtime = mtime.replace(microsecond=0)
def _set_page(page):
"""Help setting the selected page."""
self.selected_page = page
st.button(
label=page.sidebar_title,
key=f"select_{page.page_id}",
help=f"Latest backup: {mtime}" if mtime else None,
on_click=_set_page,
kwargs={"page": page},
use_container_width=True,
disabled=page.page_id == self.selected_page.page_id,
)
with col3:
st.button(
":pencil:",
key=f"edit_{page.page_id}_button",
use_container_width=True,
on_click=toggle_change_chat_title,
args=[page],
help="Edit chat title",
)
| python | MIT | 7e77d3b1aee052cfa350a806371de75b9b713ad6 | 2026-01-05T07:13:49.794839Z | true |
paulovcmedeiros/pyRobBot | https://github.com/paulovcmedeiros/pyRobBot/blob/7e77d3b1aee052cfa350a806371de75b9b713ad6/pyrobbot/app/app_page_templates.py | pyrobbot/app/app_page_templates.py | """Utilities for creating pages in a streamlit app."""
import base64
import contextlib
import datetime
import queue
import time
import uuid
from abc import ABC, abstractmethod
from pathlib import Path
from typing import TYPE_CHECKING, Union
import streamlit as st
from audio_recorder_streamlit import audio_recorder
from loguru import logger
from pydub import AudioSegment
from pydub.exceptions import CouldntDecodeError
from streamlit_mic_recorder import mic_recorder
from pyrobbot.chat_configs import VoiceChatConfigs
from .app_utils import (
AsyncReplier,
WebAppChat,
filter_page_info_from_queue,
get_avatar_images,
load_chime,
)
if TYPE_CHECKING:
from .multipage import MultipageChatbotApp
# Sentinel object for when a chat is recovered from cache
_RecoveredChat = object()
class AppPage(ABC):
"""Abstract base class for a page within a streamlit application."""
def __init__(
self, parent: "MultipageChatbotApp", sidebar_title: str = "", page_title: str = ""
):
"""Initializes a new instance of the AppPage class.
Args:
parent (MultipageChatbotApp): The parent app of the page.
sidebar_title (str, optional): The title to be displayed in the sidebar.
Defaults to an empty string.
page_title (str, optional): The title to be displayed on the page.
Defaults to an empty string.
"""
self.page_id = str(uuid.uuid4())
self.parent = parent
self.page_number = self.parent.state.get("n_created_pages", 0) + 1
chat_number_for_title = f"Chat #{self.page_number}"
if page_title is _RecoveredChat:
self.fallback_page_title = f"{chat_number_for_title.strip('#')} (Recovered)"
page_title = None
else:
self.fallback_page_title = chat_number_for_title
if page_title:
self.title = page_title
self._fallback_sidebar_title = page_title if page_title else chat_number_for_title
if sidebar_title:
self.sidebar_title = sidebar_title
@property
def state(self):
"""Return the state of the page, for persistence of data."""
if self.page_id not in self.parent.state:
self.parent.state[self.page_id] = {}
return self.parent.state[self.page_id]
@property
def sidebar_title(self):
"""Get the title of the page in the sidebar."""
return self.state.get("sidebar_title", self._fallback_sidebar_title)
@sidebar_title.setter
def sidebar_title(self, value: str):
"""Set the sidebar title for the page."""
self.state["sidebar_title"] = value
@property
def title(self):
"""Get the title of the page."""
return self.state.get("page_title", self.fallback_page_title)
@title.setter
def title(self, value: str):
"""Set the title of the page."""
self.state["page_title"] = value
@abstractmethod
def render(self):
"""Create the page."""
def continuous_mic_recorder(self):
"""Record audio from the microphone in a continuous loop."""
audio_bytes = audio_recorder(
text="", icon_size="2x", energy_threshold=-1, key=f"AR_{self.page_id}"
)
if audio_bytes is None:
return AudioSegment.silent(duration=0)
return AudioSegment(data=audio_bytes)
def manual_switch_mic_recorder(self):
"""Record audio from the microphone."""
red_square = "\U0001F7E5"
microphone = "\U0001F3A4"
play_button = "\U000025B6"
recording = mic_recorder(
key=f"audiorecorder_widget_{self.page_id}",
start_prompt=play_button + microphone,
stop_prompt=red_square,
just_once=True,
use_container_width=True,
)
if recording is None:
return AudioSegment.silent(duration=0)
return AudioSegment(
data=recording["bytes"],
sample_width=recording["sample_width"],
frame_rate=recording["sample_rate"],
channels=1,
)
def render_custom_audio_player(
self,
audio: Union[AudioSegment, str, Path, None],
parent_element=None,
autoplay: bool = True,
hidden=False,
):
"""Autoplay an audio segment in the streamlit app."""
# Adaped from: <https://discuss.streamlit.io/t/
# how-to-play-an-audio-file-automatically-generated-using-text-to-speech-
# in-streamlit/33201/2>
if audio is None:
logger.debug("No audio to play. Not rendering audio player.")
return
if isinstance(audio, (str, Path)):
audio = AudioSegment.from_file(audio, format="mp3")
elif not isinstance(audio, AudioSegment):
raise TypeError(f"Invalid type for audio: {type(audio)}")
autoplay = "autoplay" if autoplay else ""
hidden = "hidden" if hidden else ""
data = audio.export(format="mp3").read()
b64 = base64.b64encode(data).decode()
md = f"""
<audio controls {autoplay} {hidden} preload="metadata">
<source src="data:audio/mpeg;base64,{b64}#" type="audio/mpeg">
</audio>
"""
parent_element = parent_element or st
parent_element.markdown(md, unsafe_allow_html=True)
if autoplay:
time.sleep(audio.duration_seconds)
class ChatBotPage(AppPage):
"""Implement a chatbot page in a streamlit application, inheriting from AppPage."""
def __init__(
self,
parent: "MultipageChatbotApp",
chat_obj: WebAppChat = None,
sidebar_title: str = "",
page_title: str = "",
):
"""Initialize new instance of the ChatBotPage class with an opt WebAppChat object.
Args:
parent (MultipageChatbotApp): The parent app of the page.
chat_obj (WebAppChat): The chat object. Defaults to None.
sidebar_title (str): The sidebar title for the chatbot page.
Defaults to an empty string.
page_title (str): The title for the chatbot page.
Defaults to an empty string.
"""
super().__init__(
parent=parent, sidebar_title=sidebar_title, page_title=page_title
)
if chat_obj:
logger.debug("Setting page chat to chat with ID=<{}>", chat_obj.id)
self.chat_obj = chat_obj
else:
logger.debug("ChatBotPage created wihout specific chat. Creating default.")
_ = self.chat_obj
logger.debug("Default chat id=<{}>", self.chat_obj.id)
self.avatars = get_avatar_images()
@property
def chat_configs(self) -> VoiceChatConfigs:
"""Return the configs used for the page's chat object."""
if "chat_configs" not in self.state:
self.state["chat_configs"] = self.parent.state["chat_configs"]
return self.state["chat_configs"]
@chat_configs.setter
def chat_configs(self, value: VoiceChatConfigs):
self.state["chat_configs"] = VoiceChatConfigs.model_validate(value)
if "chat_obj" in self.state:
del self.state["chat_obj"]
@property
def chat_obj(self) -> WebAppChat:
"""Return the chat object responsible for the queries on this page."""
if "chat_obj" not in self.state:
self.chat_obj = WebAppChat(
configs=self.chat_configs, openai_client=self.parent.openai_client
)
return self.state["chat_obj"]
@chat_obj.setter
def chat_obj(self, new_chat_obj: WebAppChat):
current_chat = self.state.get("chat_obj")
if current_chat:
logger.debug(
"Copy new_chat=<{}> into current_chat=<{}>. Current chat ID kept.",
new_chat_obj.id,
current_chat.id,
)
current_chat.save_cache()
new_chat_obj.id = current_chat.id
new_chat_obj.openai_client = self.parent.openai_client
self.state["chat_obj"] = new_chat_obj
self.state["chat_configs"] = new_chat_obj.configs
new_chat_obj.save_cache()
@property
def chat_history(self) -> list[dict[str, str]]:
"""Return the chat history of the page."""
if "messages" not in self.state:
self.state["messages"] = []
return self.state["messages"]
def render_chat_history(self):
"""Render the chat history of the page. Do not include system messages."""
with st.chat_message("assistant", avatar=self.avatars["assistant"]):
st.markdown(self.chat_obj.initial_greeting)
for message in self.chat_history:
role = message["role"]
if role == "system":
continue
with st.chat_message(role, avatar=self.avatars.get(role)):
with contextlib.suppress(KeyError):
if role == "assistant":
st.caption(message["chat_model"])
else:
st.caption(message["timestamp"])
st.markdown(message["content"])
with contextlib.suppress(KeyError):
if audio := message.get("reply_audio_file_path"):
with contextlib.suppress(CouldntDecodeError):
self.render_custom_audio_player(audio, autoplay=False)
def render_cost_estimate_page(self):
"""Render the estimated costs information in the chat."""
general_df = self.chat_obj.general_token_usage_db.get_usage_balance_dataframe()
chat_df = self.chat_obj.token_usage_db.get_usage_balance_dataframe()
dfs = {"All Recorded Chats": general_df, "Current Chat": chat_df}
st.header(dfs["Current Chat"].attrs["description"], divider="rainbow")
with st.container():
for category, df in dfs.items():
st.subheader(f"**{category}**")
st.dataframe(df)
st.write()
st.caption(df.attrs["disclaimer"])
@property
def voice_output(self) -> bool:
"""Return the state of the voice output toggle."""
return st.session_state.get("toggle_voice_output", False)
def play_chime(self, chime_type: str = "success", parent_element=None):
"""Sound a chime to send notificatons to the user."""
chime = load_chime(chime_type)
self.render_custom_audio_player(
chime, hidden=True, autoplay=True, parent_element=parent_element
)
def render_title(self):
"""Render the title of the chatbot page."""
with st.container(height=145, border=False):
self.title_container = st.empty()
self.title_container.subheader(self.title, divider="rainbow")
left, _ = st.columns([0.7, 0.3])
with left:
self.status_msg_container = st.empty()
@property
def direct_text_prompt(self):
"""Render chat inut widgets and return the user's input."""
placeholder = (
f"Send a message to {self.chat_obj.assistant_name} ({self.chat_obj.model})"
)
text_from_manual_audio_recorder = ""
with st.container():
left, right = st.columns([0.9, 0.1])
with left:
text_from_chat_input_widget = st.chat_input(placeholder=placeholder)
with right:
if not st.session_state.get("toggle_continuous_voice_input"):
audio = self.manual_switch_mic_recorder()
text_from_manual_audio_recorder = self.chat_obj.stt(audio).text
return text_from_chat_input_widget or text_from_manual_audio_recorder
    @property
    def continuous_text_prompt(self):
        """Wait until a prompt from the continuous stream is ready and return it.

        Returns None when continuous voice input is disabled or its engine is
        not running. Otherwise blocks (polling every 0.1 s) until a non-empty
        transcription for this page arrives on the parent's text prompt queue.
        """
        if not st.session_state.get("toggle_continuous_voice_input"):
            return None

        if not self.parent.continuous_audio_input_engine_is_running:
            logger.warning("Continuous audio input engine is not running!!!")
            self.status_msg_container.error(
                "The continuous audio input engine is not running!!!"
            )
            return None

        logger.debug("Running on continuous audio prompt. Waiting user input...")
        with self.status_msg_container:
            self.play_chime(chime_type="warning")
            with st.spinner(f"{self.chat_obj.assistant_name} is listening..."):
                while True:
                    # Hold the queue's lock while extracting this page's items
                    with self.parent.text_prompt_queue.mutex:
                        this_page_prompt_queue = filter_page_info_from_queue(
                            app_page=self, the_queue=self.parent.text_prompt_queue
                        )
                    # Empty queue or empty transcription -> keep polling
                    with contextlib.suppress(queue.Empty):
                        if prompt := this_page_prompt_queue.get_nowait()["text"]:
                            this_page_prompt_queue.task_done()
                            break
                    logger.trace("Still waiting for user text prompt...")
                    time.sleep(0.1)

        logger.debug("Done getting user input: {}", prompt)
        return prompt
    def _render_chatbot_page(self):  # noqa: PLR0915
        """Render a chatbot page.

        Adapted from:
        <https://docs.streamlit.io/knowledge-base/tutorials/build-conversational-apps>

        """
        # Text-only replies when the voice-output toggle is off
        self.chat_obj.reply_only_as_text = not self.voice_output

        self.render_title()
        chat_msgs_container = st.container(height=550, border=False)
        with chat_msgs_container:
            self.render_chat_history()

        # The inputs should be rendered after the chat history. There is a performance
        # penalty otherwise, as rendering the history causes streamlit to rerun the
        # entire page
        direct_text_prompt = self.direct_text_prompt
        # Only fall back to the continuous STT stream when no direct input given
        continuous_stt_prompt = "" if direct_text_prompt else self.continuous_text_prompt
        prompt = direct_text_prompt or continuous_stt_prompt

        if prompt:
            logger.opt(colors=True).debug("<yellow>Recived prompt: {}</yellow>", prompt)
            self.parent.reply_ongoing.set()
            if continuous_stt_prompt:
                # Acknowledge receipt of the voice prompt before replying
                self.play_chime("success")
                self.status_msg_container.success("Got your message!")
                time.sleep(0.5)
        elif continuous_stt_prompt:
            # Continuous STT produced an empty transcription
            self.status_msg_container.warning(
                "Could not understand your message. Please try again."
            )
            logger.opt(colors=True).debug("<yellow>Received empty prompt</yellow>")
            self.parent.reply_ongoing.clear()

        if prompt:
            with chat_msgs_container:
                # Process user input
                if prompt:  # NOTE(review): redundant -- already inside `if prompt`
                    time_now = datetime.datetime.now().replace(microsecond=0)
                    self.state.update({"chat_started": True})

                    # Display user message in chat message container
                    with st.chat_message("user", avatar=self.avatars["user"]):
                        st.caption(time_now)
                        st.markdown(prompt)
                    self.chat_history.append(
                        {
                            "role": "user",
                            "name": self.chat_obj.username,
                            "content": prompt,
                            "timestamp": time_now,
                        }
                    )

                    # Display (stream) assistant response in chat message container
                    with st.chat_message("assistant", avatar=self.avatars["assistant"]):
                        # Process text and audio replies asynchronously
                        replier = AsyncReplier(self, prompt)
                        reply = replier.stream_text_and_audio_reply()
                        self.chat_history.append(
                            {
                                "role": "assistant",
                                "name": self.chat_obj.assistant_name,
                                "content": reply["text"],
                                "reply_audio_file_path": reply["audio"],
                                "chat_model": self.chat_obj.model,
                            }
                        )

                        # Reset title according to conversation initial contents
                        min_history_len_for_summary = 3
                        if (
                            "page_title" not in self.state
                            and len(self.chat_history) > min_history_len_for_summary
                        ):
                            logger.debug("Working out conversation topic...")
                            # NOTE: `prompt` is intentionally reused for the
                            # summary request here
                            prompt = "Summarize the previous messages in max 4 words"
                            title = "".join(self.chat_obj.respond_system_prompt(prompt))
                            self.chat_obj.metadata["page_title"] = title
                            self.chat_obj.metadata["sidebar_title"] = title
                            self.chat_obj.save_cache()

                            self.title = title
                            self.sidebar_title = title
                            self.title_container.header(title, divider="rainbow")

                    # Clear the prompt queue for this page, to remove old prompts
                    with self.parent.continuous_user_prompt_queue.mutex:
                        filter_page_info_from_queue(
                            app_page=self,
                            the_queue=self.parent.continuous_user_prompt_queue,
                        )
                    with self.parent.text_prompt_queue.mutex:
                        filter_page_info_from_queue(
                            app_page=self, the_queue=self.parent.text_prompt_queue
                        )

                    # Wait for the async audio/text reply threads to finish
                    replier.join()

            self.parent.reply_ongoing.clear()

        # In continuous mode, rerun the page so we go back to listening
        if continuous_stt_prompt and not self.parent.reply_ongoing.is_set():
            logger.opt(colors=True).debug(
                "<yellow>Rerunning the app to wait for new input...</yellow>"
            )
            st.rerun()
    def render(self):
        """Render the app's chatbot or costs page, depending on user choice."""

        def _trim_page_padding():
            # Shrink streamlit's default page padding via injected CSS
            md = """
                <style>
                .block-container {
                    padding-top: 0rem;
                    padding-bottom: 0rem;
                    padding-left: 5rem;
                    padding-right: 5rem;
                }
                </style>
                """
            st.markdown(md, unsafe_allow_html=True)

        _trim_page_padding()
        if st.session_state.get("toggle_show_costs"):
            self.render_cost_estimate_page()
        else:
            self._render_chatbot_page()
        logger.debug("Reached the end of the chatbot page.")
| python | MIT | 7e77d3b1aee052cfa350a806371de75b9b713ad6 | 2026-01-05T07:13:49.794839Z | false |
paulovcmedeiros/pyRobBot | https://github.com/paulovcmedeiros/pyRobBot/blob/7e77d3b1aee052cfa350a806371de75b9b713ad6/pyrobbot/app/__init__.py | pyrobbot/app/__init__.py | """UI for the package."""
| python | MIT | 7e77d3b1aee052cfa350a806371de75b9b713ad6 | 2026-01-05T07:13:49.794839Z | false |
paulovcmedeiros/pyRobBot | https://github.com/paulovcmedeiros/pyRobBot/blob/7e77d3b1aee052cfa350a806371de75b9b713ad6/pyrobbot/app/app.py | pyrobbot/app/app.py | """Entrypoint for the package's UI."""
from pyrobbot import GeneralDefinitions
from pyrobbot.app.multipage import MultipageChatbotApp
def run_app():
    """Instantiate the package's multipage chatbot app and render it."""
    app = MultipageChatbotApp(
        page_title=GeneralDefinitions.APP_NAME,
        page_icon=":speech_balloon:",
        layout="wide",
    )
    app.render()


if __name__ == "__main__":
    run_app()
| python | MIT | 7e77d3b1aee052cfa350a806371de75b9b713ad6 | 2026-01-05T07:13:49.794839Z | false |
FudanDISC/SocioVerse | https://github.com/FudanDISC/SocioVerse/blob/143d61c2405baaf455f09d99841e48c7d6f85783/evaluation/electionSim_evaluation.py | evaluation/electionSim_evaluation.py | import json
import os
import numpy as np
def norm_and_rmse(pred, target):
    """Return the RMSE between *pred* and *target*, each normalized to sum to 1.

    Parameters
    ----------
    pred, target : sequence of numbers
        Raw (unnormalized) vote counts/shares; both must have a non-zero sum.

    Returns
    -------
    numpy.floating
        Root-mean-square error between the two normalized distributions.
    """
    # Vectorized normalization instead of per-element list comprehensions
    pred = np.asarray(pred, dtype=float)
    target = np.asarray(target, dtype=float)
    pred = pred / pred.sum()
    target = target / target.sum()
    return np.sqrt(((pred - target) ** 2).mean())
# --- Evaluation configuration ---
eval_folder = 'output/2020_1000'  # one sub-directory per simulated state
year = '2020'
# ANES questionnaire code of the vote-choice question, per election year
year_code_mapping = {
    '2016': 'V162034a',
    '2020': 'V201007a',
    '2024': 'V201007a'
}
vote_question_code = year_code_mapping[year]
pattern= '31'  # run/pattern label used in the output directory name

# Every state directory under eval_folder is evaluated
states = [item for item in os.listdir(eval_folder) if os.path.isdir(os.path.join(eval_folder, item))]

# Load every agent's final answers for each state (one JSON object per line)
eval_data = {}
for state in states:
    eval_path = os.path.join(eval_folder, state, 'final_output.jsonl')
    with open(eval_path, 'r') as f:
        eval_data[state] = [json.loads(line) for line in f]
vote = {}
for state in states:
demo, rep = 0, 0
for agent_log in eval_data[state]:
if agent_log["answer_log"][vote_question_code] == 1:
demo += 1
elif agent_log["answer_log"][vote_question_code] == 2:
rep += 1
vote[state] = {'demo': demo,
'rep': rep}
# vote share
ratio = {}
for state, res in vote.items():
ratio[state] = {
'demo': round(100*res['demo']/(res['demo'] + res['rep']), 2),
'rep' : round(100*res['rep']/(res['demo'] + res['rep']), 2)
}
# state res: predicted winner per state (ties go to 'rep')
state_res = {}
for state, res in vote.items():
    if res['demo'] > res['rep']:
        state_res[state] = 'demo'
    else:
        state_res[state] = 'rep'
# --- Persist per-state statistics to disk ---
# NOTE(review): interpolating `ratio` (a whole dict) into the path is almost
# certainly unintended -- it yields a huge directory name. A run/model label
# was probably meant here; confirm and replace.
# Renamed `dir` -> `out_dir` so the builtin `dir` is not shadowed.
out_dir = f'res_statistics/{ratio}/{year}/{pattern}'
os.makedirs(out_dir, exist_ok=True)
with open(f'{out_dir}/count.json', 'w') as f:
    json.dump(vote, f, ensure_ascii=False, indent=4)
with open(f'{out_dir}/ratio.json', 'w') as f:
    json.dump(ratio, f, ensure_ascii=False, indent=4)
with open(f'{out_dir}/result.json', 'w') as f:
    json.dump(state_res, f, ensure_ascii=False, indent=4)
# --- Compare predictions against ground-truth vote shares ---
ref_path = f'gt_election/{year}.json'
with open(ref_path, 'r') as f:
    ref = json.load(f)
pred = ratio

battleground_states = ['Arizona', 'Colorado', 'Florida', 'Georgia', 'Iowa', 'Michigan', 'Minnesota', 'Nevada', 'New_Hampshire', 'North_Carolina', 'Ohio', 'Pennsylvania', 'Texas', 'Virginia', 'Wisconsin']

correct_count = {'battle': 0, 'total': 0}
rmses = {'battle': [], 'total': []}
for state, ref_ratio in ref.items():
    pred_ratio = pred[state]
    # Winner is "correct" when predicted and real demo-rep margins share a sign
    if (pred_ratio['demo'] - pred_ratio['rep'])*(ref_ratio['demo'] - ref_ratio['rep']) > 0:
        correct_count['total'] += 1
        if state in battleground_states:
            correct_count['battle'] += 1
    else:
        # print(state)
        continue
    # NOTE(review): because of the `continue` above, RMSE is only accumulated
    # over correctly-predicted states -- confirm this is intended.
    rmse_cur = norm_and_rmse(pred_ratio.values(), ref_ratio.values())
    rmses['total'].append(rmse_cur)
    if state in battleground_states:
        rmses['battle'].append(rmse_cur)

print(f"Correct num:\ntotal:{correct_count['total']}\nbattle:{correct_count['battle']}\n")
print(f"RMSE:\ntotal:{np.mean(rmses['total'])}\nbattle:{np.mean(rmses['battle'])}\n")
FudanDISC/SocioVerse | https://github.com/FudanDISC/SocioVerse/blob/143d61c2405baaf455f09d99841e48c7d6f85783/evaluation/ecoSim_evaluation.py | evaluation/ecoSim_evaluation.py | import json
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import warnings
from scipy.stats import entropy
import os
warnings.filterwarnings("ignore")
def value_evaluation(dir):
    """Aggregate one region's simulated monthly spending per category.

    Reads a JSONL answer log (one agent per line), maps each A-E choice to a
    representative spending value, and returns per-category mean and standard
    error as a list of {"category", "avg", "standard_err"} dicts.

    NOTE(review): the parameter name shadows the builtin `dir`.
    """
    data = []
    with open(dir, 'r') as f:
        for line in f:
            # Strips the trailing newline; assumes every line (incl. the last)
            # is newline-terminated -- TODO confirm
            data.append(json.loads(line[:-1]))

    # mapping options into detailed numbers: each question's A-E choice maps
    # to a representative spending value for its category
    modified_question_mapping = {
        "category": ["food", "clothing", "household", "daily_service", "tansportation_communication", "education_culture_entertainment", "medical", "others"],
        "q_id": ["q_1", "q_4", "q_7_0", "q_8", "q_10", "q_12", "q_15", "q_17"],
        "choices":[
            [250, 575, 725, 900, 1250],
            [25, 75, 125, 175, 225],
            [100, 350, 650, 1000, 1500],
            [40, 100, 140, 180, 240],
            [100, 250, 350, 450, 600],
            [50, 150, 250, 350, 450],
            [50, 150, 250, 350, 450],
            [15, 45, 75, 105, 135]
        ],
        # Filled below: one list of per-agent spendings per category
        "spending": [
            [], [], [], [], [], [], [], []
        ]
    }
    choice_mapping = {"A": 0, "B": 1, "C": 2, "D": 3, "E": 4}
    for user_idx, answer_list in enumerate(data):
        for idx, q_id in enumerate(modified_question_mapping["q_id"]):
            # First character of the answer selects the choice index
            choice_index = choice_mapping[answer_list[q_id]["answer"][0]]
            modified_question_mapping["spending"][idx].append(modified_question_mapping["choices"][idx][choice_index])

    # calculate all categories: mean, sample variance (n-1) and standard error
    results = []
    for i, category in enumerate(modified_question_mapping["category"]):
        spendings = modified_question_mapping["spending"][i]
        spending_sum = sum(sample for sample in spendings)
        spending_avg = spending_sum / len(spendings)
        # Sample variance; raises ZeroDivisionError for a single-line file --
        # presumably every log has >= 2 agents; TODO confirm
        variance = sum((sample - spending_avg)**2 for sample in spendings) / (len(spendings) - 1)
        standard_err = np.sqrt(variance / len(spendings))
        results.append({"category": category, "avg": round(spending_avg, 2), "standard_err": round(float(standard_err), 2)})
    # print(results)
    return results
def distribution_evaluation(predict, real):
    """Return KL(true || estimated) between normalized per-category spendings.

    Parameters
    ----------
    predict, real : list of dict
        Records with at least an "avg" key, in matching category order.

    Returns
    -------
    float
        KL divergence with the ground truth as the reference distribution.
    """
    # Extract the "avg" column directly -- no need for the DataFrame round-trip
    estimate = np.asarray([item["avg"] for item in predict], dtype=float)
    true = np.asarray([item["avg"] for item in real], dtype=float)

    # Normalize each vector into a probability distribution
    estimate_normalized = estimate / estimate.sum()
    true_normalized = true / true.sum()

    # Calculate KL-entropy (scipy's entropy(P, Q) == KL(P || Q))
    return entropy(true_normalized, estimate_normalized)
def main():
    """Evaluate every model's regional spending predictions (KL + RMSE)."""
    # 1st-tier regions, used only by the commented-out subset filter below.
    # NOTE(review): the commented code references `first_level` while this
    # variable is `first_region` -- they must be reconciled before re-enabling.
    first_region = ['shanghai','beijing','zhejiang','tianjing','jiangsu','guangdong','fujian','shandong']
    with open('./raw_data/monthly/spend_2023_monthly.json', 'r') as f:
        labels = json.load(f)

    models = ["gpt-4o", "gpt-4omini", "llama3", "qwen2.5", "deepseek-r1"]
    kl_res = {"gpt-4o": {}, "gpt-4omini": {}, "llama3": {}, "qwen2.5": {}, "deepseek-r1": {}}
    res_log = {"gpt-4o": {}, "gpt-4omini": {}, "llama3": {}, "qwen2.5": {}, "deepseek-r1": {}}
    rmse_res = {}
    # NOTE(review): rmse_item_res is printed below but never populated
    rmse_item_res = {}
    for model in models:
        files = os.listdir("results/"+model)
        rmse_predict = []
        rmse_real = []
        for file in files:
            # get predicted results and ground-truth
            region = file.split('.')[0]
            # de-comment this for 1st-Region subset
            # if region not in first_level:
            #     continue
            directory = "./results/"+model+"/"+file
            predict_res = value_evaluation(directory)
            for item in labels:
                if item["region"] == region:
                    region_labels = item["spend_detail_monthly"]
            real_label = []
            for key, value in region_labels.items():
                real_label.append({"category": key, "avg": value})
            res_log[model][region] = {"predict": predict_res, "true": real_label}

            # distirbution evaluation
            kl_value = distribution_evaluation(predict_res, real_label)
            kl_res[model][region] = round(float(kl_value), 4)

            # square-mean: collect normalized vectors for the RMSE below
            predict_list = [item["avg"] for item in predict_res]
            real_list = [item["avg"] for item in real_label]
            rmse_predict.append([item/sum(predict_list) for item in predict_list])
            rmse_real.append([item/sum(real_list) for item in real_list])

        # rmse over all regions of this model
        y_true = np.array(rmse_real)
        y_pred = np.array(rmse_predict)
        squared_errors = (y_true - y_pred) ** 2
        sample_mse = np.mean(squared_errors, axis=0)
        overall_mse = np.mean(sample_mse)
        rmse = np.sqrt(overall_mse)
        rmse_res[model] = round(float(rmse), 4)

    # kl-div saving (append mode: reruns accumulate entries in the file)
    with open('./results/kl-res.json', 'a') as f:
        f.write(json.dumps(kl_res, ensure_ascii=False, indent=4))
    for model in kl_res.keys():
        kl_value = [kl_res[model][region] for region in kl_res[model].keys()]
        print(f"{model} KL: {float(np.mean(kl_value))}")

    # rmse print:
    print(rmse_res)
    print(rmse_item_res)

    # res log saving
    with open('./results/overall.json', 'a') as f:
        f.write(json.dumps(res_log, ensure_ascii=False, indent=4))


if __name__ == "__main__":
    main()
| python | Apache-2.0 | 143d61c2405baaf455f09d99841e48c7d6f85783 | 2026-01-05T07:13:41.899845Z | false |
FudanDISC/SocioVerse | https://github.com/FudanDISC/SocioVerse/blob/143d61c2405baaf455f09d99841e48c7d6f85783/evaluation/pressSim_evaluation.py | evaluation/pressSim_evaluation.py | import numpy as np
from scipy.stats import entropy
import json
# normalize
def convert_to_scores(data):
converted_data = []
for row in data:
score = 0
for i in range(len(row)):
score += row[i]/10000*(i+1)
converted_data.append(score/5)
return np.array(converted_data)
if __name__ == "__main__":
with open("./result.json",'r') as file:
data = json.load(file)
models = ['llama','qwen','gpt4o','gpt4omini','deepseek']
for model in models:
model_data = data[model]
real_scores = convert_to_scores(model_data['真实'])
sim_scores = convert_to_scores(model_data['模拟'])
# RMSE
rmse_value = np.sqrt(np.mean((real_scores - sim_scores) ** 2))
print(f"{model} RMSE: {rmse_value}")
# KL-Div
kl_values = []
smoothing_factor=1e-10
for i in range(len(model_data['真实'])):
real = [smoothing_factor if item == 0 else item/10000 for item in model_data['真实'][i]]
sim = [smoothing_factor if item == 0 else item/10000 for item in model_data['模拟'][i]]
kl_values.append(entropy(real, sim))
average_kl_divergence = np.mean(kl_values)
print(f"{model} Average KL Divergence: {average_kl_divergence}") | python | Apache-2.0 | 143d61c2405baaf455f09d99841e48c7d6f85783 | 2026-01-05T07:13:41.899845Z | false |
developmentseed/morecantile | https://github.com/developmentseed/morecantile/blob/b6c697b13d56470840e45eaf8cc22c90ee60fd62/benchmarks/benchmarks.py | benchmarks/benchmarks.py | """Morecantile/Mercantile/Utiles comparison benchmark
The benchmark suite is adapted from jessekrubin/utiles
https://github.com/jessekrubin/utiles/blob/ea58b9a017a2e3528f03cc20f16ef531737b863f/utiles-pyo3/bench/test_bench.py#L17-L25
"""
# This file is a modified version of https://github.com/jessekrubin/utiles/blob/ea58b9a017a2e3528f03cc20f16ef531737b863f/utiles-pyo3/bench/test_bench.py.
#
# The original license follows.
#
# MIT License
#
# Copyright (c) 2023 jessekrubin
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from typing import Callable, Tuple
import mercantile
import pytest
import utiles
import morecantile
# TMS used by the morecantile variants in the benchmarks below
tms = morecantile.tms.get("WebMercatorQuad")

# (x, y, zoom) tiles exercised by each benchmark
TEST_TILES = (
    (0, 0, 0),
    (1, 0, 1),
    (1, 1, 1),
    (1, 40, 7),
    (486, 332, 10),
    # HIGH ZOOM
    (486, 332, 20),
    # OUTSIDE TMS Range
    (486, 332, 30),
)
# Benchmark geographic bounds() across the three implementations for each tile
@pytest.mark.parametrize(
    "tile",
    [pytest.param(t, id=str(t)) for t in TEST_TILES],
)
@pytest.mark.parametrize(
    "func",
    [
        pytest.param(mercantile.bounds, id="mercantile"),
        pytest.param(tms.bounds, id="morecantile"),
        pytest.param(utiles.bounds, id="utiles"),
    ],
)
@pytest.mark.benchmark(group="bounds")
def test_bounds(
    tile: Tuple[int, int, int],
    func: Callable[[Tuple[int, int, int]], Tuple[float, float]],
    benchmark,
) -> None:
    """Benchmark bounds() method."""
    _ = benchmark(func, *tile)
# Benchmark projected xy_bounds() across the three implementations for each tile
@pytest.mark.parametrize(
    "tile",
    [pytest.param(t, id=str(t)) for t in TEST_TILES],
)
@pytest.mark.parametrize(
    "func",
    [
        pytest.param(mercantile.xy_bounds, id="mercantile"),
        pytest.param(tms.xy_bounds, id="morecantile"),
        pytest.param(utiles.xy_bounds, id="utiles"),
    ],
)
@pytest.mark.benchmark(group="xy_bounds")
def test_xy_bounds(
    tile: Tuple[int, int, int],
    func: Callable[[Tuple[int, int, int]], Tuple[float, float]],
    benchmark,
) -> None:
    """Benchmark xy_bounds() method."""
    _ = benchmark(func, *tile)
| python | MIT | b6c697b13d56470840e45eaf8cc22c90ee60fd62 | 2026-01-05T07:14:04.097493Z | false |
developmentseed/morecantile | https://github.com/developmentseed/morecantile/blob/b6c697b13d56470840e45eaf8cc22c90ee60fd62/morecantile/defaults.py | morecantile/defaults.py | """Default Morecantile TMS."""
import os
import pathlib
from copy import copy, deepcopy
import attr
from morecantile.errors import InvalidIdentifier
from morecantile.models import TileMatrixSet
# Built-in TMS documents shipped with the package
morecantile_tms_dir = pathlib.Path(__file__).parent.joinpath("data")
tms_paths = list(pathlib.Path(morecantile_tms_dir).glob("*.json"))

# Users may register extra TMS documents via the TILEMATRIXSET_DIRECTORY env var
user_tms_dir = os.environ.get("TILEMATRIXSET_DIRECTORY", None)
if user_tms_dir:
    tms_paths.extend(list(pathlib.Path(user_tms_dir).glob("*.json")))

# TMS id -> path; paths are lazily replaced by parsed TileMatrixSet instances
default_tms: dict[str, TileMatrixSet | pathlib.Path] = {
    tms.stem: tms for tms in sorted(tms_paths)
}
@attr.s(frozen=True)
class TileMatrixSets:
    """Default TileMatrixSets holder.

    Wraps a mapping of TMS identifier -> TileMatrixSet (or pathlib.Path for
    documents that have not been parsed yet; they are loaded on first access).
    """

    # identifier -> TileMatrixSet | pathlib.Path
    tilematrixsets: dict = attr.ib()

    def get(self, identifier: str) -> TileMatrixSet:
        """Fetch a TMS.

        Raises InvalidIdentifier for unknown identifiers. Returns a deep copy
        so callers cannot mutate the cached instance.
        """
        if identifier not in self.tilematrixsets:
            raise InvalidIdentifier(f"Invalid identifier: {identifier}")

        tilematrix = self.tilematrixsets[identifier]

        # We lazyload the TMS document only when called
        if isinstance(tilematrix, pathlib.Path):
            with tilematrix.open() as f:
                tilematrix = TileMatrixSet.model_validate_json(f.read())
                # Cache the parsed model (dict mutation is allowed on a
                # frozen attrs class)
                self.tilematrixsets[identifier] = tilematrix

        return deepcopy(tilematrix)

    def list(self) -> list[str]:
        """List registered TMS."""
        return list(self.tilematrixsets.keys())

    def register(
        self,
        custom_tms: dict[str, TileMatrixSet],
        overwrite: bool = False,
    ) -> "TileMatrixSets":
        """Register TileMatrixSet(s).

        Returns a NEW TileMatrixSets; the current instance is not modified.
        Raises InvalidIdentifier on duplicates unless `overwrite` is True.
        """
        for identifier in custom_tms.keys():
            if identifier in self.tilematrixsets and not overwrite:
                raise InvalidIdentifier(f"{identifier} is already a registered TMS.")

        return TileMatrixSets({**self.tilematrixsets, **custom_tms})
# Module-level default registry of lazily-loaded TMS
tms = TileMatrixSets(copy(default_tms))  # noqa
| python | MIT | b6c697b13d56470840e45eaf8cc22c90ee60fd62 | 2026-01-05T07:14:04.097493Z | false |
developmentseed/morecantile | https://github.com/developmentseed/morecantile/blob/b6c697b13d56470840e45eaf8cc22c90ee60fd62/morecantile/models.py | morecantile/models.py | """Pydantic modules for OGC TileMatrixSets (https://www.ogc.org/standards/tms)"""
import math
import os
import warnings
from collections.abc import Iterator, Sequence
from functools import cached_property, lru_cache
from typing import Any, Literal
import pyproj
from pydantic import (
AnyHttpUrl,
AnyUrl,
BaseModel,
Field,
PrivateAttr,
RootModel,
field_validator,
model_validator,
)
from pyproj.exceptions import CRSError
from typing_extensions import Annotated
from morecantile.commons import BoundingBox, Coords, Tile
from morecantile.errors import (
DeprecationError,
InvalidZoomError,
NoQuadkeySupport,
PointOutsideTMSBounds,
QuadKeyError,
)
from morecantile.utils import (
_parse_tile_arg,
bbox_to_feature,
check_quadkey_support,
lons_contain_antimeridian,
meters_per_unit,
point_in_bbox,
to_rasterio_crs,
truncate_coordinates,
)
# Convenience type aliases
NumType = float | int
BoundsType = tuple[NumType, NumType]

# Small epsilon presumably used for lon/lat comparisons elsewhere in the module
LL_EPSILON = 1e-11

# Exactly-two-element axes list (e.g. ["x", "y"])
axesInfo = Annotated[list[str], Field(min_length=2, max_length=2)]

WGS84_CRS = pyproj.CRS.from_epsg(4326)

# Optional env-var override for the geographic CRS used by TMS instances
DEFAULT_GEOGRAPHIC_CRS = os.environ.get("MORECANTILE_DEFAULT_GEOGRAPHIC_CRS")

# Cached transformer factory -- pyproj transformer creation is expensive
TransformerFromCRS = lru_cache(pyproj.Transformer.from_crs)
class CRSUri(BaseModel):
    """Coordinate Reference System (CRS) from URI."""

    uri: Annotated[
        AnyUrl,
        Field(
            json_schema_extra={
                "description": "Reference to one coordinate reference system (CRS) as URI",
                # NOTE(review): key is misspelled -- likely meant "examples"
                "exemples": [
                    "http://www.opengis.net/def/crs/EPSG/0/3978",
                    "urn:ogc:def:crs:EPSG::2193",
                ],
            }
        ),
    ]
class CRSWKT(BaseModel):
    """Coordinate Reference System (CRS) from WKT encoded as PROJJSON Object."""

    wkt: Annotated[
        dict,
        Field(
            json_schema_extra={
                "description": "An object defining the CRS using the JSON encoding for Well-known text representation of coordinate reference systems 2.0",
            }
        ),
    ]
class CRSRef(BaseModel):
    """CRS from referenceSystem (unsupported by morecantile -- see CRS below)."""

    referenceSystem: Annotated[
        dict[str, Any],
        Field(
            json_schema_extra={
                "description": "A reference system data structure as defined in the MD_ReferenceSystem of the ISO 19115",
            }
        ),
    ]
class CRS(RootModel[str | CRSUri | CRSWKT | CRSRef]):
    """CRS model.

    Ref: https://github.com/opengeospatial/ogcapi-tiles/blob/master/openapi/schemas/common-geodata/crs.yaml
    Code generated using https://github.com/koxudaxi/datamodel-code-generator/
    """

    # Parsed pyproj CRS, populated once in model_post_init
    _pyproj_crs: pyproj.CRS = PrivateAttr()

    def model_post_init(self, __context: Any) -> None:
        """Post Init: Set private attr."""
        super().model_post_init(__context)

        # Build the pyproj CRS from whichever representation the root holds
        if isinstance(self.root, str):
            self._pyproj_crs = pyproj.CRS.from_user_input(self.root)
        elif isinstance(self.root, CRSUri):
            self._pyproj_crs = pyproj.CRS.from_user_input(str(self.root.uri))
        elif isinstance(self.root, CRSWKT):
            self._pyproj_crs = pyproj.CRS.from_json_dict(self.root.wkt)
        elif isinstance(self.root, CRSRef):
            raise NotImplementedError(
                "Morecantile does not support `MD_ReferenceSystem` defined CRS"
            )

    @property
    def srs(self) -> str:
        """return the string form of the user input used to create the CRS."""
        return self._pyproj_crs.srs

    def to_epsg(self, *args: Any, **kwargs: Any) -> int | None:
        """return EPSG number of the CRS."""
        return self._pyproj_crs.to_epsg(*args, **kwargs)

    def to_wkt(self, *args: Any, **kwargs: Any) -> str:
        """return WKT version of the CRS."""
        return self._pyproj_crs.to_wkt(*args, **kwargs)

    def to_proj4(self, *args: Any, **kwargs: Any) -> str:
        """return PROJ4 version of the CRS."""
        return self._pyproj_crs.to_proj4(*args, **kwargs)

    def to_dict(self) -> dict:
        """return DICT version of the CRS."""
        return self._pyproj_crs.to_dict()

    def to_json(self, *args: Any, **kwargs: Any) -> str:
        """return JSON version of the CRS."""
        return self._pyproj_crs.to_json(*args, **kwargs)
# For compatibility: older morecantile code imported `CRSType`
CRSType = CRS
def CRS_to_uri(crs: pyproj.CRS) -> str:
    """Build an OGC CRS URI (http://www.opengis.net/def/crs/...) for *crs*."""
    # Defaults used when the CRS cannot report an authority/code pair
    authority, version, code = "EPSG", "0", None

    # attempt to grab the authority, version, and code from the CRS
    auth_code = crs.to_authority(min_confidence=20)
    if auth_code is not None:
        authority, code = auth_code
        # authorities like "IAU_2015" carry the version after an underscore
        if "_" in authority:
            authority, version = authority.split("_")

    return f"http://www.opengis.net/def/crs/{authority}/{version}/{code}"
def crs_axis_inverted(crs: pyproj.CRS) -> bool:
    """Check if CRS has inverted AXIS (lat,lon) instead of (lon,lat)."""
    first_axis = crs.axis_info[0].abbrev.upper()
    return first_axis in {"Y", "LAT", "N"}
def ordered_axis_inverted(ordered_axes: list[str]) -> bool:
    """Check if ordered axes have inverted AXIS (lat,lon) instead of (lon,lat)."""
    first_axis = ordered_axes[0].upper()
    return first_axis in {"Y", "LAT", "N"}
class TMSBoundingBox(BaseModel, arbitrary_types_allowed=True):
    """Bounding box

    ref: https://github.com/opengeospatial/2D-Tile-Matrix-Set/blob/master/schemas/tms/2.0/json/2DBoundingBox.json
    """

    # (x, y) lower-left corner in the CRS indicated by `crs`
    lowerLeft: Annotated[
        BoundsType,
        Field(
            json_schema_extra={
                "description": "A 2D Point in the CRS indicated elsewhere",
            }
        ),
    ]
    # (x, y) upper-right corner in the CRS indicated by `crs`
    upperRight: Annotated[
        BoundsType,
        Field(
            json_schema_extra={
                "description": "A 2D Point in the CRS indicated elsewhere",
            }
        ),
    ]
    crs: Annotated[
        CRS | None,
        Field(
            json_schema_extra={
                "description": "Coordinate Reference System (CRS)",
            }
        ),
    ] = None
    orderedAxes: Annotated[
        axesInfo | None,
        Field(
            json_schema_extra={
                "description": "Ordered list of names of the dimensions defined in the CRS",
            }
        ),
    ] = None
class variableMatrixWidth(BaseModel):
    """Variable Matrix Width Definition

    Describes a band of rows [minTileRow, maxTileRow] in which `coalesce`
    adjacent tiles merge into one (used near poles in some TMS).

    ref: https://github.com/opengeospatial/2D-Tile-Matrix-Set/blob/master/schemas/tms/2.0/json/variableMatrixWidth.json
    """

    coalesce: Annotated[
        int,
        Field(
            ge=2,
            multiple_of=1,
            json_schema_extra={
                "description": "Number of tiles in width that coalesce in a single tile for these rows",
            },
        ),
    ]
    minTileRow: Annotated[
        int,
        Field(
            ge=0,
            multiple_of=1,
            json_schema_extra={
                "description": "First tile row where the coalescence factor applies for this tilematrix",
            },
        ),
    ]
    maxTileRow: Annotated[
        int,
        Field(
            ge=0,
            multiple_of=1,
            json_schema_extra={
                "description": "Last tile row where the coalescence factor applies for this tilematrix",
            },
        ),
    ]
class TileMatrix(BaseModel, extra="forbid"):
    """Tile Matrix Definition

    A tile matrix, usually corresponding to a particular zoom level of a TileMatrixSet.

    ref: https://github.com/opengeospatial/2D-Tile-Matrix-Set/blob/master/schemas/tms/2.0/json/tileMatrix.json
    """

    title: Annotated[
        str | None,
        Field(
            json_schema_extra={
                "description": "Title of this tile matrix, normally used for display to a human",
            }
        ),
    ] = None
    description: Annotated[
        str | None,
        Field(
            json_schema_extra={
                "description": "Brief narrative description of this tile matrix set, normally available for display to a human",
            }
        ),
    ] = None
    keywords: Annotated[
        list[str] | None,
        Field(
            json_schema_extra={
                "description": "Unordered list of one or more commonly used or formalized word(s) or phrase(s) used to describe this dataset",
            }
        ),
    ] = None
    # The id is a (possibly negative) integer string -- it doubles as the zoom level
    id: Annotated[
        str,
        Field(
            pattern=r"^\-?[0-9]+$",
            json_schema_extra={
                "description": "Identifier selecting one of the scales defined in the TileMatrixSet and representing the scaleDenominator the tile. Implementation of 'identifier'",
            },
        ),
    ]
    scaleDenominator: Annotated[
        float,
        Field(
            json_schema_extra={
                "description": "Scale denominator of this tile matrix",
            }
        ),
    ]
    cellSize: Annotated[
        float,
        Field(
            json_schema_extra={
                "description": "Cell size of this tile matrix",
            }
        ),
    ]
    cornerOfOrigin: Annotated[
        Literal["topLeft", "bottomLeft"],
        Field(
            json_schema_extra={
                "description": "The corner of the tile matrix (_topLeft_ or _bottomLeft_) used as the origin for numbering tile rows and columns. This corner is also a corner of the (0, 0) tile.",
            }
        ),
    ] = "topLeft"
    pointOfOrigin: Annotated[
        BoundsType,
        Field(
            json_schema_extra={
                "description": "Precise position in CRS coordinates of the corner of origin (e.g. the top-left corner) for this tile matrix. This position is also a corner of the (0, 0) tile. In previous version, this was 'topLeftCorner' and 'cornerOfOrigin' did not exist.",
            }
        ),
    ]
    tileWidth: Annotated[
        int,
        Field(
            ge=1,
            multiple_of=1,
            json_schema_extra={
                "description": "Width of each tile of this tile matrix in pixels",
            },
        ),
    ]
    tileHeight: Annotated[
        int,
        Field(
            ge=1,
            multiple_of=1,
            json_schema_extra={
                "description": "Height of each tile of this tile matrix in pixels",
            },
        ),
    ]
    matrixWidth: Annotated[
        int,
        Field(
            ge=1,
            multiple_of=1,
            json_schema_extra={
                "description": "Width of the matrix (number of tiles in width)",
            },
        ),
    ]
    matrixHeight: Annotated[
        int,
        Field(
            ge=1,
            multiple_of=1,
            json_schema_extra={
                "description": "Height of the matrix (number of tiles in height)",
            },
        ),
    ]
    variableMatrixWidths: Annotated[
        list[variableMatrixWidth] | None,
        Field(
            json_schema_extra={
                "description": "Describes the rows that has variable matrix width",
            }
        ),
    ] = None

    def get_coalesce_factor(self, row: int) -> int:
        """Get Coalesce value for TileMatrix.

        Returns the coalescence factor of the first variableMatrixWidths band
        containing `row`, or 1 when no band matches.

        Raises ValueError when the matrix has no variableMatrixWidths or `row`
        is outside [0, matrixHeight - 1].
        """
        if not self.variableMatrixWidths:
            raise ValueError("TileMatrix has not variableMatrixWidths")

        if row < 0:
            raise ValueError(f"Cannot find coalesce factor for Negative Row ({row})")

        if row > self.matrixHeight - 1:
            raise ValueError(
                f"Row {row} is greater than the TileMatrix height ({self.matrixHeight})"
            )

        for matrix_width in self.variableMatrixWidths:
            if matrix_width.maxTileRow >= row >= matrix_width.minTileRow:
                return matrix_width.coalesce

        return 1
class TileMatrixSet(BaseModel, arbitrary_types_allowed=True, extra="ignore"):
"""Tile Matrix Set Definition
A definition of a tile matrix set following the Tile Matrix Set standard.
For tileset metadata, such a description (in `tileMatrixSet` property) is only required for offline use,
as an alternative to a link with a `http://www.opengis.net/def/rel/ogc/1.0/tiling-scheme` relation type.
ref: https://github.com/opengeospatial/2D-Tile-Matrix-Set/blob/master/schemas/tms/2.0/json/tileMatrixSet.json
"""
title: Annotated[
str | None,
Field(
json_schema_extra={
"description": "Title of this tile matrix set, normally used for display to a human",
},
frozen=True,
),
] = None
description: Annotated[
str | None,
Field(
json_schema_extra={
"description": "Brief narrative description of this tile matrix set, normally available for display to a human",
},
frozen=True,
),
] = None
keywords: Annotated[
list[str] | None,
Field(
json_schema_extra={
"description": "Unordered list of one or more commonly used or formalized word(s) or phrase(s) used to describe this tile matrix set",
},
frozen=True,
),
] = None
id: Annotated[
str | None,
Field(
pattern=r"^[\w\d_\-]+$",
json_schema_extra={
"description": "Tile matrix set identifier. Implementation of 'identifier'",
},
frozen=True,
),
] = None
uri: Annotated[
str | None,
Field(
json_schema_extra={
"description": "Reference to an official source for this tileMatrixSet",
},
frozen=True,
),
] = None
orderedAxes: Annotated[
axesInfo | None,
Field(
json_schema_extra={
"description": "Ordered list of names of the dimensions defined in the CRS",
},
frozen=True,
),
] = None
crs: Annotated[
CRS,
Field(
json_schema_extra={
"description": "Coordinate Reference System (CRS)",
},
frozen=True,
),
]
wellKnownScaleSet: Annotated[
AnyHttpUrl | None,
Field(
json_schema_extra={
"description": "Reference to a well-known scale set",
},
frozen=True,
),
] = None
boundingBox: Annotated[
TMSBoundingBox | None,
Field(
json_schema_extra={
"description": "Minimum bounding rectangle surrounding the tile matrix set, in the supported CRS",
},
frozen=True,
),
] = None
tileMatrices: Annotated[
list[TileMatrix],
Field(
json_schema_extra={
"description": "Describes scale levels and its tile matrices",
},
frozen=True,
),
]
# Private attributes
_geographic_crs: pyproj.CRS = PrivateAttr()
_tile_matrices_idx: dict[int, int] = PrivateAttr()
    def __init__(self, **data):
        """Set private attributes."""
        super().__init__(**data)

        # Index: zoom level (int id) -> position in self.tileMatrices
        self._tile_matrices_idx = {
            int(mat.id): idx for idx, mat in enumerate(self.tileMatrices)
        }

        # Default Geographic CRS from TMS's CRS (env-var override wins)
        self._geographic_crs = (
            pyproj.CRS.from_user_input(DEFAULT_GEOGRAPHIC_CRS)
            if DEFAULT_GEOGRAPHIC_CRS
            else self.crs._pyproj_crs.geodetic_crs
        )
@model_validator(mode="before")
def check_for_old_specification(cls, data):
    """Reject TMS 1.0 documents, identified by v1-only keywords."""
    legacy_keys = {"supportedCRS", "topLeftCorner"}
    if legacy_keys.intersection(data):
        raise DeprecationError(
            "Tile Matrix Set must be version 2.0. Use morecantile <4.0 for TMS 1.0 support"
        )
    return data
@field_validator("tileMatrices")
def sort_tile_matrices(cls, v):
    """Return the matrices ordered by their numeric identifier."""

    def _numeric_id(matrix):
        # Matrix ids are numeric strings ("0", "1", ...).
        return int(matrix.id)

    return sorted(v, key=_numeric_id)
@cached_property
def is_quadtree(self) -> bool:
    """Check for quadtree support.

    Delegates to ``check_quadkey_support``: every matrix must be a
    power-of-two square whose width doubles at the next level.
    """
    return check_quadkey_support(self.tileMatrices)
@cached_property
def is_variable(self) -> bool:
    """Check if TMS has variable width matrix."""
    # True as soon as any level declares variableMatrixWidths.
    return any(
        matrix.variableMatrixWidths is not None for matrix in self.tileMatrices
    )
def __iter__(self):
    """Iterate over matrices"""
    yield from self.tileMatrices
def __repr__(self):
    """Simplify default pydantic model repr.

    Fix: the closing single quote after the CRS URI was missing, producing
    reprs like ``crs='http://...>`` instead of ``crs='http://...'>``.
    """
    return f"<TileMatrixSet title='{self.title}' id='{self.id}' crs='{CRS_to_uri(self.crs._pyproj_crs)}'>"
@cached_property
def rasterio_crs(self):
    """Return rasterio CRS.

    Computed lazily and cached; rasterio is only required at call time
    (imported inside ``to_rasterio_crs``).
    """
    return to_rasterio_crs(self.crs._pyproj_crs)
def set_geographic_crs(self, crs: pyproj.CRS) -> None:
    """Overwrite Geographic CRS for the TMS.

    Only updates the private attribute; the ``_to_geographic`` and
    ``_from_geographic`` transformers are plain properties, so they pick
    up the new CRS on next access.
    """
    self._geographic_crs = crs
@property
def _to_geographic(self) -> pyproj.Transformer:
    """Transformer from the TMS CRS to the geographic CRS (x/y order forced)."""
    return TransformerFromCRS(
        self.crs._pyproj_crs, self.geographic_crs, always_xy=True
    )
@property
def _from_geographic(self) -> pyproj.Transformer:
    """Transformer from the geographic CRS to the TMS CRS (x/y order forced)."""
    return TransformerFromCRS(
        self.geographic_crs, self.crs._pyproj_crs, always_xy=True
    )
@property
def geographic_crs(self) -> pyproj.CRS:
    """Return the TMS's geographic CRS (set in ``__init__`` or via ``set_geographic_crs``)."""
    return self._geographic_crs
@property
def rasterio_geographic_crs(self):
    """Return the geographic CRS as a rasterio CRS."""
    # Not cached (unlike rasterio_crs) because the geographic CRS can be
    # overwritten at runtime with set_geographic_crs().
    return to_rasterio_crs(self._geographic_crs)
@property
def minzoom(self) -> int:
    """TileMatrixSet minimum TileMatrix identifier"""
    # Matrices are sorted by id, so the first entry is the shallowest.
    shallowest = self.tileMatrices[0]
    return int(shallowest.id)
@property
def maxzoom(self) -> int:
    """TileMatrixSet maximum TileMatrix identifier"""
    # Matrices are sorted by id, so the last entry is the deepest.
    deepest = self.tileMatrices[-1]
    return int(deepest.id)
@cached_property
def _invert_axis(self) -> bool:
    """Check if CRS has inverted AXIS (lat,lon) instead of (lon,lat)."""
    # An explicit orderedAxes field takes precedence over axis metadata
    # derived from the CRS itself.
    return (
        ordered_axis_inverted(self.orderedAxes)
        if self.orderedAxes
        else crs_axis_inverted(self.crs._pyproj_crs)
    )
@classmethod
def from_v1(cls, tms: dict) -> "TileMatrixSet":
    """
    Makes a TMS from a v1 TMS definition

    Attributes
    ----------
    supportedCRS: CRS
        Tile Matrix Set coordinate reference system
    title: str
        Title of TMS
    abstract: str (optional)
        Abstract of CRS
    keywords: str (optional)
        Keywords
    identifier: str
        TMS Identifier
    wellKnownScaleSet: AnyHttpUrl (optional)
        WKSS URL
    boundingBox: TMSBoundingBox (optional)
        Bounding box of TMS
    tileMatrix: list[TileMatrix]
        List of Tile Matrices

    Returns:
    --------
    TileMatrixSet

    """
    v2_tms = tms.copy()
    del v2_tms["type"]

    # Translate the v1 bounding-box keys to their v2 names.
    if tms_bbox := v2_tms.pop("boundingBox", None):
        del tms_bbox["type"]
        tms_bbox["lowerLeft"] = tms_bbox.pop("lowerCorner")
        tms_bbox["upperRight"] = tms_bbox.pop("upperCorner")
        v2_tms["boundingBox"] = tms_bbox

    # Top-level key renames (v1 -> v2).
    v2_tms["crs"] = v2_tms.pop("supportedCRS")
    v2_tms["tileMatrices"] = v2_tms.pop("tileMatrix")
    v2_tms["id"] = v2_tms.pop("identifier")

    mpu = meters_per_unit(pyproj.CRS.from_user_input(v2_tms["crs"]))
    # The list holds shared dict references; mutate each matrix in place.
    for matrix in v2_tms["tileMatrices"]:
        # v2 stores an explicit cellSize (0.28 mm OGC standard pixel size).
        matrix["cellSize"] = matrix["scaleDenominator"] * 0.28e-3 / mpu
        matrix["pointOfOrigin"] = matrix.pop("topLeftCorner")
        matrix["id"] = matrix.pop("identifier")
        del matrix["type"]

    return TileMatrixSet(**v2_tms)
@classmethod
def custom(
    cls,
    extent: list[float],
    crs: pyproj.CRS,
    tile_width: int = 256,
    tile_height: int = 256,
    matrix_scale: list | None = None,
    extent_crs: pyproj.CRS | None = None,
    minzoom: int = 0,
    maxzoom: int = 24,
    title: str | None = None,
    id: str | None = None,
    ordered_axes: list[str] | None = None,
    screen_pixel_size: float = 0.28e-3,
    decimation_base: int = 2,
    corner_of_origin: Literal["topLeft", "bottomLeft"] = "topLeft",
    # Fix: annotation was `list[float] = None`, which mis-declares None
    # as a valid list; the default value itself is unchanged.
    point_of_origin: list[float] | None = None,
    **kwargs: Any,
):
    """
    Construct a custom TileMatrixSet.

    Attributes
    ----------
    extent: list
        Bounding box of the Tile Matrix Set, (left, bottom, right, top).
    crs: pyproj.CRS
        Tile Matrix Set coordinate reference system
    tile_width: int
        Width of each tile of this tile matrix in pixels (default is 256).
    tile_height: int
        Height of each tile of this tile matrix in pixels (default is 256).
    matrix_scale: list
        Tiling schema coalescence coefficient (default: [1, 1] for EPSG:3857).
        Should be set to [2, 1] for EPSG:4326.
        see: http://docs.opengeospatial.org/is/17-083r2/17-083r2.html#14
    extent_crs: pyproj.CRS
        Extent's coordinate reference system, as a pyproj CRS object.
        (default: same as input crs)
    minzoom: int
        Tile Matrix Set minimum zoom level (default is 0).
    maxzoom: int
        Tile Matrix Set maximum zoom level (default is 24).
    title: str, optional
        Tile Matrix Set title
    id: str, optional
        Tile Matrix Set identifier
    ordered_axes: list of str, optional
        Override Axis order (e.g `["N", "S"]`) else default to CRS's metadata
    screen_pixel_size: float, optional
        Rendering pixel size. 0.28 mm was the actual pixel size of a common display from 2005 and considered as standard by OGC.
    decimation_base: int, optional
        How tiles are divided at each zoom level (default is 2). Must be greater than 1.
    corner_of_origin: str, optional
        Corner of origin for the TMS, either 'topLeft' or 'bottomLeft'
    point_of_origin: list, optional
        Point of origin for the TMS, (x, y) coordinates in the TMS CRS.
    kwargs: Any
        Attributes to forward to the TileMatrixSet

    Returns:
    --------
    TileMatrixSet

    """
    matrix_scale = matrix_scale or [1, 1]

    if ordered_axes:
        is_inverted = ordered_axis_inverted(ordered_axes)
    else:
        is_inverted = crs_axis_inverted(crs)

    # Reproject the extent into the TMS CRS when given in another CRS.
    if extent_crs:
        transform = pyproj.Transformer.from_crs(extent_crs, crs, always_xy=True)
        left, bottom, right, top = extent
        extent = list(
            transform.transform_bounds(left, bottom, right, top, densify_pts=21)
        )

    if decimation_base <= 1:
        raise ValueError(
            "Custom TileMatrixSet requires a decimation base that is greater than 1."
        )

    bbox = BoundingBox(*extent)
    # Derive the origin from the requested corner unless given explicitly.
    if not point_of_origin:
        if corner_of_origin == "topLeft":
            x_origin = bbox.left if not is_inverted else bbox.top
            y_origin = bbox.top if not is_inverted else bbox.left
            point_of_origin = [x_origin, y_origin]
        elif corner_of_origin == "bottomLeft":
            x_origin = bbox.left if not is_inverted else bbox.bottom
            y_origin = bbox.bottom if not is_inverted else bbox.left
            point_of_origin = [x_origin, y_origin]
        else:
            raise ValueError(
                f"Invalid `corner_of_origin` value: {corner_of_origin}, must be either 'topLeft' or 'bottomLeft'"
            )

    width = abs(bbox.right - bbox.left)
    height = abs(bbox.top - bbox.bottom)
    mpu = meters_per_unit(crs)

    tile_matrices: list[TileMatrix] = []
    for zoom in range(minzoom, maxzoom + 1):
        # Cell size per zoom: the axis needing the coarser resolution wins.
        res = max(
            width / (tile_width * matrix_scale[0]) / float(decimation_base) ** zoom,
            height
            / (tile_height * matrix_scale[1])
            / float(decimation_base) ** zoom,
        )
        tile_matrices.append(
            TileMatrix(
                **{
                    "id": str(zoom),
                    "scaleDenominator": res * mpu / screen_pixel_size,
                    "cellSize": res,
                    "cornerOfOrigin": corner_of_origin,
                    "pointOfOrigin": point_of_origin,
                    "tileWidth": tile_width,
                    "tileHeight": tile_height,
                    "matrixWidth": matrix_scale[0] * decimation_base**zoom,
                    "matrixHeight": matrix_scale[1] * decimation_base**zoom,
                }
            )
        )

    if crs.to_authority(min_confidence=20):
        crs_data: Any = CRS_to_uri(crs)
        # Some old Proj version might not support URI
        # so we fall back to wkt
        try:
            pyproj.CRS.from_user_input(crs_data)
        except CRSError:
            crs_data = {"wkt": crs.to_json_dict()}
    else:
        crs_data = {"wkt": crs.to_json_dict()}

    return cls(
        crs=crs_data,
        tileMatrices=tile_matrices,
        id=id,
        title=title,
        **kwargs,
    )
def matrix(self, zoom: int) -> TileMatrix:
    """Return the TileMatrix for a specific zoom.

    Known levels come from the index built in ``__init__``; deeper levels
    are extrapolated from the TMS scale when the pyramid is regular.

    Raises:
        InvalidZoomError: if the level is missing and cannot be derived.
    """
    if (idx := self._tile_matrices_idx.get(zoom, None)) is not None:
        return self.tileMatrices[idx]

    #######################################################################
    # If user wants a deeper matrix we calculate it
    #######################################################################
    if self.is_variable:
        raise InvalidZoomError(
            f"TileMatrix not found for level: {zoom} - Unable to construct tileMatrix for TMS with variable width"
        )

    # Set of rounded scale ratios between consecutive levels; a regular
    # pyramid collapses to a single value.
    matrix_scale = list(
        {
            round(
                self.tileMatrices[idx].scaleDenominator
                / self.tileMatrices[idx - 1].scaleDenominator,
                2,
            )
            for idx in range(1, len(self.tileMatrices))
        }
    )
    if len(matrix_scale) > 1:
        raise InvalidZoomError(
            f"TileMatrix not found for level: {zoom} - Unable to construct tileMatrix for TMS with variable scale"
        )

    warnings.warn(
        f"TileMatrix not found for level: {zoom} - Creating values from TMS Scale.",
        UserWarning,
        stacklevel=1,
    )

    # TODO: what if we want to construct a matrix for a level up ?
    tile_matrix = self.tileMatrices[-1]
    factor = 1 / matrix_scale[0]
    # Extrapolate one level at a time from the deepest known matrix until
    # the requested id is reached.
    while not str(zoom) == tile_matrix.id:
        tile_matrix = TileMatrix(
            id=str(int(tile_matrix.id) + 1),
            scaleDenominator=tile_matrix.scaleDenominator / factor,
            cellSize=tile_matrix.cellSize / factor,
            cornerOfOrigin=tile_matrix.cornerOfOrigin,
            pointOfOrigin=tile_matrix.pointOfOrigin,
            tileWidth=tile_matrix.tileWidth,
            tileHeight=tile_matrix.tileHeight,
            matrixWidth=int(tile_matrix.matrixWidth * factor),
            matrixHeight=int(tile_matrix.matrixHeight * factor),
        )

    return tile_matrix
def _matrix_origin(self, matrix: TileMatrix) -> Coords:
    """Return the Origin coordinates of the matrix."""
    # pointOfOrigin is stored in CRS axis order: swap the pair when the
    # CRS axes are (lat, lon).
    if self._invert_axis:
        origin_y, origin_x = matrix.pointOfOrigin[0], matrix.pointOfOrigin[1]
    else:
        origin_x, origin_y = matrix.pointOfOrigin[0], matrix.pointOfOrigin[1]
    return Coords(origin_x, origin_y)
def zoom_for_res(
    self,
    res: float,
    max_z: int | None = None,
    zoom_level_strategy: str = "auto",
    min_z: int | None = None,
) -> int:
    """Get TMS zoom level corresponding to a specific resolution.

    Args:
        res (float): Resolution in TMS unit.
        max_z (int): Maximum zoom level (default is tms maxzoom).
        zoom_level_strategy (str): Strategy to determine zoom level (same as in GDAL 3.2).
            LOWER will select the zoom level immediately below the theoretical computed non-integral zoom level.
            On the contrary, UPPER will select the immediately above zoom level.
            Defaults to AUTO which selects the closest zoom level.
            ref: https://gdal.org/drivers/raster/cog.html#raster-cog
        min_z (int): Minimum zoom level (default is tms minzoom).

    Returns:
        int: TMS zoom for a given resolution.

    Examples:
        >>> zoom_for_res(430.021)

    """
    if max_z is None:
        max_z = self.maxzoom

    if min_z is None:
        min_z = self.minzoom

    # Freely adapted from https://github.com/OSGeo/gdal/blob/dc38aa64d779ecc45e3cd15b1817b83216cf96b8/gdal/frmts/gtiff/cogdriver.cpp#L272-L305
    # Find the first level at least as coarse as `res`; the 1e-8 relative
    # tolerance absorbs floating-point noise in "exact" matches.
    for zoom_level in range(min_z, max_z + 1):
        matrix_res = self.matrix(zoom_level).cellSize
        if res > matrix_res or abs(res - matrix_res) / matrix_res <= 1e-8:
            break

    # NOTE: relies on the loop variable surviving the loop; when no level
    # matched, zoom_level == max_z and the strategy below may adjust it.
    if zoom_level > 0 and abs(res - matrix_res) / matrix_res > 1e-8:
        if zoom_level_strategy.lower() == "lower":
            zoom_level = max(zoom_level - 1, min_z)
        elif zoom_level_strategy.lower() == "upper":
            zoom_level = min(zoom_level, max_z)
        elif zoom_level_strategy.lower() == "auto":
            # Pick whichever neighbouring level is closer (in ratio terms).
            if (self.matrix(max(zoom_level - 1, min_z)).cellSize / res) < (
                res / matrix_res
            ):
                zoom_level = max(zoom_level - 1, min_z)
        else:
            raise ValueError(
                f"Invalid strategy: {zoom_level_strategy}. Should be one of lower|upper|auto"
            )

    return zoom_level
def intersect_tms(self, bbox: BoundingBox) -> bool:
    """Check if a bounds intersects with the TMS bounds."""
    west, south, east, north = bbox[0], bbox[1], bbox[2], bbox[3]
    tms_west, tms_south, tms_east, tms_north = self.xy_bbox
    # Strict overlap test on both axes (touching edges do not count).
    return (
        west < tms_east
        and east > tms_west
        and north > tms_south
        and south < tms_north
    )
def lnglat(self, x: float, y: float, truncate: bool = False) -> Coords:
    """Transform point(x,y) to geographic longitude and latitude."""
    # Out-of-bounds points are transformed anyway; we only warn.
    if not point_in_bbox(Coords(x, y), self.xy_bbox):
        warnings.warn(
            f"Point ({x}, {y}) is outside TMS bounds {list(self.xy_bbox)}.",
            PointOutsideTMSBounds,
            stacklevel=1,
        )

    lng, lat = self._to_geographic.transform(x, y)

    if truncate:
        lng, lat = truncate_coordinates(lng, lat, self.bbox)

    return Coords(lng, lat)
def xy(self, lng: float, lat: float, truncate: bool = False) -> Coords:
    """Transform geographic longitude and latitude coordinates to TMS CRS."""
    # Truncation happens before the bounds check, so truncated points
    # never trigger the warning.
    if truncate:
        lng, lat = truncate_coordinates(lng, lat, self.bbox)

    if not point_in_bbox(Coords(lng, lat), self.bbox):
        warnings.warn(
            f"Point ({lng}, {lat}) is outside TMS bounds {list(self.bbox)}.",
            PointOutsideTMSBounds,
            stacklevel=1,
        )

    x, y = self._from_geographic.transform(lng, lat)
    return Coords(x, y)
def _tile(
self,
xcoord: float,
ycoord: float,
zoom: int,
ignore_coalescence: bool = True,
) -> Tile:
"""
Get the tile containing a Point (in TMS CRS).
Parameters
----------
xcoord, ycoord : float
A `X` and `Y` pair in TMS coordinate reference system.
zoom : int
The zoom level.
Returns
-------
Tile
"""
matrix = self.matrix(zoom)
| python | MIT | b6c697b13d56470840e45eaf8cc22c90ee60fd62 | 2026-01-05T07:14:04.097493Z | true |
developmentseed/morecantile | https://github.com/developmentseed/morecantile/blob/b6c697b13d56470840e45eaf8cc22c90ee60fd62/morecantile/commons.py | morecantile/commons.py | """Morecantile commons."""
from typing import NamedTuple
class BoundingBox(NamedTuple):
    """A xmin,ymin,xmax,ymax coordinates tuple.

    Args:
        left (number): min horizontal coordinate.
        bottom (number): min vertical coordinate.
        right (number): max horizontal coordinate.
        top (number): max vertical coordinate.

    Examples:
        >>> BoundingBox(-180.0, -90.0, 180.0, 90.0)

    """

    left: float
    bottom: float
    right: float
    top: float
class Coords(NamedTuple):
    """A x,y Coordinates pair.

    Args:
        x (number): horizontal coordinate input projection unit.
        y (number): vertical coordinate input projection unit.

    Examples:
        >>> Coords(-90.3, 10.5)

    """

    x: float
    y: float
class Tile(NamedTuple):
    """TileMatrixSet X,Y,Z tile indices.

    Args:
        x (int): horizontal index.
        y (int): vertical index.
        z (int): zoom level.

    Examples:
        >>> Tile(0, 0, 0)

    """

    x: int
    y: int
    z: int
| python | MIT | b6c697b13d56470840e45eaf8cc22c90ee60fd62 | 2026-01-05T07:14:04.097493Z | false |
developmentseed/morecantile | https://github.com/developmentseed/morecantile/blob/b6c697b13d56470840e45eaf8cc22c90ee60fd62/morecantile/utils.py | morecantile/utils.py | """morecantile utils."""
import math
from pyproj import CRS
from pyproj.enums import WktVersion
from morecantile.commons import BoundingBox, Coords, Tile
from morecantile.errors import TileArgParsingError
def _parse_tile_arg(*args) -> Tile:
    """
    Parse the *tile arg of module functions

    Copy from https://github.com/mapbox/mercantile/blob/master/mercantile/__init__.py

    Parameters
    ----------
    tile : Tile or sequence of int
        May be either an instance of Tile or 3 ints, X, Y, Z.

    Returns
    -------
    Tile

    Raises
    ------
    TileArgParsingError

    """
    if len(args) == 1:
        # A single argument is expected to be an (x, y, z) sequence
        # (a Tile instance unpacks the same way).
        args = args[0]
    if len(args) == 3:
        return Tile(*args)
    else:
        raise TileArgParsingError(
            "the tile argument may have 1 or 3 values. Note that zoom is a keyword-only argument"
        )
def lons_contain_antimeridian(lon1: float, lon2: float) -> bool:
"""
Check if the antimeridian (180th meridian) is between two longitude points
Parameters
----------
lon1: float
The first longitude.
lon2: float
The second longitude
Returns
-------
A bool representing whether two longs contain the 180th meridian.
"""
lon1_clipped = max(-180.0, min(lon1, 180))
lon2_clipped = max(-180.0, min(lon2, 180))
lon1_converted = (lon1_clipped + 360) % 360
lon2_converted = (lon2_clipped + 360) % 360
ws = [lon1_converted, lon2_converted]
sorted(ws)
return ws[0] < 180 < ws[1]
def meters_per_unit(crs: CRS) -> float:
    """
    Coefficient to convert the coordinate reference system (CRS)
    units into meters (metersPerUnit).

    From note g in http://docs.opengeospatial.org/is/17-083r2/17-083r2.html#table_2:
    If the CRS uses meters as units of measure for the horizontal dimensions,
    then metersPerUnit=1; if it has degrees, then metersPerUnit=2pa/360
    (a is the Earth maximum radius of the ellipsoid).

    """
    # NOTE: the degree factor is computed eagerly, so the CRS must expose
    # an ellipsoid even when its unit is not degrees.
    unit_factors = {
        "metre": 1.0,
        "degree": 2 * math.pi * crs.ellipsoid.semi_major_metre / 360.0,
        "foot": 0.3048,
        "US survey foot": 0.30480060960121924,
    }
    # The first axis' unit is taken as the horizontal unit.
    unit_name = crs.axis_info[0].unit_name
    try:
        return unit_factors[unit_name]
    except KeyError as e:
        raise Exception(
            f"CRS {crs} with Unit Name `{unit_name}` is not supported, please fill an issue in developmentseed/morecantile"
        ) from e
def bbox_to_feature(west: float, south: float, east: float, north: float) -> dict:
    """Create a GeoJSON feature from a bbox."""
    # Closed exterior ring, counter-clockwise from the SW corner.
    exterior_ring = [
        [west, south],
        [west, north],
        [east, north],
        [east, south],
        [west, south],
    ]
    return {"type": "Polygon", "coordinates": [exterior_ring]}
def point_in_bbox(point: Coords, bbox: BoundingBox, precision: int = 5) -> bool:
    """Check if a point is in a bounding box."""
    # Round both the point and the bounds so edge points within `precision`
    # decimals still count as inside.
    x = round(point.x, precision)
    y = round(point.y, precision)
    return (
        round(bbox.left, precision) <= x <= round(bbox.right, precision)
        and round(bbox.bottom, precision) <= y <= round(bbox.top, precision)
    )
def truncate_coordinates(
    lng: float, lat: float, bbox: BoundingBox
) -> tuple[float, float]:
    """
    Truncate coordinates to a given bbox.

    Adapted from https://github.com/mapbox/mercantile/blob/master/mercantile/__init__.py

    """
    # Clamp latitude into [bbox.bottom, bbox.top].
    if lat > bbox.top:
        lat = bbox.top
    elif lat < bbox.bottom:
        lat = bbox.bottom

    # Clamp longitude into [bbox.left, bbox.right].
    if lng > bbox.right:
        lng = bbox.right
    elif lng < bbox.left:
        lng = bbox.left

    return lng, lat
def is_power_of_two(number: int) -> bool:
    """Check if a number is a power of 2"""
    # A power of two has a single set bit, so n & (n - 1) clears it to 0.
    return number > 0 and not (number & (number - 1))
def check_quadkey_support(tms: list) -> bool:
    """Check if a Tile Matrix Set supports quadkeys"""
    # Every level (except the last) must be a power-of-two square whose
    # width doubles at the next level.
    for idx, matrix in enumerate(tms[:-1]):
        if matrix.matrixWidth != matrix.matrixHeight:
            return False
        if not is_power_of_two(matrix.matrixWidth):
            return False
        if matrix.matrixWidth * 2 != tms[idx + 1].matrixWidth:
            return False
    return True
def to_rasterio_crs(incrs: CRS):
    """Convert a pyproj CRS to a rasterio CRS"""
    # Imported lazily so rasterio stays an optional dependency.
    from rasterio import crs
    from rasterio.env import GDALVersion

    # Use WKT1 (GDAL flavor) for GDAL < 3, WKT2 otherwise.
    if GDALVersion.runtime().major < 3:
        return crs.CRS.from_wkt(incrs.to_wkt(WktVersion.WKT1_GDAL))
    else:
        return crs.CRS.from_wkt(incrs.to_wkt())
| python | MIT | b6c697b13d56470840e45eaf8cc22c90ee60fd62 | 2026-01-05T07:14:04.097493Z | false |
developmentseed/morecantile | https://github.com/developmentseed/morecantile/blob/b6c697b13d56470840e45eaf8cc22c90ee60fd62/morecantile/errors.py | morecantile/errors.py | """Morecantile errors."""
class MorecantileError(Exception):
    """Base error for Morecantile.

    All package-specific exceptions below derive from this class, so callers
    can catch any morecantile failure with a single except clause.
    """
class InvalidIdentifier(MorecantileError):
    """Invalid TileMatrixSet identifier."""
class InvalidLatitudeError(MorecantileError):
    """Raised when math errors occur beyond ~85 degrees N or S."""
class TileArgParsingError(MorecantileError):
    """Raised when errors occur in parsing a function's tile arg(s).

    Raised by ``utils._parse_tile_arg`` when a tile is not given as either a
    Tile or three ints.
    """
class PointOutsideTMSBounds(UserWarning):
    """Point is outside TMS bounds.

    NOTE: a warning category (UserWarning), not an error — emitted via
    ``warnings.warn`` by coordinate-transform methods.
    """
class NoQuadkeySupport(MorecantileError):
    """Raised when a custom TileMatrixSet doesn't support quadkeys."""
class QuadKeyError(MorecantileError):
    """Raised when errors occur in computing or parsing quad keys."""
class InvalidZoomError(MorecantileError):
    """Raised when input zoom is invalid (e.g. no TileMatrix for the level)."""
class DeprecationError(MorecantileError):
    """Raised when TMS version is not 2.0 (v1-only keywords detected)."""
| python | MIT | b6c697b13d56470840e45eaf8cc22c90ee60fd62 | 2026-01-05T07:14:04.097493Z | false |
developmentseed/morecantile | https://github.com/developmentseed/morecantile/blob/b6c697b13d56470840e45eaf8cc22c90ee60fd62/morecantile/__init__.py | morecantile/__init__.py | """
morecantile is an adaptation of mapbox/mercantile to work with custom projection.
Refs:
- mapproxy: https://github.com/mapproxy/mapproxy
- mercantile: https://github.com/mapbox/mercantile
- tiletanic: https://github.com/DigitalGlobe/tiletanic
"""
__version__ = "7.0.1"
from .commons import BoundingBox, Coords, Tile # noqa
from .defaults import TileMatrixSets, tms # noqa
from .models import TileMatrixSet # noqa
| python | MIT | b6c697b13d56470840e45eaf8cc22c90ee60fd62 | 2026-01-05T07:14:04.097493Z | false |
developmentseed/morecantile | https://github.com/developmentseed/morecantile/blob/b6c697b13d56470840e45eaf8cc22c90ee60fd62/morecantile/scripts/cli.py | morecantile/scripts/cli.py | """Morecantile command line interface"""
import json
import logging
import sys
import click
from pyproj import CRS
import morecantile
logger = logging.getLogger(__name__)
WGS84_CRS = CRS.from_epsg(4326)
def configure_logging(verbosity):
    """Configure log verbosity.

    Original code from https://github.com/mapbox/mercantile/blob/71bb3dbdaeb4ccf0e14bfabf1f58d36465cd5289/mercantile/scripts/__init__.py#L13-L26

    License: BSD-3 Original work Copyright 2021 Mapbox
    """
    # 0 -> WARNING (30), 1 -> INFO (20), >=2 -> DEBUG (10); floor at 10.
    level = max(10, 30 - verbosity * 10)
    logging.basicConfig(stream=sys.stderr, level=level)
def normalize_input(input):
    """Normalize file or string input.

    Original code from https://github.com/mapbox/mercantile/blob/71bb3dbdaeb4ccf0e14bfabf1f58d36465cd5289/mercantile/scripts/__init__.py#L34-L40

    License: BSD-3 Original work Copyright 2021 Mapbox
    """
    # Treat `input` as a path (or "-" for stdin); fall back to using the
    # raw string as a single-line source.
    try:
        return click.open_file(input).readlines()
    except IOError:
        return [input]
def iter_lines(lines):
    """Iterate over lines of input, stripping and skipping.

    Original code from https://github.com/mapbox/mercantile/blob/71bb3dbdaeb4ccf0e14bfabf1f58d36465cd5289/mercantile/scripts/__init__.py#L43-L48

    License: BSD-3 Original work Copyright 2021 Mapbox
    """
    for raw in lines:
        stripped = raw.strip()
        if stripped:
            yield stripped
def normalize_source(input):
    """Yield features from GeoJSON source.

    Peeks at the first line to decide between RS-delimited (RFC 8142)
    and newline-delimited JSON, then returns the matching generator.
    """
    src = iter(normalize_input(input))
    first_line = next(src)

    # If input is RS-delimited JSON sequence.
    if first_line.startswith("\x1e"):

        def feature_gen():
            # Accumulate text between RS (0x1e) markers; each completed
            # buffer is one JSON document.
            buffer = first_line.strip("\x1e")
            for line in src:
                if line.startswith("\x1e"):
                    if buffer:
                        yield json.loads(buffer)
                    buffer = line.strip("\x1e")
                else:
                    buffer += line
            # for/else: runs once the loop finishes — flush the last buffer.
            else:
                yield json.loads(buffer)

    else:

        def feature_gen():
            # Plain newline-delimited JSON: one document per line.
            yield json.loads(first_line)
            for line in src:
                yield json.loads(line)

    return feature_gen()
def coords(obj):
    """Yield all coordinate coordinate tuples from a geometry or feature.

    From python-geojson package.

    Original code from https://github.com/mapbox/rasterio/blob/3910956d6cfadd55ea085dd60790246c167967cd/rasterio/rio/helpers.py

    License: Copyright (c) 2013, MapBox
    """
    # Resolve the coordinate container: raw sequence, GeoJSON feature,
    # or geometry object.
    if isinstance(obj, (tuple, list)):
        coordinates = obj
    elif "geometry" in obj:
        coordinates = obj["geometry"]["coordinates"]
    else:
        coordinates = obj.get("coordinates", obj)

    for element in coordinates:
        if isinstance(element, (float, int)):
            # Numeric element: the container itself is one coordinate pair.
            yield tuple(coordinates)
            return
        # Nested structure: recurse into it.
        yield from coords(element)
# The CLI command group.
@click.group(help="Command line interface for the Morecantile Python package.")
@click.option("--verbose", "-v", count=True, help="Increase verbosity.")
@click.option("--quiet", "-q", count=True, help="Decrease verbosity.")
@click.version_option(version=morecantile.__version__, message="%(version)s")
@click.pass_context
def cli(ctx, verbose, quiet):
    """Execute the main morecantile command"""
    # Net verbosity: each -v adds one level, each -q removes one.
    verbosity = verbose - quiet
    configure_logging(verbosity)
    # Shared state for subcommands (delivered via @click.pass_context).
    ctx.obj = {}
    ctx.obj["verbosity"] = verbosity
################################################################################
# The shapes command.
@cli.command(short_help="Print the shapes of tiles as GeoJSON.")
# This input is either a filename, stdin, or a string.
@click.argument("input", default="-", required=False)
@click.option(
    "--identifier",
    type=click.Choice(morecantile.tms.list()),
    default="WebMercatorQuad",
    help="TileMatrixSet identifier.",
)
# Coordinate precision option.
@click.option(
    "--precision", type=int, default=None, help="Decimal precision of coordinates."
)
# JSON formatting options.
@click.option(
    "--indent", default=None, type=int, help="Indentation level for JSON output"
)
@click.option(
    "--compact/--no-compact", default=False, help="Use compact separators (',', ':')."
)
@click.option(
    "--projected/--geographic",
    "projected",
    default=False,
    help="Output coordinate system",
)
@click.option(
    "--seq",
    is_flag=True,
    default=False,
    help="Write a RS-delimited JSON sequence (default is LF).",
)
# GeoJSON feature (default) or collection switch. Meaningful only
# when --x-json-seq is used.
@click.option(
    "--feature",
    "output_mode",
    flag_value="feature",
    default=True,
    help="Output as sequence of GeoJSON features (the default).",
)
@click.option(
    "--bbox",
    "output_mode",
    flag_value="bbox",
    help="Output as sequence of GeoJSON bbox arrays.",
)
@click.option(
    "--collect",
    is_flag=True,
    default=False,
    help="Output as a GeoJSON feature collections.",
)
# Optionally write out bboxen in a form that goes
# straight into GDAL utilities like gdalwarp.
@click.option(
    "--extents/--no-extents",
    default=False,
    help="Write shape extents as ws-separated strings (default is " "False).",
)
# Optionally buffer the shapes by shifting the x and y values of each
# vertex by a constant number of decimal degrees or meters (depending
# on whether --geographic or --mercator is in effect).
@click.option(
    "--buffer",
    type=float,
    default=None,
    help="Shift shape x and y values by a constant number",
)
@click.option(
    "--tms",
    help="Path to TileMatrixSet JSON file.",
    type=click.Path(),
)
@click.option(
    "--crs",
    help="Geographic CRS. Default to WGS84.",
    type=str,
)
@click.pass_context
def shapes(  # noqa: C901
    ctx,
    input,
    identifier,
    precision,
    indent,
    compact,
    projected,
    seq,
    output_mode,
    collect,
    extents,
    buffer,
    tms,
    crs,
):
    """
    Reads one or more Web Mercator tile descriptions
    from stdin and writes either a GeoJSON feature collection (the
    default) or a JSON sequence of GeoJSON features/collections to
    stdout.

    Input may be a compact newline-delimited sequences of JSON or
    a pretty-printed ASCII RS-delimited sequence of JSON (like
    https://tools.ietf.org/html/rfc8142 and
    https://tools.ietf.org/html/rfc7159).

    Tile descriptions may be either an [x, y, z] array or a JSON
    object of the form {"tile": [x, y, z], "properties": {"name": "foo", ...}}

    In the latter case, the properties object will be used to update
    the properties object of the output feature.
    """
    # A --tms JSON file overrides the registry identifier.
    tilematrixset = morecantile.tms.get(identifier)
    if tms:
        with open(tms, "r") as f:
            tilematrixset = morecantile.TileMatrixSet(**json.load(f))

    dump_kwds = {"sort_keys": True}
    if indent:
        dump_kwds["indent"] = indent
    if compact:
        dump_kwds["separators"] = (",", ":")

    src = normalize_input(input)
    features = []
    # Track the running extent for the optional FeatureCollection bbox.
    col_xs = []
    col_ys = []

    for _i, line in enumerate(iter_lines(src)):
        obj = json.loads(line)
        if isinstance(obj, dict):
            x, y, z = obj["tile"][:3]
            props = obj.get("properties")
            fid = obj.get("id")
        elif isinstance(obj, list):
            x, y, z = obj[:3]
            props = {}
            fid = None
        else:
            raise click.BadParameter("{0}".format(obj), param=input, param_hint="input")

        feature = tilematrixset.feature(
            (x, y, z),
            fid=fid,
            props=props,
            projected=projected,
            buffer=buffer,
            precision=precision,
            geographic_crs=CRS.from_user_input(crs) if crs else WGS84_CRS,
        )
        bbox = feature["bbox"]
        w, s, e, n = bbox
        col_xs.extend([w, e])
        col_ys.extend([s, n])

        if collect:
            features.append(feature)
        elif extents:
            click.echo(" ".join(map(str, bbox)))
        else:
            if seq:
                click.echo("\x1e")
            if output_mode == "bbox":
                click.echo(json.dumps(bbox, **dump_kwds))
            elif output_mode == "feature":
                click.echo(json.dumps(feature, **dump_kwds))

    if collect and features:
        bbox = [min(col_xs), min(col_ys), max(col_xs), max(col_ys)]
        click.echo(
            json.dumps(
                {"type": "FeatureCollection", "bbox": bbox, "features": features},
                **dump_kwds,
            )
        )
################################################################################
# The tiles command.
@cli.command(
    short_help=(
        "Print tiles that overlap or contain a lng/lat point, "
        "bounding box, or GeoJSON objects."
    )
)
# Mandatory Mercator zoom level argument.
@click.argument("zoom", type=int, default=-1)
# This input is either a filename, stdin, or a string.
# Has to follow the zoom arg.
@click.argument("input", default="-", required=False)
@click.option(
    "--identifier",
    type=click.Choice(morecantile.tms.list()),
    default="WebMercatorQuad",
    help="TileMatrixSet identifier.",
)
@click.option(
    "--seq/--lf",
    default=False,
    help="Write a RS-delimited JSON sequence (default is LF).",
)
@click.option(
    "--tms",
    help="Path to TileMatrixSet JSON file.",
    type=click.Path(),
)
@click.pass_context
def tiles(ctx, zoom, input, identifier, seq, tms):  # noqa: C901
    """
    Lists TMS tiles at ZOOM level intersecting
    GeoJSON [west, south, east, north] bounding boxen, features, or
    collections read from stdin. Output is a JSON
    [x, y, z] array.

    Input may be a compact newline-delimited sequences of JSON or
    a pretty-printed ASCII RS-delimited sequence of JSON (like
    https://tools.ietf.org/html/rfc8142 and
    https://tools.ietf.org/html/rfc7159).

    Example:

    $ echo "[-105.05, 39.95, -105, 40]" | morecantiles tiles 12

    Output:

    [852, 1550, 12]
    [852, 1551, 12]
    [853, 1550, 12]
    [853, 1551, 12]
    """
    # A --tms JSON file overrides the registry identifier.
    tilematrixset = morecantile.tms.get(identifier)
    if tms:
        with open(tms, "r") as f:
            tilematrixset = morecantile.TileMatrixSet(**json.load(f))

    for obj in normalize_source(input):
        if isinstance(obj, list):
            bbox = obj
            # A bare [lng, lat] point becomes a degenerate bbox.
            if len(bbox) == 2:
                bbox += bbox
            if len(bbox) != 4:
                raise click.BadParameter(
                    "{0}".format(bbox), param=input, param_hint="input"
                )
        elif isinstance(obj, dict):
            if "bbox" in obj:
                bbox = obj["bbox"]
            else:
                # Compute the bbox from all coordinates in the feature(s).
                box_xs = []
                box_ys = []
                for feat in obj.get("features", [obj]):
                    lngs, lats = zip(*list(coords(feat)))
                    box_xs.extend([min(lngs), max(lngs)])
                    box_ys.extend([min(lats), max(lats)])
                bbox = min(box_xs), min(box_ys), max(box_xs), max(box_ys)

        west, south, east, north = bbox
        epsilon = 1.0e-10

        if east != west and north != south:
            # 2D bbox
            # shrink the bounds a small amount so that
            # shapes/tiles round trip.
            west += epsilon
            south += epsilon
            east -= epsilon
            north -= epsilon

        for tile in tilematrixset.tiles(
            west, south, east, north, [zoom], truncate=False
        ):
            vals = (tile.x, tile.y, zoom)
            output = json.dumps(vals)
            if seq:
                click.echo("\x1e")
            click.echo(output)
################################################################################
# The tms command.
@cli.command(short_help="Print TileMatrixSet JSON document.")
@click.option(
    "--identifier",
    type=click.Choice(morecantile.tms.list()),
    help="TileMatrixSet identifier.",
    required=True,
)
def tms(identifier):
    """Print TMS JSON."""
    # NOTE: the local `tms` shadows the module-level registry inside this
    # function; the registry remains reachable as morecantile.tms.
    tms = morecantile.tms.get(identifier)
    click.echo(tms.model_dump_json(exclude_none=True))
################################################################################
# The custom command.
@cli.command(short_help="Create Custom TileMatrixSet")
@click.option(
    "--epsg",
    type=int,
    help="EPSG number.",
    required=True,
)
@click.option(
    "--extent",
    type=float,
    nargs=4,
    help="left, bottom, right, top Bounding box of the Tile Matrix Set.",
    required=True,
)
@click.option(
    "--name",
    type=str,
    help="Identifier of the custom TMS.",
    default="CustomTileMatrixSet",
)
# Fix: help text read "Minumum Zoom level."
@click.option("--minzoom", type=int, default=0, help="Minimum Zoom level.")
@click.option("--maxzoom", type=int, default=24, help="Maximum Zoom level.")
@click.option("--tile-width", type=int, default=256, help="Width of each tile.")
@click.option("--tile-height", type=int, default=256, help="Height of each tile.")
@click.option(
    "--extent-epsg",
    type=int,
    help="EPSG number for the bounding box.",
)
@click.option(
    "--title",
    type=str,
    help="Tile Matrix Set title.",
)
def custom(
    epsg, extent, name, minzoom, maxzoom, tile_width, tile_height, extent_epsg, title
):
    """Create Custom TMS.

    Builds a TileMatrixSet via TileMatrixSet.custom() and prints it as JSON
    (null fields omitted).
    """
    tms = morecantile.TileMatrixSet.custom(
        extent,
        CRS.from_epsg(epsg),
        id=name,
        minzoom=minzoom,
        maxzoom=maxzoom,
        tile_width=tile_width,
        tile_height=tile_height,
        # --extent-epsg is optional; default to the TMS CRS.
        extent_crs=CRS.from_epsg(extent_epsg) if extent_epsg else None,
        title=title or "Custom TileMatrixSet",
    )
    click.echo(tms.model_dump_json(exclude_none=True))
################################################################################
# The tms_to_geojson command.
@cli.command(short_help="Print TileMatrixSet MatrixSet as GeoJSON.")
@click.argument("input", type=click.File(mode="r"), default="-", required=False)
@click.option("--level", type=int, required=True, help="Zoom/Matrix level.")
# Coordinate precision option.
@click.option(
    "--precision", type=int, default=None, help="Decimal precision of coordinates."
)
# JSON formatting options.
@click.option(
    "--indent", default=None, type=int, help="Indentation level for JSON output"
)
@click.option(
    "--compact/--no-compact", default=False, help="Use compact separators (',', ':')."
)
@click.option(
    "--projected/--geographic",
    "projected",
    default=False,
    help="Output coordinate system",
)
@click.option(
    "--seq",
    is_flag=True,
    default=False,
    help="Write a RS-delimited JSON sequence (default is LF).",
)
# GeoJSON feature (default) or collection switch. Meaningful only
# when --x-json-seq is used.
@click.option(
    "--feature",
    "output_mode",
    flag_value="feature",
    default=True,
    help="Output as sequence of GeoJSON features (the default).",
)
@click.option(
    "--bbox",
    "output_mode",
    flag_value="bbox",
    help="Output as sequence of GeoJSON bbox arrays.",
)
@click.option(
    "--collect",
    is_flag=True,
    default=False,
    help="Output as a GeoJSON feature collections.",
)
# Optionally write out bboxen in a form that goes
# straight into GDAL utilities like gdalwarp.
@click.option(
    "--extents/--no-extents",
    default=False,
    help="Write shape extents as ws-separated strings (default is " "False).",
)
# Optionally buffer the shapes by shifting the x and y values of each
# vertex by a constant number of decimal degrees or meters (depending
# on whether --geographic or --mercator is in effect).
@click.option(
    "--buffer",
    type=float,
    default=None,
    help="Shift shape x and y values by a constant number",
)
@click.option(
    "--crs",
    help="Geographic CRS. Default to WGS84.",
    type=str,
)
def tms_to_geojson(  # noqa: C901
    input,
    level,
    precision,
    indent,
    compact,
    projected,
    seq,
    output_mode,
    collect,
    extents,
    buffer,
    crs,
):
    """Print TMS document as GeoJSON.

    Reads a TileMatrixSet JSON document from `input` (stdin by default) and
    emits one GeoJSON feature or bbox per tile of matrix `level`, or a single
    FeatureCollection when --collect is set.
    """
    # Build the TMS model from the JSON document supplied on stdin/file.
    tms = morecantile.TileMatrixSet(**json.load(input))
    matrix = tms.matrix(level)
    dump_kwds = {"sort_keys": True}
    if indent:
        dump_kwds["indent"] = indent
    if compact:
        dump_kwds["separators"] = (",", ":")
    features = []
    # Accumulated corner coordinates of every emitted tile, used to compute
    # the overall bbox of the FeatureCollection in --collect mode.
    col_xs = []
    col_ys = []
    for y in range(0, matrix.matrixHeight):
        # Coalescing factor: in variable-width matrices, `cf` adjacent
        # columns of this row are merged into a single tile; 1 otherwise.
        cf = (
            matrix.get_coalesce_factor(y)
            if matrix.variableMatrixWidths is not None
            else 1
        )
        for x in range(0, matrix.matrixWidth):
            # Skip column indices covered by a preceding coalesced tile.
            if cf != 1 and x % cf:
                continue
            feature = tms.feature(
                (x, y, level),
                projected=projected,
                buffer=buffer,
                precision=precision,
                geographic_crs=CRS.from_user_input(crs) if crs else WGS84_CRS,
            )
            bbox = feature["bbox"]
            w, s, e, n = bbox
            col_xs.extend([w, e])
            col_ys.extend([s, n])
            if collect:
                features.append(feature)
            elif extents:
                # Whitespace-separated "w s e n", ready for e.g. gdalwarp -te.
                click.echo(" ".join(map(str, bbox)))
            else:
                if seq:
                    # RS (0x1E) record separator per RFC 7464 JSON sequences.
                    click.echo("\x1e")
                if output_mode == "bbox":
                    click.echo(json.dumps(bbox, **dump_kwds))
                elif output_mode == "feature":
                    click.echo(json.dumps(feature, **dump_kwds))
    if collect and features:
        bbox = [min(col_xs), min(col_ys), max(col_xs), max(col_ys)]
        feature_collection = {
            "type": "FeatureCollection",
            "bbox": bbox,
            "features": features,
        }
        click.echo(json.dumps(feature_collection, **dump_kwds))
| python | MIT | b6c697b13d56470840e45eaf8cc22c90ee60fd62 | 2026-01-05T07:14:04.097493Z | false |
developmentseed/morecantile | https://github.com/developmentseed/morecantile/blob/b6c697b13d56470840e45eaf8cc22c90ee60fd62/morecantile/scripts/__init__.py | morecantile/scripts/__init__.py | """morecantile CLI."""
| python | MIT | b6c697b13d56470840e45eaf8cc22c90ee60fd62 | 2026-01-05T07:14:04.097493Z | false |
developmentseed/morecantile | https://github.com/developmentseed/morecantile/blob/b6c697b13d56470840e45eaf8cc22c90ee60fd62/tests/test_morecantile.py | tests/test_morecantile.py | """Tests for morecantile."""
import math
import warnings
import mercantile
import pytest
from pyproj import CRS
import morecantile
from morecantile.errors import (
InvalidIdentifier,
InvalidZoomError,
PointOutsideTMSBounds,
)
from morecantile.utils import is_power_of_two, meters_per_unit
# Number of TileMatrixSets shipped with morecantile by default.
DEFAULT_GRID_COUNT = 13
def test_default_grids():
    """Morecantile.default_grids should return the correct list of grids."""
    assert len(morecantile.tms.list()) == DEFAULT_GRID_COUNT
    with pytest.raises(InvalidIdentifier):
        morecantile.tms.get("ANotValidName")
def test_register():
    """Test register a new grid."""
    assert len(morecantile.tms.list()) == DEFAULT_GRID_COUNT
    crs = CRS.from_epsg(3031)
    extent = [-948.75, -543592.47, 5817.41, -3333128.95]  # From https://epsg.io/3031
    tms = morecantile.TileMatrixSet.custom(extent, crs, id="MyCustomGrid3031")
    # Make sure we don't update the default tms (frozen set)
    _ = morecantile.tms.register({"MyCustomGrid3031": tms})
    assert len(morecantile.tms.list()) == DEFAULT_GRID_COUNT
    defaults = morecantile.tms.register({"MyCustomGrid3031": tms})
    assert len(defaults.list()) == DEFAULT_GRID_COUNT + 1
    assert "MyCustomGrid3031" in defaults.list()
    # Check it will raise an exception if TMS is already registered
    with pytest.raises(InvalidIdentifier):
        defaults = defaults.register({"MyCustomGrid3031": tms})
    # Do not raise if overwrite=True
    defaults = defaults.register({"MyCustomGrid3031": tms}, overwrite=True)
    assert len(defaults.list()) == DEFAULT_GRID_COUNT + 1
    # add tms in morecantile defaults (not something to do anyway)
    epsg3031 = morecantile.TileMatrixSet.custom(extent, crs, id="epsg3031")
    morecantile.defaults.default_tms["epsg3031"] = epsg3031
    assert len(morecantile.defaults.default_tms.keys()) == DEFAULT_GRID_COUNT + 1
    # make sure updating the default_tms dict has no effect on the default TileMatrixSets
    assert len(morecantile.tms.list()) == DEFAULT_GRID_COUNT
    # Update internal TMS dict
    morecantile.tms.tilematrixsets["MyCustomGrid3031"] = tms
    assert len(morecantile.tms.list()) == DEFAULT_GRID_COUNT + 1
    # make sure it doesn't propagate to the default dict
    assert "MyCustomGrid3031" not in morecantile.defaults.default_tms
def test_TMSproperties():
    """Test TileSchema()."""
    tms = morecantile.tms.get("WebMercatorQuad")
    assert tms.crs._pyproj_crs == CRS.from_epsg(3857)
    assert meters_per_unit(tms.crs._pyproj_crs) == 1.0
    assert tms.minzoom == 0
    assert tms.maxzoom == 24
def test_tile_coordinates():
    """Test coordinates to tile index utils."""
    tms = morecantile.tms.get("WebMercatorQuad")
    assert tms.tile(-179, 85, 5) == (0, 0, 5)
    # Check equivalence between mercantile and morecantile
    assert tms.tile(20.0, 15.0, 5) == mercantile.tile(20.0, 15.0, 5)
@pytest.mark.parametrize(
    "args", [(486, 332, 10), [(486, 332, 10)], [morecantile.Tile(486, 332, 10)]]
)
def test_bounds(args):
    """
    TileMatrixSet.bounds should return the correct coordinates.
    test from https://github.com/mapbox/mercantile/blob/master/tests/test_funcs.py
    """
    expected = (-9.140625, 53.12040528310657, -8.7890625, 53.33087298301705)
    tms = morecantile.tms.get("WebMercatorQuad")
    bbox = tms.bounds(*args)
    for a, b in zip(expected, bbox):
        assert round(a - b, 6) == 0
    # Named attribute access must match positional access.
    assert bbox.left == bbox[0]
    assert bbox.bottom == bbox[1]
    assert bbox.right == bbox[2]
    assert bbox.top == bbox[3]
@pytest.mark.parametrize(
    "args", [(486, 332, 10), [(486, 332, 10)], [morecantile.Tile(486, 332, 10)]]
)
def test_xy_bounds(args):
    """
    TileMatrixSet.xy_bounds should return the correct coordinates.
    test from https://github.com/mapbox/mercantile/blob/master/tests/test_funcs.py
    """
    expected = (
        -1017529.7205322663,
        7005300.768279833,
        -978393.962050256,
        7044436.526761846,
    )
    tms = morecantile.tms.get("WebMercatorQuad")
    bounds = tms.xy_bounds(*args)
    for a, b in zip(expected, bounds):
        assert round(a - b, 6) == 0
def test_ul_tile():
    """
    TileMatrixSet.ul should return the correct coordinates.
    test from https://github.com/mapbox/mercantile/blob/master/tests/test_funcs.py
    """
    tms = morecantile.tms.get("WebMercatorQuad")
    xy = tms.ul(486, 332, 10)
    expected = (-9.140625, 53.33087298301705)
    for a, b in zip(expected, xy):
        assert round(a - b, 6) == 0
def test_projul_tile():
    """
    TileMatrixSet._ul should return the correct coordinates in input projection.
    test from https://github.com/mapbox/mercantile/blob/master/tests/test_funcs.py
    """
    tms = morecantile.tms.get("WebMercatorQuad")
    xy = tms._ul(486, 332, 10)
    expected = (-1017529.7205322663, 7044436.526761846)
    for a, b in zip(expected, xy):
        assert round(a - b, 6) == 0
def test_projtile():
    """TileSchema._tile should return the correct tile."""
    tms = morecantile.tms.get("WebMercatorQuad")
    assert tms._tile(1000, 1000, 1) == morecantile.Tile(1, 0, 1)
def test_feature():
    """TileSchema.feature should create proper geojson feature."""
    tms = morecantile.tms.get("WebMercatorQuad")
    feat = tms.feature(morecantile.Tile(1, 0, 1))
    assert feat["bbox"]
    assert feat["id"]
    assert feat["geometry"]
    assert len(feat["properties"].keys()) == 3
    feat = tms.feature(
        morecantile.Tile(1, 0, 1),
        buffer=-10,
        precision=4,
        fid="1",
        props={"some": "thing"},
    )
    assert feat["bbox"]
    assert feat["id"] == "1"
    assert feat["geometry"]
    assert len(feat["properties"].keys()) == 4
    # projected=True must warn and attach a "crs" member to the feature.
    with pytest.warns(UserWarning):
        feat = tms.feature(
            morecantile.Tile(1, 0, 1), projected=True, fid="1", props={"some": "thing"}
        )
    assert feat["crs"]
    assert feat["bbox"]
    assert feat["id"] == "1"
    assert feat["geometry"]
    assert len(feat["properties"].keys()) == 4
    # These extent coordinates are in EPSG:2056 (CH)
    custom_tms = morecantile.TileMatrixSet.custom(
        [2696082.04374708, 1289407.53195196, 2696210.04374708, 1289535.53195196],
        CRS.from_epsg("2056"),
    )
    assert custom_tms.geographic_crs != CRS.from_epsg(4326)
    # Warn when geographic CRS is not WGS84
    with pytest.warns(UserWarning):
        feat = custom_tms.feature(
            morecantile.Tile(1, 0, 1),
            projected=False,
            geographic_crs=custom_tms.geographic_crs,
        )
    assert feat["crs"]
    # By default we use WGS84 CRS (as per GeoJSON spec)
    with warnings.catch_warnings():
        # Escalate warnings to errors: no warning may be emitted here.
        warnings.simplefilter("error")
        feat = custom_tms.feature(
            morecantile.Tile(1, 0, 1),
            projected=False,
        )
    assert not feat.get("crs")
################################################################################
# replicate mercantile tests
# https://github.com/mapbox/mercantile/blob/master/tests/test_funcs.py
@pytest.mark.parametrize(
    "args", [(486, 332, 10), [(486, 332, 10)], [morecantile.Tile(486, 332, 10)]]
)
def test_ul(args):
    """test args."""
    tms = morecantile.tms.get("WebMercatorQuad")
    expected = (-9.140625, 53.33087298301705)
    lnglat = tms.ul(*args)
    for a, b in zip(expected, lnglat):
        assert round(a - b, 6) == 0
    # Named attribute access must match positional access.
    assert lnglat[0] == lnglat.x
    assert lnglat[1] == lnglat.y
@pytest.mark.parametrize(
    "args", [(486, 332, 10), [(486, 332, 10)], [mercantile.Tile(486, 332, 10)]]
)
def test_bbox(args):
    """test bbox."""
    tms = morecantile.tms.get("WebMercatorQuad")
    expected = (-9.140625, 53.12040528310657, -8.7890625, 53.33087298301705)
    bbox = tms.bounds(*args)
    for a, b in zip(expected, bbox):
        assert round(a - b, 6) == 0
    assert bbox.left == bbox[0]
    assert bbox.bottom == bbox[1]
    assert bbox.right == bbox[2]
    assert bbox.top == bbox[3]
def test_xy_tile():
    """x, y for the 486-332-10 tile is correctly calculated."""
    tms = morecantile.tms.get("WebMercatorQuad")
    ul = tms.ul(486, 332, 10)
    xy = tms.xy(*ul)
    expected = (-1017529.7205322663, 7044436.526761846)
    for a, b in zip(expected, xy):
        assert round(a - b, 6) == 0
def test_xy_null_island():
    """x, y for (0, 0) is correctly calculated"""
    tms = morecantile.tms.get("WebMercatorQuad")
    xy = tms.xy(0.0, 0.0)
    expected = (0.0, 0.0)
    for a, b in zip(expected, xy):
        assert round(a - b, 6) == 0
@pytest.mark.xfail
def test_xy_south_pole():
    """Return -inf for y at South Pole
    Note: mercantile returns (0.0, inf)
    """
    tms = morecantile.tms.get("WebMercatorQuad")
    with pytest.warns(PointOutsideTMSBounds):
        xy = tms.xy(0.0, -90)
        assert xy.x == 0.0
        assert xy.y == float("inf")
@pytest.mark.xfail
def test_xy_north_pole():
    """Return inf for y at North Pole.
    Note: mercantile returns (0.0, -inf)
    """
    tms = morecantile.tms.get("WebMercatorQuad")
    with pytest.warns(PointOutsideTMSBounds):
        xy = tms.xy(0.0, 90)
        assert xy.x == 0.0
        assert xy.y == float("inf")
def test_xy_truncate():
    """Input is truncated"""
    tms = morecantile.tms.get("WebMercatorQuad")
    assert tms.xy(-181.0, 0.0, truncate=True) == tms.xy(tms.bbox.left, 0.0)
@pytest.mark.xfail
def test_lnglat():
    """test lnglat."""
    tms = morecantile.tms.get("WebMercatorQuad")
    # Make sure not warning is raised
    with warnings.catch_warnings():
        warnings.simplefilter("error")
        xy = (-8366731.739810849, -1655181.9927159143)
        lnglat = tms.lnglat(*xy)
        assert round(lnglat.x, 5) == -75.15963
        assert round(lnglat.y, 5) == -14.70462
    with pytest.warns(PointOutsideTMSBounds):
        xy = (-28366731.739810849, -1655181.9927159143)
        lnglat = tms.lnglat(*xy, truncate=True)
        assert round(lnglat.x, 5) == -180.0  # in Mercantile (105.17731 in Morecantile)
        assert round(lnglat.y, 5) == -14.70462  # in Mercantile
@pytest.mark.parametrize("tms_name", morecantile.tms.list())
def test_axis_inverted(tms_name):
    """Test axis inversion check"""
    tms = morecantile.tms.get(tms_name)
    if tms.orderedAxes:
        # The CRS-based and orderedAxes-based checks must agree.
        assert morecantile.models.crs_axis_inverted(
            tms.crs._pyproj_crs
        ) == morecantile.models.ordered_axis_inverted(tms.orderedAxes)
def test_lnglat_gdal3():
    """test lnglat."""
    # PROJ>=7 returns (105.17731317609572, -14.704620000000013)
    tms = morecantile.tms.get("WebMercatorQuad")
    with pytest.warns(PointOutsideTMSBounds):
        xy = (-28366731.739810849, -1655181.9927159143)
        lnglat = tms.lnglat(*xy, truncate=True)
        assert round(lnglat.x, 5) == 105.17731
        assert round(lnglat.y, 5) == -14.70462
def test_lnglat_xy_roundtrip():
    """Test roundtrip."""
    tms = morecantile.tms.get("WebMercatorQuad")
    lnglat = (-105.0844, 40.5853)
    roundtrip = tms.lnglat(*tms.xy(*lnglat))
    for a, b in zip(roundtrip, lnglat):
        assert round(a - b, 6) == 0
@pytest.mark.parametrize(
    "args", [(486, 332, 10), [(486, 332, 10)], [mercantile.Tile(486, 332, 10)]]
)
def test_xy_bounds_mercantile(args):
    """test xy_bounds."""
    tms = morecantile.tms.get("WebMercatorQuad")
    expected = (
        -1017529.7205322663,
        7005300.768279833,
        -978393.962050256,
        7044436.526761846,
    )
    bounds = tms.xy_bounds(*args)
    for a, b in zip(expected, bounds):
        assert round(a - b, 6) == 0
def test_tile_not_truncated():
    """test tile."""
    tms = morecantile.tms.get("WebMercatorQuad")
    tile = tms.tile(20.6852, 40.1222, 9)
    expected = (285, 193)
    assert tile[0] == expected[0]
    assert tile[1] == expected[1]
def test_tile_truncate():
    """Input is truncated"""
    tms = morecantile.tms.get("WebMercatorQuad")
    assert tms.tile(-181.0, 0.0, 9, truncate=True) == tms.tile(-180.0, 0.0, 9)
def test_tiles():
    """Test tiles from bbox."""
    tms = morecantile.tms.get("WebMercatorQuad")
    # replicate mercantile tests
    # https://github.com/mapbox/mercantile/blob/master/tests/test_funcs.py#L115-L178
    bounds = (-105, 39.99, -104.99, 40)
    tiles = list(tms.tiles(*bounds, zooms=[14]))
    expect = [
        morecantile.Tile(x=3413, y=6202, z=14),
        morecantile.Tile(x=3413, y=6203, z=14),
    ]
    assert sorted(tiles) == sorted(expect)
    # Single zoom (zooms may be a scalar, not only a list)
    bounds = (-105, 39.99, -104.99, 40)
    tiles = list(tms.tiles(*bounds, zooms=14))
    expect = [
        morecantile.Tile(x=3413, y=6202, z=14),
        morecantile.Tile(x=3413, y=6203, z=14),
    ]
    assert sorted(tiles) == sorted(expect)
    # Input is truncated
    assert list(tms.tiles(-181.0, 0.0, -170.0, 10.0, zooms=[2], truncate=True)) == list(
        tms.tiles(-180.0, 0.0, -170.0, 10.0, zooms=[2])
    )
    assert list(tms.tiles(-180.0, -90.0, 180.0, 90.0, zooms=[0])) == [
        morecantile.Tile(x=0, y=0, z=0)
    ]
    assert list(tms.tiles(-180.0, -90.0, 180.0, 90.0, zooms=[0], truncate=True)) == [
        morecantile.Tile(x=0, y=0, z=0)
    ]
    # Antimeridian-crossing bounding boxes are handled
    bounds = (175.0, 5.0, -175.0, 10.0)
    assert len(list(tms.tiles(*bounds, zooms=[2]))) == 2
@pytest.mark.parametrize(
    ("bounds", "expected", "crs", "tms_bbox"),
    [
        # case where east tms bbox crosses antimeridian
        (
            (119.1, -32.86, 119.2, -32.82),
            6,
            32750,
            (100.23646734667152, -79.99407435445299, -158.6052850376368, 0.0),
        ),
        # case where west tms bbox crosses antimeridian
        (
            (11.700978, 52.056474, 11.711114, 52.062706),
            4,
            32632,
            (-17.582877658817317, 0.0, 95.87417095917766, 83.95429547980198),
        ),
    ],
)
def test_tiles_when_tms_bounds_and_provided_bounds_cross_antimeridian(
    bounds: tuple, expected: int, crs: int, tms_bbox: tuple
):
    """tms.tiles should return the expected tile count when the TMS bbox crosses the antimeridian."""
    utm = CRS.from_epsg(crs)
    rs_extent = utm.area_of_use.bounds
    tms = morecantile.TileMatrixSet.custom(
        crs=utm, extent_crs=CRS.from_epsg(4326), extent=list(rs_extent)
    )
    # tms.tiles needs to be aware if tms bounds and input bounds crosses the
    # antimeridian e.g. min(119.2, -158.605) clamps to much larger area. Now
    # that we check to see if lons contain antimeridian, we build tiles that
    # actually overlap the provided bounds to tiles.
    assert tms.bbox == tms_bbox
    for a, b in zip(tms.bbox, tms_bbox):
        assert round(a - b, 6) == 0
    assert len(list(tms.tiles(*bounds, zooms=11))) == expected
def test_tiles_for_tms_with_non_standard_row_col_order():
    """Test tiles from bbox when TMS has non-standard row/col alignment with lat/lon."""
    crs = CRS.from_proj4(
        "+proj=s2 +lat_0=0.0 +lon_0=-90.0 +ellps=WGS84 +UVtoST=quadratic"
    )
    extent = [0.0, 0.0, 1.0, 1.0]
    s2f4 = morecantile.TileMatrixSet.custom(extent, crs, id="S2F4")
    overlapping_tiles = s2f4.tiles(-100, 27, -95, 33, [6])
    assert len(list(overlapping_tiles)) == 30
def test_global_tiles_clamped():
    """Y is clamped to (0, 2 ** zoom - 1)."""
    tms = morecantile.tms.get("WebMercatorQuad")
    tiles = list(tms.tiles(-180, -90, 180, 90, [1]))
    assert len(tiles) == 4
    assert min(t.y for t in tiles) == 0
    assert max(t.y for t in tiles) == 1
def test_tiles_roundtrip_children():
    """tiles(bounds(tile)) gives the tile's children"""
    tms = morecantile.tms.get("WebMercatorQuad")
    t = morecantile.Tile(x=3413, y=6202, z=14)
    res = list(tms.tiles(*tms.bounds(t), zooms=[15]))
    assert len(res) == 4
@pytest.mark.parametrize(
    "t",
    [
        morecantile.Tile(x=3413, y=6202, z=14),
        morecantile.Tile(486, 332, 10),
        morecantile.Tile(10, 10, 10),
    ],
)
def test_tiles_roundtrip(t):
    """Tiles(bounds(tile)) gives the tile."""
    tms = morecantile.tms.get("WebMercatorQuad")
    res = list(tms.tiles(*tms.bounds(t), zooms=[t.z]))
    assert len(res) == 1
    val = res.pop()
    assert val.x == t.x
    assert val.y == t.y
    assert val.z == t.z
def test_tiles_nan_bounds():
    """
    nan bounds should raise an error instead of getting clamped to avoid
    unintentionally generating tiles for the entire TMS' extent.
    """
    tms = morecantile.tms.get("WebMercatorQuad")
    bounds = (-105, math.nan, -104.99, 40)
    with pytest.raises(ValueError):
        list(tms.tiles(*bounds, zooms=[14]))
def test_extend_zoom():
    """TileMatrixSet.ul should return the correct coordinates."""
    tms = morecantile.tms.get("WebMercatorQuad")
    # Zooms beyond the TMS maxzoom (24) must match mercantile but warn.
    merc = mercantile.xy_bounds(1000, 1000, 25)
    with pytest.warns(UserWarning):
        more = tms.xy_bounds(1000, 1000, 25)
    for a, b in zip(more, merc):
        assert round(a - b, 6) == 0
    merc = mercantile.xy_bounds(2000, 2000, 26)
    with pytest.warns(UserWarning):
        more = tms.xy_bounds(2000, 2000, 26)
    for a, b in zip(more, merc):
        assert round(a - b, 6) == 0
    merc = mercantile.xy_bounds(2000, 2000, 27)
    with pytest.warns(UserWarning):
        more = tms.xy_bounds(2000, 2000, 27)
    for a, b in zip(more, merc):
        assert round(a - b, 6) == 0
    merc = mercantile.xy_bounds(2000, 2000, 30)
    with pytest.warns(UserWarning):
        more = tms.xy_bounds(2000, 2000, 30)
    for a, b in zip(more, merc):
        assert round(a - b, 6) == 0
def test_is_power_of_two():
    """is power of 2?"""
    assert is_power_of_two(8)
    assert not is_power_of_two(7)
@pytest.mark.parametrize(
    "t,res",
    [
        # X Y Z
        (morecantile.Tile(0, 0, 0), True),
        # zoom 0 has only tile 0,0,0 valid
        (morecantile.Tile(1, 0, 0), False),
        # MinZoom is 0
        (morecantile.Tile(0, 0, -1), False),
        # MaxZoom is 24
        (morecantile.Tile(0, 0, 24), True),
        (morecantile.Tile(0, 0, 25), False),
        # Negative X
        (morecantile.Tile(-1, 0, 1), False),
        # Negative Y
        (morecantile.Tile(0, -1, 1), False),
    ],
)
def test_is_valid_tile(t, res):
    """test if tile are valid."""
    tms = morecantile.tms.get("WebMercatorQuad")
    assert tms.is_valid(t) == res
def test_is_valid_overzoom():
    """test if tile are valid."""
    tms = morecantile.tms.get("WebMercatorQuad")
    t = morecantile.Tile(0, 0, 25)
    # An overzoomed tile is accepted unless strict=True.
    assert tms.is_valid(t, strict=False)
    assert not tms.is_valid(t, strict=True)
    tms = morecantile.tms.get("GNOSISGlobalGrid")
    t = morecantile.Tile(0, 0, 28)
    assert tms.is_valid(t, strict=False)
    t = morecantile.Tile(0, 0, 29)
    assert not tms.is_valid(t, strict=False)
    # We can't overzoom VariableMatrixWidth TMS
    t = morecantile.Tile(0, 0, 29)
    assert not tms.is_valid(t)
def test_neighbors():
    """test neighbors."""
    tms = morecantile.tms.get("WebMercatorQuad")
    x, y, z = 243, 166, 9
    tiles = tms.neighbors(x, y, z)
    # An interior tile has all 8 surrounding neighbors at the same zoom.
    assert len(tiles) == 8
    assert all(t.z == z for t in tiles)
    assert all(t.x - x in (-1, 0, 1) for t in tiles)
    assert all(t.y - y in (-1, 0, 1) for t in tiles)
def test_neighbors_invalid():
    """test neighbors."""
    tms = morecantile.tms.get("WebMercatorQuad")
    x, y, z = 0, 166, 9
    tiles = tms.neighbors(x, y, z)
    assert len(tiles) == 8 - 3  # no top-left, left, bottom-left
    assert all(t.z == z for t in tiles)
    assert all(t.x - x in (-1, 0, 1) for t in tiles)
    assert all(t.y - y in (-1, 0, 1) for t in tiles)
def test_root_neighbors_invalid():
    """test neighbors."""
    tms = morecantile.tms.get("WebMercatorQuad")
    x, y, z = 0, 0, 0
    tiles = tms.neighbors(x, y, z)
    assert len(tiles) == 0  # root tile has no neighbors
def test_parent():
    """test parent"""
    tms = morecantile.tms.get("WebMercatorQuad")
    parent = tms.parent(486, 332, 10)
    assert parent[0] == morecantile.Tile(243, 166, 9)
    # Target zoom above the tile's zoom is invalid.
    with pytest.raises(InvalidZoomError):
        tms.parent(486, 332, 10, zoom=11)
    # The root tile has no parent.
    assert tms.parent(0, 0, 0) == []
def test_parent_multi():
    """test parent"""
    tms = morecantile.tms.get("WebMercatorQuad")
    parent = tms.parent(486, 332, 10, zoom=8)
    assert parent[0] == morecantile.Tile(121, 83, 8)
def test_children():
    """test children."""
    tms = morecantile.tms.get("WebMercatorQuad")
    x, y, z = 243, 166, 9
    children = tms.children(x, y, z)
    # One zoom level down: exactly 4 children.
    assert len(children) == 4
    assert morecantile.Tile(2 * x, 2 * y, z + 1) in children
    assert morecantile.Tile(2 * x + 1, 2 * y, z + 1) in children
    assert morecantile.Tile(2 * x + 1, 2 * y + 1, z + 1) in children
    assert morecantile.Tile(2 * x, 2 * y + 1, z + 1) in children
def test_children_multi():
    """test children multizoom."""
    tms = morecantile.tms.get("WebMercatorQuad")
    children = tms.children(243, 166, 9, zoom=11)
    # Two zoom levels down: 4**2 = 16 children.
    assert len(children) == 16
    targets = [
        (972, 664, 11),
        (973, 664, 11),
        (973, 665, 11),
        (972, 665, 11),
        (974, 664, 11),
        (975, 664, 11),
        (975, 665, 11),
        (974, 665, 11),
        (974, 666, 11),
        (975, 666, 11),
        (975, 667, 11),
        (974, 667, 11),
        (972, 666, 11),
        (973, 666, 11),
        (973, 667, 11),
        (972, 667, 11),
    ]
    for target in targets:
        assert target in children
def test_children_invalid_zoom():
    """invalid zoom."""
    tms = morecantile.tms.get("WebMercatorQuad")
    # Target zoom below the tile's zoom is invalid for children().
    with pytest.raises(InvalidZoomError):
        tms.children(243, 166, 9, zoom=8)
    with pytest.raises(InvalidZoomError):
        tms.children((243, 166, 9), zoom=8)
| python | MIT | b6c697b13d56470840e45eaf8cc22c90ee60fd62 | 2026-01-05T07:14:04.097493Z | false |
developmentseed/morecantile | https://github.com/developmentseed/morecantile/blob/b6c697b13d56470840e45eaf8cc22c90ee60fd62/tests/test_utils.py | tests/test_utils.py | """test morecantile utils."""
import math
import pytest
from pyproj import CRS
from morecantile import utils
@pytest.mark.parametrize(
    "crs,unit",
    [
        (CRS.from_epsg(4326), 2 * math.pi * 6378137 / 360.0),
        (CRS.from_epsg(3857), 1.0),
        (CRS.from_epsg(2276), 0.30480060960121924),
        (CRS.from_epsg(2222), 0.3048),
        # Mars in Meter
        (
            CRS.from_proj4(
                "+proj=tmerc +lat_0=17 +lon_0=76.5 +k=0.9996 +x_0=0 +y_0=0 +a=3396190 +b=3376200 +units=m +no_defs"
            ),
            1.0,
        ),
        # Mars in Degrees
        # proj4 from https://github.com/AndrewAnnex/planetcantile/blob/5ea2577f5dc4a3bc91b0443ef0633a5f89b15e03/planetcantile/data/generate.py#L45-L47
        (
            CRS.from_proj4("+proj=longlat +R=3396190 +no_defs +type=crs"),
            2 * math.pi * 3396190 / 360.0,
        ),
    ],
)
def test_mpu(crs, unit):
    """test meters_per_unit."""
    assert utils.meters_per_unit(crs) == unit
@pytest.mark.parametrize(
    "lon1, lon2, contains", [(-180, 180, False), (179, -179, True)]
)
def test_lons_contain_antimeridian(lon1: float, lon2: float, contains: bool):
    """lons_contain_antimeridian should detect lon ranges crossing the antimeridian."""
    assert utils.lons_contain_antimeridian(lon1, lon2) == contains
| python | MIT | b6c697b13d56470840e45eaf8cc22c90ee60fd62 | 2026-01-05T07:14:04.097493Z | false |
developmentseed/morecantile | https://github.com/developmentseed/morecantile/blob/b6c697b13d56470840e45eaf8cc22c90ee60fd62/tests/test_mercantile_conform.py | tests/test_mercantile_conform.py | """Test Conformance with Mercantile."""
from random import sample
import mercantile
import pytest
import morecantile
# Shared TMS used by all the conformance tests below.
tms = morecantile.tms.get("WebMercatorQuad")
@pytest.mark.parametrize("zoom", range(0, 20))
def test_get_tile(zoom: int):
    """Make sure mercantile and morecantile returns the same thing."""
    tile = mercantile.tile(-10, 10, zoom=zoom)
    morecantile_tile = tms.tile(-10, 10, zoom=zoom)
    assert tile == morecantile_tile
@pytest.mark.parametrize("zoom", range(0, 20))
def test_bounds(zoom: int):
    """Make sure mercantile and morecantile returns the same thing."""
    # get random x,y index
    x = sample(range(0, tms.matrix(zoom).matrixWidth), 1)[0]
    y = sample(range(0, tms.matrix(zoom).matrixHeight), 1)[0]
    for a, b in zip(
        mercantile.xy_bounds(x, y, zoom), tms.xy_bounds(morecantile.Tile(x, y, zoom))
    ):
        assert round(a - b, 6) == 0
| python | MIT | b6c697b13d56470840e45eaf8cc22c90ee60fd62 | 2026-01-05T07:14:04.097493Z | false |
developmentseed/morecantile | https://github.com/developmentseed/morecantile/blob/b6c697b13d56470840e45eaf8cc22c90ee60fd62/tests/test_cli.py | tests/test_cli.py | """Tests of the morecantile CLI"""
import json
import pytest
from click.testing import CliRunner
from morecantile.scripts.cli import cli
def test_cli_shapes():
    """
    Test shapes.
    From https://github.com/mapbox/mercantile/blob/master/tests/test_cli.py
    """
    runner = CliRunner()
    result = runner.invoke(cli, ["shapes", "--precision", "6"], "[106, 193, 9]")
    assert result.exit_code == 0
    assert (
        result.output
        == '{"bbox": [-105.46875, 39.909736, -104.765625, 40.446947], "geometry": {"coordinates": [[[-105.46875, 39.909736], [-105.46875, 40.446947], [-104.765625, 40.446947], [-104.765625, 39.909736], [-105.46875, 39.909736]]], "type": "Polygon"}, "id": "(106, 193, 9)", "properties": {"title": "XYZ tile (106, 193, 9)", "tms": "WebMercatorQuad", "tms_crs": "http://www.opengis.net/def/crs/EPSG/0/3857"}, "type": "Feature"}\n'
    )
    # --geographic is the default output CRS, so output is identical.
    result = runner.invoke(
        cli, ["shapes", "--precision", "6", "--geographic"], "[106, 193, 9]"
    )
    assert result.exit_code == 0
    assert (
        result.output
        == '{"bbox": [-105.46875, 39.909736, -104.765625, 40.446947], "geometry": {"coordinates": [[[-105.46875, 39.909736], [-105.46875, 40.446947], [-104.765625, 40.446947], [-104.765625, 39.909736], [-105.46875, 39.909736]]], "type": "Polygon"}, "id": "(106, 193, 9)", "properties": {"title": "XYZ tile (106, 193, 9)", "tms": "WebMercatorQuad", "tms_crs": "http://www.opengis.net/def/crs/EPSG/0/3857"}, "type": "Feature"}\n'
    )
    # With TMS's CRS
    with pytest.warns(UserWarning):
        result = runner.invoke(
            cli, ["shapes", "--precision", "6", "--projected"], "[106, 193, 9]"
        )
    assert result.exit_code == 0
    feature = json.loads(result.output)
    assert feature["crs"]
    # geographic CRS (non WGS84)
    with pytest.warns(UserWarning):
        result = runner.invoke(
            cli, ["shapes", "--precision", "6", "--crs", "epsg:4150"], "[106, 193, 9]"
        )
    assert result.exit_code == 0
    feature = json.loads(result.output)
    assert feature["crs"]
    # tile as arg
    result = runner.invoke(cli, ["shapes", "[106, 193, 9]", "--precision", "6"])
    assert result.exit_code == 0
    assert (
        result.output
        == '{"bbox": [-105.46875, 39.909736, -104.765625, 40.446947], "geometry": {"coordinates": [[[-105.46875, 39.909736], [-105.46875, 40.446947], [-104.765625, 40.446947], [-104.765625, 39.909736], [-105.46875, 39.909736]]], "type": "Polygon"}, "id": "(106, 193, 9)", "properties": {"title": "XYZ tile (106, 193, 9)", "tms": "WebMercatorQuad", "tms_crs": "http://www.opengis.net/def/crs/EPSG/0/3857"}, "type": "Feature"}\n'
    )
    # buffer
    result = runner.invoke(
        cli, ["shapes", "[106, 193, 9]", "--buffer", "1.0", "--precision", "6"]
    )
    assert result.exit_code == 0
    assert (
        result.output
        == '{"bbox": [-106.46875, 38.909736, -103.765625, 41.446947], "geometry": {"coordinates": [[[-106.46875, 38.909736], [-106.46875, 41.446947], [-103.765625, 41.446947], [-103.765625, 38.909736], [-106.46875, 38.909736]]], "type": "Polygon"}, "id": "(106, 193, 9)", "properties": {"title": "XYZ tile (106, 193, 9)", "tms": "WebMercatorQuad", "tms_crs": "http://www.opengis.net/def/crs/EPSG/0/3857"}, "type": "Feature"}\n'
    )
    # Output is compact
    result = runner.invoke(cli, ["shapes", "--compact"], "[106, 193, 9]")
    assert result.exit_code == 0
    assert '"type":"Feature"' in result.output.strip()
    # Output is indented
    result = runner.invoke(cli, ["shapes", "--indent", "8"], "[106, 193, 9]")
    assert result.exit_code == 0
    assert '        "type": "Feature"' in result.output.strip()
    # Shapes are collected into a feature collection
    result = runner.invoke(cli, ["shapes", "--collect", "--feature"], "[106, 193, 9]")
    assert result.exit_code == 0
    assert "FeatureCollection" in result.output
    # geojson is in WebMercator Projection
    with pytest.warns(UserWarning):
        result = runner.invoke(
            cli,
            ["shapes", "[106, 193, 9]", "--extents", "--projected", "--precision", "3"],
        )
    assert result.exit_code == 0
    assert result.output == "-11740727.545 4852834.052 -11662456.028 4931105.569\n"
    with pytest.warns(UserWarning):
        # JSON text sequences of bboxes are output.
        result = runner.invoke(
            cli,
            [
                "shapes",
                "[106, 193, 9]",
                "--seq",
                "--bbox",
                "--projected",
                "--precision",
                "3",
            ],
        )
    assert result.exit_code == 0
    # \x1e is the RS record separator used by RFC 7464 JSON sequences.
    assert (
        result.output
        == "\x1e\n[-11740727.545, 4852834.052, -11662456.028, 4931105.569]\n"
    )
    # shapes_props_fid
    result = runner.invoke(
        cli,
        [
            "shapes",
            '{"tile": [106, 193, 9], "properties": {"title": "foo"}, "id": "42"}',
        ],
    )
    assert result.exit_code == 0
    assert '"title": "foo"' in result.output
    assert '"id": "42"' in result.output
def test_cli_shapesWGS84():
    """Test shapes with other projection."""
    runner = CliRunner()
    result = runner.invoke(
        cli,
        ["shapes", "--precision", "6", "--identifier", "WorldMercatorWGS84Quad"],
        "[106, 193, 9]",
    )
    assert result.exit_code == 0
    assert (
        result.output
        == '{"bbox": [-105.46875, 40.099155, -104.765625, 40.636956], "geometry": {"coordinates": [[[-105.46875, 40.099155], [-105.46875, 40.636956], [-104.765625, 40.636956], [-104.765625, 40.099155], [-105.46875, 40.099155]]], "type": "Polygon"}, "id": "(106, 193, 9)", "properties": {"title": "XYZ tile (106, 193, 9)", "tms": "WorldMercatorWGS84Quad", "tms_crs": "http://www.opengis.net/def/crs/EPSG/0/3395"}, "type": "Feature"}\n'
    )
def test_cli_tiles_ok():
    """Test tile with correct bounds."""
    runner = CliRunner()
    result = runner.invoke(cli, ["tiles", "14"], "[-105, 39.99, -104.99, 40]")
    assert result.exit_code == 0
    assert result.output == "[3413, 6202, 14]\n[3413, 6203, 14]\n"
def test_cli_tiles_bad_bounds():
    """Bounds of len 3 are bad."""
    runner = CliRunner()
    result = runner.invoke(cli, ["tiles", "14"], "[-105, 39.99, -104.99]")
    # exit code 2 is click's usage-error code.
    assert result.exit_code == 2
def test_cli_tiles_multi_bounds():
    """A LF-delimited sequence can be used as input."""
    runner = CliRunner()
    result = runner.invoke(
        cli, ["tiles", "14"], "[-105, 39.99, -104.99, 40]\n[-105, 39.99, -104.99, 40]"
    )
    assert result.exit_code == 0
    assert len(result.output.strip().split("\n")) == 4
def test_cli_tiles_multi_bounds_seq():
    """A JSON text sequence can be used as input."""
    runner = CliRunner()
    # \x1e is the RS record separator used by RFC 7464 JSON sequences.
    result = runner.invoke(
        cli,
        ["tiles", "14"],
        "\x1e\n[-105, 39.99, -104.99, 40]\n\x1e\n[-105, 39.99, -104.99, 40]",
    )
    assert result.exit_code == 0
    assert len(result.output.strip().split("\n")) == 4
def test_cli_tiles_implicit_stdin():
    """stdin."""
    runner = CliRunner()
    result = runner.invoke(cli, ["tiles", "14"], "[-105, 39.99, -104.99, 40]")
    assert result.exit_code == 0
    assert result.output == "[3413, 6202, 14]\n[3413, 6203, 14]\n"
def test_cli_tiles_arg():
    """tiles arg."""
    runner = CliRunner()
    result = runner.invoke(cli, ["tiles", "14", "[-105, 39.99, -104.99, 40]"])
    assert result.exit_code == 0
    assert result.output == "[3413, 6202, 14]\n[3413, 6203, 14]\n"
def test_cli_tiles_geosjon():
    """Geojson input."""
    # NOTE(review): function name has a typo ("geosjon" -> "geojson");
    # kept as-is to avoid churning test identifiers.
    collection = '{"features": [{"geometry": {"coordinates": [[[-105.46875, 39.909736], [-105.46875, 40.446947], [-104.765625, 40.446947], [-104.765625, 39.909736], [-105.46875, 39.909736]]], "type": "Polygon"}, "id": "(106, 193, 9)", "properties": {"title": "XYZ tile (106, 193, 9)"}, "type": "Feature"}], "type": "FeatureCollection"}'
    runner = CliRunner()
    result = runner.invoke(cli, ["tiles", "9"], collection)
    assert result.exit_code == 0
    assert result.output == "[106, 193, 9]\n[106, 194, 9]\n"
def test_cli_strict_overlap_contain():
    """Input from shapes."""
    runner = CliRunner()
    result1 = runner.invoke(cli, ["shapes"], "[2331,1185,12]")
    assert result1.exit_code == 0
    result2 = runner.invoke(cli, ["tiles", "12"], result1.output)
    assert result2.exit_code == 0
    assert result2.output == "[2331, 1185, 12]\n"
def test_cli_tiles_seq():
    """return a sequence of tiles."""
    runner = CliRunner()
    result = runner.invoke(cli, ["tiles", "14", "--seq"], "[14.0859, 5.798]")
    assert result.exit_code == 0
    assert result.output == "\x1e\n[8833, 7927, 14]\n"
def test_cli_tiles_points():
    """Create tile from a point."""
    runner = CliRunner()
    result = runner.invoke(cli, ["tiles", "14"], "[14.0859, 5.798]")
    assert result.exit_code == 0
    assert result.output == "[8833, 7927, 14]\n"
    result = runner.invoke(
        cli, ["tiles", "14"], '{"type":"geometry","coordinates":[14.0859, 5.798]}'
    )
    assert result.exit_code == 0
    assert result.output == "[8833, 7927, 14]\n"
| python | MIT | b6c697b13d56470840e45eaf8cc22c90ee60fd62 | 2026-01-05T07:14:04.097493Z | false |
developmentseed/morecantile | https://github.com/developmentseed/morecantile/blob/b6c697b13d56470840e45eaf8cc22c90ee60fd62/tests/test_models.py | tests/test_models.py | """Test TileMatrixSet model."""
import json
import os
import random
from collections.abc import Iterable
import pyproj
import pytest
from pydantic import ValidationError
import morecantile
from morecantile.commons import Tile
from morecantile.errors import InvalidIdentifier
from morecantile.models import CRS, CRSWKT, CRSUri, TileMatrix, TileMatrixSet
data_dir = os.path.join(os.path.dirname(__file__), "../morecantile/data")
tilesets = [
os.path.join(data_dir, f) for f in os.listdir(data_dir) if f.endswith(".json")
]
tms_v1_dir = os.path.join(os.path.dirname(__file__), "fixtures", "v1_tms")
@pytest.mark.parametrize("tileset", tilesets)
def test_tile_matrix_set(tileset):
"""Load TileMatrixSet in models."""
# Confirm model validation is working
with open(tileset, "r") as f:
ts = TileMatrixSet.model_validate_json(f.read())
# This would fail if `crs` isn't supported by PROJ
assert isinstance(ts.crs._pyproj_crs, pyproj.CRS)
assert isinstance(ts.geographic_crs, pyproj.CRS)
assert repr(ts)
@pytest.mark.parametrize("tileset", tilesets)
def test_geographic_crs_bbox(tileset):
"""check that geographic bounds are correct."""
with open(tileset, "r") as f:
ts = TileMatrixSet.model_validate_json(f.read())
if not pyproj.CRS.from_epsg(4326) == ts.geographic_crs:
_to_geographic = pyproj.Transformer.from_crs(
ts.crs._pyproj_crs, pyproj.CRS.from_epsg(4326), always_xy=True
)
bbox = _to_geographic.transform_bounds(*ts.xy_bbox, densify_pts=21)
assert bbox == ts.bbox
def test_tile_matrix_iter():
"""Test iterator"""
tms = morecantile.tms.get("WebMercatorQuad")
assert isinstance(tms, Iterable)
for matrix in tms:
assert isinstance(matrix, TileMatrix)
def test_tile_matrix_order():
"""Test matrix order"""
tms = morecantile.tms.get("WebMercatorQuad")
matrices = tms.tileMatrices[:]
random.shuffle(matrices)
tms_ordered = TileMatrixSet(
title=tms.title,
id=tms.id,
crs=tms.crs,
tileMatrices=matrices,
)
# Confirm sort
assert [matrix.id for matrix in tms.tileMatrices] == [
matrix.id for matrix in tms_ordered.tileMatrices
]
# Confirm sort direction
assert int(tms_ordered.tileMatrices[-1].id) > int(tms_ordered.tileMatrices[0].id)
def test_invalid_tms():
"""should raise an error when tms name is not found."""
with pytest.raises(InvalidIdentifier):
morecantile.tms.get("ANotValidName")
@pytest.mark.parametrize(
"name,result",
[
("LINZAntarticaMapTilegrid", False),
("EuropeanETRS89_LAEAQuad", True),
("CanadianNAD83_LCC", False),
("UPSArcticWGS84Quad", True),
("NZTM2000Quad", True),
("UTM31WGS84Quad", False),
("UPSAntarcticWGS84Quad", True),
("WorldMercatorWGS84Quad", True),
("WGS1984Quad", False),
("WorldCRS84Quad", False),
("WebMercatorQuad", True),
("CDB1GlobalGrid", False),
("GNOSISGlobalGrid", False),
],
)
def test_quadkey_support(name, result):
"""test for Quadkey support."""
tms = morecantile.tms.get(name)
assert tms.is_quadtree == result
def test_quadkey():
"""Test tile to quadkey."""
tms = morecantile.tms.get("WebMercatorQuad")
expected = "0313102310"
assert tms.quadkey(486, 332, 10) == expected
def test_quadkey_to_tile():
"""Test quadkey to tile."""
tms = morecantile.tms.get("WebMercatorQuad")
qk = "0313102310"
expected = Tile(486, 332, 10)
assert tms.quadkey_to_tile(qk) == expected
def test_empty_quadkey_to_tile():
"""Empty qk should give tile 0,0,0."""
tms = morecantile.tms.get("WebMercatorQuad")
qk = ""
expected = Tile(0, 0, 0)
assert tms.quadkey_to_tile(qk) == expected
def test_quadkey_failure():
"""makde sure we don't support stupid quadkeys."""
tms = morecantile.tms.get("WebMercatorQuad")
with pytest.raises(morecantile.errors.QuadKeyError):
tms.quadkey_to_tile("lolwut")
def test_findMatrix():
"""Should raise an error when TileMatrix is not found."""
tms = morecantile.tms.get("WebMercatorQuad")
m = tms.matrix(0)
assert m.id == "0"
with pytest.warns(UserWarning):
tms.matrix(26)
def test_Custom():
"""Create custom TMS grid."""
tms = morecantile.tms.get("WebMercatorQuad")
# Web Mercator Extent
extent = (-20037508.3427892, -20037508.3427892, 20037508.3427892, 20037508.3427892)
custom_tms = TileMatrixSet.custom(extent, pyproj.CRS.from_epsg(3857))
assert tms.tile(20.0, 15.0, 5) == custom_tms.tile(20.0, 15.0, 5)
wmMat = tms.matrix(5)
cusMat = custom_tms.matrix(5)
assert wmMat.matrixWidth == cusMat.matrixWidth
assert wmMat.matrixHeight == cusMat.matrixHeight
assert round(wmMat.scaleDenominator, 6) == round(cusMat.scaleDenominator, 6)
assert round(wmMat.pointOfOrigin[0], 6) == round(cusMat.pointOfOrigin[0], 6)
extent = (-180.0, -85.051128779806, 180.0, 85.051128779806)
custom_tms = TileMatrixSet.custom(
extent, pyproj.CRS.from_epsg(3857), extent_crs=pyproj.CRS.from_epsg(4326)
)
assert tms.tile(20.0, 15.0, 5) == custom_tms.tile(20.0, 15.0, 5)
wmMat = tms.matrix(5)
cusMat = custom_tms.matrix(5)
assert wmMat.matrixWidth == cusMat.matrixWidth
assert wmMat.matrixHeight == cusMat.matrixHeight
assert round(wmMat.scaleDenominator, 6) == round(cusMat.scaleDenominator, 6)
assert round(wmMat.pointOfOrigin[0], 6) == round(cusMat.pointOfOrigin[0], 6)
extent = (-20037508.3427892, -20037508.3427892, 20037508.3427892, 20037508.3427892)
custom_tms = TileMatrixSet.custom(extent, pyproj.CRS.from_epsg(3857))
assert isinstance(custom_tms.geographic_crs, pyproj.CRS)
assert custom_tms.geographic_crs == pyproj.CRS.from_epsg(4326)
extent = (-20037508.3427892, -20037508.3427892, 20037508.3427892, 20037508.3427892)
custom_tms = TileMatrixSet.custom(extent, pyproj.CRS.from_epsg(3857))
assert isinstance(custom_tms.geographic_crs, pyproj.CRS)
def test_custom_tms_bounds_epsg4326():
"""Check bounds with epsg4326."""
custom_tms = TileMatrixSet.custom((-120, 30, -110, 40), pyproj.CRS.from_epsg(4326))
assert custom_tms.xy_bbox == (-120, 30, -110, 40)
assert custom_tms.bbox == (-120, 30, -110, 40)
assert custom_tms.xy_bounds(0, 0, 0) == (-120, 30, -110, 40)
assert custom_tms.bounds(0, 0, 0) == (-120, 30, -110, 40)
# When using `from_user_input`, `morecantile.models.crs_axis_inverted` should return the valid result.
def test_custom_tms_bounds_user_crs():
"""Check bounds with epsg4326."""
custom_tms = TileMatrixSet.custom(
(-120, 30, -110, 40),
pyproj.CRS.from_epsg(4326),
)
assert custom_tms.xy_bbox == (-120, 30, -110, 40)
assert custom_tms.bbox == (-120, 30, -110, 40)
assert custom_tms.xy_bounds(0, 0, 0) == (-120, 30, -110, 40)
assert custom_tms.bounds(0, 0, 0) == (-120, 30, -110, 40)
def test_custom_tms_decimation():
"""Check bounds with epsg6342 and custom decimation base."""
extent = (238170, 4334121, 377264, 4473215)
left, bottom, right, top = extent
for decimation_base in [2, 3, 4, 5]:
custom_tms = TileMatrixSet.custom(
extent,
pyproj.CRS.from_epsg(6342),
decimation_base=decimation_base,
)
if decimation_base == 2:
assert custom_tms.is_quadtree
else:
assert not custom_tms.is_quadtree
for zoom in [0, 1, 2, 3]:
tile_width = (right - left) / decimation_base**zoom
tile_height = (top - bottom) / decimation_base**zoom
expected = (left, top - tile_height, left + tile_width, top)
tile_bounds = custom_tms.xy_bounds(0, 0, zoom)
for a, b in zip(expected, tile_bounds):
assert round(a - b, 4) == 0
def test_nztm_quad_is_quad():
"""Test NZTM2000Quad."""
tms = morecantile.tms.get("NZTM2000Quad")
bound = tms.xy_bounds(morecantile.Tile(0, 0, 0))
expected = (-3260586.7284, 419435.9938, 6758167.443, 10438190.1652)
for a, b in zip(expected, bound):
assert round(a - b, 4) == 0
# NZTM2000Quad should use all the WebMercatorQuad zoom scales
def test_nztm_quad_scales():
"""Test NZTM2000Quad."""
nztm_tms = morecantile.tms.get("NZTM2000Quad")
google_tms = morecantile.tms.get("WebMercatorQuad")
for z in range(2, nztm_tms.maxzoom + 2):
assert (
round(
google_tms.matrix(z).scaleDenominator
- nztm_tms.matrix(z - 2).scaleDenominator,
4,
)
== 0
)
def test_InvertedLatLonGrids():
"""Check Inverted LatLon grids."""
tms = morecantile.tms.get("LINZAntarticaMapTilegrid")
assert tms.xy_bbox == (
-918457.73,
-22441670.269999996,
28441670.269999996,
6918457.73,
)
def test_zoom_for_res():
"""Get TMS zoom level corresponding to a specific resolution."""
tms = morecantile.tms.get("WebMercatorQuad")
# native resolution of zoom 7 is 1222.9924525628178
# native resolution of zoom 8 is 611.4962262814075
assert tms.zoom_for_res(612.0) == 8
assert tms.zoom_for_res(612.0, zoom_level_strategy="lower") == 7
assert tms.zoom_for_res(612.0, zoom_level_strategy="upper") == 8
assert tms.zoom_for_res(610.0) == 8
# native resolution of zoom 24 is 0.009330691929342784
assert tms.zoom_for_res(0.0001) == 24
# theoritical resolution of zoom 25 is 0.004665345964671392
with pytest.warns(UserWarning):
assert tms.zoom_for_res(0.0001, max_z=25) == 25
# minzoom greater than 0
crs = pyproj.CRS.from_epsg(3857)
extent = [-20026376.39, -20048966.10, 20026376.39, 20048966.10]
tms = morecantile.TileMatrixSet.custom(
extent, crs, id="MyCustomTmsEPSG3857", minzoom=6
)
assert tms.zoom_for_res(10) == 14
assert tms.zoom_for_res(5000) == 6
def test_schema():
"""Translate Model to Schema."""
tms = morecantile.tms.get("WebMercatorQuad")
assert tms.model_json_schema()
assert tms.model_dump_json(exclude_none=True)
assert tms.model_dump(exclude_none=True)
crs = pyproj.CRS.from_proj4(
"+proj=stere +lat_0=90 +lon_0=0 +k=2 +x_0=0 +y_0=0 +R=3396190 +units=m +no_defs"
)
extent = [-13584760.000, -13585240.000, 13585240.000, 13584760.000]
tms = morecantile.TileMatrixSet.custom(extent, crs, id="MarsNPolek2MOLA5k")
assert tms.model_json_schema()
assert tms.model_dump(exclude_none=True)
json_doc = json.loads(tms.model_dump_json(exclude_none=True))
assert json_doc["crs"] == "http://www.opengis.net/def/crs/IAU/2015/49930"
crs = pyproj.CRS.from_epsg(3031)
extent = [-948.75, -543592.47, 5817.41, -3333128.95] # From https:///epsg.io/3031
tms = morecantile.TileMatrixSet.custom(extent, crs, id="MyCustomTmsEPSG3031")
assert tms.model_json_schema()
assert tms.model_dump_json(exclude_none=True)
json_doc = json.loads(tms.model_dump_json(exclude_none=True))
assert json_doc["crs"] == "http://www.opengis.net/def/crs/EPSG/0/3031"
def test_mars_tms():
"""The Mars global mercator scheme should broadly align with the Earth
Web Mercator CRS, despite the different planetary radius and scale.
"""
MARS2000_SPHERE = pyproj.CRS.from_proj4("+proj=longlat +R=3396190 +no_defs")
MARS_MERCATOR = pyproj.CRS.from_proj4(
"+proj=merc +R=3396190 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +no_defs"
)
# same boundaries as Earth mercator
mars_tms = TileMatrixSet.custom(
[
-179.9999999999996,
-85.05112877980656,
179.9999999999996,
85.05112877980656,
],
MARS_MERCATOR,
extent_crs=MARS2000_SPHERE,
title="Web Mercator Mars",
)
assert mars_tms.geographic_crs == MARS2000_SPHERE
pos = (35, 40, 3)
mars_tile = mars_tms.tile(*pos)
mercator_tms = morecantile.tms.get("WebMercatorQuad")
earth_tile = mercator_tms.tile(*pos)
assert mars_tile.x == earth_tile.x
assert mars_tile.y == earth_tile.y
assert mars_tile.z == earth_tile.z == 3
_to_geographic = pyproj.Transformer.from_crs(
mars_tms.crs._pyproj_crs, MARS2000_SPHERE, always_xy=True
)
bbox = _to_geographic.transform_bounds(*mars_tms.xy_bbox, densify_pts=21)
assert bbox == mars_tms.bbox
def test_mars_local_tms():
"""Local TMS using Mars CRS"""
MARS2000_SPHERE = pyproj.CRS.from_proj4("+proj=longlat +R=3396190 +no_defs")
# A transverse mercator projection for the landing site of the Perseverance rover.
SYRTIS_TM = pyproj.CRS.from_proj4(
"+proj=tmerc +lat_0=17 +lon_0=76.5 +k=0.9996 +x_0=0 +y_0=0 +a=3396190 +b=3376200 +units=m +no_defs"
)
# 100km grid centered on 17N, 76.5E
syrtis_tms = TileMatrixSet.custom(
[-5e5, -5e5, 5e5, 5e5],
SYRTIS_TM,
title="Web Mercator Mars",
)
assert SYRTIS_TM == syrtis_tms.crs._pyproj_crs
assert syrtis_tms.geographic_crs
assert syrtis_tms.model_dump(mode="json")
center = syrtis_tms.ul(1, 1, 1)
assert round(center.x, 6) == 76.5
assert round(center.y, 6) == 17
_to_geographic = pyproj.Transformer.from_crs(
syrtis_tms.crs._pyproj_crs, MARS2000_SPHERE, always_xy=True
)
bbox = _to_geographic.transform_bounds(*syrtis_tms.xy_bbox, densify_pts=21)
assert bbox == syrtis_tms.bbox
def test_mars_tms_construction():
mars_sphere_crs = pyproj.CRS.from_user_input("IAU_2015:49900")
extent = [-180.0, -90.0, 180.0, 90.0]
mars_tms = morecantile.TileMatrixSet.custom(
extent,
crs=mars_sphere_crs,
id="MarsGeographicCRS",
matrix_scale=[2, 1],
)
assert "4326" not in mars_tms.geographic_crs.to_wkt()
assert mars_tms.xy_bbox.left == pytest.approx(-180.0)
assert mars_tms.xy_bbox.bottom == pytest.approx(-90.0)
assert mars_tms.xy_bbox.right == pytest.approx(180.0)
assert mars_tms.xy_bbox.top == pytest.approx(90.0)
def test_mars_web_mercator_long_lat():
wkt_mars_web_mercator = 'PROJCRS["Mars (2015) - Sphere XY / Pseudo-Mercator",BASEGEOGCRS["Mars (2015) - Sphere",DATUM["Mars (2015) - Sphere",ELLIPSOID["Mars (2015) - Sphere",3396190,0,LENGTHUNIT["metre",1,ID["EPSG",9001]]],ANCHOR["Viking 1 lander : 47.95137 W"]],PRIMEM["Reference Meridian",0,ANGLEUNIT["degree",0.0174532925199433,ID["EPSG",9122]]]],CONVERSION["Popular Visualisation Pseudo-Mercator",METHOD["Popular Visualisation Pseudo Mercator",ID["EPSG",1024]],PARAMETER["Latitude of natural origin",0,ANGLEUNIT["degree",0.0174532925199433],ID["EPSG",8801]],PARAMETER["Longitude of natural origin",0,ANGLEUNIT["degree",0.0174532925199433],ID["EPSG",8802]],PARAMETER["False easting",0,LENGTHUNIT["metre",1],ID["EPSG",8806]],PARAMETER["False northing",0,LENGTHUNIT["metre",1],ID["EPSG",8807]]],CS[Cartesian,2],AXIS["easting (X)",east,ORDER[1],LENGTHUNIT["metre",1,ID["EPSG",9001]]],AXIS["northing (Y)",north,ORDER[2],LENGTHUNIT["metre",1,ID["EPSG",9001]]],USAGE[SCOPE["Web mapping and visualisation."],AREA["World between 85.06 S and 85.06 N."],BBOX[-85.050511287,-180,85.050511287,180]],REMARK["Use semi-major radius as sphere radius for interoperability. Source of IAU Coordinate systems: doi:10.1007/s10569-017-9805-5"]]'
crs_mars_web_mercator = pyproj.CRS.from_wkt(wkt_mars_web_mercator)
extent_wm = [
-10669445.554195119,
-10669445.554195119,
10669445.554195119,
10669445.554195119,
]
mars_tms_wm = morecantile.TileMatrixSet.custom(
extent_wm,
crs=crs_mars_web_mercator,
id="MarsWebMercator",
)
assert "4326" not in mars_tms_wm.geographic_crs.to_wkt()
assert mars_tms_wm.bbox.left == pytest.approx(-180.0)
assert mars_tms_wm.bbox.bottom == pytest.approx(-85.0511287)
assert mars_tms_wm.bbox.right == pytest.approx(180.0)
assert mars_tms_wm.bbox.top == pytest.approx(85.0511287)
extent_wm_geog = [
-179.9999999999996,
-85.05112877980656,
179.9999999999996,
85.05112877980656,
]
mars_sphere_crs = pyproj.CRS.from_user_input("IAU_2015:49900")
mars_tms_wm_geog_ext = morecantile.TileMatrixSet.custom(
extent_wm_geog,
extent_crs=mars_sphere_crs,
crs=crs_mars_web_mercator,
id="MarsWebMercator",
)
assert mars_tms_wm_geog_ext.bbox.left == pytest.approx(-180.0)
assert mars_tms_wm_geog_ext.bbox.bottom == pytest.approx(-85.0511287)
assert mars_tms_wm_geog_ext.bbox.right == pytest.approx(180.0)
assert mars_tms_wm_geog_ext.bbox.top == pytest.approx(85.0511287)
@pytest.mark.parametrize(
"identifier, file, crs",
[
(
"UPSAntarcticWGS84Quad",
os.path.join(tms_v1_dir, "UPSAntarcticWGS84Quad.json"),
5042,
),
("CanadianNAD83_LCC", os.path.join(tms_v1_dir, "CanadianNAD83_LCC.json"), 3978),
("WebMercatorQuad", os.path.join(tms_v1_dir, "WebMercatorQuad.json"), 3857),
],
)
def test_from_v1(identifier, file, crs):
"""
Test from_v1 class method
"""
with open(file) as fp:
v1_tms = json.load(fp)
tms = TileMatrixSet.from_v1(v1_tms)
assert tms.id == identifier
assert tms.crs._pyproj_crs == pyproj.CRS.from_epsg(crs)
@pytest.mark.parametrize(
"id,result",
[
("LINZAntarticaMapTilegrid", True),
("EuropeanETRS89_LAEAQuad", True),
("CanadianNAD83_LCC", False),
("UPSArcticWGS84Quad", False),
("NZTM2000Quad", True),
("UTM31WGS84Quad", False),
("UPSAntarcticWGS84Quad", False),
("WorldMercatorWGS84Quad", False),
("WorldCRS84Quad", False),
("WGS1984Quad", True),
("WebMercatorQuad", False),
("CDB1GlobalGrid", True),
("GNOSISGlobalGrid", True),
],
)
def test_inverted_tms(id, result):
"""Make sure _invert_axis return the correct result."""
assert morecantile.tms.get(id)._invert_axis == result
@pytest.mark.parametrize(
"id,result",
[
("LINZAntarticaMapTilegrid", False),
("EuropeanETRS89_LAEAQuad", False),
("CanadianNAD83_LCC", False),
("UPSArcticWGS84Quad", False),
("NZTM2000Quad", False),
("UTM31WGS84Quad", False),
("UPSAntarcticWGS84Quad", False),
("WorldMercatorWGS84Quad", False),
("WorldCRS84Quad", False),
("WGS1984Quad", False),
("WebMercatorQuad", False),
("CDB1GlobalGrid", True),
("GNOSISGlobalGrid", True),
],
)
def test_variable_tms(id, result):
"""Make sure is_variable return the correct result."""
assert morecantile.tms.get(id).is_variable == result
@pytest.mark.parametrize(
"authority,code,result",
[
("EPSG", "4326", "EPSG/0/4326"),
("ESRI", "102001", "ESRI/0/102001"),
("IAU_2015", "49910", "IAU/2015/49910"),
("IGNF", "AMANU49", "IGNF/0/AMANU49"),
("NKG", "ETRF00", "NKG/0/ETRF00"),
("OGC", "CRS84", "OGC/0/CRS84"),
],
)
def test_crs_uris(authority, code, result):
"""Test CRS URIS."""
assert (
morecantile.models.CRS_to_uri(pyproj.CRS((authority, code)))
== f"http://www.opengis.net/def/crs/{result}"
)
@pytest.mark.parametrize("tilematrixset", morecantile.tms.list())
def test_crs_uris_for_defaults(tilematrixset):
"""Test CRS URIS."""
t = morecantile.tms.get(tilematrixset)
assert t.crs._pyproj_crs == morecantile.models.CRS_to_uri(t.crs._pyproj_crs)
def test_rasterio_crs():
"""Check rasterio CRS methods."""
_ = pytest.importorskip("rasterio")
from rasterio.crs import CRS as rioCRS
tms = morecantile.tms.get("WebMercatorQuad")
assert isinstance(tms.rasterio_crs, rioCRS)
assert isinstance(tms.rasterio_geographic_crs, rioCRS)
assert tms.rasterio_crs == rioCRS.from_epsg(3857)
assert tms.rasterio_geographic_crs == rioCRS.from_epsg(4326)
tms = morecantile.tms.get("WGS1984Quad")
assert tms.rasterio_crs == rioCRS.from_epsg(4326)
assert tms.rasterio_geographic_crs == rioCRS.from_epsg(4326)
def test_boundingbox():
"""Test boundingbox support."""
with pytest.raises(ValidationError):
TileMatrixSet(
**{
"crs": "http://www.opengis.net/def/crs/EPSG/0/3857",
"boundingBox": {
"lowerLeft": [],
"upperRight": [],
"crs": "http://www.opengis.net/def/crs/EPSG/0/3857",
"orderedAxes": ["X", "Y"],
},
"tileMatrices": [
{
"id": "0",
"scaleDenominator": 559082264.028717,
"cellSize": 156543.033928041,
"pointOfOrigin": [-20037508.342789244, 20037508.342789244],
"tileWidth": 256,
"tileHeight": 256,
"matrixWidth": 1,
"matrixHeight": 1,
},
],
}
)
assert TileMatrixSet(
**{
"crs": "http://www.opengis.net/def/crs/EPSG/0/3857",
"boundingBox": {
"lowerLeft": [-20037508.342789244, -20037508.34278919],
"upperRight": [20037508.34278919, 20037508.342789244],
},
"tileMatrices": [
{
"id": "0",
"scaleDenominator": 559082264.028717,
"cellSize": 156543.033928041,
"pointOfOrigin": [-20037508.342789244, 20037508.342789244],
"tileWidth": 256,
"tileHeight": 256,
"matrixWidth": 1,
"matrixHeight": 1,
},
],
}
)
assert TileMatrixSet(
**{
"crs": "http://www.opengis.net/def/crs/EPSG/0/3857",
"boundingBox": {
"lowerLeft": [-20037508.342789244, -20037508.34278919],
"upperRight": [20037508.34278919, 20037508.342789244],
"crs": "http://www.opengis.net/def/crs/EPSG/0/3857",
"orderedAxes": ["X", "Y"],
},
"tileMatrices": [
{
"id": "0",
"scaleDenominator": 559082264.028717,
"cellSize": 156543.033928041,
"pointOfOrigin": [-20037508.342789244, 20037508.342789244],
"tileWidth": 256,
"tileHeight": 256,
"matrixWidth": 1,
"matrixHeight": 1,
},
],
}
)
def test_private_attr():
"""Check private attr."""
tms = morecantile.tms.get("WebMercatorQuad")
assert "_geographic_crs" in tms.__private_attributes__
assert "_tile_matrices_idx" in tms.__private_attributes__
def test_crs_type():
"""Test CRSType Model."""
uri = "http://www.opengis.net/def/crs/EPSG/0/3857"
crs = CRS(uri)
assert crs.root == uri
assert crs.model_dump() == uri
# PROJ methods
assert crs._pyproj_crs == pyproj.CRS.from_epsg(3857)
assert crs.srs == "http://www.opengis.net/def/crs/EPSG/0/3857"
assert crs.to_epsg() == 3857
assert crs.to_wkt() == pyproj.CRS.from_epsg(3857).to_wkt()
assert crs.to_proj4() == pyproj.CRS.from_epsg(3857).to_proj4()
assert crs.to_dict() == pyproj.CRS.from_epsg(3857).to_dict()
assert crs.to_json() == pyproj.CRS.from_epsg(3857).to_json()
# with Options
assert crs.to_epsg(min_confidence=10) == 3857
assert crs.to_wkt(pretty=True) == pyproj.CRS.from_epsg(3857).to_wkt(pretty=True)
assert crs.to_proj4(5) == pyproj.CRS.from_epsg(3857).to_proj4(5)
assert crs.to_json(pretty=True) == pyproj.CRS.from_epsg(3857).to_json(pretty=True)
# Outside OGC Specs but it works
wkt = pyproj.CRS.from_epsg(3857).to_wkt()
crs = CRS(wkt)
assert crs.root == wkt
assert crs.model_dump() == wkt
# PROJ methods
assert crs._pyproj_crs == pyproj.CRS.from_epsg(3857)
assert crs.srs == wkt
assert crs.to_epsg() == 3857
# CRSUri
data = {"uri": "http://www.opengis.net/def/crs/EPSG/0/3857"}
crs = CRS(data)
assert crs.root == CRSUri(uri="http://www.opengis.net/def/crs/EPSG/0/3857")
assert (
crs.model_dump(mode="json") == data
) # we use `mode=json` to dump all sub-model (URL)
assert crs._pyproj_crs == pyproj.CRS.from_epsg(3857)
# CRSWKT
wkt = pyproj.CRS.from_epsg(3857).to_json_dict()
crs = CRS({"wkt": wkt})
assert crs.root == CRSWKT(wkt=wkt)
assert crs.model_dump()["wkt"] == wkt
assert crs._pyproj_crs == pyproj.CRS.from_epsg(3857)
# CRSRef
with pytest.raises(NotImplementedError):
CRS({"referenceSystem": {"yo": 1}})
# something else
with pytest.raises(ValidationError):
CRS({"Hey": 1})
def test_crs_type_in_tms():
"""Check TMS representation when using non-string CRS."""
# CRS URI
crs = {"uri": "http://www.opengis.net/def/crs/EPSG/0/3857"}
tms = TileMatrixSet(
**{
"crs": crs,
"boundingBox": {
"lowerLeft": [-20037508.342789244, -20037508.34278919],
"upperRight": [20037508.34278919, 20037508.342789244],
},
"tileMatrices": [
{
"id": "0",
"scaleDenominator": 559082264.028717,
"cellSize": 156543.033928041,
"pointOfOrigin": [-20037508.342789244, 20037508.342789244],
"tileWidth": 256,
"tileHeight": 256,
"matrixWidth": 1,
"matrixHeight": 1,
},
],
}
)
assert str(tms.crs.root.uri) == "http://www.opengis.net/def/crs/EPSG/0/3857"
assert repr(tms)
def test_geographic_issue164():
"""
Check bbox in geographic CRS.
ref: https://github.com/developmentseed/morecantile/issues/164
"""
extent = [2696082.04374708, 1289407.53195196, 2696210.04374708, 1289535.53195196]
crs = pyproj.CRS.from_epsg("2056")
tms = morecantile.TileMatrixSet.custom(extent, crs)
assert tms.geographic_crs != pyproj.CRS.from_epsg(4326)
assert round(tms.bbox.bottom, 5) == 47.74957
tms.set_geographic_crs(pyproj.CRS.from_epsg(4326))
assert tms.geographic_crs == pyproj.CRS.from_epsg(4326)
assert round(tms.bbox.bottom, 5) == 47.74817
@pytest.mark.parametrize(
"name,is_wgs84",
[
("LINZAntarticaMapTilegrid", False),
("GNOSISGlobalGrid", True),
("EuropeanETRS89_LAEAQuad", False),
("CanadianNAD83_LCC", False),
("UPSArcticWGS84Quad", True),
("NZTM2000Quad", False),
("UTM31WGS84Quad", True),
("UPSAntarcticWGS84Quad", True),
("WorldMercatorWGS84Quad", True),
("WGS1984Quad", True),
("WorldCRS84Quad", False),
("WebMercatorQuad", True),
("CDB1GlobalGrid", True),
],
)
def test_geographic_crs(name, is_wgs84):
"""Check Geographic CRS."""
tms = morecantile.tms.get(name)
assert (tms.geographic_crs == pyproj.CRS.from_epsg(4326)) == is_wgs84
tms.set_geographic_crs(pyproj.CRS.from_epsg(4326))
assert tms.geographic_crs == pyproj.CRS.from_epsg(4326)
# Confirm the original object wasn't updated
tms = morecantile.tms.get(name)
assert (tms.geographic_crs == pyproj.CRS.from_epsg(4326)) == is_wgs84
def test_bottomleft_origin():
"""Create TMS with BottomLeft Origin."""
wmTopLeft = morecantile.tms.get("WebMercatorQuad")
crs = pyproj.CRS.from_epsg(3857)
extent = (
-20037508.342789244,
-20037508.342789244,
20037508.342789244,
20037508.342789244,
)
corner_of_origin = "bottomLeft"
tms = TileMatrixSet.custom(
extent,
crs,
matrix_scale=[1, 1],
minzoom=0,
maxzoom=24,
id="WebMercatorQuadBottomLeft",
ordered_axes=["X", "Y"],
corner_of_origin=corner_of_origin,
)
assert tms.matrix(0).pointOfOrigin == (-20037508.342789244, -20037508.342789244)
assert tms._matrix_origin(tms.matrix(0)) == (
-20037508.342789244,
-20037508.342789244,
)
assert tms.xy_bounds(0, 0, 0) == wmTopLeft.xy_bounds(0, 0, 0)
assert tms.bounds(0, 0, 0) == wmTopLeft.bounds(0, 0, 0)
assert tms.xy_bounds(0, 0, 1).left == -20037508.342789244
assert tms.xy_bounds(0, 0, 1).bottom == -20037508.342789244
assert tms.xy_bounds(1, 1, 1).top == 20037508.342789244
assert tms.xy_bounds(1, 1, 1).right == 20037508.342789244
assert tms.tile(-180, -85, 0) == morecantile.Tile(x=0, y=0, z=0)
assert tms.tile(-180, -85, 1) == morecantile.Tile(x=0, y=0, z=1)
assert tms.tile(-180, 85, 1) == morecantile.Tile(x=0, y=1, z=1)
bounds = tms.xy_bounds(486, tms.matrix(10).matrixHeight - 1 - 332, 10)
expected = wmTopLeft.xy_bounds(486, 332, 10)
for a, b in zip(expected, bounds):
assert round(a - b, 6) == pytest.approx(0)
@pytest.mark.parametrize(
("topLeft_Tile", "bottomLeft_Tile"),
[
(morecantile.Tile(10, 10, 10), morecantile.Tile(10, 1013, 10)),
(morecantile.Tile(10, 1013, 10), morecantile.Tile(10, 10, 10)),
# Check the Origin points
(morecantile.Tile(0, 0, 10), morecantile.Tile(0, 1023, 10)),
(morecantile.Tile(0, 1023, 10), morecantile.Tile(0, 0, 10)),
# Check the end points
(morecantile.Tile(1023, 0, 10), morecantile.Tile(1023, 1023, 10)),
(morecantile.Tile(1023, 1023, 10), morecantile.Tile(1023, 0, 10)),
# Zoom=0
(morecantile.Tile(0, 0, 0), morecantile.Tile(0, 0, 0)),
# zoom=1 on both edges of the zoom level
(morecantile.Tile(0, 0, 1), morecantile.Tile(0, 1, 1)),
(morecantile.Tile(0, 1, 1), morecantile.Tile(0, 0, 1)),
# zoom=14 near the middle
(
morecantile.Tile(x=3413, y=6202, z=14),
morecantile.Tile(x=3413, y=10181, z=14),
),
],
)
def test_topLeft_BottomLeft_bounds_equal_bounds(topLeft_Tile, bottomLeft_Tile):
tmsTop = morecantile.tms.get("WebMercatorQuad")
tmsBottom = TileMatrixSet.custom(
(
-20037508.342789244,
-20037508.342789244,
20037508.342789244,
20037508.342789244,
),
pyproj.CRS.from_epsg(3857),
matrix_scale=[1, 1],
minzoom=0,
maxzoom=24,
id="WebMercatorQuadBottomLeft",
ordered_axes=["X", "Y"],
corner_of_origin="bottomLeft",
)
bounds = tmsTop.xy_bounds(topLeft_Tile)
bounds2 = tmsBottom.xy_bounds(bottomLeft_Tile)
for a, b in zip(bounds, bounds2):
assert round(a - b, 6) == 0
def test_webmercator_bounds():
"""Test WebMercatorQuad bounds.
ref: https://github.com/developmentseed/morecantile/issues/175
"""
tms = morecantile.tms.get("WebMercatorQuad")
assert tms.bounds(0, 0, 0).left == -180.0
assert tms.bounds(0, 0, 0).right == 180.0
assert tuple(tms.xy_bounds(0, 0, 0)) == (
-20037508.342789244,
-20037508.342789244,
20037508.342789244,
20037508.342789244,
)
assert tms.bounds(0, 0, 1).left == -180.0
assert tms.bounds(1, 0, 1).right == 180.0
| python | MIT | b6c697b13d56470840e45eaf8cc22c90ee60fd62 | 2026-01-05T07:14:04.097493Z | false |
developmentseed/morecantile | https://github.com/developmentseed/morecantile/blob/b6c697b13d56470840e45eaf8cc22c90ee60fd62/tests/benchmarks.py | tests/benchmarks.py | """Morecantile benchmark."""
import pytest
import morecantile
from morecantile.commons import BoundingBox
tms = morecantile.tms.get("WebMercatorQuad")
# Test tiles from https://github.com/jessekrubin/utiles/blob/ea58b9a017a2e3528f03cc20f16ef531737b863f/utiles-pyo3/bench/test_bench.py
TEST_TILES = (
(0, 0, 0),
(1, 0, 1),
(1, 1, 1),
(1, 40, 7),
(486, 332, 10),
# HIGH ZOOM
(486, 332, 20),
# OUTSIDE TMS Range
(486, 332, 30),
)
@pytest.mark.parametrize("tile", TEST_TILES)
def test_bounds(tile, benchmark):
str_tile = "Tile(x={},y={},z={})".format(*tile)
benchmark.name = f"morecantile.bounds-{str_tile}"
benchmark.fullname = f"morecantile.bounds-{str_tile}"
benchmark.group = "morecantile.bounds"
r = benchmark(tms.bounds, *tile)
assert isinstance(r, BoundingBox)
@pytest.mark.parametrize("tile", TEST_TILES)
def test_xy_bounds(tile, benchmark) -> None:
str_tile = "Tile(x={},y={},z={})".format(*tile)
benchmark.name = f"morecantile.xy_bounds-{str_tile}"
benchmark.fullname = f"morecantile.xy_bounds-{str_tile}"
benchmark.group = "morecantile.xy_bounds"
r = benchmark(tms.xy_bounds, *tile)
assert isinstance(r, BoundingBox)
| python | MIT | b6c697b13d56470840e45eaf8cc22c90ee60fd62 | 2026-01-05T07:14:04.097493Z | false |
developmentseed/morecantile | https://github.com/developmentseed/morecantile/blob/b6c697b13d56470840e45eaf8cc22c90ee60fd62/tests/__init__.py | tests/__init__.py | """morecantile tests suite."""
| python | MIT | b6c697b13d56470840e45eaf8cc22c90ee60fd62 | 2026-01-05T07:14:04.097493Z | false |
developmentseed/morecantile | https://github.com/developmentseed/morecantile/blob/b6c697b13d56470840e45eaf8cc22c90ee60fd62/tests/test_tms_variable_width.py | tests/test_tms_variable_width.py | """Tests for morecantile."""
import pytest
import morecantile
from morecantile.commons import BoundingBox, Tile
from morecantile.errors import InvalidZoomError
from morecantile.models import TileMatrix
gnosisg_tms = morecantile.tms.get("GNOSISGlobalGrid")
cdb1_tms = morecantile.tms.get("CDB1GlobalGrid")
def test_coalesce():
"""test get coalesce."""
matrix = TileMatrix(
**{
"id": "2",
"scaleDenominator": 34942641.501794859767,
"cellSize": 0.087890625,
"cornerOfOrigin": "topLeft",
"pointOfOrigin": [90, -180],
"matrixWidth": 16,
"matrixHeight": 8,
"tileWidth": 256,
"tileHeight": 256,
"variableMatrixWidths": [
{"coalesce": 4, "minTileRow": 0, "maxTileRow": 0},
{"coalesce": 2, "minTileRow": 1, "maxTileRow": 1},
{"coalesce": 2, "minTileRow": 6, "maxTileRow": 6},
{"coalesce": 4, "minTileRow": 7, "maxTileRow": 7},
],
}
)
assert matrix.get_coalesce_factor(0) == 4
assert matrix.get_coalesce_factor(1) == 2
assert matrix.get_coalesce_factor(3) == 1
assert matrix.get_coalesce_factor(6) == 2
assert matrix.get_coalesce_factor(7) == 4
with pytest.raises(ValueError):
matrix.get_coalesce_factor(8)
with pytest.raises(ValueError):
matrix.get_coalesce_factor(-1)
matrix = TileMatrix(
**{
"id": "2",
"scaleDenominator": 34942641.501794859767,
"cellSize": 0.087890625,
"cornerOfOrigin": "topLeft",
"pointOfOrigin": [90, -180],
"matrixWidth": 16,
"matrixHeight": 8,
"tileWidth": 256,
"tileHeight": 256,
}
)
with pytest.raises(ValueError):
matrix.get_coalesce_factor(0)
def test_invalid_matrix():
    """Should raise error because we cannot construct a Matrix for variableWidth TMS."""
    # Both TMS stop before these zoom levels, so matrix() must refuse them.
    for tms, missing_zoom in ((cdb1_tms, 22), (gnosisg_tms, 29)):
        with pytest.raises(InvalidZoomError):
            tms.matrix(missing_zoom)
def test_gnosisg():
    """test GNOSISGlobalGrid TMS."""
    # Level-0 tile bounds: the grid splits the globe into 90-degree tiles.
    bounds = gnosisg_tms.xy_bounds(0, 0, 0)
    assert bounds == BoundingBox(-180, 0, -90, 90)
    bounds = gnosisg_tms.xy_bounds(1, 1, 0)
    assert bounds == BoundingBox(-90, -90, 0, 0)
    bounds = gnosisg_tms.xy_bounds(0, 0, 1)
    assert bounds == BoundingBox(-180, 45, -90, 90)

    # tile for index 0,0 and 1,0 should have the same bounds
    # (rows 0 and 3 at zoom 1 are coalesced: adjacent columns alias one tile).
    assert gnosisg_tms.xy_bounds(0, 0, 1) == gnosisg_tms.xy_bounds(1, 0, 1)
    assert gnosisg_tms.xy_bounds(2, 0, 1) == gnosisg_tms.xy_bounds(3, 0, 1)
    assert gnosisg_tms.xy_bounds(4, 0, 1) == gnosisg_tms.xy_bounds(5, 0, 1)
    assert gnosisg_tms.xy_bounds(6, 0, 1) == gnosisg_tms.xy_bounds(7, 0, 1)
    # Middle rows are NOT coalesced: neighbouring columns differ.
    assert gnosisg_tms.xy_bounds(0, 1, 1) != gnosisg_tms.xy_bounds(1, 1, 1)
    assert gnosisg_tms.xy_bounds(2, 1, 1) != gnosisg_tms.xy_bounds(3, 1, 1)
    assert gnosisg_tms.xy_bounds(0, 3, 1) == gnosisg_tms.xy_bounds(1, 3, 1)
    assert gnosisg_tms.xy_bounds(2, 3, 1) == gnosisg_tms.xy_bounds(3, 3, 1)
    assert gnosisg_tms.xy_bounds(4, 3, 1) == gnosisg_tms.xy_bounds(5, 3, 1)
    assert gnosisg_tms.xy_bounds(6, 3, 1) == gnosisg_tms.xy_bounds(7, 3, 1)

    # crs and geographic crs are the same
    assert gnosisg_tms.xy_bounds(0, 0, 0) == gnosisg_tms.bounds(0, 0, 0)
    assert gnosisg_tms.xy_bounds(1, 1, 0) == gnosisg_tms.bounds(1, 1, 0)
    assert gnosisg_tms.xy_bounds(0, 0, 1) == gnosisg_tms.bounds(0, 0, 1)

    tiles = gnosisg_tms.tiles(-180, -90, 180, 90, [0])
    assert len(list(tiles)) == 8

    #############################
    # CHECK WE DON'T HAVE ALIASES
    # At zoom 1 the full globe yields 24 distinct tiles (aliases excluded).
    tiles = list(gnosisg_tms.tiles(-180, -90, 180, 90, [1]))
    assert len(tiles) == 24
    assert Tile(1, 0, 1) not in tiles

    # make sure the aliased tiles are not added
    assert len(gnosisg_tms.parent(Tile(0, 0, 1))) == 1
    assert len(gnosisg_tms.parent(Tile(0, 0, 2))) == 1
    assert len(gnosisg_tms.parent(Tile(0, 0, 3))) == 1

    assert len(gnosisg_tms.children(Tile(0, 0, 0), zoom=1)) == 3
    assert len(gnosisg_tms.children(Tile(0, 0, 0), zoom=2)) == 11
    assert len(gnosisg_tms.children(Tile(0, 1, 1), zoom=2)) == 4

    # test neighbors (coalesced tiles must not list their aliases as neighbors)
    tiles = gnosisg_tms.neighbors(Tile(0, 0, 1))
    assert tiles == [
        Tile(x=0, y=1, z=1),
        Tile(x=1, y=1, z=1),
        Tile(x=2, y=0, z=1),
        Tile(x=2, y=1, z=1),
    ]
    tiles = gnosisg_tms.neighbors(Tile(2, 0, 1))
    assert tiles == [
        Tile(x=0, y=0, z=1),
        Tile(x=1, y=1, z=1),
        Tile(x=2, y=1, z=1),
        Tile(x=3, y=1, z=1),
        Tile(x=4, y=0, z=1),
        Tile(x=4, y=1, z=1),
    ]
    tiles = gnosisg_tms.neighbors(Tile(6, 0, 1))
    assert tiles == [
        Tile(x=4, y=0, z=1),
        Tile(x=5, y=1, z=1),
        Tile(x=6, y=1, z=1),
        Tile(x=7, y=1, z=1),
    ]
    tiles = gnosisg_tms.neighbors(Tile(0, 1, 1))
    assert tiles == [
        Tile(x=0, y=0, z=1),
        Tile(x=0, y=2, z=1),
        Tile(x=1, y=1, z=1),
        Tile(x=1, y=2, z=1),
    ]
    tiles = gnosisg_tms.neighbors(Tile(3, 1, 1))
    assert tiles == [
        Tile(x=2, y=0, z=1),
        Tile(x=2, y=1, z=1),
        Tile(x=2, y=2, z=1),
        Tile(x=3, y=2, z=1),
        Tile(x=4, y=0, z=1),
        Tile(x=4, y=1, z=1),
        Tile(x=4, y=2, z=1),
    ]
    tiles = gnosisg_tms.neighbors(Tile(0, 3, 1))
    assert tiles == [
        Tile(x=0, y=2, z=1),
        Tile(x=1, y=2, z=1),
        Tile(x=2, y=2, z=1),
        Tile(x=2, y=3, z=1),
    ]

    # assert alias tile have the same neighbors
    assert gnosisg_tms.neighbors(Tile(0, 0, 1)) == gnosisg_tms.neighbors(Tile(1, 0, 1))

    # Point lookup snaps to the canonical (left-most) tile of a coalesced group.
    assert gnosisg_tms.tile(-180, 90, 2) == Tile(0, 0, 2)
    assert gnosisg_tms.tile(-150, 90, 2) == Tile(0, 0, 2)
    assert gnosisg_tms.tile(-80, 90, 2) == Tile(4, 0, 2)

    assert gnosisg_tms.tile(-180, -90, 2) == Tile(0, 7, 2)
    assert gnosisg_tms.tile(-150, -90, 2) == Tile(0, 7, 2)
    assert gnosisg_tms.tile(-80, -90, 2) == Tile(4, 7, 2)

    # Ignore coalescence and return alias
    assert gnosisg_tms.tile(-150, 90, 2, ignore_coalescence=True) == Tile(1, 0, 2)
    assert gnosisg_tms.tile(150, -90, 2, ignore_coalescence=True) == Tile(14, 7, 2)
| python | MIT | b6c697b13d56470840e45eaf8cc22c90ee60fd62 | 2026-01-05T07:14:04.097493Z | false |
def calc(name, seq_len,
         n_layers, n_embed, vocab_size,
         n_head, n_head_kv,
         ff_factor, n_experts, n_activated_experts, ffn_hidden,moe_ffn_hidden, first_k_dense=0,
         q_lora_rank=0, k_lora_rank=0, v_lora_rank=0, qk_head_dim=0, rope_head_dim=0, v_head_dim=0,
         shared_expert_num=0, mtp=0, gpus=0, pp=0, vpp=0, ep=0, tp=0, etp=0, layers_per_pp=0,
         fsdp=False, fp8=False):
    """Print a back-of-the-envelope ledger for a DeepSeek-style MLA + MoE model.

    Estimates and prints, in order: per-component parameter counts and their
    BF16 memory footprint, training-forward and decode FLOPs, KV-cache size,
    MTP-head cost, per-rank parameter memory under the given TP/PP/EP/ETP
    layout, and a per-tensor accounting of cached activations together with
    the savings of several recompute/offload strategies.

    All results are printed; nothing is returned.

    Args (units):
        seq_len: tokens per sample used for the FLOP/activation estimates.
        n_embed, vocab_size, ffn_hidden, moe_ffn_hidden: model widths.
        q_lora_rank/k_lora_rank/v_lora_rank, qk_head_dim, rope_head_dim,
            v_head_dim: MLA dimensions; q_lora_rank > 0 selects the
            LoRA-compressed Q path.
        first_k_dense: number of leading dense (non-MoE) layers.
        n_activated_experts: experts activated per token
            # NOTE(review): appears to INCLUDE the shared experts — see
            # ``topk = n_activated_experts - shared_expert_num`` below; confirm.
        gpus/pp/vpp/ep/tp/etp/layers_per_pp: parallelism layout for the
            per-rank memory section.
        fsdp/fp8: optimizer/param-sharding and low-precision variants for the
            per-rank parameter-memory formulas.
    """
    assert k_lora_rank == v_lora_rank
    total_params, total_flops = 0, 0
    head_dim = n_embed // n_head
    kv_lora_rank = k_lora_rank
    print(f'{name} (seq_len={seq_len}):')
    # 2 bytes/param (BF16), expressed as GiB per billion params.
    billion_to_gb = 1e9 * 2 / 1024**3

    # Embedding
    embedding_params = n_embed * vocab_size / 1e9
    total_params += embedding_params
    print(f' - Embedding params: {embedding_params} B')
    embedding_memory = embedding_params * billion_to_gb
    print(f' - Embedding memory size: {embedding_memory} GB')

    # Attention
    if q_lora_rank > 0:
        # MLA with compressed Q: Q goes through a LoRA-style down/up pair.
        assert n_head == n_head_kv
        attn_proj_params = n_layers * (n_embed * q_lora_rank + q_lora_rank * n_head * qk_head_dim) # Q LoRA
        attn_proj_params += n_layers * n_embed * (kv_lora_rank + rope_head_dim) # KV LoRA A
        attn_proj_params += n_layers * kv_lora_rank * n_head * (qk_head_dim - rope_head_dim) # K LoRA B
        attn_proj_params += n_layers * kv_lora_rank * n_head * v_head_dim # V LoRA B
        attn_proj_params += n_layers * n_embed * n_head * v_head_dim # O project
        attn_proj_params /= 1e9
        # MLA caches only the compressed KV latent + the RoPE key part.
        kv_cache = n_layers * (kv_lora_rank + rope_head_dim) * 2 / 1e6
        attn_proj_flops = attn_proj_params * seq_len * 2 * 1e9
        attn_proj_flops /= 1e12
    else:
        # attn_proj_params = n_layers * n_embed * n_embed * 2 / 1e9 # Q, O project
        # attn_proj_params += n_layers * n_embed * n_head_kv * head_dim * 2 / 1e9 # K, V project
        attn_proj_params = n_layers * n_embed * n_head * qk_head_dim # Q project
        attn_proj_params += n_layers * n_embed * (kv_lora_rank + rope_head_dim) # KV LoRA A
        attn_proj_params += n_layers * kv_lora_rank * n_head * (qk_head_dim - rope_head_dim) # K LoRA B
        attn_proj_params += n_layers * kv_lora_rank * n_head * v_head_dim # V LoRA B
        attn_proj_params += n_layers * n_embed * n_head * v_head_dim # O project
        attn_proj_params /= 1e9
        kv_cache = n_layers * n_head_kv * head_dim * 2 * 2 / 1e6
        attn_proj_flops = attn_proj_params * seq_len * 2 * 1e9
        attn_proj_flops /= 1e12

    # qk_head_dim, v_head_dim = head_dim, head_dim
    # Core attention: causal mask halves the score matrix (hence "/ 2").
    attn_flops = n_layers * n_head * seq_len * qk_head_dim * seq_len / 2 * 2 # QK^T
    attn_flops += n_layers * n_head * seq_len * seq_len * v_head_dim / 2 * 2 # (QK^T)V
    attn_flops /= 1e12
    attn_flops += attn_proj_flops
    total_params += attn_proj_params
    total_flops += attn_flops
    attn_proj_memory = attn_proj_params * billion_to_gb
    print(f' - Attention memory size: {attn_proj_memory} GB')
    print(f' - Attention params: {attn_proj_params} B')
    print(f' - Attention FLOPs (per {seq_len} training forward tokens): {attn_flops} TFLOPs')
    print(f' - KV Cache (per token, BF16): {kv_cache} MB')

    # Decode-time (token-by-token) attention FLOPs, summed over positions.
    if q_lora_rank > 0:
        attn_infer_flops = 0
        for i in range(seq_len):
            attn_infer_flops += n_layers * n_embed * (kv_lora_rank + rope_head_dim) * 2 # KV LoRA A (local u) + MQA K project
            attn_infer_flops += n_layers * (n_embed * q_lora_rank + q_lora_rank * n_head * qk_head_dim) * 2 # Q LoRA
            attn_infer_flops += n_layers * n_head * ((qk_head_dim - rope_head_dim) * kv_lora_rank) * 2 # q = Q @ BK
            attn_infer_flops += n_layers * n_head * (kv_lora_rank + rope_head_dim) * i * 2 # Attn score = q @ u
            attn_infer_flops += n_layers * n_head * kv_lora_rank * i * 2 # o = s @ u
            attn_infer_flops += n_layers * kv_lora_rank * n_head * v_head_dim * 2 # V LoRA B
            attn_infer_flops += n_layers * n_embed * n_head * v_head_dim * 2 # O project
        attn_infer_flops /= 1e12
    else:
        attn_infer_flops = 0
        for i in range(seq_len):
            attn_infer_flops += n_layers * n_embed * n_embed * 2 * 2 # Q, O project
            attn_infer_flops += n_layers * n_embed * n_head_kv * head_dim * 2 * 2 # K, V project
            attn_infer_flops += n_layers * n_head * i * qk_head_dim * 2 # Attn score
            attn_infer_flops += n_layers * n_head * i * v_head_dim * 2 # V project
        attn_infer_flops /= 1e12
    print(f' - Attention FLOPs (per {seq_len} completion tokens): {attn_infer_flops} TFLOPs')

    # MLP
    # "hidden" (SwiGLU width rounded up to a multiple of 128) is computed but
    # only printed; the actual sizing uses ffn_hidden/moe_ffn_hidden below.
    hidden = n_embed * ff_factor * 8 // 3
    hidden = (hidden + 127) // 128 * 128
    # mlp_params = (n_layers - first_k_dense) * n_experts * (n_embed * hidden * 2 + hidden * n_embed) / 1e9
    # mlp_params += first_k_dense * n_activated_experts * (n_embed * hidden * 2 + hidden * n_embed) / 1e9
    # mlp_act_params = n_layers * n_activated_experts * (n_embed * hidden * 2 + hidden * n_embed) / 1e9
    # mlp_act_flops = n_layers * seq_len * n_activated_experts * (n_embed * hidden * 2 + hidden * n_embed) * 2 / 1e12
    mlp_params = (n_layers - first_k_dense) * n_experts * (n_embed * moe_ffn_hidden * 2 + moe_ffn_hidden * n_embed) / 1e9
    mlp_params += first_k_dense * (n_embed * ffn_hidden * 2 + ffn_hidden * n_embed) / 1e9
    mlp_act_params = (n_layers - first_k_dense) * n_activated_experts * (n_embed * moe_ffn_hidden * 2 + moe_ffn_hidden * n_embed) / 1e9
    mlp_act_params += first_k_dense * (n_embed * ffn_hidden * 2 + ffn_hidden * n_embed) / 1e9
    mlp_act_flops = (n_layers - first_k_dense) * seq_len * n_activated_experts * (n_embed * moe_ffn_hidden * 2 + moe_ffn_hidden * n_embed) * 2 / 1e12
    mlp_act_flops += first_k_dense * seq_len * (n_embed * ffn_hidden * 2 + ffn_hidden * n_embed) * 2 / 1e12
    total_params += mlp_params
    total_flops += mlp_act_flops
    mlp_memory = mlp_params * billion_to_gb
    print(f' - MLP hidden: {hidden}')
    print(f' - MLP params: {mlp_params} B')
    print(f' - MLP memory size: {mlp_memory} GB')
    print(f' - MLP activated params (per token): {mlp_act_params} B')
    print(f' - MLP activated FLOPs (per {seq_len} training forward tokens): {mlp_act_flops} TFLOPs')

    # Head
    head_params = n_embed * vocab_size / 1e9
    head_flops = seq_len * n_embed * vocab_size * 2 / 1e12
    total_params += head_params
    total_flops += head_flops
    head_memory = head_params * billion_to_gb
    total_memory = total_params * billion_to_gb
    print(f' - Head params: {head_params} B')
    print(f' - Head memory size: {head_memory} GB')
    print(f' - Head FLOPs (per {seq_len} training forward tokens): {head_flops} TFLOPs')

    # Gating
    gating_flops = (n_layers - first_k_dense) * n_experts * n_embed * seq_len * 2 / 1e12
    total_flops += gating_flops
    print(f' - Gating FLOPs (per {seq_len} training forward tokens): {gating_flops} TFLOPs')

    # Total
    print(f' - Total params: {total_params} B')
    print(f' - Total memory size: {total_memory} GB')
    print(f' - Total activated params (per token): {total_params + mlp_act_params - mlp_params - embedding_params} B')
    print(f' - Total FLOPs (per {seq_len} training forward tokens): {total_flops} TFLOPs')
    print(f' - Total FLOPs (per forward token): {total_flops / seq_len} TFLOPs')
    print(f' - Total FLOPs (fwd and bwdper {seq_len} training forward tokens): {total_flops * 3} TFLOPs')
    print(f' - Total FLOPs (per {seq_len} completion tokens): {total_flops - attn_flops + attn_infer_flops} TFLOPs')
    print()

    # MTP
    # One extra transformer layer + projection + LM head per MTP depth.
    mtp_proj_params = mtp * n_embed * n_embed * 2 / 1e9
    mtp_attn_params = attn_proj_params / n_layers * mtp
    mtp_mlp_params = n_experts * (n_embed * moe_ffn_hidden * 2 + moe_ffn_hidden * n_embed) / 1e9 * mtp
    mtp_params = mtp_proj_params + mtp_attn_params + mtp_mlp_params
    mtp_flops = (attn_flops + mlp_act_flops) / n_layers + gating_flops / (n_layers - first_k_dense) + head_flops + mtp_proj_params * seq_len * 2 / 1e3
    print(f' - MTP params: {mtp_params} B')
    print(f' - MTP FLOPs (per {seq_len} training forward tokens): {mtp_flops} TFLOPs')
    print()

    # Per-rank parameter memory under the requested parallel layout.
    dense_dp = gpus // pp // tp
    moe_dp = gpus // pp // ep // etp
    print(f' - GPUs{gpus} PP{pp} VPP{vpp} EP{ep} TP{tp} ETP{etp} denseDP{dense_dp} EDP{moe_dp} FSDP{fsdp}')
    one_expert_params = (n_embed * moe_ffn_hidden * 2 + moe_ffn_hidden * n_embed) / 1e9
    moe_layer_dense_params = attn_proj_params / n_layers + one_expert_params * shared_expert_num
    moe_layer_moe_params = one_expert_params * (n_experts - shared_expert_num) / ep
    # Bytes-per-param factors: "X + Y/dp" = local param+grad copies plus the
    # DP-sharded optimizer state. NOTE(review): factors assume a mixed-precision
    # distributed-optimizer setup — confirm against the training config.
    if fp8:
        rank_dense_mem = layers_per_pp * moe_layer_dense_params / tp * (8 + 8.0 / dense_dp) * 1e9 / 1024**3
        rank_moe_mem = layers_per_pp * moe_layer_moe_params / etp * (8 + 8.0 / moe_dp) * 1e9 / 1024**3
    else:
        rank_dense_mem = layers_per_pp * moe_layer_dense_params / tp * (6 + 12.0 / dense_dp) * 1e9 / 1024**3
        rank_moe_mem = layers_per_pp * moe_layer_moe_params / etp * (6 + 12.0 / moe_dp) * 1e9 / 1024**3
    if fsdp:
        assert not fp8
        rank_dense_mem = layers_per_pp * moe_layer_dense_params / tp * (18.0 / dense_dp) * 1e9 / 1024**3
        rank_moe_mem = layers_per_pp * moe_layer_moe_params / etp * (18.0 / moe_dp) * 1e9 / 1024**3 + moe_layer_moe_params / etp * 12.0 * 1e9 / 1024**3
    print(f' - Dense Param Mem per rank: {rank_dense_mem} GB')
    print(f' - MoE Param Mem per rank: {rank_moe_mem} GB')
    print(f' - Total Param Mem per rank: {rank_dense_mem + rank_moe_mem} GB')
    print()

    # Per-tensor activation sizes (MB) for one layer / one micro-batch
    # (micro-batch size 1 — the literal "* 1" factors below).
    topk = n_activated_experts - shared_expert_num
    bf16_mb_coeff = 2 / 1024 / 1024
    bf16_or_fp8_mb_coeff = 1 / 1024 / 1024 if fp8 else bf16_mb_coeff
    fp32_mb_coeff = 4 / 1024 / 1024
    int64_mb_coeff = 8 / 1024 / 1024
    input_mem = seq_len * 1 * n_embed / tp * bf16_mb_coeff
    input_norm_out = seq_len * 1 * n_embed / tp * bf16_or_fp8_mb_coeff
    q_down_out = seq_len * 1 * q_lora_rank / tp * bf16_mb_coeff
    q_norm_out = seq_len * 1 * q_lora_rank / tp * bf16_or_fp8_mb_coeff
    q_up_out = seq_len * 1 * n_head * qk_head_dim / tp * bf16_mb_coeff
    kv_down_out = seq_len * 1 * (kv_lora_rank + rope_head_dim) / tp * bf16_mb_coeff
    kv_compressed = seq_len * 1 * kv_lora_rank / tp * bf16_mb_coeff
    kv_norm_out = seq_len * 1 * kv_lora_rank * bf16_or_fp8_mb_coeff
    kv_up_out = seq_len * 1 * n_head * (qk_head_dim - rope_head_dim + v_head_dim) / tp * bf16_mb_coeff
    q_apply_rope_out = q_up_out
    k_apply_rope_out = seq_len * 1 * n_head * qk_head_dim / tp * bf16_mb_coeff
    v_apply_rope_out = seq_len * 1 * n_head * v_head_dim / tp * bf16_mb_coeff
    attn_out = seq_len * 1 * n_head * v_head_dim / tp * bf16_mb_coeff
    attn_ctx_tensor = 1 * n_head / tp * seq_len * 1 * fp32_mb_coeff
    proj_out = seq_len * 1 * n_embed / tp * bf16_mb_coeff
    attn_bda_out = proj_out
    mlp_norm_out = seq_len * 1 * n_embed / tp * bf16_mb_coeff
    shared_AG_out = seq_len * 1 * n_embed * bf16_or_fp8_mb_coeff
    router_probs = seq_len / tp * (n_experts - shared_expert_num) * bf16_mb_coeff
    permute_row_id_map = seq_len / tp * (n_experts - shared_expert_num) * int64_mb_coeff
    share_linear_1_out = seq_len * 1 * moe_ffn_hidden / tp * shared_expert_num * 2 * bf16_or_fp8_mb_coeff
    share_act_out = share_linear_1_out / 2
    share_linear_2_out = seq_len * 1 * n_embed / tp * bf16_mb_coeff
    permute_out = seq_len / tp * etp * topk * 1 * n_embed * bf16_or_fp8_mb_coeff
    expert_linear_1_out = seq_len / tp * etp * topk * 1 * moe_ffn_hidden / etp * 2 * bf16_mb_coeff # TODO
    expert_act_out = expert_linear_1_out / 2
    expert_linear_2_out = seq_len / tp * etp * topk * 1 * n_embed * bf16_mb_coeff
    unpermute_alltoall_out = expert_linear_2_out / etp
    unpermute_out = unpermute_alltoall_out / topk
    mlp_bda_out = unpermute_out

    # Sum of everything that must stay resident for the backward pass.
    cached = input_mem + input_norm_out + q_down_out + q_norm_out + kv_compressed + kv_norm_out + q_apply_rope_out + k_apply_rope_out + v_apply_rope_out + attn_out + attn_ctx_tensor + \
        attn_bda_out + shared_AG_out + \
        router_probs + permute_row_id_map + \
        share_linear_1_out + share_act_out + \
        permute_out + expert_linear_1_out + expert_act_out + unpermute_alltoall_out
    # 1F1B steady state: stage 0 holds ~pp in-flight micro-batches worth of
    # layers; interleaving (vpp > 1) adds one extra chunk per stage.
    cached_layer_num = layers_per_pp * (pp - 1)
    if vpp > 1:
        cached_layer_num += (layers_per_pp // vpp) * (pp - 1)
    cached_t = cached * cached_layer_num

    print(f' -- input tensor: {input_mem} MB, cached by input norm {input_mem / cached * 100:.2f}%')
    print(f' -- input norm output: {input_norm_out} MB cached by qkv_down {input_norm_out / cached * 100:.2f}%')
    print(f' -- q_down_out: {q_down_out} MB cached by q_norm {q_down_out / cached * 100:.2f}%')
    print(f' -- q_norm_out: {q_norm_out} MB cached by q_up {q_norm_out / cached * 100:.2f}%')
    print(f' -- q_up_out: {q_up_out} MB not cached')
    print(f' -- kv_down_out: {kv_down_out} MB not cached')
    print(f' -- kv_compressed (output of split(kv_down_out)): {kv_compressed} MB cached by kv_norm {kv_compressed / cached * 100:.2f}%')
    print(f' -- kv_norm_out: {kv_norm_out} MB cached by kv_up {kv_norm_out / cached * 100:.2f}%')
    print(f' -- kv_up_out: {kv_up_out} MB not cached')
    print(f' -- q_apply_rope_out: {q_apply_rope_out} MB cached by core_attn {q_apply_rope_out / cached * 100:.2f}%')
    print(f' -- k_apply_rope_out: {k_apply_rope_out} MB cached by core_attn {k_apply_rope_out / cached * 100:.2f}%')
    print(f' -- v_apply_rope_out: {v_apply_rope_out} MB cached by core_attn {v_apply_rope_out / cached * 100:.2f}%')
    print(f' -- attn_out: {attn_out} MB cached by proj_out and attn itself {attn_out / cached * 100:.2f}%')
    print(f' -- attn_ctx_tensor: {attn_ctx_tensor} MB cached by attn itself {attn_ctx_tensor / cached * 100:.2f}%')
    print(f' -- proj_out: {proj_out} MB not cached')
    print(f' -- attn_bda_out: {attn_bda_out} MB cached by mlp_norm {attn_bda_out / cached * 100:.2f}%')
    print(f' -- mlp_norm_out: {mlp_norm_out} MB not cached')
    print(f' -- shared_AG_out: {shared_AG_out} MB cached by shared expert {shared_AG_out / cached * 100:.2f}%')
    print(f' -- router_probs: {router_probs} MB cached by fused unpermute {router_probs / cached * 100:.2f}%')
    print(f' -- permute_row_id_map: {permute_row_id_map} MB cached by fused (un)permute {permute_row_id_map / cached * 100:.2f}%')
    print(f' -- share_linear_1_out: {share_linear_1_out} MB cached by share_act {share_linear_1_out / cached * 100:.2f}%')
    print(f' -- share_act_out: {share_act_out} MB cached by share_linear_2 {share_act_out / cached * 100:.2f}%')
    print(f' -- share_linear_2_out {share_linear_2_out} MB not cached')
    print(f' -- permute_out {permute_out} MB cached by expert_linear_1 {permute_out / cached * 100:.2f}%')
    print(f' -- expert_linear_1_out: {expert_linear_1_out} MB cached by expert_act {expert_linear_1_out / cached * 100:.2f}%')
    print(f' -- expert_act_out: {expert_act_out} MB cached by expert_linear_2 {expert_act_out / cached * 100:.2f}%')
    print(f' -- expert_linear_2_out: {expert_linear_2_out} MB not cached')
    print(f' -- unpermute_alltoall_out: {unpermute_alltoall_out} MB cached by unpermute {unpermute_alltoall_out / cached * 100:.2f}%')
    print(f' -- unpermute_out: {unpermute_out} MB not cached')
    print(f' -- mlp_bda_out: {mlp_bda_out} MB not cached (sent to next layer)')
    print()
    print(f' -- cached micobatch layer num: {cached_layer_num}')
    print(f' -- total cached for 1 layer and 1 micobatch: {cached} MB')
    print(f' -- cached for all PP microbatches: {cached_t / 1024} GB')
    print(f' -- total usage {rank_dense_mem + rank_moe_mem + cached_t / 1024} GB')
    print()
    print(f' -- full recompute total cached for 1 layer and 1 micobatch: {input_mem} MB')
    print(f' -- full recompute cached for all PP microbatches: {input_mem * cached_layer_num / layers_per_pp / 1024} GB')
    print(f' -- full recompute total usage {rank_dense_mem + rank_moe_mem + input_mem * cached_layer_num / layers_per_pp / 1024} GB')
    print()

    # Savings from individual recompute / offload strategies, applied in
    # sequence so each "total usage" line includes all earlier savings.
    act_func_save = share_act_out + expert_act_out
    act_func_save_t = act_func_save * cached_layer_num
    print(f' --- By act_func recompute, can save {act_func_save} MB for 1 layer and 1 micobatch')
    print(f' --- By act_func recompute, can save {act_func_save_t / 1024} GB for all PP microbatches')
    norm_save = input_norm_out + mlp_norm_out
    norm_save_t = norm_save * cached_layer_num
    print(f' --- By norm recompute, can save {norm_save} MB')
    print(f' --- By norm recompute, can save {norm_save_t / 1024} GB for all PP microbatches')
    up_proj_save = q_apply_rope_out + k_apply_rope_out + v_apply_rope_out
    up_proj_save_t = up_proj_save * cached_layer_num
    print(f' --- By up_proj+rope recompute, can save {up_proj_save} MB')
    print(f' --- By up_proj+rope recompute, can save {up_proj_save_t / 1024} GB for all PP microbatches')
    cached_after_recompute = cached_t - act_func_save_t - norm_save_t - up_proj_save_t
    print(f' --- Cached size after the above recomputations: {cached_after_recompute / 1024} GB')
    print(f' --- total usage {rank_dense_mem + rank_moe_mem + cached_after_recompute / 1024} GB')
    print()
    probs2swiglu_save = unpermute_alltoall_out
    probs2swiglu_save_t = probs2swiglu_save * cached_layer_num
    print(f' --- By probs2swiglu, can save {probs2swiglu_save} MB for 1 layer and 1 micobatch')
    print(f' --- By probs2swiglu, can save {probs2swiglu_save_t / 1024} GB for all PP microbatches')
    cached_after_probs2swiglu = cached_after_recompute - probs2swiglu_save_t
    print(f' --- Cached size after probs2swiglu: {cached_after_probs2swiglu / 1024} GB')
    print(f' --- total usage {rank_dense_mem + rank_moe_mem + cached_after_probs2swiglu / 1024} GB')
    print()
    fc1_offloading_save = permute_out
    fc1_offloading_save_t = fc1_offloading_save * cached_layer_num
    print(f' --- By fc1 offloading, can save {fc1_offloading_save} MB for 1 layer and 1 micobatch')
    print(f' --- By fc1 offloading, can save {fc1_offloading_save_t / 1024} GB for all PP microbatches')
    cached_after_offloading = cached_after_probs2swiglu - fc1_offloading_save_t
    print(f' --- Cached size after the above offloading: {cached_after_offloading / 1024} GB')
    print(f' --- total usage {rank_dense_mem + rank_moe_mem + cached_after_offloading / 1024} GB')
    print()
    shared_expert_save = share_linear_1_out + share_act_out
    shared_expert_save_t = shared_expert_save * cached_layer_num
    print(f' --- By shared expert recompute, can save {shared_expert_save} MB for 1 layer and 1 micobatch')
    print(f' --- By shared expert recompute, can save {shared_expert_save_t / 1024} GB for all PP microbatches')
    cached_after_shared_expert = cached_after_offloading - shared_expert_save_t
    print(f' --- Cached size after the above recomputations: {cached_after_shared_expert / 1024} GB')
    print(f' --- total usage {rank_dense_mem + rank_moe_mem + cached_after_shared_expert / 1024} GB')
    print()
if __name__ == '__main__':
    # The commented-out call is a smaller 230B-scale example configuration.
    # calc('moe_230b_lora', seq_len=4096,
    #      n_layers=60, n_embed=5120, vocab_size=100125,
    #      n_head=128, n_head_kv=128,
    #      ff_factor=0.1125, n_experts=162, n_activated_experts=8,
    #      ffn_hidden=12288, moe_ffn_hidden=1536,
    #      q_lora_rank=1536, k_lora_rank=512, v_lora_rank=512, qk_head_dim=192,
    #      rope_head_dim=64, v_head_dim=128, first_k_dense=1,
    #      shared_expert_num=2, gpus=256, pp=8, vpp=2, ep=8, tp=2, etp=1, layers_per_pp=8)
    # 671B-scale configuration (per the name), estimated on a 1024-GPU
    # PP16/EP64 layout.
    calc('moe_671b_lora', seq_len=4096,
         n_layers=61, n_embed=7168, vocab_size=129280,
         n_head=128, n_head_kv=128,
         ff_factor=0.1125, n_experts=257, n_activated_experts=9,
         ffn_hidden=18432, moe_ffn_hidden=2048,
         q_lora_rank=1536, k_lora_rank=512, v_lora_rank=512, qk_head_dim=192,
         rope_head_dim=64, v_head_dim=128, first_k_dense=3,
         shared_expert_num=1, mtp=1, gpus=1024, pp=16, vpp=1, ep=64, tp=1, etp=1, layers_per_pp=4, fsdp=False, fp8=False)
| python | MIT | 1b93710746711003f029cbaa3608cae7637ec554 | 2026-01-05T07:14:05.433028Z | false |
yanring/Megatron-MoE-ModelZoo | https://github.com/yanring/Megatron-MoE-ModelZoo/blob/1b93710746711003f029cbaa3608cae7637ec554/ckpt_convert_scripts/DeepSeek-V2/deepseek_v2_hf_to_mg.py | ckpt_convert_scripts/DeepSeek-V2/deepseek_v2_hf_to_mg.py | import json
import os
import random
import re
import numpy as np
import torch
import torch.distributed as dist
from safetensors import safe_open
from megatron.training import get_args
from megatron.training.checkpointing import get_checkpoint_name
from megatron.training.initialize import initialize_megatron
from pretrain_gpt import model_provider
# Disable TF32 and all nondeterministic kernel selection so the checkpoint
# conversion is bit-reproducible across runs (warn_only avoids hard failures
# on ops without a deterministic implementation).
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.use_deterministic_algorithms(True, warn_only=True)
def init_distributed_environment(backend="nccl", port="12355"):
    """Initialize the distributed environment for checkpoint conversion.

    Seeds all RNGs for determinism, then starts a single-process
    (world_size=1, rank=0) process group on localhost.

    Args:
        backend (str): Distributed backend ('nccl', 'gloo', or 'mpi'). Default: 'nccl'.
        port (str): Port number for distributed communication. Default: '12355'.
    """
    try:
        # Deterministic seeding across all RNG sources.
        seed = 1234
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)
        random.seed(seed)
        np.random.seed(seed)

        # Rendezvous endpoint for the (single-rank) process group.
        os.environ["MASTER_ADDR"] = "localhost"
        os.environ["MASTER_PORT"] = port

        dist.init_process_group(
            backend=backend,
            init_method="env://",
            world_size=1,
            rank=0,
        )
    except Exception as e:
        print(f"Failed to initialize distributed environment: {str(e)}")
        raise
def add_extra_args(parser):
    """Register the conversion-target parallelism flags on *parser*.

    Adds ``--target-{tensor,pipeline,expert}-model-parallel-size``
    (int, default 1) and returns the same parser for chaining.
    """
    for dimension in ("tensor", "pipeline", "expert"):
        parser.add_argument(
            f"--target-{dimension}-model-parallel-size", type=int, default=1
        )
    return parser
def load_tensor(weight_file, weight_map, args):
    """Load one tensor from the HuggingFace safetensors shards.

    Args:
        weight_file: Weight name (a key of the safetensors index's weight_map).
        weight_map: Mapping from weight name to shard file name, from
            ``model.safetensors.index.json``.
        args: Megatron args; uses ``args.load`` (HF checkpoint dir) and the
            ``args.bf16`` / ``args.fp16`` precision flags.

    Returns:
        The tensor, cast to bfloat16 or float16 according to the precision
        flags, otherwise in its on-disk dtype.
    """
    file_name = weight_map[weight_file]
    ckpt_file_path = os.path.join(args.load, file_name)
    with safe_open(ckpt_file_path, framework="pt", device=0) as f:
        weight = f.get_tensor(weight_file)
    if args.bf16:
        return weight.bfloat16()
    elif args.fp16:
        # BUG FIX: torch.Tensor has no ``float16()`` method — the fp16 cast is
        # ``half()``. The previous code raised AttributeError under --fp16.
        return weight.half()
    else:
        return weight
def convert_ckpt_from_hf_to_megatron(mg_model, hf_index_path, args):
    """Copy HuggingFace DeepSeek-V2 weights into the Megatron model in place.

    Args:
        mg_model: Unsharded Megatron GPT model whose parameters are
            overwritten under ``torch.no_grad()``.
        hf_index_path: Path to ``model.safetensors.index.json``; its
            ``weight_map`` section maps HF weight names to shard files.
        args: Megatron args; uses the precision flags (``bf16``/``fp16``),
            ``q_lora_rank``, ``moe_grouped_gemm`` and ``num_experts``.
    """
    print("Start copying")
    if args.bf16:
        mg_model = mg_model.bfloat16()
    elif args.fp16:
        # BUG FIX: nn.Module has no ``float16()`` method — the fp16 cast is
        # ``half()``. The previous code raised AttributeError under --fp16.
        mg_model = mg_model.half()

    # Load weight map
    with open(hf_index_path, "r", encoding="utf-8") as f:
        data = json.load(f)
    weight_map = data["weight_map"]

    with torch.no_grad():
        mg_model.embedding.word_embeddings.weight.copy_(
            load_tensor("model.embed_tokens.weight", weight_map, args)
        )
        for mg_layer_idx, mg_layer in enumerate(mg_model.decoder.layers):
            hf_layer_idx = mg_layer_idx
            hf_layer = f"model.layers.{hf_layer_idx}"

            # Input layernorm
            mg_layer.input_layernorm.weight.copy_(
                load_tensor(f"{hf_layer}.input_layernorm.weight", weight_map, args)
            )

            # Multi-latent attention. Q is either a LoRA-style down/up pair
            # (q_lora_rank set) or a single projection.
            if args.q_lora_rank is not None:
                mg_layer.self_attention.linear_q_down_proj.weight.copy_(
                    load_tensor(f"{hf_layer}.self_attn.q_a_proj.weight", weight_map, args)
                )
                mg_layer.self_attention.linear_q_up_proj.weight.copy_(
                    load_tensor(f"{hf_layer}.self_attn.q_b_proj.weight", weight_map, args)
                )
                mg_layer.self_attention.linear_q_up_proj.layer_norm_weight.copy_(
                    load_tensor(f"{hf_layer}.self_attn.q_a_layernorm.weight", weight_map, args)
                )
            else:
                mg_layer.self_attention.linear_q_proj.weight.copy_(
                    load_tensor(f"{hf_layer}.self_attn.q_proj.weight", weight_map, args)
                )
            mg_layer.self_attention.linear_kv_down_proj.weight.copy_(
                load_tensor(f"{hf_layer}.self_attn.kv_a_proj_with_mqa.weight", weight_map, args)
            )
            mg_layer.self_attention.linear_kv_up_proj.weight.copy_(
                load_tensor(f"{hf_layer}.self_attn.kv_b_proj.weight", weight_map, args)
            )
            mg_layer.self_attention.linear_kv_up_proj.layer_norm_weight.copy_(
                load_tensor(f"{hf_layer}.self_attn.kv_a_layernorm.weight", weight_map, args)
            )
            mg_layer.self_attention.linear_proj.weight.copy_(
                load_tensor(f"{hf_layer}.self_attn.o_proj.weight", weight_map, args)
            )

            # Dense layer. Megatron fuses HF's gate_proj + up_proj into fc1.
            # NOTE(review): only layer 0 is treated as dense — assumes the HF
            # config has first_k_dense_replace == 1; confirm for other variants.
            if mg_layer_idx == 0:
                mg_layer.mlp.linear_fc1.layer_norm_weight.copy_(
                    load_tensor(f"{hf_layer}.post_attention_layernorm.weight", weight_map, args)
                )
                gate_proj = load_tensor(f"{hf_layer}.mlp.gate_proj.weight", weight_map, args)
                up_proj = load_tensor(f"{hf_layer}.mlp.up_proj.weight", weight_map, args)
                hf_fc1 = torch.cat([gate_proj, up_proj], dim=0)
                mg_layer.mlp.linear_fc1.weight.copy_(hf_fc1)
                mg_layer.mlp.linear_fc2.weight.copy_(
                    load_tensor(f"{hf_layer}.mlp.down_proj.weight", weight_map, args)
                )
            # MoE layer
            else:
                mg_layer.pre_mlp_layernorm.weight.copy_(
                    load_tensor(f"{hf_layer}.post_attention_layernorm.weight", weight_map, args)
                )
                mg_layer.mlp.router.weight.copy_(
                    load_tensor(f"{hf_layer}.mlp.gate.weight", weight_map, args)
                )
                if args.moe_grouped_gemm:
                    # Grouped-GEMM experts store per-expert weights as
                    # attributes weight0..weightN on the fused fc modules.
                    for expert_idx in range(args.num_experts):
                        gate_proj = load_tensor(f"{hf_layer}.mlp.experts.{expert_idx}.gate_proj.weight", weight_map, args)
                        up_proj = load_tensor(f"{hf_layer}.mlp.experts.{expert_idx}.up_proj.weight", weight_map, args)
                        hf_expert_fc1 = torch.cat([gate_proj, up_proj], dim=0)
                        getattr(mg_layer.mlp.experts.linear_fc1, f"weight{expert_idx}").copy_(hf_expert_fc1)
                        getattr(mg_layer.mlp.experts.linear_fc2, f"weight{expert_idx}").copy_(
                            load_tensor(f"{hf_layer}.mlp.experts.{expert_idx}.down_proj.weight", weight_map, args)
                        )
                else:
                    # Sequential experts live under mlp.experts.local_experts.<idx>.
                    for expert_idx in range(args.num_experts):
                        gate_proj = load_tensor(f"{hf_layer}.mlp.experts.{expert_idx}.gate_proj.weight", weight_map, args)
                        up_proj = load_tensor(f"{hf_layer}.mlp.experts.{expert_idx}.up_proj.weight", weight_map, args)
                        hf_expert_fc1 = torch.cat([gate_proj, up_proj], dim=0)
                        expert = getattr(
                            mg_layer.mlp.experts.local_experts, str(expert_idx)
                        )
                        expert.linear_fc1.weight.copy_(hf_expert_fc1)
                        expert.linear_fc2.weight.copy_(
                            load_tensor(f"{hf_layer}.mlp.experts.{expert_idx}.down_proj.weight", weight_map, args)
                        )

                # Shared experts
                shared_gate_proj = load_tensor(f"{hf_layer}.mlp.shared_experts.gate_proj.weight", weight_map, args)
                shared_up_proj = load_tensor(f"{hf_layer}.mlp.shared_experts.up_proj.weight", weight_map, args)
                shared_experts_fc1 = torch.cat([shared_gate_proj, shared_up_proj], dim=0)
                mg_layer.mlp.shared_experts.linear_fc1.weight.copy_(shared_experts_fc1)
                mg_layer.mlp.shared_experts.linear_fc2.weight.copy_(
                    load_tensor(f"{hf_layer}.mlp.shared_experts.down_proj.weight", weight_map, args)
                )

        # Output layer
        mg_model.decoder.final_layernorm.weight.copy_(
            load_tensor("model.norm.weight", weight_map, args)
        )
        mg_model.output_layer.weight.copy_(
            load_tensor("lm_head.weight", weight_map, args)
        )
def save_state_dict(args, model, checkpoint_name):
    """Write a Megatron-format checkpoint file to *checkpoint_name*.

    The saved dict bundles the run args, a fixed checkpoint_version (3.0),
    iteration 0 and the model state; parent directories are created first.
    """
    os.makedirs(os.path.dirname(checkpoint_name), exist_ok=True)
    print(f"Saving model checkpoint to: {checkpoint_name}")
    torch.save(
        {
            "args": args,
            "checkpoint_version": 3.0,
            "iteration": 0,
            "model": model,
        },
        checkpoint_name,
    )
def save_mg_model(mg_model, args):
    """Shard the full Megatron state dict by target parallelism and save it.

    Writes a "release" checkpoint under ``args.save``: copies HF config and
    tokenizer files, writes the iteration tracker, then splits the state dict
    according to the target TP/PP/EP sizes. Only TP=1 layouts are supported;
    the TP>1 splitting logic is commented out below.

    Args:
        mg_model: The populated Megatron model to serialize.
        args: Megatron args; ``target_*_parallel_size`` select the output
            sharding, ``save``/``load`` are destination/source directories.

    Raises:
        ValueError: For unsupported TP/PP/EP combinations.
    """
    print("Start saving")
    # Re-stamp args with the *target* layout so the values saved inside the
    # checkpoint match the shard files being written.
    args.tensor_model_parallel_size = args.target_tensor_model_parallel_size
    args.pipeline_model_parallel_size = args.target_pipeline_model_parallel_size
    args.expert_model_parallel_size = args.target_expert_model_parallel_size
    os.makedirs(args.save, exist_ok=True)
    # NOTE(review): paths are interpolated into a shell command unquoted;
    # acceptable only for trusted local paths.
    os.system("cp -rf " + args.load + "/config*.json " + args.save)
    os.system("cp -rf " + args.load + "/tokenizer* " + args.save)
    # Megatron uses this tracker file to locate the "release" checkpoint.
    tracker_filepath = os.path.join(args.save, "latest_checkpointed_iteration.txt")
    with open(tracker_filepath, "w") as f:
        f.write("release")
    full_model = mg_model.state_dict_for_save_checkpoint()
    # Drop None-valued entries before sharding.
    for key in list(full_model.keys()):
        if full_model[key] is None:  # or "_extra_state" in k:
            full_model.pop(key)
    # Case 1: no parallelism -- save the full state dict as a single file.
    if (
        args.tensor_model_parallel_size == 1
        and args.pipeline_model_parallel_size == 1
        and args.expert_model_parallel_size == 1
    ):
        checkpoint_name = get_checkpoint_name(
            args.save,
            iteration=0,
            release=True,
        )
        save_state_dict(args, full_model, checkpoint_name)
    # Case 2: expert parallelism only, per-expert submodules (sequential MLP).
    elif (
        args.tensor_model_parallel_size == 1
        and args.pipeline_model_parallel_size == 1
        and args.expert_model_parallel_size > 1
        and args.num_experts % args.expert_model_parallel_size == 0
        and not args.moe_grouped_gemm
    ):
        pattern = r"local_experts\.(\d+)\."
        num_local_experts = args.num_experts // args.expert_model_parallel_size
        for ep_rank in range(args.expert_model_parallel_size):
            model_split = {}
            checkpoint_name = get_checkpoint_name(
                args.save,
                iteration=0,
                release=True,
                pipeline_parallel=None,
                tensor_rank=None,
                pipeline_rank=None,
                expert_parallel=True,
                expert_rank=ep_rank,
            )
            print(f"Saving ep_rank {ep_rank} model to {checkpoint_name}")
            # Process model weights for current expert rank
            for key, value in full_model.items():
                if "local_experts" in key:
                    global_expert_id = int(re.findall(pattern, key)[0])
                    # Keep only experts owned by this EP rank, renumbered
                    # into the rank-local index space.
                    if global_expert_id // num_local_experts != ep_rank:
                        continue
                    local_expert_id = global_expert_id % num_local_experts
                    key = key.replace(
                        f"local_experts.{global_expert_id}",
                        f"local_experts.{local_expert_id}",
                    )
                model_split[key] = value
            save_state_dict(args, model_split, checkpoint_name)
    # Case 3: expert parallelism only, grouped-GEMM experts (weights stored
    # as numbered "weightN" attributes instead of per-expert submodules).
    elif (
        args.tensor_model_parallel_size == 1
        and args.pipeline_model_parallel_size == 1
        and args.expert_model_parallel_size > 1
        and args.num_experts % args.expert_model_parallel_size == 0
        and args.moe_grouped_gemm
    ):
        pattern = r"weight(\d+)"
        num_local_experts = args.num_experts // args.expert_model_parallel_size
        for ep_rank in range(args.expert_model_parallel_size):
            model_split = {}
            checkpoint_name = get_checkpoint_name(
                args.save,
                iteration=0,
                release=True,
                pipeline_parallel=None,
                tensor_rank=None,
                pipeline_rank=None,
                expert_parallel=True,
                expert_rank=ep_rank,
            )
            print(f"[GroupGEMM] Saving ep_rank {ep_rank} model to {checkpoint_name}")
            # Process model weights for current expert rank
            for key, value in full_model.items():
                if "experts" in key and "weight" in key and "shared_experts" not in key:
                    match = re.search(pattern, key)
                    global_expert_id = int(match.group(1))
                    if global_expert_id // num_local_experts != ep_rank:
                        continue
                    local_expert_id = global_expert_id % num_local_experts
                    key = key.replace(
                        f"weight{global_expert_id}", f"weight{local_expert_id}"
                    )
                model_split[key] = value
            save_state_dict(args, model_split, checkpoint_name)
    # Case 4: pipeline parallelism (optionally combined with EP).
    elif (
        args.tensor_model_parallel_size == 1
        and args.pipeline_model_parallel_size > 1
        and args.num_experts % args.expert_model_parallel_size == 0
    ):
        # Ensure layers can be evenly divided by pipeline model parallel size
        assert args.num_layers % args.pipeline_model_parallel_size == 0
        layers_per_pipeline = args.num_layers // args.pipeline_model_parallel_size
        pattern = r"weight(\d+)"
        num_local_experts = args.num_experts // args.expert_model_parallel_size
        for pp_rank in range(args.pipeline_model_parallel_size):
            # Get the current range of layers for this pipeline stage
            pp_start = pp_rank * layers_per_pipeline
            pp_end = pp_start + layers_per_pipeline - 1
            for ep_rank in range(args.expert_model_parallel_size):
                model_split = {}
                checkpoint_name = get_checkpoint_name(
                    args.save,
                    iteration=0,
                    release=True,
                    pipeline_parallel=True,
                    tensor_rank=None,
                    pipeline_rank=pp_rank,
                    expert_parallel=True,
                    expert_rank=ep_rank,
                )
                print(f"Saving pp_rank {pp_rank}, ep_rank {ep_rank} model to {checkpoint_name}")
                for key, value in full_model.items():
                    # First pipeline stage
                    if "embedding" in key:
                        if pp_rank == 0:
                            model_split[key] = value
                        continue
                    # Last pipeline stage
                    if "final_layernorm" in key or "output_layer" in key:
                        if pp_rank == args.pipeline_model_parallel_size - 1:
                            model_split[key] = value
                        continue
                    # Skip if the layer doesn't belong current pipeline stage
                    original_layer_id = int(key.split(".")[2])
                    if not pp_start <= original_layer_id <= pp_end:
                        continue
                    # Remap layer index for current pipeline stage
                    local_layer_id = original_layer_id % layers_per_pipeline
                    key = key.replace(
                        f"layers.{original_layer_id}", f"layers.{local_layer_id}"
                    )
                    # Expert weights are additionally filtered/renumbered per
                    # EP rank (grouped-GEMM "weightN" naming).
                    if (
                        "experts" in key
                        and "weight" in key
                        and "shared_experts" not in key
                    ):
                        match = re.search(pattern, key)
                        global_expert_id = int(match.group(1))
                        if global_expert_id // num_local_experts != ep_rank:
                            continue
                        local_expert_id = global_expert_id % num_local_experts
                        key = key.replace(
                            f"weight{global_expert_id}", f"weight{local_expert_id}"
                        )
                    model_split[key] = value
                save_state_dict(args, model_split, checkpoint_name)
    # elif (
    #     args.tensor_model_parallel_size > 1
    #     and args.pipeline_model_parallel_size == 1
    #     and args.num_experts % args.expert_model_parallel_size == 0
    # ):
    #     pattern = r"weight(\d+)"
    #     num_local_experts = args.num_experts // args.expert_model_parallel_size
    #     for tp_rank in range(args.tensor_model_parallel_size):
    #         for ep_rank in range(args.expert_model_parallel_size):
    #             model_split = {}
    #             if args.expert_model_parallel_size > 1:
    #                 checkpoint_name = get_checkpoint_name(
    #                     args.save,
    #                     iteration=0,
    #                     release=True,
    #                     pipeline_parallel=None,
    #                     tensor_rank=tp_rank,
    #                     pipeline_rank=None,
    #                     expert_parallel=True,
    #                     expert_rank=ep_rank,
    #                 )
    #                 print(f"Saving tp_rank {tp_rank}, ep_rank {ep_rank} model to {checkpoint_name}")
    #             elif args.expert_model_parallel_size == 1:
    #                 checkpoint_name = get_checkpoint_name(
    #                     args.save,
    #                     iteration=0,
    #                     release=True,
    #                     pipeline_parallel=None,
    #                     tensor_rank=tp_rank,
    #                     pipeline_rank=None,
    #                     expert_parallel=False,
    #                 )
    #                 print(f"Saving tp_rank {tp_rank} model to {checkpoint_name}")
    #             for key, value in full_model.items():
    #                 if not isinstance(value, torch.Tensor):
    #                     model_split[key] = value
    #                 elif "linear_q_proj" in key or "linear_q_a_proj" in key:
    #                     seg = value.shape[0] // args.tensor_model_parallel_size
    #                     target_value = value[seg * tp_rank : seg * (tp_rank + 1)]
    #                 elif "linear_q_b_proj" in key:
    #                     seg_0 = value.shape[0] // args.tensor_model_parallel_size
    #                     seg_1 = value.shape[1] // args.tensor_model_parallel_size
    #                     target_value = value[
    #                         seg_0 * tp_rank : seg_0 * (tp_rank + 1),
    #                         seg_1 * tp_rank : seg_1 * (tp_rank + 1),
    #                     ]
    #                 elif "q_a_layernorm" in key:
    #                     seg = value.shape[0] // args.tensor_model_parallel_size
    #                     target_value = value[seg * tp_rank : seg * (tp_rank + 1)]
    #                 elif "linear_kv_b_proj" in key:
    #                     seg = value.shape[0] // args.tensor_model_parallel_size
    #                     target_value = value[seg * tp_rank : seg * (tp_rank + 1)]
    #                 elif "linear_proj" in key:
    #                     seg = value.shape[1] // args.tensor_model_parallel_size
    #                     target_value = value[:, seg * tp_rank : seg * (tp_rank + 1)]
    #                 elif "embedding" in key or "output_layer" in key:
    #                     seg = value.shape[0] // args.tensor_model_parallel_size
    #                     target_value = value[seg * tp_rank : seg * (tp_rank + 1)]
    #                 elif "decoder.layers.0.mlp.linear_fc2" in key:
    #                     seg = value.shape[1] // args.tensor_model_parallel_size
    #                     target_value = value[:, seg * tp_rank : seg * (tp_rank + 1)]
    #                 elif "decoder.layers.0.mlp.linear_fc1" in key:
    #                     viewed = value.view(-1, args.ffn_hidden_size, args.hidden_size)
    #                     seg = args.ffn_hidden_size // args.tensor_model_parallel_size
    #                     target_value = viewed[
    #                         :, seg * tp_rank : seg * (tp_rank + 1), :
    #                     ].reshape(-1, args.hidden_size)
    #                 elif "local_experts" in key:
    #                     expert_rank = int(re.findall(pattern, key)[0])
    #                     if expert_rank // num_local_experts != ep_rank:
    #                         continue
    #                     expert_local_rank = expert_rank % num_local_experts
    #                     if "linear_fc1" in key and "norm" not in key:
    #                         viewed = value.view(
    #                             -1, args.moe_ffn_hidden_size, args.hidden_size
    #                         )
    #                         seg = (
    #                             args.moe_ffn_hidden_size
    #                             // args.tensor_model_parallel_size
    #                         )
    #                         target_value = viewed[
    #                             :, seg * tp_rank : seg * (tp_rank + 1), :
    #                         ].reshape(-1, args.hidden_size)
    #                     elif "linear_fc2" in key:
    #                         seg = value.shape[1] // args.tensor_model_parallel_size
    #                         target_value = value[:, seg * tp_rank : seg * (tp_rank + 1)]
    #                     key = key.replace(
    #                         f"local_experts.{expert_rank}",
    #                         f"local_experts.{expert_local_rank}",
    #                     )
    #                 elif "shared_expert" in key and "gate" not in key:
    #                     if "linear_fc1" in key:
    #                         viewed = value.view(
    #                             -1,
    #                             args.moe_ffn_hidden_size * args.num_shared_experts,
    #                             args.hidden_size,
    #                         )
    #                         seg = (
    #                             args.moe_ffn_hidden_size
    #                             * args.num_shared_experts
    #                             // args.tensor_model_parallel_size
    #                         )
    #                         target_value = viewed[
    #                             :, seg * tp_rank : seg * (tp_rank + 1), :
    #                         ].reshape(-1, args.hidden_size)
    #                     elif "linear_fc2" in key:
    #                         seg = value.shape[1] // args.tensor_model_parallel_size
    #                         target_value = value[:, seg * tp_rank : seg * (tp_rank + 1)]
    #                 else:
    #                     target_value = value
    #                 model_split[key] = target_value
    #             save_state_dict(args, model_split, checkpoint_name)
    else:
        raise ValueError(
            f"Unsupported model parallel configuration: "
            f"TP={args.tensor_model_parallel_size}, "
            f"PP={args.pipeline_model_parallel_size}, "
            f"EP={args.expert_model_parallel_size}. "
            f"Currently only supports TP=1 with PP>=1 and EP>=1 conversion."
        )
    print(f"Megatron model is saved to {args.save}")
def main():
    """Drive the HF -> Megatron conversion: build the model, copy weights, save."""
    initialize_megatron(extra_args_provider=add_extra_args)
    args = get_args()
    model = model_provider()
    index_path = os.path.join(args.load, "model.safetensors.index.json")
    convert_ckpt_from_hf_to_megatron(model, index_path, args)
    save_mg_model(model, args)
if __name__ == "__main__":
    # Stand up a single-rank distributed environment before Megatron init.
    init_distributed_environment()
    main()
| python | MIT | 1b93710746711003f029cbaa3608cae7637ec554 | 2026-01-05T07:14:05.433028Z | false |
yanring/Megatron-MoE-ModelZoo | https://github.com/yanring/Megatron-MoE-ModelZoo/blob/1b93710746711003f029cbaa3608cae7637ec554/ckpt_convert_scripts/DeepSeek-V3/deepseek_v3_hf_to_mg.py | ckpt_convert_scripts/DeepSeek-V3/deepseek_v3_hf_to_mg.py | import json
import os
import random
import re
import numpy as np
import torch
import torch.distributed as dist
from safetensors import safe_open
from megatron.training import get_args
from megatron.training.checkpointing import get_checkpoint_name
from megatron.training.initialize import initialize_megatron
from pretrain_gpt import model_provider
# Disable TF32 and nondeterministic kernels so weight conversion is
# bit-reproducible across runs.
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.use_deterministic_algorithms(True, warn_only=True)
def init_distributed_environment(backend="nccl", port="12355"):
    """Set up a single-rank torch.distributed environment for conversion.

    Args:
        backend (str): Distributed backend ('nccl', 'gloo', or 'mpi'). Default: 'nccl'.
        port (str): Port number for distributed communication. Default: '12355'.
    """
    try:
        # Seed every RNG family so the conversion run is reproducible.
        seed = 1234
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)
        random.seed(seed)
        np.random.seed(seed)
        # Rendezvous over localhost; this process is the only rank.
        os.environ["MASTER_ADDR"] = "localhost"
        os.environ["MASTER_PORT"] = port
        dist.init_process_group(
            backend=backend, init_method="env://", world_size=1, rank=0
        )
    except Exception as e:
        print(f"Failed to initialize distributed environment: {str(e)}")
        raise
def add_extra_args(parser):
    """Register the converter's target-parallelism CLI options on *parser*."""
    for flag, default in (
        ("--target-tensor-model-parallel-size", 1),
        ("--target-pipeline-model-parallel-size", 1),
        ("--target-decoder-first-pipeline-num-layers", 4),
        ("--target-decoder-last-pipeline-num-layers", 1),
        ("--target-expert-model-parallel-size", 1),
    ):
        parser.add_argument(flag, type=int, default=default)
    return parser
def load_tensor(weight_file, weight_map, args):
    """Load one tensor from the sharded HF safetensors checkpoint.

    Args:
        weight_file: Fully-qualified HF weight name (key into *weight_map*).
        weight_map: Mapping from weight name to the shard file holding it.
        args: Megatron args; ``load`` is the checkpoint directory and
            ``bf16``/``fp16`` select the dtype the tensor is cast to.

    Returns:
        The tensor (loaded onto device 0), cast to bf16/fp16 when requested.
    """
    file_name = weight_map[weight_file]
    ckpt_file_path = os.path.join(args.load, file_name)
    with safe_open(ckpt_file_path, framework="pt", device=0) as f:
        weight = f.get_tensor(weight_file)
    if args.bf16:
        return weight.bfloat16()
    elif args.fp16:
        # Bug fix: torch.Tensor has no .float16() method; .half() is the
        # fp16 cast (the old code raised AttributeError on the fp16 path).
        return weight.half()
    else:
        return weight
def convert_transformer_layer_from_hf(transformer_layer, hf_layer_idx, weight_map, args):
    """Copy one HF decoder layer's weights into a Megatron transformer layer.

    Handles multi-latent attention (with or without the low-rank q
    projection), dense MLP layers, and MoE layers (grouped-GEMM or
    per-expert submodules) including shared experts. The caller is expected
    to wrap this in ``torch.no_grad()``.

    Args:
        transformer_layer: Target Megatron layer (modified in place).
        hf_layer_idx: Index of the source layer in the HF checkpoint.
        weight_map: HF weight-name -> shard-file mapping.
        args: Megatron args (q_lora_rank, num_experts, moe_grouped_gemm, ...).
    """
    hf_layer = f"model.layers.{hf_layer_idx}"
    # Input layernorm
    transformer_layer.input_layernorm.weight.copy_(
        load_tensor(f"{hf_layer}.input_layernorm.weight", weight_map, args)
    )
    # Multi-latent attention
    if args.q_lora_rank is not None:
        # Low-rank q path: down-projection, up-projection, and the layernorm
        # fused onto the up-projection module.
        transformer_layer.self_attention.linear_q_down_proj.weight.copy_(
            load_tensor(f"{hf_layer}.self_attn.q_a_proj.weight", weight_map, args)
        )
        transformer_layer.self_attention.linear_q_up_proj.weight.copy_(
            load_tensor(f"{hf_layer}.self_attn.q_b_proj.weight", weight_map, args)
        )
        transformer_layer.self_attention.linear_q_up_proj.layer_norm_weight.copy_(
            load_tensor(f"{hf_layer}.self_attn.q_a_layernorm.weight", weight_map, args)
        )
    else:
        transformer_layer.self_attention.linear_q_proj.weight.copy_(
            load_tensor(f"{hf_layer}.self_attn.q_proj.weight", weight_map, args)
        )
    transformer_layer.self_attention.linear_kv_down_proj.weight.copy_(
        load_tensor(f"{hf_layer}.self_attn.kv_a_proj_with_mqa.weight", weight_map, args)
    )
    transformer_layer.self_attention.linear_kv_up_proj.weight.copy_(
        load_tensor(f"{hf_layer}.self_attn.kv_b_proj.weight", weight_map, args)
    )
    transformer_layer.self_attention.linear_kv_up_proj.layer_norm_weight.copy_(
        load_tensor(f"{hf_layer}.self_attn.kv_a_layernorm.weight", weight_map, args)
    )
    transformer_layer.self_attention.linear_proj.weight.copy_(
        load_tensor(f"{hf_layer}.self_attn.o_proj.weight", weight_map, args)
    )
    # Dense layer
    # NOTE(review): assumes the first three HF layers are dense -- confirm
    # against the model config's first_k_dense_replace setting.
    if hf_layer_idx <= 2:
        transformer_layer.mlp.linear_fc1.layer_norm_weight.copy_(
            load_tensor(f"{hf_layer}.post_attention_layernorm.weight", weight_map, args)
        )
        # Megatron fuses gate and up projections into a single fc1 matrix.
        gate_proj = load_tensor(f"{hf_layer}.mlp.gate_proj.weight", weight_map, args)
        up_proj = load_tensor(f"{hf_layer}.mlp.up_proj.weight", weight_map, args)
        hf_fc1 = torch.cat([gate_proj, up_proj], dim=0)
        transformer_layer.mlp.linear_fc1.weight.copy_(hf_fc1)
        transformer_layer.mlp.linear_fc2.weight.copy_(
            load_tensor(f"{hf_layer}.mlp.down_proj.weight", weight_map, args)
        )
    # MoE layer
    else:
        transformer_layer.pre_mlp_layernorm.weight.copy_(
            load_tensor(f"{hf_layer}.post_attention_layernorm.weight", weight_map, args)
        )
        transformer_layer.mlp.router.weight.copy_(
            load_tensor(f"{hf_layer}.mlp.gate.weight", weight_map, args)
        )
        transformer_layer.mlp.router.expert_bias.copy_(
            load_tensor(f"{hf_layer}.mlp.gate.e_score_correction_bias", weight_map, args)
        )
        if args.moe_grouped_gemm:
            # Grouped GEMM stores each expert as a numbered "weightN" attribute.
            for expert_idx in range(args.num_experts):
                gate_proj = load_tensor(f"{hf_layer}.mlp.experts.{expert_idx}.gate_proj.weight", weight_map, args)
                up_proj = load_tensor(f"{hf_layer}.mlp.experts.{expert_idx}.up_proj.weight", weight_map, args)
                hf_expert_fc1 = torch.cat([gate_proj, up_proj], dim=0)
                getattr(transformer_layer.mlp.experts.linear_fc1, f"weight{expert_idx}").copy_(hf_expert_fc1)
                getattr(transformer_layer.mlp.experts.linear_fc2, f"weight{expert_idx}").copy_(
                    load_tensor(f"{hf_layer}.mlp.experts.{expert_idx}.down_proj.weight", weight_map, args)
                )
        else:
            # Sequential MLP stores each expert as its own submodule.
            for expert_idx in range(args.num_experts):
                gate_proj = load_tensor(f"{hf_layer}.mlp.experts.{expert_idx}.gate_proj.weight", weight_map, args)
                up_proj = load_tensor(f"{hf_layer}.mlp.experts.{expert_idx}.up_proj.weight", weight_map, args)
                hf_expert_fc1 = torch.cat([gate_proj, up_proj], dim=0)
                expert = getattr(
                    transformer_layer.mlp.experts.local_experts, str(expert_idx)
                )
                expert.linear_fc1.weight.copy_(hf_expert_fc1)
                expert.linear_fc2.weight.copy_(
                    load_tensor(f"{hf_layer}.mlp.experts.{expert_idx}.down_proj.weight", weight_map, args)
                )
        # Shared experts
        shared_gate_proj = load_tensor(f"{hf_layer}.mlp.shared_experts.gate_proj.weight", weight_map, args)
        shared_up_proj = load_tensor(f"{hf_layer}.mlp.shared_experts.up_proj.weight", weight_map, args)
        shared_experts_fc1 = torch.cat([shared_gate_proj, shared_up_proj], dim=0)
        transformer_layer.mlp.shared_experts.linear_fc1.weight.copy_(shared_experts_fc1)
        transformer_layer.mlp.shared_experts.linear_fc2.weight.copy_(
            load_tensor(f"{hf_layer}.mlp.shared_experts.down_proj.weight", weight_map, args)
        )
def convert_ckpt_from_hf_to_mg(mg_model, hf_index_path, args):
    """Copy HF DeepSeek-V3 weights into *mg_model* in place.

    Casts the model to the requested dtype, then copies embeddings, every
    decoder layer, the final norm / output head, and (when present) the
    MTP layers.

    Args:
        mg_model: The freshly-built Megatron model to populate.
        hf_index_path: Path to ``model.safetensors.index.json``.
        args: Megatron args (dtype flags, num_layers, ...).
    """
    print("Start copying")
    if args.bf16:
        mg_model = mg_model.bfloat16()
    elif args.fp16:
        # Bug fix: nn.Module has no .float16() method; .half() performs the
        # fp16 cast (the old code raised AttributeError on the fp16 path).
        mg_model = mg_model.half()
    # Load weight map
    with open(hf_index_path, "r", encoding="utf-8") as f:
        data = json.load(f)
    weight_map = data["weight_map"]
    with torch.no_grad():
        mg_model.embedding.word_embeddings.weight.copy_(
            load_tensor("model.embed_tokens.weight", weight_map, args)
        )
        # Main model: HF and Megatron layer indices coincide.
        for mg_layer_idx, mg_layer in enumerate(mg_model.decoder.layers):
            hf_layer_idx = mg_layer_idx
            convert_transformer_layer_from_hf(mg_layer, hf_layer_idx, weight_map, args)
        # Output layer
        mg_model.decoder.final_layernorm.weight.copy_(
            load_tensor("model.norm.weight", weight_map, args)
        )
        mg_model.output_layer.weight.copy_(
            load_tensor("lm_head.weight", weight_map, args)
        )
        # MTP layers are stored after the main stack in the HF checkpoint.
        if hasattr(mg_model, "mtp"):
            for mtp_layer_idx, mtp_layer in enumerate(mg_model.mtp.layers):
                print(f"Converting mtp layer {mtp_layer_idx}")
                hf_layer_idx = mtp_layer_idx + args.num_layers
                hf_layer = f"model.layers.{hf_layer_idx}"
                convert_transformer_layer_from_hf(mtp_layer.transformer_layer, hf_layer_idx, weight_map, args)
                mtp_layer.enorm.weight.copy_(
                    load_tensor(f"{hf_layer}.enorm.weight", weight_map, args)
                )
                mtp_layer.hnorm.weight.copy_(
                    load_tensor(f"{hf_layer}.hnorm.weight", weight_map, args)
                )
                mtp_layer.eh_proj.weight.copy_(
                    load_tensor(f"{hf_layer}.eh_proj.weight", weight_map, args)
                )
                mtp_layer.final_layernorm.weight.copy_(
                    load_tensor(f"{hf_layer}.shared_head.norm.weight", weight_map, args)
                )
def save_state_dict(args, model, checkpoint_name):
    """Write a release-style Megatron checkpoint file.

    Wraps *model* with the standard metadata keys ("args",
    "checkpoint_version", "iteration") and torch.saves it to
    *checkpoint_name*, creating parent directories as needed.
    """
    target_dir = os.path.dirname(checkpoint_name)
    os.makedirs(target_dir, exist_ok=True)
    print(f"Saving model checkpoint to: {checkpoint_name}")
    torch.save(
        {
            "args": args,
            "checkpoint_version": 3.0,
            "iteration": 0,
            "model": model,
        },
        checkpoint_name,
    )
def save_mg_model(mg_model, args):
    """Shard the full Megatron state dict by target parallelism and save it.

    Writes a "release" checkpoint under ``args.save``: copies HF config and
    tokenizer files, writes the iteration tracker, then splits the state
    dict by the target TP/PP/EP sizes. Supports an uneven (shorter) last
    pipeline stage and MTP weights; TP>1 splitting is commented out below.

    Args:
        mg_model: The populated Megatron model to serialize.
        args: Megatron args; ``target_*`` fields select the output sharding,
            ``save``/``load`` are destination/source directories.

    Raises:
        ValueError: For unsupported TP/PP/EP combinations.
    """
    print("Start saving")
    # Re-stamp args with the *target* layout so the values saved inside the
    # checkpoint match the shard files being written.
    args.tensor_model_parallel_size = args.target_tensor_model_parallel_size
    args.pipeline_model_parallel_size = args.target_pipeline_model_parallel_size
    args.decoder_first_pipeline_num_layers = args.target_decoder_first_pipeline_num_layers
    args.decoder_last_pipeline_num_layers = args.target_decoder_last_pipeline_num_layers
    args.expert_model_parallel_size = args.target_expert_model_parallel_size
    os.makedirs(args.save, exist_ok=True)
    # NOTE(review): paths are interpolated into a shell command unquoted;
    # acceptable only for trusted local paths.
    os.system("cp -rf " + args.load + "/config*.json " + args.save)
    os.system("cp -rf " + args.load + "/tokenizer* " + args.save)
    # Megatron uses this tracker file to locate the "release" checkpoint.
    tracker_filepath = os.path.join(args.save, "latest_checkpointed_iteration.txt")
    with open(tracker_filepath, "w") as f:
        f.write("release")
    full_model = mg_model.state_dict_for_save_checkpoint()
    # Drop None-valued entries before sharding.
    for key in list(full_model.keys()):
        if full_model[key] is None:  # or "_extra_state" in k:
            full_model.pop(key)
    # Case 1: no parallelism -- save the full state dict as a single file.
    if (
        args.tensor_model_parallel_size == 1
        and args.pipeline_model_parallel_size == 1
        and args.expert_model_parallel_size == 1
    ):
        checkpoint_name = get_checkpoint_name(
            args.save,
            iteration=0,
            release=True,
        )
        save_state_dict(args, full_model, checkpoint_name)
    # Case 2: expert parallelism only, per-expert submodules (sequential MLP).
    elif (
        args.tensor_model_parallel_size == 1
        and args.pipeline_model_parallel_size == 1
        and args.expert_model_parallel_size > 1
        and args.num_experts % args.expert_model_parallel_size == 0
        and not args.moe_grouped_gemm
    ):
        pattern = r"local_experts\.(\d+)\."
        num_local_experts = args.num_experts // args.expert_model_parallel_size
        for ep_rank in range(args.expert_model_parallel_size):
            model_split = {}
            checkpoint_name = get_checkpoint_name(
                args.save,
                iteration=0,
                release=True,
                pipeline_parallel=None,
                tensor_rank=None,
                pipeline_rank=None,
                expert_parallel=True,
                expert_rank=ep_rank,
            )
            print(f"Saving ep_rank {ep_rank} model to {checkpoint_name}")
            # Process model weights for current expert rank
            for key, value in full_model.items():
                if "local_experts" in key:
                    global_expert_id = int(re.findall(pattern, key)[0])
                    # Keep only experts owned by this EP rank, renumbered
                    # into the rank-local index space.
                    if global_expert_id // num_local_experts != ep_rank:
                        continue
                    local_expert_id = global_expert_id % num_local_experts
                    key = key.replace(
                        f"local_experts.{global_expert_id}",
                        f"local_experts.{local_expert_id}",
                    )
                model_split[key] = value
            save_state_dict(args, model_split, checkpoint_name)
    # Case 3: expert parallelism only, grouped-GEMM experts (weights stored
    # as numbered "weightN" attributes instead of per-expert submodules).
    elif (
        args.tensor_model_parallel_size == 1
        and args.pipeline_model_parallel_size == 1
        and args.expert_model_parallel_size > 1
        and args.num_experts % args.expert_model_parallel_size == 0
        and args.moe_grouped_gemm
    ):
        pattern = r"weight(\d+)"
        num_local_experts = args.num_experts // args.expert_model_parallel_size
        for ep_rank in range(args.expert_model_parallel_size):
            model_split = {}
            checkpoint_name = get_checkpoint_name(
                args.save,
                iteration=0,
                release=True,
                pipeline_parallel=None,
                tensor_rank=None,
                pipeline_rank=None,
                expert_parallel=True,
                expert_rank=ep_rank,
            )
            print(f"[GroupGEMM] Saving ep_rank {ep_rank} model to {checkpoint_name}")
            # Process model weights for current expert rank
            for key, value in full_model.items():
                if "experts" in key and "weight" in key and "shared_experts" not in key:
                    match = re.search(pattern, key)
                    global_expert_id = int(match.group(1))
                    if global_expert_id // num_local_experts != ep_rank:
                        continue
                    local_expert_id = global_expert_id % num_local_experts
                    key = key.replace(
                        f"weight{global_expert_id}", f"weight{local_expert_id}"
                    )
                model_split[key] = value
            save_state_dict(args, model_split, checkpoint_name)
    # Case 4: pipeline parallelism (optionally with EP); the last stage may
    # hold a different (smaller) number of layers.
    elif (
        args.tensor_model_parallel_size == 1
        and args.pipeline_model_parallel_size > 1
        and args.num_experts % args.expert_model_parallel_size == 0
    ):
        # Ensure layers can be evenly divided by pipeline model parallel size except for last pipeline stage
        assert (
            (args.num_layers - args.decoder_last_pipeline_num_layers)
            % (args.pipeline_model_parallel_size - 1) == 0
        )
        layers_per_pipeline = (
            (args.num_layers - args.decoder_last_pipeline_num_layers)
            // (args.pipeline_model_parallel_size - 1)
        )
        # NOTE(review): args.decoder_first_pipeline_num_layers is set above
        # but the split below assumes every non-last stage holds
        # layers_per_pipeline layers -- confirm the first stage is not
        # meant to differ.
        pattern = r"weight(\d+)"
        num_local_experts = args.num_experts // args.expert_model_parallel_size
        for pp_rank in range(args.pipeline_model_parallel_size):
            # Get the current range of layers for this pipeline stage
            if pp_rank != args.pipeline_model_parallel_size - 1:
                pp_start = pp_rank * layers_per_pipeline
                pp_end = pp_start + layers_per_pipeline - 1
            else:
                # Last stage takes whatever layers remain.
                pp_start = pp_rank * layers_per_pipeline
                pp_end = args.num_layers - 1
            for ep_rank in range(args.expert_model_parallel_size):
                model_split = {}
                checkpoint_name = get_checkpoint_name(
                    args.save,
                    iteration=0,
                    release=True,
                    pipeline_parallel=True,
                    tensor_rank=None,
                    pipeline_rank=pp_rank,
                    expert_parallel=True,
                    expert_rank=ep_rank,
                )
                print(f"Saving pp_rank {pp_rank}, ep_rank {ep_rank} model to {checkpoint_name}")
                for key, value in full_model.items():
                    # First and last pipeline stage
                    # (last stage also keeps embeddings for the MTP head).
                    if "embedding" in key:
                        if pp_rank == 0 or pp_rank == args.pipeline_model_parallel_size - 1:
                            model_split[key] = value
                        continue
                    # Last pipeline stage
                    if "final_layernorm" in key or "output_layer" in key:
                        if pp_rank == args.pipeline_model_parallel_size - 1:
                            model_split[key] = value
                        continue
                    # MTP weights live on the last stage; expert weights in
                    # them are still filtered/renumbered per EP rank.
                    if "mtp" in key:
                        if pp_rank == args.pipeline_model_parallel_size - 1:
                            if (
                                "experts" in key
                                and "weight" in key
                                and "shared_experts" not in key
                            ):
                                match = re.search(pattern, key)
                                global_expert_id = int(match.group(1))
                                if global_expert_id // num_local_experts != ep_rank:
                                    continue
                                local_expert_id = global_expert_id % num_local_experts
                                key = key.replace(
                                    f"weight{global_expert_id}", f"weight{local_expert_id}"
                                )
                            model_split[key] = value
                        continue
                    # Skip if the layer doesn't belong current pipeline stage
                    original_layer_id = int(key.split(".")[2])
                    if not pp_start <= original_layer_id <= pp_end:
                        continue
                    # Remap layer index for current pipeline stage
                    # NOTE(review): modulo is only correct while the last
                    # stage holds <= layers_per_pipeline layers;
                    # original_layer_id - pp_start would be robust -- confirm.
                    local_layer_id = original_layer_id % layers_per_pipeline
                    key = key.replace(
                        f"layers.{original_layer_id}", f"layers.{local_layer_id}"
                    )
                    if (
                        "experts" in key
                        and "weight" in key
                        and "shared_experts" not in key
                    ):
                        match = re.search(pattern, key)
                        global_expert_id = int(match.group(1))
                        if global_expert_id // num_local_experts != ep_rank:
                            continue
                        local_expert_id = global_expert_id % num_local_experts
                        key = key.replace(
                            f"weight{global_expert_id}", f"weight{local_expert_id}"
                        )
                    model_split[key] = value
                save_state_dict(args, model_split, checkpoint_name)
    # elif (
    #     args.tensor_model_parallel_size > 1
    #     and args.pipeline_model_parallel_size == 1
    #     and args.num_experts % args.expert_model_parallel_size == 0
    # ):
    #     pattern = r"weight(\d+)"
    #     num_local_experts = args.num_experts // args.expert_model_parallel_size
    #     for tp_rank in range(args.tensor_model_parallel_size):
    #         for ep_rank in range(args.expert_model_parallel_size):
    #             model_split = {}
    #             if args.expert_model_parallel_size > 1:
    #                 checkpoint_name = get_checkpoint_name(
    #                     args.save,
    #                     iteration=0,
    #                     release=True,
    #                     pipeline_parallel=None,
    #                     tensor_rank=tp_rank,
    #                     pipeline_rank=None,
    #                     expert_parallel=True,
    #                     expert_rank=ep_rank,
    #                 )
    #                 print(f"Saving tp_rank {tp_rank}, ep_rank {ep_rank} model to {checkpoint_name}")
    #             elif args.expert_model_parallel_size == 1:
    #                 checkpoint_name = get_checkpoint_name(
    #                     args.save,
    #                     iteration=0,
    #                     release=True,
    #                     pipeline_parallel=None,
    #                     tensor_rank=tp_rank,
    #                     pipeline_rank=None,
    #                     expert_parallel=False,
    #                 )
    #                 print(f"Saving tp_rank {tp_rank} model to {checkpoint_name}")
    #             for key, value in full_model.items():
    #                 if not isinstance(value, torch.Tensor):
    #                     model_split[key] = value
    #                 elif "linear_q_proj" in key or "linear_q_a_proj" in key:
    #                     seg = value.shape[0] // args.tensor_model_parallel_size
    #                     target_value = value[seg * tp_rank : seg * (tp_rank + 1)]
    #                 elif "linear_q_b_proj" in key:
    #                     seg_0 = value.shape[0] // args.tensor_model_parallel_size
    #                     seg_1 = value.shape[1] // args.tensor_model_parallel_size
    #                     target_value = value[
    #                         seg_0 * tp_rank : seg_0 * (tp_rank + 1),
    #                         seg_1 * tp_rank : seg_1 * (tp_rank + 1),
    #                     ]
    #                 elif "q_a_layernorm" in key:
    #                     seg = value.shape[0] // args.tensor_model_parallel_size
    #                     target_value = value[seg * tp_rank : seg * (tp_rank + 1)]
    #                 elif "linear_kv_b_proj" in key:
    #                     seg = value.shape[0] // args.tensor_model_parallel_size
    #                     target_value = value[seg * tp_rank : seg * (tp_rank + 1)]
    #                 elif "linear_proj" in key:
    #                     seg = value.shape[1] // args.tensor_model_parallel_size
    #                     target_value = value[:, seg * tp_rank : seg * (tp_rank + 1)]
    #                 elif "embedding" in key or "output_layer" in key:
    #                     seg = value.shape[0] // args.tensor_model_parallel_size
    #                     target_value = value[seg * tp_rank : seg * (tp_rank + 1)]
    #                 elif "decoder.layers.0.mlp.linear_fc2" in key:
    #                     seg = value.shape[1] // args.tensor_model_parallel_size
    #                     target_value = value[:, seg * tp_rank : seg * (tp_rank + 1)]
    #                 elif "decoder.layers.0.mlp.linear_fc1" in key:
    #                     viewed = value.view(-1, args.ffn_hidden_size, args.hidden_size)
    #                     seg = args.ffn_hidden_size // args.tensor_model_parallel_size
    #                     target_value = viewed[
    #                         :, seg * tp_rank : seg * (tp_rank + 1), :
    #                     ].reshape(-1, args.hidden_size)
    #                 elif "local_experts" in key:
    #                     expert_rank = int(re.findall(pattern, key)[0])
    #                     if expert_rank // num_local_experts != ep_rank:
    #                         continue
    #                     expert_local_rank = expert_rank % num_local_experts
    #                     if "linear_fc1" in key and "norm" not in key:
    #                         viewed = value.view(
    #                             -1, args.moe_ffn_hidden_size, args.hidden_size
    #                         )
    #                         seg = (
    #                             args.moe_ffn_hidden_size
    #                             // args.tensor_model_parallel_size
    #                         )
    #                         target_value = viewed[
    #                             :, seg * tp_rank : seg * (tp_rank + 1), :
    #                         ].reshape(-1, args.hidden_size)
    #                     elif "linear_fc2" in key:
    #                         seg = value.shape[1] // args.tensor_model_parallel_size
    #                         target_value = value[:, seg * tp_rank : seg * (tp_rank + 1)]
    #                     key = key.replace(
    #                         f"local_experts.{expert_rank}",
    #                         f"local_experts.{expert_local_rank}",
    #                     )
    #                 elif "shared_expert" in key and "gate" not in key:
    #                     if "linear_fc1" in key:
    #                         viewed = value.view(
    #                             -1,
    #                             args.moe_ffn_hidden_size * args.num_shared_experts,
    #                             args.hidden_size,
    #                         )
    #                         seg = (
    #                             args.moe_ffn_hidden_size
    #                             * args.num_shared_experts
    #                             // args.tensor_model_parallel_size
    #                         )
    #                         target_value = viewed[
    #                             :, seg * tp_rank : seg * (tp_rank + 1), :
    #                         ].reshape(-1, args.hidden_size)
    #                     elif "linear_fc2" in key:
    #                         seg = value.shape[1] // args.tensor_model_parallel_size
    #                         target_value = value[:, seg * tp_rank : seg * (tp_rank + 1)]
    #                 else:
    #                     target_value = value
    #                 model_split[key] = target_value
    #             save_state_dict(args, model_split, checkpoint_name)
    else:
        raise ValueError(
            f"Unsupported model parallel configuration: "
            f"TP={args.tensor_model_parallel_size}, "
            f"PP={args.pipeline_model_parallel_size}, "
            f"EP={args.expert_model_parallel_size}. "
            f"Currently only supports TP=1 with PP>=1 and EP>=1 conversion."
        )
    print(f"Megatron model is saved to {args.save}")
def main():
    """Entry point: build the Megatron model, copy HF weights, write shards."""
    initialize_megatron(extra_args_provider=add_extra_args)
    args = get_args()
    model = model_provider()
    index_path = os.path.join(args.load, "model.safetensors.index.json")
    convert_ckpt_from_hf_to_mg(model, index_path, args)
    save_mg_model(model, args)
if __name__ == "__main__":
    # Stand up a single-rank distributed env before Megatron initialization.
    init_distributed_environment()
    main()
| python | MIT | 1b93710746711003f029cbaa3608cae7637ec554 | 2026-01-05T07:14:05.433028Z | false |
NVlabs/TokenBench | https://github.com/NVlabs/TokenBench/blob/dcbb7d587abe2804da4d23ac46f9caf17f4b641c/token_bench/metrics_cli.py | token_bench/metrics_cli.py | # SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code to compute different metrics for tokenizer evaluation.
Assumes the reconstructed and ground truth folders contain the same number
of videos with the same filenames. Computes PSNR, SSIM, LPIPS, and FVD.
Example for MP4 videos:
python3 -m token_bench.metrics_cli \
--mode=all \
--ext=mp4 \
--gtpath <folder to ground-truth videos> \
--targetpath <folder to reconstruction videos>
For images, set the ext to "png" or "jpg".
"""
import argparse
import os
from typing import Callable
import json
import lpips
import numpy as np
import torch
from skimage.metrics import structural_similarity as ssim
from tqdm import tqdm
from glob import glob
from mediapy import read_video
from token_bench.fvd import FVD
# Smallest positive float32 increment; guards against division by zero in PSNR.
_FLOAT32_EPS = np.finfo(np.float32).eps
# 255.0 -- the peak value for 8-bit imagery, used as PSNR/SSIM data range.
_UINT8_MAX_F = float(np.iinfo(np.uint8).max)
# Command-line interface.
# NOTE(review): arguments are parsed at import time (module level), so merely
# importing this module requires a valid --gtpath on sys.argv.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--gtpath",
    type=str,
    required=True,
    help="path/to/eval/videos/<dataset-name>/",
)
parser.add_argument(
    "--targetpath",
    type=str,
    default=None,
    help="path/to/eval/videos/<dataset-name>/<target-folder>",
)
# Which metric(s) to compute; "all" runs every metric.
parser.add_argument("--mode", type=str, choices=["psnr", "lpips", "fvd", "all"])
parser.add_argument("--device", type=str, default="cuda")
# File extension: "mp4" for videos, "png"/"jpg" for images.
parser.add_argument("--ext", type=str, default="mp4")
args = parser.parse_args()
def PSNR(input0: np.ndarray, input1: np.ndarray) -> float:
    """Compute PSNR between two videos or two images.

    Args:
        input0: The first video or image, of shape [..., H, W, C], of [0..255].
        input1: The second video or image, of shape [..., H, W, C], of [0..255].

    Returns:
        The PSNR value in dB (peak value 255).
    """
    assert input0.shape == input1.shape, "inputs should have the same shape"
    # Bug fix: cast to float before subtracting. The documented inputs are
    # uint8 [0..255]; unsigned subtraction wraps around, which silently
    # corrupts the MSE and makes the metric depend on argument order.
    diff = input0.astype(np.float64) - input1.astype(np.float64)
    mse = (diff**2).mean()
    psnr = 20 * np.log10(_UINT8_MAX_F / (np.sqrt(mse) + _FLOAT32_EPS))
    return psnr.item()
def SSIM(input0: np.ndarray, input1: np.ndarray) -> float:
    """Compute SSIM between two videos or two images.

    Args:
        input0: The first video or image, of shape [..., H, W, C], of [0..255].
        input1: The second video or image, of shape [..., H, W, C], of [0..255].

    Returns:
        The mean SSIM value over all frames.
    """
    from concurrent.futures import ThreadPoolExecutor

    assert input0.shape == input1.shape, "inputs should have the same shape"
    # Promote a single image to a one-frame video so the loop below is uniform.
    if input0.ndim == 3:
        input0, input1 = np.array([input0]), np.array([input1])

    def compute_ssim(pair):
        # Per-frame SSIM; `channel_axis=-1` marks the trailing channel axis.
        # The deprecated `multichannel=True` flag was dropped: it is superseded
        # by `channel_axis` and removed in scikit-image >= 0.23, where passing
        # it raises a TypeError.
        one_image0, one_image1 = pair
        return ssim(
            one_image0,
            one_image1,
            data_range=_UINT8_MAX_F,
            channel_axis=-1,
        )

    # Frames are scored independently, so fan out across threads.
    with ThreadPoolExecutor() as executor:
        ssim_values = list(executor.map(compute_ssim, zip(input0, input1)))
    return float(np.mean(ssim_values))
def LPIPS(input0: np.ndarray, input1: np.ndarray, loss_fn_vgg: Callable) -> float:
    """Compute LPIPS between two videos or two images.

    Args:
        input0: The first video or image, of shape [..., H, W, C], of [0..255].
        input1: The second video or image, of shape [..., H, W, C], of [0..255].
        loss_fn_vgg: The LPIPS loss function. The device is taken from the
            module-level ``args.device`` (the previous docstring documented a
            ``device`` parameter that does not exist).

    Returns:
        The LPIPS value averaged over all frames.
    """
    assert input0.shape == input1.shape, "inputs should have the same shape"
    # Promote a single image to a one-frame video so the loop below is uniform.
    if input0.ndim == 3:
        input0, input1 = np.array([input0]), np.array([input1])
    # computing LPIPS needs to normalize input to [-1,1].
    input0 = torch.from_numpy(2 * (input0 / _UINT8_MAX_F - 0.5)).to(torch.float32)
    input1 = torch.from_numpy(2 * (input1 / _UINT8_MAX_F - 0.5)).to(torch.float32)
    input0 = input0.permute(0, 3, 1, 2)  # N, C, H, W
    input1 = input1.permute(0, 3, 1, 2)  # N, C, H, W
    # Average LPIPS over all frames. Inference only, so disable autograd
    # bookkeeping — same results, less memory and time.
    results = []
    with torch.no_grad():
        for one_input0, one_input1 in zip(input0, input1):
            fm0 = one_input0.unsqueeze(0).to(args.device)
            fm1 = one_input1.unsqueeze(0).to(args.device)
            results.append(loss_fn_vgg(fm0, fm1).item())
    return np.mean(results)
def main_psnr_ssim() -> None:
    """Compute per-video PSNR/SSIM over paired folders and dump the scores.

    Reads matching media files from ``args.gtpath`` and ``args.targetpath``,
    prints per-file and mean values, and writes the score lists next to the
    target videos.
    """
    vfiles0 = sorted(list(set(glob(str(f"{args.gtpath}/*.{args.ext}")))))
    vfiles1 = sorted(list(set(glob(str(f"{args.targetpath}/*.{args.ext}")))))
    psnr_filename = f"{args.targetpath}/psnr.csv"
    ssim_filename = f"{args.targetpath}/ssim.csv"
    if os.path.exists(psnr_filename) and os.path.exists(ssim_filename):
        print(f"{psnr_filename} already exists. Recomputing ...")
        print(f"{ssim_filename} already exists. Recomputing ...")
    assert len(vfiles0) == len(vfiles1), "number of media files must match"
    print(f"Calculating PSNR on {len(vfiles0)} pairs ...")
    psnr_values, ssim_values = list(), list()
    # `total=` is required for tqdm to show progress for a zip iterator,
    # which has no len().
    for input0_file, input1_file in tqdm(zip(vfiles0, vfiles1), total=len(vfiles0)):
        assert (
            input0_file.split("/")[-1] == input1_file.split("/")[-1]
        ), "file names must match"
        input0 = read_video(input0_file).astype(np.float32)
        input1 = read_video(input1_file).astype(np.float32)
        name = input0_file.split("/")[-1]
        psnr_value = PSNR(input0, input1)
        ssim_value = SSIM(input0, input1)
        psnr_values.append([name, psnr_value])
        ssim_values.append([name, ssim_value])
        print(f"{name} PSNR: {psnr_value}, SSIM: {ssim_value}")
    print(f"mean PSNR: {np.mean([el[-1] for el in psnr_values])}")
    print(f"mean SSIM: {np.mean([el[-1] for el in ssim_values])}")
    # NOTE(review): the payload is JSON even though the extension is .csv;
    # kept as-is for compatibility with existing downstream tooling.
    with open(psnr_filename, "w") as fw:
        json.dump(psnr_values, fw)
    with open(ssim_filename, "w") as fw:
        json.dump(ssim_values, fw)
def main_lpips() -> None:
    """Compute per-video LPIPS over paired folders and dump the scores."""
    loss_fn_vgg = lpips.LPIPS(net="vgg").to(args.device).eval()
    vfiles0 = sorted(list(set(glob(str(f"{args.gtpath}/*.{args.ext}")))))
    vfiles1 = sorted(list(set(glob(str(f"{args.targetpath}/*.{args.ext}")))))
    lpips_filename = f"{args.targetpath}/lpips.csv"
    if os.path.exists(lpips_filename):
        print(f"{lpips_filename} already exists. Recomputing ...")
    assert len(vfiles0) == len(vfiles1), "video files not match"
    print(f"Calculating LPIPS on {len(vfiles1)} pairs ...")
    lpips_values = list()
    # Iterate ground-truth/reconstruction pairs in lockstep.
    for gt_file, target_file in tqdm(list(zip(vfiles0, vfiles1))):
        gt_video = read_video(gt_file)
        target_video = read_video(target_file)
        clip_name = gt_file.split("/")[-1]
        score = LPIPS(gt_video, target_video, loss_fn_vgg)
        lpips_values.append([clip_name, score])
    print(f"mean LPIPS: {np.mean([el[-1] for el in lpips_values])}")
    with open(lpips_filename, "w") as fw:
        json.dump(lpips_values, fw)
def main_fvd(max_n_frame: int = 300) -> None:
    """Accumulate FVD statistics over all video pairs and dump the score.

    Args:
        max_n_frame: Maximum number of leading frames taken from each video.
    """
    fvd_model = FVD("styleganv").to(args.device).double()
    vfiles0 = sorted(list(set(glob(str(f"{args.gtpath}/*.{args.ext}")))))
    vfiles1 = sorted(list(set(glob(str(f"{args.targetpath}/*.{args.ext}")))))
    fvd_filename = f"{args.targetpath}/fvd.csv"
    if os.path.exists(fvd_filename):
        print(f"{fvd_filename} already exists. Recomputing ...")
    fvd_model.reset()
    assert len(vfiles0) == len(vfiles1), "video files not match"
    print(f"Calculating FVD on {len(vfiles1)} pairs ...")
    for gt_file, target_file in tqdm(list(zip(vfiles0, vfiles1))):
        gt_clip = read_video(gt_file)[:max_n_frame]
        target_clip = read_video(target_file)[:max_n_frame]
        # Promote a single image to a one-frame video.
        if gt_clip.ndim == 3:
            gt_clip, target_clip = np.array([gt_clip]), np.array([target_clip])
        gt_clip = torch.from_numpy(gt_clip / 255.0).to(args.device).float()
        target_clip = torch.from_numpy(target_clip / 255.0).to(args.device).float()
        # (T, H, W, C) -> (1, C, T, H, W) as expected by the FVD extractor.
        gt_clip = gt_clip.permute(3, 0, 1, 2).unsqueeze(0)
        target_clip = target_clip.permute(3, 0, 1, 2).unsqueeze(0)
        fvd_model.update_real_fake_batch(gt_clip, target_clip)
    fvd = fvd_model.compute().item()
    print(f"FVD: {fvd}")
    with open(fvd_filename, "w") as fw:
        json.dump([fvd], fw)
if __name__ == "__main__":
    # Normalize once; "all" runs every metric in sequence.
    mode = args.mode.lower()
    if mode in ("psnr", "all"):
        main_psnr_ssim()
    if mode in ("lpips", "all"):
        main_lpips()
    if mode in ("fvd", "all"):
        main_fvd()
| python | Apache-2.0 | dcbb7d587abe2804da4d23ac46f9caf17f4b641c | 2026-01-05T07:14:06.639305Z | false |
NVlabs/TokenBench | https://github.com/NVlabs/TokenBench/blob/dcbb7d587abe2804da4d23ac46f9caf17f4b641c/token_bench/fvd.py | token_bench/fvd.py | # SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from copy import deepcopy
from typing import Optional, Sequence, Union
import torch
import torch.nn.functional as F
from torch import Tensor
from torch.nn import Module
from torchmetrics.image.fid import _compute_fid
from torchmetrics.metric import Metric
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
# Path to the pre-exported I3D TorchScript checkpoint used as the FVD feature
# extractor (loaded by StyleGANvFeatureExtractor below).
_STYLEGAN_TORCHSCRIPT_CKPT = (
    "./pretrained_ckpts/opensora/eval/fvd/styleganv/i3d_torchscript.pt"
)
def preprocess_single(video, resolution=224, sequence_length=None):
    """Resize-shortest-side, center-crop, and rescale one CTHW video to [-1, 1].

    Args:
        video: Tensor of shape (C, T, H, W) with values in [0, 1].
        resolution: Side length of the square center crop.
        sequence_length: Optional number of leading frames to keep.

    Returns:
        A contiguous tensor of shape (C, T', resolution, resolution) in [-1, 1].
    """
    channels, frames, height, width = video.shape
    # Optional temporal crop to the first `sequence_length` frames.
    if sequence_length is not None:
        assert sequence_length <= frames
        video = video[:, :sequence_length]
    # Scale so the shorter spatial side becomes exactly `resolution`.
    scale = resolution / min(height, width)
    if height < width:
        new_size = (resolution, math.ceil(width * scale))
    else:
        new_size = (math.ceil(height * scale), resolution)
    # 4D input is interpreted as (N, C, H, W), so C acts as batch and T as
    # channels here; only the spatial dims are interpolated.
    video = F.interpolate(video, size=new_size, mode="bilinear", align_corners=False)
    # Center crop to a square of side `resolution`.
    _, _, height, width = video.shape
    top = (height - resolution) // 2
    left = (width - resolution) // 2
    video = video[:, :, top : top + resolution, left : left + resolution]
    # [0, 1] -> [-1, 1]
    return ((video - 0.5) * 2).contiguous()
class StyleGANvFeatureExtractor(Module):
    """Frozen I3D feature extractor (StyleGAN-V export) used for FVD."""

    def __init__(self):
        """Load the TorchScript checkpoint and freeze all of its parameters."""
        super().__init__()
        self.model = torch.jit.load(_STYLEGAN_TORCHSCRIPT_CKPT)
        self.model.eval()
        # Inference-only: no gradients flow through the extractor.
        for param in self.model.parameters():
            param.requires_grad = False

    @torch.no_grad()
    def forward(self, x):
        """Return I3D features for a batch of CTHW videos in [0, 1]."""
        detector_kwargs = dict(
            rescale=False, resize=False, return_features=True
        )  # Return raw features before the softmax layer.
        # Each video is resized/cropped/normalized before stacking into a batch.
        return self.model(
            torch.stack([preprocess_single(video) for video in x]), **detector_kwargs
        )
class FVD(Metric):
    r"""
    Frechet Video Distance (FVD) is a metric to evaluate the quality of video generation models.
    As input to ``forward`` and ``update`` the metric accepts the following input
    - ``videos`` (:class:`~torch.Tensor`): tensor with images feed to the feature extractor with. [0, 1]
    - ``real`` (:class:`~bool`): bool indicating if ``videos`` belong to the real or the fake distribution
    As output of `forward` and `compute` the metric returns the following output
    - ``fvd`` (:class:`~torch.Tensor`): float scalar tensor with mean FVD value over samples
    Example:
        >>> import torch
        >>> torch.manual_seed(123)
        >>> NUMBER_OF_VIDEOS = 8
        >>> VIDEO_LENGTH = 50
        >>> CHANNEL = 3
        >>> SIZE = 64
        >>> videos1 = torch.zeros(NUMBER_OF_VIDEOS, CHANNEL, VIDEO_LENGTH, SIZE, SIZE, requires_grad=False).cuda()
        >>> videos2 = torch.ones(NUMBER_OF_VIDEOS, CHANNEL, VIDEO_LENGTH, SIZE, SIZE, requires_grad=False).cuda()
        >>> metric = FVD().cuda()
        >>> metric.update(videos1, real=True)
        >>> metric.update(videos2, real=False)
        >>> metric.compute()
        >>> tensor(232.7575)
    """
    # Lower FVD means the two feature distributions are closer.
    higher_is_better: bool = False
    is_differentiable: bool = False
    full_state_update: bool = False
    plot_lower_bound: float = 0.0
    # Running first/second moments and counts of real-video features.
    real_features_sum: Tensor
    real_features_cov_sum: Tensor
    real_features_num_samples: Tensor
    # Running first/second moments and counts of generated-video features.
    fake_features_sum: Tensor
    fake_features_cov_sum: Tensor
    fake_features_num_samples: Tensor
    feature_extractor: Module
    extractor_option: str = "styleganv"
    def __init__(
        self,
        feature_extractor: Union[str, Module] = "styleganv",
        real_feature_stats: Optional[str] = None,
        reset_real_features: bool = True,
        **kwargs,
    ):
        """Initialize accumulator state and the I3D feature extractor.

        Args:
            feature_extractor: Only the string "styleganv" is implemented;
                passing a Module raises NotImplementedError.
            real_feature_stats: Reserved for precomputed real-video statistics;
                passing a value raises NotImplementedError.
            reset_real_features: Whether ``reset()`` also clears the real-video
                statistics (False lets them be reused across evaluations).
            **kwargs: Forwarded to ``torchmetrics.Metric``.
        """
        super().__init__(**kwargs)
        if isinstance(feature_extractor, str):
            # assert feature_extractor == 'styleganv', 'Only StyleGAN video is supported for now'
            if feature_extractor.lower() == "styleganv":
                self.feature_extractor = StyleGANvFeatureExtractor()
            else:
                raise NotImplementedError(
                    "Only StyleGANv and inceptionI3d are supported for now"
                )
            # The StyleGAN-V I3D export produces 400-dimensional features.
            num_features = 400
        else:
            raise NotImplementedError()
        mx_num_feats = (num_features, num_features)
        # States are registered with torchmetrics so they are synced across
        # processes (summed) and cleared by reset(). Double precision keeps the
        # covariance accumulation numerically stable.
        self.add_state(
            "real_features_sum",
            torch.zeros(num_features).double(),
            dist_reduce_fx="sum",
        )
        self.add_state(
            "real_features_cov_sum",
            torch.zeros(mx_num_feats).double(),
            dist_reduce_fx="sum",
        )
        self.add_state(
            "real_features_num_samples", torch.tensor(0).long(), dist_reduce_fx="sum"
        )
        self.add_state(
            "fake_features_sum",
            torch.zeros(num_features).double(),
            dist_reduce_fx="sum",
        )
        self.add_state(
            "fake_features_cov_sum",
            torch.zeros(mx_num_feats).double(),
            dist_reduce_fx="sum",
        )
        self.add_state(
            "fake_features_num_samples", torch.tensor(0).long(), dist_reduce_fx="sum"
        )
        self.reset_real_features = reset_real_features
        self.reuse_real_stats = real_feature_stats is not None
        if self.reuse_real_stats:
            raise NotImplementedError()
    def update(self, videos: Tensor, real: bool) -> None:
        """Extract features for a batch and fold them into the running moments.

        Args:
            videos: Batch of videos accepted by the feature extractor, in [0, 1].
            real: True to accumulate into the real distribution, False for fake.
        """
        features = self.feature_extractor(videos)
        # Remembered so compute() can cast the result back.
        # NOTE(review): compute() reads self.orig_dtype, which is only set
        # here — calling compute() before any update() raises AttributeError.
        self.orig_dtype = features.dtype
        features = features.double()
        if features.dim() == 1:
            features = features.unsqueeze(0)
        if real:
            self.real_features_sum += features.sum(dim=0)
            self.real_features_cov_sum += features.t().mm(features)
            self.real_features_num_samples += videos.shape[0]
        else:
            self.fake_features_sum += features.sum(dim=0)
            self.fake_features_cov_sum += features.t().mm(features)
            self.fake_features_num_samples += videos.shape[0]
    def update_real_fake_batch(self, real_video: Tensor, fake_video: Tensor) -> None:
        """Convenience wrapper: update with one real and one fake batch."""
        self.update(real_video, real=True)
        self.update(fake_video, real=False)
    def compute_fvd_from_features(
        self, real_features: Tensor, fake_features: Tensor
    ) -> float:
        """Compute FVD directly from two pre-extracted feature matrices.

        Stateless variant of compute(): builds means and sample covariances
        from the given [N, num_features] matrices and plugs them into the
        Frechet distance.

        Args:
            real_features: Features of real videos, shape [N_real, D].
            fake_features: Features of generated videos, shape [N_fake, D].

        Returns:
            The FVD value as a Python float.

        Raises:
            RuntimeError: If either side has fewer than two samples (a sample
                covariance is undefined).
        """
        real_features = real_features.double()
        fake_features = fake_features.double()
        real_features_sum = real_features.sum(dim=0)
        real_features_cov_sum = real_features.t().mm(real_features)
        real_features_num_samples = real_features.shape[0]
        fake_features_sum = fake_features.sum(dim=0)
        fake_features_cov_sum = fake_features.t().mm(fake_features)
        fake_features_num_samples = fake_features.shape[0]
        if real_features_num_samples < 2 or fake_features_num_samples < 2:
            raise RuntimeError(
                "More than one sample is required for both the real and fake distributed to compute FID"
            )
        mean_real = (real_features_sum / real_features_num_samples).unsqueeze(0)
        mean_fake = (fake_features_sum / fake_features_num_samples).unsqueeze(0)
        # Unbiased sample covariance from the accumulated second moment:
        # cov = (sum(x x^T) - n * mean mean^T) / (n - 1)
        cov_real_num = (
            real_features_cov_sum
            - real_features_num_samples * mean_real.t().mm(mean_real)
        )
        cov_real = cov_real_num / (real_features_num_samples - 1)
        cov_fake_num = (
            fake_features_cov_sum
            - fake_features_num_samples * mean_fake.t().mm(mean_fake)
        )
        cov_fake = cov_fake_num / (fake_features_num_samples - 1)
        return (
            _compute_fid(mean_real.squeeze(0), cov_real, mean_fake.squeeze(0), cov_fake)
            .float()
            .item()
        )
    def compute(self) -> Tensor:
        """Calculate FID score based on accumulated extracted features from the two distributions."""
        if self.real_features_num_samples < 2 or self.fake_features_num_samples < 2:
            raise RuntimeError(
                "More than one sample is required for both the real and fake distributed to compute FID"
            )
        mean_real = (self.real_features_sum / self.real_features_num_samples).unsqueeze(
            0
        )
        mean_fake = (self.fake_features_sum / self.fake_features_num_samples).unsqueeze(
            0
        )
        # Same unbiased covariance reconstruction as compute_fvd_from_features,
        # but from the metric's accumulated state.
        cov_real_num = (
            self.real_features_cov_sum
            - self.real_features_num_samples * mean_real.t().mm(mean_real)
        )
        cov_real = cov_real_num / (self.real_features_num_samples - 1)
        cov_fake_num = (
            self.fake_features_cov_sum
            - self.fake_features_num_samples * mean_fake.t().mm(mean_fake)
        )
        cov_fake = cov_fake_num / (self.fake_features_num_samples - 1)
        return _compute_fid(
            mean_real.squeeze(0), cov_real, mean_fake.squeeze(0), cov_fake
        ).to(self.orig_dtype)
    def reset(self) -> None:
        """Reset metric states."""
        if not self.reset_real_features:
            # Preserve the real-video statistics across the reset so they can
            # be reused for another batch of generated videos.
            real_features_sum = deepcopy(self.real_features_sum)
            real_features_cov_sum = deepcopy(self.real_features_cov_sum)
            real_features_num_samples = deepcopy(self.real_features_num_samples)
            super().reset()
            self.real_features_sum = real_features_sum
            self.real_features_cov_sum = real_features_cov_sum
            self.real_features_num_samples = real_features_num_samples
        else:
            super().reset()
    def plot(
        self,
        val: Optional[Union[Tensor, Sequence[Tensor]]] = None,
        ax: Optional[_AX_TYPE] = None,
    ) -> _PLOT_OUT_TYPE:
        """Plot a single or multiple values from the metric.
        Args:
            val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
                If no value is provided, will automatically call `metric.compute` and plot that result.
            ax: An matplotlib axis object. If provided will add plot to that axis
        Returns:
            Figure and Axes object
        Raises:
            ModuleNotFoundError:
                If `matplotlib` is not installed
        """
        return self._plot(val, ax)
| python | Apache-2.0 | dcbb7d587abe2804da4d23ac46f9caf17f4b641c | 2026-01-05T07:14:06.639305Z | false |
NVlabs/TokenBench | https://github.com/NVlabs/TokenBench/blob/dcbb7d587abe2804da4d23ac46f9caf17f4b641c/token_bench/video/preprocessing_script.py | token_bench/video/preprocessing_script.py | # SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import imageio
import numpy as np
from glob import glob
import mediapy as media
def resize_video(video: np.ndarray, short_size: int = None) -> np.ndarray:
    """Resize a video so its shorter spatial side equals ``short_size``.

    Args:
        video: Array shaped [..., H, W, C].
        short_size: Target length of the shorter side; ``None`` is a no-op.

    Returns:
        The resized video, or the input unchanged when ``short_size`` is None.
    """
    if short_size is None:
        return video
    height, width = video.shape[-3:-1]
    # Scale the longer side proportionally, rounding to the nearest pixel.
    if height <= width:
        new_shape = (short_size, int(width * short_size / height + 0.5))
    else:
        new_shape = (int(height * short_size / width + 0.5), short_size)
    return media.resize_video(video, shape=new_shape)
# Preprocess raw benchmark videos into the TokenBench layout: clip length,
# downscale to at most 1080p on the short side, and re-encode as MP4.
raw_video_dir = "/root/dataset"
input_pattern = raw_video_dir + "/%s/*.%s"
# Source benchmarks and the container extension used by each.
benchmarks = ["bdd_100", "egoexo4D", "panda", "bridgev2"]
exts = ["mov", "mp4", "mp4", "mp4"]
for benchmark, ext in zip(benchmarks, exts):
    input_files = sorted(glob(str(input_pattern % (benchmark, ext))))
    print(
        "Processing", len(input_files), "videos for", input_pattern % (benchmark, ext)
    )
    for jdx, video_file in enumerate(input_files):
        # Decode every frame plus the container metadata (fps, duration).
        video_reader = imageio.get_reader(video_file, ext)
        video_frames = []
        for frame in video_reader:
            video_frames.append(frame)
        input_video, meta_data = np.array(video_frames), video_reader.get_meta_data()
        video_fps = meta_data["fps"]
        video_duration = meta_data["duration"]
        input_video = np.array(input_video)
        T, H, W, C = input_video.shape
        print("loaded", video_file, "with", (T, H, W))
        # clip the videos to 10 seconds if they are longer
        # NOTE(review): max(..., 300) enforces a floor of 300 frames, so for
        # fps < 30 this keeps more than 10 seconds — confirm this is intended.
        num_frame_thres = max(int(np.ceil(video_fps * 10)), 300)
        output_video = (
            input_video[:num_frame_thres] if T > num_frame_thres else input_video
        )
        # Free the full-length decode early to bound peak memory.
        del input_video
        # resize the videos to 1080p if needed
        output_video = (
            resize_video(output_video, 1080) if min(H, W) > 1080 else output_video
        )
        print((T, H, W, C), "resized to", output_video.shape)
        # Flatten into the tokenbench folder, prefixing with the benchmark name
        # and normalizing the extension to .mp4.
        video_file_tokenbench = video_file.replace(
            f"/dataset/{benchmark}/", f"/dataset/tokenbench/{benchmark}_"
        ).replace(f".{ext}", ".mp4")
        os.makedirs(os.path.dirname(video_file_tokenbench), exist_ok=True)
        print("writing to ...", video_file_tokenbench)
        media.write_video(video_file_tokenbench, output_video, fps=video_fps)
        del output_video
| python | Apache-2.0 | dcbb7d587abe2804da4d23ac46f9caf17f4b641c | 2026-01-05T07:14:06.639305Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/setup.py | setup.py | from __future__ import annotations
import os
from setuptools import setup
# Runtime dependencies, pinned to exact versions for reproducible installs.
dependencies = [
    "aiofiles==22.1.0",  # Async IO for files
    "blspy==1.0.16",  # Signature library
    "chiavdf==1.0.7",  # timelord and vdf verification
    "chiabip158==1.1",  # bip158-style wallet filters
    "chiapos==1.0.11",  # proof of space
    "clvm==0.9.7",
    "clvm_tools==0.4.5",  # Currying, Program.to, other conveniences
    "chia_rs==0.1.14",
    "clvm-tools-rs==0.1.24",  # Rust implementation of clvm_tools' compiler
    "aiohttp==3.8.3",  # HTTP server for full node rpc
    "aiosqlite==0.17.0",  # asyncio wrapper for sqlite, to store blocks
    "bitstring==3.1.9",  # Binary data management library
    "colorama==0.4.5",  # Colorizes terminal output
    "colorlog==6.7.0",  # Adds color to logs
    "concurrent-log-handler==0.9.20",  # Concurrently log and rotate logs
    "cryptography==36.0.2",  # Python cryptography library for TLS - keyring conflict
    "filelock==3.8.0",  # For reading and writing config multiprocess and multithread safely (non-reentrant locks)
    "keyring==23.6.0",  # Store keys in MacOS Keychain, Windows Credential Locker
    "keyrings.cryptfile==1.3.4",  # Secure storage for keys on Linux (Will be replaced)
    # "keyrings.cryptfile==1.3.8",  # Secure storage for keys on Linux (Will be replaced)
    # See https://github.com/frispete/keyrings.cryptfile/issues/15
    "PyYAML==6.0",  # Used for config file format
    "setproctitle==1.2.3",  # Gives the flax processes readable names
    "sortedcontainers==2.4.0",  # For maintaining sorted mempools
    "click==8.1.3",  # For the CLI
    "dnspython==2.2.1",  # Query DNS seeds
    "watchdog==2.1.9",  # Filesystem event watching - watches keyring.yaml
    "dnslib==0.9.22",  # dns lib
    "typing-extensions==4.3.0",  # typing backports like Protocol and TypedDict
    "zstd==1.5.2.6",
    "packaging==21.3",
    "psutil==5.9.1",
]
# Optional extra: UPnP port mapping support ("upnp" extra below).
upnp_dependencies = [
    "miniupnpc==2.2.2",  # Allows users to open ports on their router
]
# Development/CI tooling installed via the "dev" extra; not needed at runtime.
dev_dependencies = [
    "build",
    "coverage",
    "diff-cover",
    "pre-commit",
    "py3createtorrent",
    "pylint",
    "pytest",
    "pytest-asyncio>=0.18.1",  # require attribute 'fixture'
    "pytest-cov",
    "pytest-monitor; sys_platform == 'linux'",
    "pytest-xdist",
    "twine",
    "isort",
    "flake8",
    "mypy",
    "black==22.8.0",
    "aiohttp_cors",  # For blackd
    "ipython",  # For asyncio debugging
    "pyinstaller==5.3",
    "types-aiofiles",
    "types-cryptography",
    "types-pkg_resources",
    "types-pyyaml",
    "types-setuptools",
]
# Arguments for setuptools.setup(); kept in a dict so tooling can import this
# module with FLAX_SKIP_SETUP set and inspect the metadata without installing.
kwargs = dict(
    name="flax-blockchain",
    description="Flax blockchain full node, farmer, timelord, and wallet.",
    url="https://flaxnetwork.org/",
    license="Apache License",
    python_requires=">=3.7, <4",
    keywords="flax blockchain node",
    install_requires=dependencies,
    extras_require=dict(
        uvloop=["uvloop"],
        dev=dev_dependencies,
        upnp=upnp_dependencies,
    ),
    packages=[
        "build_scripts",
        "flax",
        "flax.cmds",
        "flax.clvm",
        "flax.consensus",
        "flax.daemon",
        "flax.data_layer",
        "flax.full_node",
        "flax.timelord",
        "flax.farmer",
        "flax.harvester",
        "flax.introducer",
        "flax.plot_sync",
        "flax.plotters",
        "flax.plotting",
        "flax.pools",
        "flax.protocols",
        "flax.rpc",
        "flax.seeder",
        "flax.server",
        "flax.simulator",
        "flax.types.blockchain_format",
        "flax.types",
        "flax.util",
        "flax.wallet",
        "flax.wallet.db_wallet",
        "flax.wallet.puzzles",
        "flax.wallet.cat_wallet",
        "flax.wallet.did_wallet",
        "flax.wallet.nft_wallet",
        "flax.wallet.settings",
        "flax.wallet.trading",
        "flax.wallet.util",
        "flax.ssl",
        "mozilla-ca",
    ],
    entry_points={
        "console_scripts": [
            "flax = flax.cmds.flax:main",
            "flax_daemon = flax.daemon.server:main",
            "flax_wallet = flax.server.start_wallet:main",
            "flax_full_node = flax.server.start_full_node:main",
            "flax_harvester = flax.server.start_harvester:main",
            "flax_farmer = flax.server.start_farmer:main",
            "flax_introducer = flax.server.start_introducer:main",
            "flax_crawler = flax.seeder.start_crawler:main",
            "flax_seeder = flax.seeder.dns_server:main",
            "flax_timelord = flax.server.start_timelord:main",
            "flax_timelord_launcher = flax.timelord.timelord_launcher:main",
            "flax_full_node_simulator = flax.simulator.start_simulator:main",
            "flax_data_layer = flax.server.start_data_layer:main",
            "flax_data_layer_http = flax.data_layer.data_layer_server:main",
        ]
    },
    package_data={
        "flax": ["pyinstaller.spec"],
        "": ["*.clvm", "*.clvm.hex", "*.clib", "*.clinc", "*.clsp", "py.typed"],
        "flax.util": ["initial-*.yaml", "english.txt"],
        "flax.ssl": ["flax_ca.crt", "flax_ca.key", "dst_root_ca.pem"],
        "mozilla-ca": ["cacert.pem"],
    },
    # Explicit encoding so the build does not depend on the platform locale
    # (README.md is UTF-8; the default codec on Windows is often cp1252).
    long_description=open("README.md", encoding="utf-8").read(),
    long_description_content_type="text/markdown",
    zip_safe=False,
    project_urls={
        "Source": "https://github.com/Flax-Network/flax-blockchain/",
        "Changelog": "https://github.com/Flax-Network/flax-blockchain/blob/main/CHANGELOG.md",
    },
)
# Setting FLAX_SKIP_SETUP to any non-empty value skips the setup() call,
# letting tooling import this module purely for its metadata.
if not os.environ.get("FLAX_SKIP_SETUP"):
    setup(**kwargs)  # type: ignore
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/activated.py | activated.py | #!/usr/bin/env python3
from __future__ import annotations
import os
import pathlib
import subprocess
import sys
here = pathlib.Path(__file__).parent
def main(*args: str) -> int:
    """Run a command inside the activated environment via the wrapper scripts.

    Args:
        args: The command and its arguments to execute.

    Returns:
        1 when no arguments were supplied, otherwise the wrapper's exit code.
    """
    if len(args) == 0:
        print("Parameters required")
        return 1
    # Windows needs the PowerShell wrapper; everywhere else use the sh script.
    if sys.platform == "win32":
        script = "activated.ps1"
        command = ["powershell", os.fspath(here.joinpath(script)), *args]
    else:
        script = "activated.sh"
        command = ["sh", os.fspath(here.joinpath(script)), *args]
    completed_process = subprocess.run(command)
    return completed_process.returncode


if __name__ == "__main__":
    # Guarded entry point: previously sys.exit(main(...)) ran unconditionally
    # at import time, which made the module exit the interpreter when imported
    # by tooling or tests. Running it as a script behaves exactly as before.
    sys.exit(main(*sys.argv[1:]))
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/installhelper.py | installhelper.py | #
# Install helper code to manage inserting the correct version for the GUI
# Gets the version from the result of "flax version"
# Converts to proper symver format so NPM doesn't complain
# Adds the version info to the package.json file
#
from __future__ import annotations
import json
import os
import subprocess
from os.path import exists
from pkg_resources import parse_version
#
# The following function is borrowed from
# https://github.com/inveniosoftware/invenio-assets/blob/maint-1.0/invenio_assets/npm.py
# Copyright (C) 2015-2018 CERN.
#
def make_semver(version_str: str) -> str:
    """Convert a PEP 440 version string into a semver-compatible string.

    Args:
        version_str: A Python package version, e.g. "1.6.0b3.dev123+local".

    Returns:
        "major.minor.patch", with any pre/dev segments joined after "-" and
        any local segment appended after "+".
    """
    parsed = parse_version(version_str)
    release = parsed._version.release
    # Missing minor/patch components default to zero so NPM accepts the value.
    major = release[0]
    minor = release[1] if len(release) > 1 else 0
    patch = release[2] if len(release) > 2 else 0
    prerelease = []
    if parsed._version.pre:
        prerelease.append("".join(str(x) for x in parsed._version.pre))
    if parsed._version.dev:
        prerelease.append("".join(str(x) for x in parsed._version.dev))
    version = "{0}.{1}.{2}".format(major, minor, patch)
    if prerelease:
        version += "-{0}".format(".".join(prerelease))
    if parsed.local:
        version += "+{0}".format(parsed.local)
    return version
def get_flax_version() -> str:
    """Return the installed flax CLI's version as a semver string.

    Falls back to "0.0" when the `flax version` command fails.
    """
    fallback = "0.0"
    proc = subprocess.run(["flax", "version"], capture_output=True)
    if proc.returncode != 0:
        return make_semver(fallback)
    # The version is printed on the last line of the command's output.
    last_line = str(proc.stdout.strip(), "utf-8").splitlines()[-1]
    return make_semver(last_line)
def update_version(package_json_path: str):
    """Write the current flax version into the package.json at the given path.

    Silently does nothing when the file does not exist (e.g. the GUI submodule
    is not checked out).

    Args:
        package_json_path: Path to the package.json file to rewrite.
    """
    if not exists(package_json_path):
        return
    # Read-modify-write with explicit UTF-8 so behavior does not depend on the
    # platform locale (package.json is always UTF-8).
    with open(package_json_path, encoding="utf-8") as f:
        data = json.load(f)
    data["version"] = get_flax_version()
    with open(package_json_path, "w", encoding="utf-8") as w:
        json.dump(data, indent=4, fp=w)
if __name__ == "__main__":
    # Stamp both the top-level GUI package.json and the gui sub-package one.
    base = os.path.dirname(__file__)
    update_version(f"{base}/flax-blockchain-gui/package.json")
    update_version(f"{base}/flax-blockchain-gui/packages/gui/package.json")
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/__init__.py | flax/__init__.py | from __future__ import annotations
from pkg_resources import DistributionNotFound, get_distribution, resource_filename
try:
    # Resolve the installed distribution's version at import time.
    __version__ = get_distribution("flax-blockchain").version
except DistributionNotFound:
    # package is not installed
    __version__ = "unknown"
# Absolute path to the PyInstaller spec file bundled with the package.
PYINSTALLER_SPEC_PATH = resource_filename("flax", "pyinstaller.spec")
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/condition_tools.py | flax/util/condition_tools.py | from typing import Dict, List, Optional, Tuple
from clvm.casts import int_from_bytes
from flax.types.blockchain_format.coin import Coin
from flax.types.blockchain_format.program import Program, SerializedProgram
from flax.types.blockchain_format.sized_bytes import bytes32, bytes48
from flax.types.condition_opcodes import ConditionOpcode
from flax.types.condition_with_args import ConditionWithArgs
from flax.util.errors import ConsensusError, Err
from flax.util.ints import uint64
from flax.types.spend_bundle_conditions import SpendBundleConditions
# TODO: review each `assert` and consider replacing with explicit checks
# since asserts can be stripped with python `-OO` flag
def parse_sexp_to_condition(
    sexp: Program,
) -> Tuple[Optional[Err], Optional[ConditionWithArgs]]:
    """
    Takes a FlaxLisp sexp and returns a ConditionWithArgs.
    If it fails, returns an Error
    """
    atoms = sexp.as_atom_list()
    if not atoms:
        return Err.INVALID_CONDITION, None
    # The first atom is the opcode; the remaining atoms are its arguments.
    opcode = ConditionOpcode(atoms[0])
    return None, ConditionWithArgs(opcode, atoms[1:])
def parse_sexp_to_conditions(
    sexp: Program,
) -> Tuple[Optional[Err], Optional[List[ConditionWithArgs]]]:
    """
    Takes a FlaxLisp sexp (list) and returns the list of ConditionWithArgss
    If it fails, returns as Error
    """
    results: List[ConditionWithArgs] = []
    try:
        # Parse each element; stop at the first element that fails to parse.
        for entry in sexp.as_iter():
            error, cvp = parse_sexp_to_condition(entry)
            if error:
                return error, None
            results.append(cvp)  # type: ignore # noqa
    except ConsensusError:
        # A malformed sexp structure surfaces as a generic invalid condition.
        return Err.INVALID_CONDITION, None
    return None, results
def conditions_by_opcode(
    conditions: "List[ConditionWithArgs]",
) -> "Dict[ConditionOpcode, List[ConditionWithArgs]]":
    """
    Takes a list of ConditionWithArgss(CVP) and return dictionary of CVPs keyed of their opcode
    """
    grouped: "Dict[ConditionOpcode, List[ConditionWithArgs]]" = {}
    for cvp in conditions:
        # setdefault creates the bucket on first sight of each opcode,
        # preserving the original insertion order within each bucket.
        grouped.setdefault(cvp.opcode, []).append(cvp)
    return grouped
def pkm_pairs(conditions: SpendBundleConditions, additional_data: bytes) -> Tuple[List[bytes48], List[bytes]]:
    """Collect (public key, message) pairs from parsed spend-bundle conditions."""
    pks: List[bytes48] = []
    msgs: List[bytes] = []
    # AGG_SIG_UNSAFE messages are signed exactly as given.
    for pk, msg in conditions.agg_sig_unsafe:
        pks.append(bytes48(pk))
        msgs.append(msg)
    # AGG_SIG_ME messages are domain-separated with the spent coin's id plus
    # the network's additional data.
    for spend in conditions.spends:
        for pk, msg in spend.agg_sig_me:
            pks.append(bytes48(pk))
            msgs.append(msg + spend.coin_id + additional_data)
    return pks, msgs
def pkm_pairs_for_conditions_dict(
    conditions_dict: Dict[ConditionOpcode, List[ConditionWithArgs]], coin_name: bytes32, additional_data: bytes
) -> List[Tuple[bytes48, bytes]]:
    """Collect (public key, message) pairs from a grouped conditions dict.

    Args:
        conditions_dict: Conditions grouped by opcode (see conditions_by_opcode).
        coin_name: Id of the coin being spent; mixed into AGG_SIG_ME messages.
        additional_data: Network-specific data, also mixed into AGG_SIG_ME.

    Returns:
        List of (48-byte public key, message) pairs to verify signatures over.

    Raises:
        ValueError: If coin_name is missing or a condition is malformed.
            Explicit raises replace the previous ``assert`` statements, which
            are stripped under ``python -OO`` (addresses the module-level TODO).
    """
    if coin_name is None:
        raise ValueError("coin_name is not set")
    ret: List[Tuple[bytes48, bytes]] = []
    for cwa in conditions_dict.get(ConditionOpcode.AGG_SIG_UNSAFE, []):
        if len(cwa.vars) != 2 or cwa.vars[0] is None or cwa.vars[1] is None:
            raise ValueError("Invalid AGG_SIG_UNSAFE condition")
        if len(cwa.vars[0]) != 48 or len(cwa.vars[1]) > 1024:
            raise ValueError("Invalid AGG_SIG_UNSAFE condition")
        ret.append((bytes48(cwa.vars[0]), cwa.vars[1]))
    for cwa in conditions_dict.get(ConditionOpcode.AGG_SIG_ME, []):
        if len(cwa.vars) != 2 or cwa.vars[0] is None or cwa.vars[1] is None:
            raise ValueError("Invalid AGG_SIG_ME condition")
        if len(cwa.vars[0]) != 48 or len(cwa.vars[1]) > 1024:
            raise ValueError("Invalid AGG_SIG_ME condition")
        # AGG_SIG_ME is bound to this specific coin and network.
        ret.append((bytes48(cwa.vars[0]), cwa.vars[1] + coin_name + additional_data))
    return ret
def created_outputs_for_conditions_dict(
    conditions_dict: Dict[ConditionOpcode, List[ConditionWithArgs]],
    input_coin_name: bytes32,
) -> List[Coin]:
    """Build the Coin outputs declared by the CREATE_COIN conditions."""
    # Each CREATE_COIN condition carries (puzzle_hash, amount) in its first
    # two vars; the spent coin's id becomes the new coin's parent.
    return [
        Coin(input_coin_name, bytes32(cvp.vars[0]), uint64(int_from_bytes(cvp.vars[1])))
        for cvp in conditions_dict.get(ConditionOpcode.CREATE_COIN, [])
    ]
def conditions_dict_for_solution(
    puzzle_reveal: SerializedProgram,
    solution: SerializedProgram,
    max_cost: int,
) -> Tuple[Optional[Err], Optional[Dict[ConditionOpcode, List[ConditionWithArgs]]], uint64]:
    """Run the puzzle with its solution and group the conditions by opcode."""
    err, condition_list, cost = conditions_for_solution(puzzle_reveal, solution, max_cost)
    if err or condition_list is None:
        return err, None, uint64(0)
    return None, conditions_by_opcode(condition_list), cost
def conditions_for_solution(
    puzzle_reveal: SerializedProgram,
    solution: SerializedProgram,
    max_cost: int,
) -> Tuple[Optional[Err], Optional[List[ConditionWithArgs]], uint64]:
    """Evaluate the puzzle against the solution and parse the conditions.

    Returns (error, conditions, cost); evaluation failures are reported as
    ``Err.SEXP_ERROR`` with zero cost rather than raised.
    """
    # get the standard script for a puzzle hash and feed in the solution
    try:
        cost, output = puzzle_reveal.run_with_cost(max_cost, solution)
        error, result = parse_sexp_to_conditions(output)
        return error, result, uint64(cost)
    except Program.EvalError:
        return Err.SEXP_ERROR, None, uint64(0)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/db_wrapper.py | flax/util/db_wrapper.py | from __future__ import annotations
import asyncio
import contextlib
import functools
import sqlite3
from dataclasses import dataclass, field
from datetime import datetime
from pathlib import Path
from types import TracebackType
from typing import Any, AsyncIterator, Dict, Generator, Iterable, Optional, Type, Union
import aiosqlite
from typing_extensions import final
# Cap on host parameters per SQL statement. SQLite builds older than 3.32.0
# default SQLITE_MAX_VARIABLE_NUMBER to 999, newer ones to 32766; stay safely
# below whichever limit applies to the linked library.
if aiosqlite.sqlite_version_info < (3, 32, 0):
    SQLITE_MAX_VARIABLE_NUMBER = 900
else:
    SQLITE_MAX_VARIABLE_NUMBER = 32700
async def execute_fetchone(
    c: aiosqlite.Connection, sql: str, parameters: Optional[Iterable[Any]] = None
) -> Optional[sqlite3.Row]:
    """Execute ``sql`` and return the first result row, or None when empty.

    Args:
        c: Open aiosqlite connection.
        sql: Statement to execute.
        parameters: Optional bound parameters (the annotation previously
            claimed a non-Optional Iterable despite the None default).

    Returns:
        The first row of the result set, or ``None`` if it is empty.
    """
    rows = await c.execute_fetchall(sql, parameters)
    # execute_fetchall materializes all rows; take the first one if any.
    return next(iter(rows), None)
@dataclass
class create_connection:
    """Create an object that can both be `await`ed and `async with`ed to get a
    connection.
    """
    # def create_connection( (for searchability
    database: Union[str, Path]
    uri: bool = False
    log_path: Optional[Path] = None  # when set, SQL statements are traced here
    name: Optional[str] = None  # label included in trace log lines
    _connection: Optional[aiosqlite.Connection] = field(init=False, default=None)
    async def _create(self) -> aiosqlite.Connection:
        # Open the connection and, when requested, attach a trace callback that
        # appends every executed statement to the log file.
        self._connection = await aiosqlite.connect(database=self.database, uri=self.uri)
        if self.log_path is not None:
            await self._connection.set_trace_callback(
                functools.partial(sql_trace_callback, path=self.log_path, name=self.name)
            )
        return self._connection
    def __await__(self) -> Generator[Any, None, Any]:
        # `await create_connection(...)` simply delegates to _create(); the
        # caller then owns the connection and must close it.
        return self._create().__await__()
    async def __aenter__(self) -> aiosqlite.Connection:
        # `async with create_connection(...)` also closes the connection on exit.
        self._connection = await self._create()
        return self._connection
    async def __aexit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc: Optional[BaseException],
        traceback: Optional[TracebackType],
    ) -> None:
        if self._connection is None:
            raise RuntimeError("exiting while self._connection is None")
        await self._connection.close()
def sql_trace_callback(req: str, path: Path, name: Optional[str] = None) -> None:
    """Append a timestamped SQL statement to the trace log at *path*.

    When *name* is given it is placed between the timestamp and the statement,
    identifying which connection issued the SQL.
    """
    stamp = datetime.now().strftime("%H:%M:%S.%f")
    fields = [stamp, req] if name is None else [stamp, name, req]
    entry = " ".join(fields) + "\n"
    with path.open(mode="a") as log_file:
        log_file.write(entry)
@final
class DBWrapper2:
    """One write connection plus a pool of read-only connections to a single
    sqlite database, multiplexed across asyncio tasks.

    Writers are serialized behind an asyncio.Lock and nested via SAVEPOINTs;
    a task that currently holds the writer reuses the same connection for
    nested writer()/reader() calls so it can see its own uncommitted changes.
    """

    db_version: int
    _lock: asyncio.Lock
    _read_connections: asyncio.Queue[aiosqlite.Connection]
    _write_connection: aiosqlite.Connection
    _num_read_connections: int
    # maps a task to the reader connection it currently holds (enables nested reads)
    _in_use: Dict[asyncio.Task, aiosqlite.Connection]
    # the task currently inside writer()/writer_maybe_transaction(), if any
    _current_writer: Optional[asyncio.Task]
    # monotonically increasing counter used to mint unique savepoint names
    _savepoint_name: int

    async def add_connection(self, c: aiosqlite.Connection) -> None:
        """Add a read-only connection to the reader pool."""
        # this guarantees that reader connections can only be used for reading
        assert c != self._write_connection
        await c.execute("pragma query_only")
        self._read_connections.put_nowait(c)
        self._num_read_connections += 1

    def __init__(self, connection: aiosqlite.Connection, db_version: int = 1) -> None:
        self._read_connections = asyncio.Queue()
        self._write_connection = connection
        self._lock = asyncio.Lock()
        self.db_version = db_version
        self._num_read_connections = 0
        self._in_use = {}
        self._current_writer = None
        self._savepoint_name = 0

    @classmethod
    async def create(
        cls,
        database: Union[str, Path],
        *,
        db_version: int = 1,
        uri: bool = False,
        reader_count: int = 4,
        log_path: Optional[Path] = None,
        journal_mode: str = "WAL",
        synchronous: Optional[str] = None,
        foreign_keys: bool = False,
        row_factory: Optional[Type[aiosqlite.Row]] = None,
    ) -> DBWrapper2:
        """Open the writer connection, apply pragmas, and populate the reader pool."""
        write_connection = await create_connection(database=database, uri=uri, log_path=log_path, name="writer")
        await (await write_connection.execute(f"pragma journal_mode={journal_mode}")).close()
        if synchronous is not None:
            await (await write_connection.execute(f"pragma synchronous={synchronous}")).close()

        await (await write_connection.execute(f"pragma foreign_keys={'ON' if foreign_keys else 'OFF'}")).close()

        write_connection.row_factory = row_factory

        self = cls(connection=write_connection, db_version=db_version)

        for index in range(reader_count):
            read_connection = await create_connection(
                database=database,
                uri=uri,
                log_path=log_path,
                name=f"reader-{index}",
            )
            read_connection.row_factory = row_factory
            await self.add_connection(c=read_connection)

        return self

    async def close(self) -> None:
        """Close every reader connection and then the writer connection."""
        while self._num_read_connections > 0:
            await (await self._read_connections.get()).close()
            self._num_read_connections -= 1
        await self._write_connection.close()

    def _next_savepoint(self) -> str:
        # Savepoint names only need to be unique per connection; a counter suffices.
        name = f"s{self._savepoint_name}"
        self._savepoint_name += 1
        return name

    @contextlib.asynccontextmanager
    async def _savepoint_ctx(self) -> AsyncIterator[None]:
        """Wrap the body in a SAVEPOINT; roll back on exception, always RELEASE."""
        name = self._next_savepoint()
        await self._write_connection.execute(f"SAVEPOINT {name}")
        try:
            yield
        except:  # noqa E722
            await self._write_connection.execute(f"ROLLBACK TO {name}")
            raise
        finally:
            # rollback to a savepoint doesn't cancel the transaction, it
            # just rolls back the state. We need to cancel it regardless
            await self._write_connection.execute(f"RELEASE {name}")

    @contextlib.asynccontextmanager
    async def writer(self) -> AsyncIterator[aiosqlite.Connection]:
        """
        Initiates a new, possibly nested, transaction. If this task is already
        in a transaction, none of the changes made as part of this transaction
        will become visible to others until that top level transaction commits.
        If this transaction fails (by exiting the context manager with an
        exception) this transaction will be rolled back, but the next outer
        transaction is not necessarily cancelled. It would also need to exit
        with an exception to be cancelled.
        The sqlite features this relies on are SAVEPOINT, ROLLBACK TO and RELEASE.
        """
        task = asyncio.current_task()
        assert task is not None
        if self._current_writer == task:
            # we allow nesting writers within the same task
            async with self._savepoint_ctx():
                yield self._write_connection
            return

        async with self._lock:
            async with self._savepoint_ctx():
                self._current_writer = task
                try:
                    yield self._write_connection
                finally:
                    self._current_writer = None

    @contextlib.asynccontextmanager
    async def writer_maybe_transaction(self) -> AsyncIterator[aiosqlite.Connection]:
        """
        Initiates a write to the database. If this task is already in a write
        transaction with the DB, this is a no-op. Any changes made to the
        database will be rolled up into the transaction we're already in. If the
        current task is not already in a transaction, one will be created and
        committed (or rolled back in the case of an exception).
        """
        task = asyncio.current_task()
        assert task is not None
        if self._current_writer == task:
            # just use the existing transaction
            yield self._write_connection
            return

        async with self._lock:
            async with self._savepoint_ctx():
                self._current_writer = task
                try:
                    yield self._write_connection
                finally:
                    self._current_writer = None

    @contextlib.asynccontextmanager
    async def reader(self) -> AsyncIterator[aiosqlite.Connection]:
        """Acquire a reader connection inside a DEFERRED transaction (snapshot reads)."""
        async with self.reader_no_transaction() as connection:
            if connection.in_transaction:
                yield connection
            else:
                await connection.execute("BEGIN DEFERRED;")
                try:
                    yield connection
                finally:
                    # close the transaction with a rollback instead of commit just in
                    # case any modifications were submitted through this reader
                    await connection.rollback()

    @contextlib.asynccontextmanager
    async def reader_no_transaction(self) -> AsyncIterator[aiosqlite.Connection]:
        """Acquire a reader connection without opening an explicit transaction."""
        # there should have been read connections added
        assert self._num_read_connections > 0

        # we can have multiple concurrent readers, just pick a connection from
        # the pool of readers. If they're all busy, we'll wait for one to free
        # up.
        task = asyncio.current_task()
        assert task is not None

        # if this task currently holds the write lock, use the same connection,
        # so it can read back updates it has made to its transaction, even
        # though it hasn't been committed yet
        if self._current_writer == task:
            # we allow nesting reading while also having a writer connection
            # open, within the same task
            yield self._write_connection
            return

        if task in self._in_use:
            yield self._in_use[task]
        else:
            c = await self._read_connections.get()
            try:
                # record our connection in this dict to allow nested calls in
                # the same task to use the same connection
                self._in_use[task] = c
                yield c
            finally:
                del self._in_use[task]
                self._read_connections.put_nowait(c)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/paginator.py | flax/util/paginator.py | from __future__ import annotations
import dataclasses
from math import ceil
from typing import Sequence
class InvalidPageSizeLimit(Exception):
    """Raised when a paginator is configured with a page-size limit below one."""

    def __init__(self, page_size_limit: int) -> None:
        message = f"Page size limit must be one or more, not: {page_size_limit}"
        super().__init__(message)
class InvalidPageSizeError(Exception):
    """Raised when a requested page size falls outside the allowed 1..limit range."""

    def __init__(self, page_size: int, page_size_limit: int) -> None:
        message = f"Invalid page size {page_size}. Must be between: 1 and {page_size_limit}"
        super().__init__(message)
class PageOutOfBoundsError(Exception):
    """Raised when a page index is requested that the source cannot provide."""

    def __init__(self, page_size: int, max_page_size: int) -> None:
        message = f"Page {page_size} out of bounds. Available pages: 0-{max_page_size}"
        super().__init__(message)
@dataclasses.dataclass
class Paginator:
    """Slices a sequence into fixed-size, zero-indexed pages."""

    _source: Sequence[object]
    _page_size: int

    @classmethod
    def create(cls, source: Sequence[object], page_size: int, page_size_limit: int = 100) -> Paginator:
        """Validate *page_size* against *page_size_limit* and build a Paginator."""
        if page_size_limit < 1:
            raise InvalidPageSizeLimit(page_size_limit)
        if page_size > page_size_limit:
            raise InvalidPageSizeError(page_size, page_size_limit)
        return cls(source, page_size)

    def page_size(self) -> int:
        """Number of items per page."""
        return self._page_size

    def page_count(self) -> int:
        """Total number of pages; an empty source still counts as one (empty) page."""
        full_pages = ceil(len(self._source) / self._page_size)
        return full_pages if full_pages > 1 else 1

    def get_page(self, page: int) -> Sequence[object]:
        """Return the slice of the source belonging to zero-based *page*."""
        last_page = self.page_count() - 1
        if not 0 <= page <= last_page:
            raise PageOutOfBoundsError(page, last_page)
        start = page * self._page_size
        return self._source[start : start + self._page_size]
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/task_timing.py | flax/util/task_timing.py | from __future__ import annotations
import asyncio
import inspect
import os
import sys
import time
from types import FrameType
from typing import Any, Dict, List
# This is a development utility that instruments tasks (coroutines) and records
# wall-clock time they spend in various functions. Since it relies on
# setprofile(), it cannot be combined with other profilers.
# to enable this instrumentation, set one of the environment variables:
# FLAX_INSTRUMENT_NODE=1
# FLAX_INSTRUMENT_WALLET=1
# Before starting the daemon.
# When exiting, profiles will be written to the `task-profile-<pid>` directory.
# To generate call trees, run:
# python flax/util/task_timing.py task-profile-<pid>
class FrameInfo:
    """Bookkeeping for one coroutine frame currently on a task's stack."""

    # perf_counter() value recorded when the frame was entered
    call_timestamp: float
    # depth of this frame within its task's coroutine stack
    stack_pos: int

    def __init__(self) -> None:
        self.stack_pos = 0
        self.call_timestamp = 0.0
class CallInfo:
    """Accumulated wall-clock time and call count for one caller->callee edge."""

    duration: float
    calls: int

    def __init__(self, duration: float) -> None:
        self.duration = duration
        self.calls = 1

    def add(self, duration: float) -> None:
        """Fold another call's duration into the running totals."""
        self.calls += 1
        self.duration += duration
class TaskInfo:
    """Per-task tracing state: live coroutine frames and the current stack depth."""

    stack: Dict[FrameType, FrameInfo]
    stack_pos: int

    def __init__(self) -> None:
        self.stack_pos = 0
        self.stack = {}
# Next value handed out as FunctionInfo.fun_id (process-wide).
g_next_id: int = 0


class FunctionInfo:
    """Aggregated wall-clock statistics for one function within a task's call tree."""

    name: str
    # "file:line" key as produced by get_file()
    file: str
    num_calls: int
    # total wall-clock seconds across all completed calls
    duration: float
    # caller "file:line" -> per-edge stats
    callers: Dict[str, CallInfo]
    # process-wide unique id; used as the graphviz node name in the report
    fun_id: int

    def __init__(self, name: str, file: str) -> None:
        global g_next_id
        self.name = name
        self.file = file
        self.num_calls = 0
        self.duration = 0.0
        self.callers = {}
        self.fun_id = g_next_id
        g_next_id += 1
# maps task name -> call tree (one per task; keyed by "file:line" of each function)
g_function_infos: Dict[str, Dict[str, FunctionInfo]] = {}
# live tracing state for each task currently being profiled
g_tasks: Dict[asyncio.Task[Any], TaskInfo] = {}
# current working directory, used to shorten absolute paths in reports
g_cwd = os.getcwd() + "/"
# the frame object has the following members:
# clear
# f_back
# f_builtins
# f_code (type: "code")
# f_globals
# f_lasti
# f_lineno
# f_locals
# f_trace
# f_trace_lines
# f_trace_opcodes
# the code class has the following members:
# co_argcount
# co_cellvars
# co_code
# co_consts
# co_filename
# co_firstlineno
# co_flags
# co_freevars
# co_kwonlyargcount
# co_lnotab
# co_name
# co_names
# co_nlocals
# co_posonlyargcount
# co_stacksize
# co_varnames
# replace
# documented here: https://docs.python.org/3/library/inspect.html
def get_stack(frame: FrameType) -> str:
    """Build a "/outer/inner"-style path of the coroutine frames above *frame*.

    Walks f_back links upward, stopping at the first non-coroutine frame.
    """
    path = ""
    code = frame.f_code
    while code.co_flags & inspect.CO_COROUTINE:  # pylint: disable=no-member
        path = f"/{code.co_name}{path}"
        parent = frame.f_back
        if parent is None:
            break
        frame = parent
        code = frame.f_code
    return path
def strip_filename(name: str) -> str:
    """Shorten an absolute source path for display.

    Paths under site-packages, a lib directory, or the current working
    directory are made relative to that root; anything else is returned as-is.
    """
    for marker in ("/site-packages/", "/lib/"):
        if marker in name:
            return name.split(marker, 1)[1]
    if name.startswith(g_cwd):
        return name[len(g_cwd) :]
    return name
def get_fun(frame: FrameType) -> str:
    """Return the name of the function executing in *frame*."""
    return f"{frame.f_code.co_name}"
def get_file(frame: FrameType) -> str:
    """Return "file:line" (path shortened) for the function executing in *frame*."""
    code = frame.f_code
    location = strip_filename(code.co_filename)
    return f"{location}:{code.co_firstlineno}"
def trace_fun(frame: FrameType, event: str, arg: Any) -> None:
    """sys.setprofile() hook: records wall-clock time spent per coroutine frame.

    Only coroutine frames are tracked. A "return" whose arg is a future means
    the coroutine merely suspended, so the frame stays on the task's stack;
    a real return folds the elapsed time into this task's call tree.
    """
    if event in ["c_call", "c_return", "c_exception"]:
        return

    # we only care about instrumenting co-routines
    if (frame.f_code.co_flags & inspect.CO_COROUTINE) == 0:  # pylint: disable=no-member
        # with open("instrumentation.log", "a") as f:
        #     f.write(f"[1] {event} {get_fun(frame)}\n")
        return

    task = asyncio.current_task()
    if task is None:
        return

    global g_tasks
    global g_function_infos

    ti = g_tasks.get(task)
    if ti is None:
        ti = TaskInfo()
        g_tasks[task] = ti

    # t = f"{task.get_name()}"

    if event == "call":
        fi = ti.stack.get(frame)
        if fi is not None:
            # frame already known: the coroutine is being resumed after a suspend
            ti.stack_pos = fi.stack_pos
            # with open("instrumentation.log", "a") as f:
            #     indent = " " * ti.stack_pos
            #     f.write(f"{indent}RESUME {t} {get_stack(frame)}\n")
        else:
            # first entry into this frame: record start time and stack depth
            fi = FrameInfo()
            fi.stack_pos = ti.stack_pos
            fi.call_timestamp = time.perf_counter()
            ti.stack[frame] = fi
            ti.stack_pos += 1
            # indent = " " * ti.stack_pos
            # with open("instrumentation.log", "a") as f:
            #     f.write(f"{indent}CALL {t} {get_stack(frame)}\n")
    elif event == "return":
        fi = ti.stack.get(frame)
        assert fi is not None
        # indent = " " * (fi.stack_pos)
        if asyncio.isfuture(arg):
            # this means the function was suspended
            # don't pop it from the stack
            pass
            # with open("instrumentation.log", "a") as f:
            #     f.write(f"{indent}SUSPEND {t} {get_stack(frame)}\n")
        else:
            # with open("instrumentation.log", "a") as f:
            #     f.write(f"{indent}RETURN {t} {get_stack(frame)}\n")
            now = time.perf_counter()
            duration = now - fi.call_timestamp
            task_name = task.get_name()
            fun_name = get_fun(frame)
            fun_file = get_file(frame)
            task_tree = g_function_infos.get(task_name)
            if task_tree is None:
                task_tree = {}
                g_function_infos[task_name] = task_tree
            fun_info = task_tree.get(fun_file)
            if fun_info is None:
                fun_info = FunctionInfo(fun_name, fun_file)
                task_tree[fun_file] = fun_info
            # attribute the time to the caller edge as well, for the call graph
            if frame.f_back is not None:
                n = get_file(frame.f_back)
                if n in fun_info.callers:
                    fun_info.callers[n].add(duration)
                else:
                    fun_info.callers[n] = CallInfo(duration)
            fun_info.num_calls += 1
            fun_info.duration += duration
            del ti.stack[frame]
            ti.stack_pos = fi.stack_pos - 1
def start_task_instrumentation() -> None:
    """Install the coroutine profiler; cannot be combined with other sys.setprofile users."""
    sys.setprofile(trace_fun)
def color(pct: float) -> str:
    """Map a percentage (0-100) onto the 10-step graphviz rdylgn10 colorscheme.

    Hot frames near 100% map to "1" and cold frames near 0% map to "10".
    The result is clamped into the scheme's valid 1-10 range: the previous
    formula produced an out-of-range "11" for pct == 0.
    """
    assert pct >= 0 and pct <= 100
    return f"{min(10, int((100. - pct) // 10) + 1)}"
def fontcolor(pct: float) -> str:
    """Pick a readable label color for a node filled with color(pct)."""
    # both extremes of the colorscheme are dark; use white text there
    return "white" if pct > 80 or pct < 20 else "black"
def stop_task_instrumentation() -> None:
    """Uninstall the profiler and write one graphviz .dot call graph per task.

    Output goes to ./task-profile-<pid>/<task-name>.dot. Tasks whose total
    time is under 1 ms or whose tree has two or fewer functions are skipped,
    as are frames below 0.1% and edges below 0.01% of the task's total time.
    """
    sys.setprofile(None)
    global g_function_infos
    target_dir = f"task-profile-{os.getpid()}"
    try:
        os.mkdir(target_dir)
    except Exception:
        # the directory may already exist from a previous run
        pass
    for task, call_tree in g_function_infos.items():
        dot_file_name = f"{target_dir}/" + task + ".dot"
        total_duration = 0.0
        # total time is taken as the largest single function duration
        for name, fun_info in call_tree.items():
            total_duration = max(total_duration, fun_info.duration)
        if total_duration < 0.001:
            continue

        # ignore trivial call trees
        if len(call_tree) <= 2:
            continue

        filter_frames = set()
        with open(dot_file_name, "w") as f:
            f.write(
                "digraph {\n"
                'node [fontsize=11, colorscheme=rdylgn10, style=filled, fontname="Arial"]\n'
                'edge [fontsize=11, colorscheme=rdylgn10, fontname="Arial"]\n'
            )

            # print all nodes (functions)
            for name, fun_info in call_tree.items():
                # frames that are less than 0.1% of the total wall-clock time are
                # filtered
                if fun_info.duration / total_duration < 0.001:
                    filter_frames.add(name)
                    continue
                percent = fun_info.duration * 100 / total_duration
                f.write(
                    f'frame_{fun_info.fun_id} [shape=box, label="{fun_info.name}()\\l'
                    f"{fun_info.file}\\l"
                    f"{percent:0.2f}%\\n"
                    f"{fun_info.duration*1000:0.2f}ms\\n"
                    f'{fun_info.num_calls}x\\n",'
                    f"fillcolor={color(percent)}, "
                    f"color={color(percent)}, "
                    f"fontcolor={fontcolor(percent)}]\n"
                )

            # print all edges (calls)
            for name, fun_info in call_tree.items():
                if name in filter_frames:
                    continue
                for caller, ci in fun_info.callers.items():
                    caller_info = call_tree.get(caller)
                    if caller_info is None:
                        continue
                    if caller_info.file in filter_frames:
                        continue
                    percent = ci.duration * 100 / total_duration
                    # filter edges that are too insignificant
                    if percent < 0.01:
                        continue
                    f.write(
                        f"frame_{caller_info.fun_id} -> frame_{fun_info.fun_id} "
                        f'[label="{percent:0.2f}%\\n{ci.calls}x",'
                        f"penwidth={0.3+(ci.duration*6/total_duration):0.2f},"
                        f"color={color(percent)}]\n"
                    )
            f.write("}\n")
if __name__ == "__main__":
    # Render every .dot profile in the given directory to .png with graphviz,
    # keeping at most 16 `dot` processes in flight at a time.
    import glob
    import pathlib
    import subprocess

    profile_dir = pathlib.Path(sys.argv[1])
    queue: List[subprocess.Popen[bytes]] = []
    for file in glob.glob(str(profile_dir / "*.dot")):
        print(file)
        # skip graphs that were already rendered by a previous invocation
        if os.path.exists(file + ".png"):
            continue
        if len(queue) > 15:
            oldest = queue.pop(0)
            oldest.wait()
        with open(file + ".png", "w+") as f:
            queue.append(subprocess.Popen(["dot", "-Tpng", file], stdout=f))
    # drain the remaining renderers before exiting
    while len(queue) > 0:
        oldest = queue.pop(0)
        oldest.wait()
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/path.py | flax/util/path.py | from __future__ import annotations
import os
from pathlib import Path
from typing import Union
def path_from_root(root: Path, path_str: Union[str, Path]) -> Path:
    """
    Resolve *path_str* against *root*.

    A relative path is joined onto the (user-expanded) root; an absolute path
    is used as-is. The result is always fully resolved.
    """
    expanded_root = Path(os.path.expanduser(str(root)))
    candidate = Path(path_str)
    if candidate.is_absolute():
        return candidate.resolve()
    return (expanded_root / candidate).resolve()
def make_path_relative(path_str: Union[str, Path], root: Path) -> Path:
    """
    Return *path_str* relative to *root* when it lies under it; otherwise
    return the path unchanged.
    """
    path = Path(path_str)
    try:
        return path.relative_to(root)
    except ValueError:
        return path
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/dump_keyring.py | flax/util/dump_keyring.py | #!/usr/bin/env python3
import click
import colorama
import yaml
from flax.cmds.passphrase_funcs import prompt_for_passphrase, read_passphrase_from_file
from flax.util.default_root import DEFAULT_KEYS_ROOT_PATH
from flax.util.file_keyring import FileKeyringContent
from flax.util.keyring_wrapper import DEFAULT_PASSPHRASE_IF_NO_MASTER_PASSPHRASE, KeyringWrapper
from cryptography.exceptions import InvalidTag
from io import TextIOWrapper
from pathlib import Path
from typing import Optional
DEFAULT_KEYRING_YAML = DEFAULT_KEYS_ROOT_PATH / "keyring.yaml"
def get_passphrase_prompt(keyring_file: str) -> str:
    """Build the colorized "(Unlock Keyring: <file>) Passphrase: " prompt string."""
    highlight = colorama.Fore.YELLOW + colorama.Style.BRIGHT
    reset = colorama.Style.RESET_ALL
    return (
        highlight
        + "(Unlock Keyring: "
        + colorama.Fore.MAGENTA
        + keyring_file
        + reset
        + highlight
        + ")"
        + reset
        + " Passphrase: "
    )
@click.command()
@click.argument("keyring_file", nargs=1, default=DEFAULT_KEYRING_YAML)
@click.option(
    "--full-payload", is_flag=True, default=False, help="Print the full keyring contents, including plaintext"
)
@click.option("--passphrase-file", type=click.File("r"), help="File or descriptor to read the passphrase from")
@click.option("--pretty-print", is_flag=True, default=False)
def dump(keyring_file, full_payload: bool, passphrase_file: Optional[TextIOWrapper], pretty_print: bool):
    # CLI entry: decrypt and print the contents of a keyring.yaml file.
    # (No docstring on purpose: click would surface it as --help text.)
    saved_passphrase: Optional[str] = KeyringWrapper.get_shared_instance().get_master_passphrase_from_credential_store()
    passphrase: str = saved_passphrase or DEFAULT_PASSPHRASE_IF_NO_MASTER_PASSPHRASE
    prompt: str = get_passphrase_prompt(str(keyring_file))

    print(f"Attempting to dump contents of keyring file: {keyring_file}\n")

    if passphrase_file is not None:
        passphrase = read_passphrase_from_file(passphrase_file)

    keyring_path = Path(keyring_file)
    file_content = FileKeyringContent.create_from_path(keyring_path)
    file_content_dict = file_content.to_dict()

    # Allow up to five passphrase attempts; a wrong passphrase surfaces as
    # ValueError or InvalidTag from the decryption layer, which re-prompts.
    for i in range(5):
        try:
            data_dict = file_content.get_decrypted_data_dict(passphrase)
            if full_payload:
                dump_content = file_content_dict
                dump_content["data"] = data_dict
            else:
                dump_content = data_dict

            if pretty_print:
                dump_content = yaml.dump(dump_content)
            print(dump_content)
            break
        except (ValueError, InvalidTag):
            passphrase = prompt_for_passphrase(prompt)
        except Exception as e:
            print(f"Unhandled exception: {e}")
            break
def main() -> None:
    """Script entry point: initialize colorama's terminal handling, then run the CLI."""
    colorama.init()
    dump()  # pylint: disable=no-value-for-parameter


if __name__ == "__main__":
    main()
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/json_util.py | flax/util/json_util.py | from __future__ import annotations
import dataclasses
import json
from typing import Any
from aiohttp import web
from flax.wallet.util.wallet_types import WalletType
class EnhancedJSONEncoder(json.JSONEncoder):
    """
    Encodes bytes as hex strings with 0x, and converts all dataclasses to json.
    """

    def default(self, o: Any):
        # NOTE(review): this assumes any dataclass passed in provides
        # to_json_dict() (the project's streamable types do); a plain
        # dataclass would raise AttributeError here — confirm intended.
        if dataclasses.is_dataclass(o):
            return o.to_json_dict()
        elif isinstance(o, WalletType):
            # wallet-type enum -> its name string
            return o.name
        elif hasattr(type(o), "__bytes__"):
            # bytes-convertible types -> 0x-prefixed hex
            return f"0x{bytes(o).hex()}"
        elif isinstance(o, bytes):
            return f"0x{o.hex()}"
        return super().default(o)
def dict_to_json_str(o: Any) -> str:
    """
    Serialize *o* to a JSON string with sorted keys, encoding project types
    via EnhancedJSONEncoder.
    """
    return json.dumps(o, cls=EnhancedJSONEncoder, sort_keys=True)
def obj_to_response(o: Any) -> web.Response:
    """
    Serialize *o* to JSON and wrap it in an aiohttp Response. Used for the RPC
    server, which returns JSON.
    """
    body = dict_to_json_str(o)
    return web.Response(body=body, content_type="application/json")
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/beta_metrics.py | flax/util/beta_metrics.py | from __future__ import annotations
import asyncio
import logging
import platform
import socket
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional
import psutil
from flax.util.config import load_config
log = logging.getLogger("beta")
metrics_log_interval_default = 5
metrics_log_interval_min = 1
metrics_log_interval_max = 30
def log_static_info() -> None:
    """Log host facts that do not change while the process runs."""
    log.debug(f"architecture: {platform.architecture()}")
    log.debug(f"processor: {platform.processor()}")
    log.debug(f"cpu count: {psutil.cpu_count()}")
    log.debug(f"machine: {platform.machine()}")
    log.debug(f"platform: {platform.platform()}")
def log_cpu_metrics() -> None:
    """Log a per-CPU snapshot: utilization, times, frequency and load average."""
    # The second field was previously mislabeled "freq" although it logs
    # cpu_times(); label fixed to "times".
    log.debug(
        f"CPU - percent: {psutil.cpu_percent(percpu=True)}, "
        f"times: {psutil.cpu_times(percpu=True)}, "
        f"freq: {psutil.cpu_freq(percpu=True)}, "
        f"load_avg: {psutil.getloadavg()}"
    )
def log_memory_metrics() -> None:
    """Log virtual-memory and swap usage."""
    # A stray psutil.disk_io_counters(perdisk=False) call used to sit here with
    # its result discarded; it belonged to the disk metrics and was removed.
    log.debug(f"MEMORY - virtual memory: {psutil.virtual_memory()}, swap: {psutil.swap_memory()}")
def log_disk_metrics(root_path: Path, plot_dirs: List[str]) -> None:
    """Log partitions, per-plot-directory usage, root usage and disk IO counters."""
    # TODO, Could this spam the logs too much for large farms? Maybe don't log usage of plot dirs and
    # set perdisk=False rather for psutil.disk_io_counters? Lets try it with the default interval of 15s for now.
    log.debug(f"DISK partitions: {psutil.disk_partitions()}")
    for plot_dir in plot_dirs:
        try:
            usage = psutil.disk_usage(plot_dir)
        except FileNotFoundError:
            usage = "Directory not found"
        log.debug(f"DISK - usage {plot_dir}: {usage}")
    log.debug(f"DISK - usage root: {psutil.disk_usage(str(root_path))}")
    log.debug(f"DISK - io counters: {psutil.disk_io_counters(perdisk=True)}")
def log_port_states(config: Dict[str, Any]) -> None:
    """Probe whether the configured full-node port is reachable on localhost.

    Checks both the IPv4 and IPv6 loopback addresses and logs open/closed.
    The probe sockets are now closed deterministically via context managers;
    the previous version leaked both sockets.
    """
    selected_network = config["selected_network"]
    full_node_port = config["network_overrides"]["config"][selected_network]["default_full_node_port"]

    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as test_socket_ipv4:
        port_open_ipv4 = test_socket_ipv4.connect_ex(("127.0.0.1", full_node_port)) == 0
    log.debug(f"full node port IPv4 [{full_node_port}]: {'open' if port_open_ipv4 else 'closed'}")

    with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as test_socket_ipv6:
        port_open_ipv6 = test_socket_ipv6.connect_ex(("::1", full_node_port)) == 0
    log.debug(f"full node port IPv6 [{full_node_port}]: {'open' if port_open_ipv6 else 'closed'}")
def log_network_metrics() -> None:
    """Log system-wide (not per-NIC) network IO counters."""
    log.debug(f"NETWORK: io counters: {psutil.net_io_counters(pernic=False)}")
@dataclass
class BetaMetricsLogger:
    """Background task that periodically logs system metrics to the beta logger."""

    root_path: Path
    # the running metrics loop, or None when stopped
    task: Optional[asyncio.Task[None]] = None
    # cooperative stop flag checked once per tick by run()
    stop_task: bool = False

    def start_logging(self) -> None:
        """Log one-time static host info and start the periodic metrics task."""
        log.debug("start_logging")
        log_static_info()
        if self.task is not None:
            raise RuntimeError("Already started")
        self.stop_task = False
        self.task = asyncio.create_task(self.run())

    async def stop_logging(self) -> None:
        """Request the metrics task to stop and wait for it to finish."""
        log.debug("stop_logging")
        if self.task is None:
            raise RuntimeError("Not yet started")
        self.stop_task = True
        await self.task
        self.task = None

    async def run(self) -> None:
        """Metrics loop: ticks once per second; logs at the configured interval."""
        config = load_config(self.root_path, "config.yaml")
        # clamp the configured interval into [metrics_log_interval_min, metrics_log_interval_max]
        interval = min(max(config["beta"]["metrics_log_interval"], metrics_log_interval_min), metrics_log_interval_max)
        tick = 0
        while not self.stop_task:
            try:
                tick += 1
                # Log every interval
                if tick % interval == 0:
                    log_cpu_metrics()
                    log_memory_metrics()
                    log_network_metrics()
                # Log after 10 intervals passed
                if tick % (interval * 10) == 0:
                    log_disk_metrics(self.root_path, config["harvester"]["plot_directories"])
                    log_port_states(config)
            except Exception as e:
                log.warning(f"BetaMetricsLogger run failed: {e}")
                # back off after a failure before resuming the loop
                await asyncio.sleep(10)
            await asyncio.sleep(1)
        log.debug("done")
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/make_test_constants.py | flax/util/make_test_constants.py | from __future__ import annotations
from typing import Dict
from flax.consensus.constants import ConsensusConstants
from flax.consensus.default_constants import DEFAULT_CONSTANTS
def make_test_constants(test_constants_overrides: Dict) -> ConsensusConstants:
    """Return a copy of DEFAULT_CONSTANTS with the given fields overridden (for tests)."""
    return DEFAULT_CONSTANTS.replace(**test_constants_overrides)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/hash.py | flax/util/hash.py | from __future__ import annotations
from hashlib import sha256
from flax.types.blockchain_format.sized_bytes import bytes32
def std_hash(b, skip_bytes_conversion: bool = False) -> bytes32:
    """
    The standard hash used in many places.
    """
    # sha256 of the input, wrapped in bytes32. When skip_bytes_conversion is
    # True the caller guarantees b is already a bytes-like object, so the
    # bytes() conversion is skipped.
    if skip_bytes_conversion:
        return bytes32(sha256(b).digest())
    else:
        return bytes32(sha256(bytes(b)).digest())
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/byte_types.py | flax/util/byte_types.py | from typing import BinaryIO, Iterable, SupportsBytes, Type, TypeVar, Union
from typing_extensions import SupportsIndex
_T_SizedBytes = TypeVar("_T_SizedBytes", bound="SizedBytes")
def hexstr_to_bytes(input_str: str) -> bytes:
    """
    Converts a hex string into bytes, removing the 0x if it's present.
    """
    if input_str.startswith(("0x", "0X")):
        input_str = input_str[2:]
    return bytes.fromhex(input_str)
class SizedBytes(bytes):
    """A streamable type that subclasses "bytes" but requires instances
    to be a certain, fixed size specified by the `._size` class attribute.
    """

    _size = 0

    # This is just a partial exposure of the underlying int constructor. Liskov...
    # https://github.com/python/typeshed/blob/f8547a3f3131de90aa47005358eb3394e79cfa13/stdlib/builtins.pyi#L483-L493
    def __init__(self, v: Union[Iterable[SupportsIndex], SupportsBytes]) -> None:
        # bytes.__new__ has already consumed v; all that remains here is to
        # enforce the subclass's fixed-size contract.
        super().__init__()
        if len(self) != self._size:
            raise ValueError("bad %s initializer %s" % (type(self).__name__, v))

    @classmethod
    def parse(cls: Type[_T_SizedBytes], f: BinaryIO) -> _T_SizedBytes:
        """Read exactly _size bytes from the stream and wrap them."""
        return cls(f.read(cls._size))

    def stream(self, f: BinaryIO) -> None:
        """Write the raw bytes to the stream."""
        f.write(self)

    @classmethod
    def from_bytes(cls: Type[_T_SizedBytes], blob: bytes) -> _T_SizedBytes:
        """Construct from a raw bytes blob (must be exactly _size long)."""
        return cls(blob)

    @classmethod
    def from_hexstr(cls: Type[_T_SizedBytes], input_str: str) -> _T_SizedBytes:
        """Construct from a hex string, tolerating an optional 0x/0X prefix."""
        if input_str.startswith(("0x", "0X")):
            input_str = input_str[2:]
        return cls.fromhex(input_str)

    def __str__(self) -> str:
        return self.hex()

    def __repr__(self) -> str:
        return "<%s: %s>" % (self.__class__.__name__, str(self))
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/permissions.py | flax/util/permissions.py | from __future__ import annotations
import os
from pathlib import Path
from typing import Tuple
def verify_file_permissions(path: Path, mask: int) -> Tuple[bool, int]:
    """
    Check that the file's permissions are properly restricted, as compared to the
    permission mask
    """
    mode = os.stat(path).st_mode & 0o777
    restricted = (mode & mask) == 0
    return restricted, mode
def octal_mode_string(mode: int) -> str:
    """Yields a permission mode string: e.g. 0644"""
    # Format the low nine permission bits as three zero-padded octal digits.
    # The previous oct(mode)[-3:] slicing leaked the "o" of the "0o" prefix for
    # modes below 0o100 (e.g. 0o44 -> "0o44" instead of "0044").
    return f"0{mode & 0o777:03o}"
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/network.py | flax/util/network.py | from __future__ import annotations
import asyncio
import logging
import socket
import ssl
from aiohttp import web
from aiohttp.log import web_logger
from dataclasses import dataclass
from ipaddress import ip_address, IPv4Network, IPv6Network
from typing import Iterable, List, Tuple, Union, Any, Optional, Dict
from typing_extensions import final
from flax.server.outbound_message import NodeType
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.peer_info import PeerInfo
from flax.util.ints import uint16
@final
@dataclass
class WebServer:
    """Thin wrapper around an aiohttp AppRunner/TCPSite pair.

    Build with WebServer.create(); shut down with close() followed by
    await_closed().
    """

    runner: web.AppRunner
    # the port actually bound (resolved from the OS when 0 was requested)
    listen_port: uint16
    _close_task: Optional[asyncio.Task[None]] = None

    @classmethod
    async def create(
        cls,
        hostname: str,
        port: uint16,
        routes: List[web.RouteDef],
        max_request_body_size: int = 1024**2,  # Default `client_max_size` from web.Application
        ssl_context: Optional[ssl.SSLContext] = None,
        keepalive_timeout: int = 75,  # Default from aiohttp.web
        shutdown_timeout: int = 60,  # Default `shutdown_timeout` from web.TCPSite
        prefer_ipv6: bool = False,
        logger: logging.Logger = web_logger,
    ) -> WebServer:
        """Start an aiohttp server for *routes* on hostname:port and return the wrapper."""
        app = web.Application(client_max_size=max_request_body_size, logger=logger)
        runner = web.AppRunner(app, access_log=None, keepalive_timeout=keepalive_timeout)
        runner.app.add_routes(routes)
        await runner.setup()
        site = web.TCPSite(runner, hostname, int(port), ssl_context=ssl_context, shutdown_timeout=shutdown_timeout)
        await site.start()

        #
        # On a dual-stack system, we want to get the (first) IPv4 port unless
        # prefer_ipv6 is set in which case we use the IPv6 port
        #
        if port == 0:
            port = select_port(prefer_ipv6, runner.addresses)

        return cls(runner=runner, listen_port=uint16(port))

    async def _close(self) -> None:
        # shut down in-flight keep-alive connections, then release the listening socket
        await self.runner.shutdown()
        await self.runner.cleanup()

    def close(self) -> None:
        # fire-and-forget; pair with await_closed() to wait for completion
        self._close_task = asyncio.create_task(self._close())

    async def await_closed(self) -> None:
        """Wait for a previously-triggered close() to finish."""
        if self._close_task is None:
            raise RuntimeError("WebServer stop not triggered")
        await self._close_task
def is_in_network(peer_host: str, networks: Iterable[Union[IPv4Network, IPv6Network]]) -> bool:
    """Return True when ``peer_host`` is a literal IP that lies in any of ``networks``."""
    try:
        addr = ip_address(peer_host)
    except ValueError:
        # Not a literal IP address (e.g. a hostname): never "in" a network.
        return False
    for network in networks:
        if addr in network:
            return True
    return False
def is_localhost(peer_host: str) -> bool:
    """Return True for the usual loopback spellings (IPv4, IPv6 and the host name)."""
    return peer_host in ("127.0.0.1", "localhost", "::1", "0:0:0:0:0:0:0:1")
def class_for_type(type: NodeType) -> Any:
    """Map a NodeType to the class implementing that service's API.

    Imports are performed lazily inside each branch so importing this module
    does not pull in every service implementation.  Raises ValueError for an
    unmapped NodeType.
    """
    if type is NodeType.FULL_NODE:
        from flax.full_node.full_node_api import FullNodeAPI

        return FullNodeAPI
    elif type is NodeType.WALLET:
        from flax.wallet.wallet_node_api import WalletNodeAPI

        return WalletNodeAPI
    elif type is NodeType.INTRODUCER:
        from flax.introducer.introducer_api import IntroducerAPI

        return IntroducerAPI
    elif type is NodeType.TIMELORD:
        from flax.timelord.timelord_api import TimelordAPI

        return TimelordAPI
    elif type is NodeType.FARMER:
        from flax.farmer.farmer_api import FarmerAPI

        return FarmerAPI
    elif type is NodeType.HARVESTER:
        from flax.harvester.harvester_api import HarvesterAPI

        return HarvesterAPI
    raise ValueError("No class for type")
def get_host_addr(host: Union[PeerInfo, str], prefer_ipv6: Optional[bool]) -> str:
    """Resolve ``host`` to an IP address string.

    Already-literal addresses are returned unchanged; otherwise the name is
    resolved with ``socket.getaddrinfo`` and the first result matching the
    preferred family (IPv4 unless ``prefer_ipv6``) is returned, falling back
    to the first result of any family.
    """
    # If there was no preference passed in (from config), set the system-wise
    # default here. Not a great place to locate a default value, and we should
    # probably do something to write it into the config, but. For now...
    if prefer_ipv6 is None:
        prefer_ipv6 = False
    # Use PeerInfo.is_valid() to see if it's already an address
    if isinstance(host, PeerInfo):
        hoststr = host.host
        if host.is_valid(True):
            return hoststr
    else:
        hoststr = host
        if PeerInfo(hoststr, uint16(0)).is_valid(True):
            return hoststr
    # Blocking DNS lookup; raises socket.gaierror for unresolvable names.
    addrset: List[
        Tuple["socket.AddressFamily", "socket.SocketKind", int, str, Union[Tuple[str, int], Tuple[str, int, int, int]]]
    ] = socket.getaddrinfo(hoststr, None)
    # Addrset is never empty, an exception is thrown or data is returned.
    for t in addrset:
        if prefer_ipv6 and t[0] == socket.AF_INET6:
            return t[4][0]
        if not prefer_ipv6 and t[0] == socket.AF_INET:
            return t[4][0]
    # If neither matched preference, just return the first available
    return addrset[0][4][0]
def is_trusted_inner(peer_host: str, peer_node_id: bytes32, trusted_peers: Dict, testing: bool) -> bool:
    """Return True when the peer should be treated as trusted.

    Outside of tests, anything connecting from localhost is implicitly
    trusted; otherwise the peer's node id must appear in ``trusted_peers``.
    A ``None`` trusted_peers mapping trusts nobody.
    """
    if trusted_peers is None:
        return False
    if not testing and peer_host == "127.0.0.1":
        return True
    return peer_node_id.hex() in trusted_peers
def select_port(prefer_ipv6: bool, addresses: List[Any]) -> uint16:
    """Pick the bound port whose address matches the preferred IP version.

    Each entry of ``addresses`` is ``(address_string, port, ...)``.  Falls
    back to the first entry's port when no address of the wanted version is
    present; raises IndexError when ``addresses`` is empty.
    """
    wanted_version = 6 if prefer_ipv6 else 4
    for address_string, bound_port, *_ in addresses:
        if ip_address(address_string).version == wanted_version:
            return bound_port
    return addresses[0][1]  # no matches, just use the first one in the list
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/partial_func.py | flax/util/partial_func.py | from __future__ import annotations
def partial_async_gen(f, *args):
    """
    Returns an async generator function which is equivalent to the passed in function,
    but only takes in one parameter (the first one).
    """

    async def bound(first_param):
        # Delegate to f with the remaining arguments frozen in.
        async for item in f(first_param, *args):
            yield item

    return bound
def partial_async(f, *args):
    """
    Returns an async function which is equivalent to the passed in function,
    but only takes in one parameter (the first one).
    """

    async def bound(first_param):
        # Delegate to f with the remaining arguments frozen in.
        return await f(first_param, *args)

    return bound
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/service_groups.py | flax/util/service_groups.py | from typing import KeysView, Generator
# Maps a user-facing service-group name (as used by the start/stop CLI) to the
# list of daemon service names it controls.
SERVICES_FOR_GROUP = {
    "all": (
        "flax_harvester flax_timelord_launcher flax_timelord flax_farmer "
        "flax_full_node flax_wallet flax_data_layer flax_data_layer_http"
    ).split(),
    # TODO: should this be `data_layer`?
    "data": "flax_wallet flax_data_layer".split(),
    "data_layer_http": "flax_data_layer_http".split(),
    "node": "flax_full_node".split(),
    "harvester": "flax_harvester".split(),
    "farmer": "flax_harvester flax_farmer flax_full_node flax_wallet".split(),
    "farmer-no-wallet": "flax_harvester flax_farmer flax_full_node".split(),
    "farmer-only": "flax_farmer".split(),
    "timelord": "flax_timelord_launcher flax_timelord flax_full_node".split(),
    "timelord-only": "flax_timelord".split(),
    "timelord-launcher-only": "flax_timelord_launcher".split(),
    "wallet": "flax_wallet".split(),
    "introducer": "flax_introducer".split(),
    "simulator": "flax_full_node_simulator".split(),
    "crawler": "flax_crawler".split(),
    "seeder": "flax_crawler flax_seeder".split(),
    "seeder-only": "flax_seeder".split(),
}
def all_groups() -> KeysView[str]:
    """Return a live view of every defined service-group name."""
    return SERVICES_FOR_GROUP.keys()
def services_for_groups(groups) -> Generator[str, None, None]:
    """Yield every service belonging to each group in ``groups``, in order.

    Raises KeyError for an unknown group name.
    """
    for group in groups:
        yield from SERVICES_FOR_GROUP[group]
def validate_service(service: str) -> bool:
    """Return True if ``service`` is a member of at least one service group."""
    for services in SERVICES_FOR_GROUP.values():
        if service in services:
            return True
    return False
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/chunks.py | flax/util/chunks.py | from __future__ import annotations
from typing import Iterator, List, TypeVar
T = TypeVar("T")
def chunks(in_list: List[T], size: int) -> Iterator[List[T]]:
    """Yield consecutive slices of ``in_list`` of at most ``size`` items.

    A ``size`` below 1 is clamped to 1, matching the original behavior.
    """
    step = max(1, size)
    start = 0
    while start < len(in_list):
        yield in_list[start : start + step]
        start += step
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/setproctitle.py | flax/util/setproctitle.py | from __future__ import annotations
# `setproctitle` is an optional dependency: if it is missing (or fails to
# import for any reason) the helpers below silently become no-ops.
try:
    import setproctitle as pysetproctitle

    no_setproctitle = False
except Exception:
    no_setproctitle = True
def setproctitle(ps_name: str) -> None:
    """Set the OS-visible process title; silently no-op when the library is unavailable."""
    if not no_setproctitle:
        pysetproctitle.setproctitle(ps_name)
def getproctitle() -> str:
    """Return the OS-visible process title, or "" when the library is unavailable."""
    if no_setproctitle:
        return ""
    # TODO: add type hints to setproctitle
    return pysetproctitle.getproctitle()  # type: ignore[no-any-return]
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/profiler.py | flax/util/profiler.py | import asyncio
import cProfile
import logging
import pathlib
from flax.util.path import path_from_root
# to use the profiler, enable it in the config file ("enable_profiler")
# the output will be printed to your flax root path, e.g. ~/.flax/mainnet/profile/
# to analyze the profile, run:
# python flax/utils/profiler.py ~/.flax/mainnet/profile | less -r
# this will print CPU usage of the flax full node main thread at 1 second increments.
# find a time window of interest and analyze the profile file (which are in pstats format).
# for example:
# python flax/utils/profiler.py ~/.flax/mainnet/profile 10 20
async def profile_task(root_path: pathlib.Path, service: str, log: logging.Logger) -> None:
    """Continuously profile the current event-loop thread, one snapshot per second.

    Each snapshot is dumped in pstats format to
    ``<root>/profile-<service>/slot-NNNNN.profile``.  The loop runs until the
    task is cancelled (``asyncio.sleep`` raises CancelledError on shutdown).
    """
    profile_dir = path_from_root(root_path, f"profile-{service}")
    log.info("Starting profiler. saving to %s" % profile_dir)
    profile_dir.mkdir(parents=True, exist_ok=True)
    counter = 0
    while True:
        pr = cProfile.Profile()
        pr.enable()
        # this will throw CancelledError when we're exiting
        await asyncio.sleep(1)
        pr.create_stats()
        pr.dump_stats(profile_dir / ("slot-%05d.profile" % counter))
        log.debug("saving profile %05d" % counter)
        counter += 1
if __name__ == "__main__":
    import io
    import pstats
    import sys
    from subprocess import check_call

    from colorama import Back, Fore, Style, init

    profile_dir = pathlib.Path(sys.argv[1])
    init(strip=False)

    def analyze_cpu_usage(profile_dir: pathlib.Path):
        """Print a colored one-line-per-slot CPU-usage timeline.

        Walks slot-NNNNN.profile files in order, computes the share of wall
        time NOT spent sleeping in the event loop's poll call, and renders it
        as a bar.  Stops (printing the error) at the first slot file that
        fails to load.
        """
        counter = 0
        try:
            while True:
                f = io.StringIO()
                st = pstats.Stats(str(profile_dir / ("slot-%05d.profile" % counter)), stream=f)
                st.strip_dirs()
                st.sort_stats(pstats.SortKey.CUMULATIVE)
                st.print_stats()
                f.seek(0)
                total = 0.0
                sleep = 0.0

                # output looks like this:
                # ncalls  tottime  percall  cumtime  percall filename:lineno(function)
                #      1    0.000    0.000    0.000    0.000 <function>
                for line in f:
                    # BUG FIX: the last clause used to be the bare constant
                    # `" seconds"` (always truthy) instead of a containment test.
                    if " function calls " in line and " in " in line and " seconds" in line:
                        # 304307 function calls (291692 primitive calls) in 1.031 seconds
                        assert total == 0
                        total = float(line.split()[-2])
                        continue
                    columns = line.split(None, 5)
                    if len(columns) < 6 or columns[0] == "ncalls":
                        continue

                    # TODO: to support windows and MacOS, extend this to a list of functions
                    # known to sleep the process, e.g. WaitForMultipleObjects or kqueue
                    if (
                        "{method 'poll' of 'select.epoll' objects}" in columns[5]
                        or "method 'control' of 'select.kqueue' objects" in columns[5]
                    ):
                        # cumulative time
                        sleep += float(columns[3])

                if sleep < 0.000001:
                    percent = 100.0
                else:
                    percent = 100.0 * (total - sleep) / total
                if percent > 90:
                    color = Fore.RED + Style.BRIGHT
                elif percent > 80:
                    color = Fore.MAGENTA + Style.BRIGHT
                elif percent > 70:
                    color = Fore.YELLOW + Style.BRIGHT
                elif percent > 60:
                    color = Style.BRIGHT
                elif percent < 10:
                    color = Fore.GREEN
                else:
                    color = ""

                quantized = int(percent // 2)
                print(
                    ("%05d: " + color + "%3.0f%% CPU " + Back.WHITE + "%s" + Style.RESET_ALL + "%s|")
                    % (counter, percent, " " * quantized, " " * (50 - quantized))
                )

                counter += 1
        except Exception as e:
            print(e)

    def analyze_slot_range(profile_dir: pathlib.Path, first: int, last: int):
        """Render a gprof2dot call graph (PNG) covering slots [first, last]."""
        if last < first:
            print("ERROR: first must be <= last when specifying slot range")
            return

        files = []
        for i in range(first, last + 1):
            files.append(str(profile_dir / ("slot-%05d.profile" % i)))

        output_file = "flax-hotspot-%d" % first
        if first < last:
            output_file += "-%d" % last
        print("generating call tree for slot(s) [%d, %d]" % (first, last))
        check_call(["gprof2dot", "-f", "pstats", "-o", output_file + ".dot"] + files)
        # Binary mode: `dot` writes raw PNG bytes to this file descriptor.
        with open(output_file + ".png", "wb") as f:
            check_call(["dot", "-T", "png", output_file + ".dot"], stdout=f)
        print("output written to: %s.png" % output_file)

    if len(sys.argv) == 2:
        # this analyzes the CPU usage at all slots saved to the profiler directory
        analyze_cpu_usage(profile_dir)
    elif len(sys.argv) in [3, 4]:
        # the additional arguments are interpreted as either one slot, or a
        # slot range (first and last) to analyze
        first = int(sys.argv[2])
        last = int(sys.argv[3]) if len(sys.argv) == 4 else first
        analyze_slot_range(profile_dir, first, last)
    else:
        print(
            """USAGE:
profiler.py <profile-directory>
    Analyze CPU usage at each 1 second interval from the profiles in the specified
    directory. Print colored timeline to stdout
profiler.py <profile-directory> <slot>
profiler.py <profile-directory> <first-slot> <last-slot>
    Analyze a single slot, or a range of time slots, from the profile directory
"""
        )
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/pip_import.py | flax/util/pip_import.py | "Import a package and install it with PIP if it doesn't exist."
from __future__ import annotations
import subprocess
import sys
def pip_import(module, pypi_name=None):
    """Import ``module``, pip-installing ``pypi_name`` (or ``module``) first if needed.

    NOTE(review): the previous docstring claimed None was returned when the
    module could not be imported or installed; in fact the final
    ``__import__`` raises ImportError in that case.
    """
    try:
        return __import__(module)
    except ImportError:
        pass

    # Not importable yet: ask pip (in this interpreter) to install it, then retry.
    subprocess.call([sys.executable, "-m", "pip", "install", pypi_name or module])
    return __import__(module)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/generator_tools.py | flax/util/generator_tools.py | from typing import Any, Iterator, List, Tuple, Optional
from chiabip158 import PyBIP158
from flax.types.blockchain_format.coin import Coin
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.full_block import FullBlock
from flax.types.header_block import HeaderBlock
from flax.types.spend_bundle_conditions import SpendBundleConditions
from flax.consensus.cost_calculator import NPCResult
from flax.util.ints import uint64
def get_block_header(block: FullBlock, tx_addition_coins: List[Coin], removals_names: List[bytes32]) -> HeaderBlock:
    """Strip a FullBlock down to a HeaderBlock, attaching a BIP158 coin filter.

    For transaction blocks the filter commits to the puzzle hashes of all
    additions (tx additions plus the block's reward coins) and to the removed
    coin names; otherwise the filter is built over an empty element list.
    """
    # Create filter
    byte_array_tx: List[bytearray] = []
    addition_coins = tx_addition_coins + list(block.get_included_reward_coins())
    if block.is_transaction_block():
        for coin in addition_coins:
            byte_array_tx.append(bytearray(coin.puzzle_hash))
        for name in removals_names:
            byte_array_tx.append(bytearray(name))

    bip158: PyBIP158 = PyBIP158(byte_array_tx)
    encoded_filter: bytes = bytes(bip158.GetEncoded())

    return HeaderBlock(
        block.finished_sub_slots,
        block.reward_chain_block,
        block.challenge_chain_sp_proof,
        block.challenge_chain_ip_proof,
        block.reward_chain_sp_proof,
        block.reward_chain_ip_proof,
        block.infused_challenge_chain_ip_proof,
        block.foliage,
        block.foliage_transaction_block,
        encoded_filter,
        block.transactions_info,
    )
def additions_for_npc(npc_result: NPCResult) -> List[Coin]:
    """Return every coin created by the spends in ``npc_result`` ([] when conds is None)."""
    if npc_result.conds is None:
        return []
    return [
        Coin(spend.coin_id, puzzle_hash, amount)
        for spend in npc_result.conds.spends
        for puzzle_hash, amount, _ in spend.create_coin
    ]
def tx_removals_and_additions(results: Optional[SpendBundleConditions]) -> Tuple[List[bytes32], List[Coin]]:
    """
    Doesn't return farmer and pool reward.
    """
    if results is None:
        return [], []

    removals: List[bytes32] = []
    additions: List[Coin] = []
    for spend in results.spends:
        spent_id = bytes32(spend.coin_id)
        removals.append(spent_id)
        additions.extend(
            Coin(spent_id, bytes32(puzzle_hash), uint64(amount)) for puzzle_hash, amount, _ in spend.create_coin
        )
    return removals, additions
def list_to_batches(list_to_split: List[Any], batch_size: int) -> Iterator[Tuple[int, List[Any]]]:
    """Split ``list_to_split`` into consecutive batches of up to ``batch_size`` items.

    Yields ``(remaining, batch)`` pairs where ``remaining`` is the number of
    items still to come after ``batch`` (0 for the final batch).

    Raises:
        ValueError: if ``batch_size`` is not positive.  This is raised eagerly
            at call time; previously the whole body was a generator, so the
            error only surfaced on the first iteration.
    """
    if batch_size <= 0:
        raise ValueError("list_to_batches: batch_size must be greater than 0.")
    return _generate_batches(list_to_split, batch_size)


def _generate_batches(list_to_split: List[Any], batch_size: int) -> Iterator[Tuple[int, List[Any]]]:
    # Helper generator; assumes batch_size has already been validated.
    total_size = len(list_to_split)
    for batch_start in range(0, total_size, batch_size):
        batch_end = min(batch_start + batch_size, total_size)
        yield total_size - batch_end, list_to_split[batch_start:batch_end]
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/check_fork_next_block.py | flax/util/check_fork_next_block.py | from typing import List, Callable
from flax.consensus.blockchain_interface import BlockchainInterface
from flax.util.ints import uint32
async def check_fork_next_block(
    blockchain: BlockchainInterface, fork_point_height: uint32, peers_with_peak: List, check_block_future: Callable
):
    """Possibly advance the fork point when no real fork occurred.

    When the computed fork point equals the third-most-recent sub-epoch
    summary height (the value used when no fork was detected), peers are asked
    for the block at our peak height + 1; if one confirms it, the fork point
    is moved up to our current peak.  Closed peers are pruned from
    ``peers_with_peak`` as a side effect.  Returns the (possibly updated)
    fork point height.
    """
    our_peak_height = blockchain.get_peak_height()
    ses_heigths = blockchain.get_ses_heights()
    if len(ses_heigths) > 2 and our_peak_height is not None:
        ses_heigths.sort()
        max_fork_ses_height = ses_heigths[-3]
        potential_peek = uint32(our_peak_height + 1)
        # This is the fork point in SES in the case where no fork was detected
        if blockchain.get_peak_height() is not None and fork_point_height == max_fork_ses_height:
            for peer in peers_with_peak.copy():
                if peer.closed:
                    peers_with_peak.remove(peer)
                    continue
                # Grab a block at peak + 1 and check if fork point is actually our current height
                if await check_block_future(peer, potential_peek, blockchain):
                    fork_point_height = our_peak_height
                    break
    return fork_point_height
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/bech32m.py | flax/util/bech32m.py | # Copyright (c) 2017 Pieter Wuille
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Based on this specification from Pieter Wuille:
# https://github.com/sipa/bips/blob/bip-bech32m/bip-bech32m.mediawiki
"""Reference implementation for Bech32m and segwit addresses."""
from typing import List, Iterable, Optional, Tuple
from flax.types.blockchain_format.sized_bytes import bytes32
CHARSET = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"
def bech32_polymod(values: List[int]) -> int:
    """Internal function that computes the Bech32 checksum."""
    GENERATOR = (0x3B6A57B2, 0x26508E6D, 0x1EA119FA, 0x3D4233DD, 0x2A1462B3)
    chk = 1
    for value in values:
        top = chk >> 25
        chk = (chk & 0x1FFFFFF) << 5 ^ value
        for bit, coefficient in enumerate(GENERATOR):
            if (top >> bit) & 1:
                chk ^= coefficient
    return chk
def bech32_hrp_expand(hrp: str) -> List[int]:
    """Expand the HRP into values for checksum computation."""
    high_bits = [ord(ch) >> 5 for ch in hrp]
    low_bits = [ord(ch) & 31 for ch in hrp]
    return high_bits + [0] + low_bits
M = 0x2BC830A3
def bech32_verify_checksum(hrp: str, data: List[int]) -> bool:
    """Return True when ``hrp`` + ``data`` carries a valid Bech32m checksum."""
    return M == bech32_polymod(bech32_hrp_expand(hrp) + data)
def bech32_create_checksum(hrp: str, data: List[int]) -> List[int]:
    """Compute the six 5-bit checksum values for ``hrp`` and ``data``."""
    polymod = bech32_polymod(bech32_hrp_expand(hrp) + data + [0, 0, 0, 0, 0, 0]) ^ M
    return [(polymod >> (5 * (5 - i))) & 31 for i in range(6)]
def bech32_encode(hrp: str, data: List[int]) -> str:
    """Compute a Bech32 string given HRP and data values."""
    payload = data + bech32_create_checksum(hrp, data)
    return hrp + "1" + "".join(CHARSET[d] for d in payload)
def bech32_decode(bech: str, max_length: int = 90) -> Tuple[Optional[str], Optional[List[int]]]:
    """Validate a Bech32 string, and determine HRP and data.

    Returns (None, None) on any validation failure; otherwise (hrp, data)
    with the six checksum values stripped from data.
    """
    bech = bech.strip()
    # Reject non-printable characters and mixed case.
    if any(not (33 <= ord(ch) <= 126) for ch in bech):
        return (None, None)
    if bech.lower() != bech and bech.upper() != bech:
        return (None, None)
    bech = bech.lower()
    separator = bech.rfind("1")
    if separator < 1 or separator + 7 > len(bech) or len(bech) > max_length:
        return (None, None)
    hrp, payload = bech[:separator], bech[separator + 1 :]
    if any(ch not in CHARSET for ch in payload):
        return (None, None)
    data = [CHARSET.find(ch) for ch in payload]
    if not bech32_verify_checksum(hrp, data):
        return (None, None)
    return hrp, data[:-6]
def convertbits(data: Iterable[int], frombits: int, tobits: int, pad: bool = True) -> List[int]:
    """General power-of-2 base conversion."""
    accumulator = 0
    bit_count = 0
    converted: List[int] = []
    out_mask = (1 << tobits) - 1
    acc_mask = (1 << (frombits + tobits - 1)) - 1
    for value in data:
        if value < 0 or (value >> frombits):
            raise ValueError("Invalid Value")
        accumulator = ((accumulator << frombits) | value) & acc_mask
        bit_count += frombits
        while bit_count >= tobits:
            bit_count -= tobits
            converted.append((accumulator >> bit_count) & out_mask)
    if pad:
        # Flush any remaining bits, zero-padded on the right.
        if bit_count:
            converted.append((accumulator << (tobits - bit_count)) & out_mask)
    elif bit_count >= frombits or ((accumulator << (tobits - bit_count)) & out_mask):
        raise ValueError("Invalid bits")
    return converted
def encode_puzzle_hash(puzzle_hash: bytes32, prefix: str) -> str:
    """Encode a 32-byte puzzle hash as a bech32m address with ``prefix`` as the HRP."""
    return bech32_encode(prefix, convertbits(puzzle_hash, 8, 5))
def decode_puzzle_hash(address: str) -> bytes32:
    """Decode a bech32m address into its 32-byte puzzle hash.

    Raises ValueError when the address is malformed.
    """
    _hrp, data = bech32_decode(address)
    if data is None:
        raise ValueError("Invalid Address")
    return bytes32(convertbits(data, 5, 8, False))
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/keychain.py | flax/util/keychain.py | from __future__ import annotations
import pkg_resources
import sys
import unicodedata
from bitstring import BitArray # pyright: reportMissingImports=false
from blspy import AugSchemeMPL, G1Element, PrivateKey # pyright: reportMissingImports=false
from flax.util.errors import (
KeychainException,
KeychainNotSet,
KeychainKeyDataMismatch,
KeychainFingerprintExists,
KeychainFingerprintNotFound,
KeychainSecretsMissing,
KeychainUserNotFound,
)
from flax.util.hash import std_hash
from flax.util.ints import uint32
from flax.util.keyring_wrapper import KeyringWrapper
from flax.util.streamable import streamable, Streamable
from dataclasses import dataclass
from hashlib import pbkdf2_hmac
from pathlib import Path
from secrets import token_bytes
from typing import Any, Dict, List, Optional, Tuple
from typing_extensions import final
CURRENT_KEY_VERSION = "1.8"
DEFAULT_USER = f"user-flax-{CURRENT_KEY_VERSION}" # e.g. user-flax-1.8
DEFAULT_SERVICE = f"flax-{DEFAULT_USER}" # e.g. flax-user-flax-1.8
MAX_KEYS = 100
MIN_PASSPHRASE_LEN = 8
def supports_os_passphrase_storage() -> bool:
    """True on platforms whose OS credential store can hold the keyring passphrase."""
    return sys.platform in ("darwin", "win32", "cygwin")
def passphrase_requirements() -> Dict[str, Any]:
    """
    Returns a dictionary specifying current passphrase requirements
    """
    return {
        "is_optional": True,
        "min_length": MIN_PASSPHRASE_LEN,
    }  # lgtm [py/clear-text-logging-sensitive-data]
def set_keys_root_path(keys_root_path: Path) -> None:
    """
    Used to set the keys_root_path prior to instantiating the KeyringWrapper shared instance.
    """
    # Delegates to the class-level setter; must run before the singleton exists.
    KeyringWrapper.set_keys_root_path(keys_root_path)
def bip39_word_list() -> str:
    """Return the packaged BIP39 English word list as one newline-separated string."""
    return pkg_resources.resource_string(__name__, "english.txt").decode()
def generate_mnemonic() -> str:
    """Generate a fresh 24-word mnemonic from 32 bytes of OS randomness."""
    return bytes_to_mnemonic(token_bytes(32))
def bytes_to_mnemonic(mnemonic_bytes: bytes) -> str:
    """Encode entropy bytes as a BIP39 mnemonic phrase (11 bits per word)."""
    if len(mnemonic_bytes) not in [16, 20, 24, 28, 32]:
        raise ValueError(
            f"Data length should be one of the following: [16, 20, 24, 28, 32], but it is {len(mnemonic_bytes)}."
        )
    word_list = bip39_word_list().splitlines()
    # Checksum length (in bits) is entropy length / 32, i.e. bytes / 4.
    checksum_bit_count = len(mnemonic_bytes) // 4
    checksum = BitArray(bytes(std_hash(mnemonic_bytes)))[:checksum_bit_count]
    bitarray = BitArray(mnemonic_bytes) + checksum
    assert len(bitarray) % 11 == 0

    words = []
    for start in range(0, len(bitarray), 11):
        index = bitarray[start : start + 11].uint
        words.append(word_list[index])
    return " ".join(words)
def bytes_from_mnemonic(mnemonic_str: str) -> bytes:
    """Decode a BIP39 mnemonic phrase back into its entropy bytes.

    Raises ValueError for a wrong word count, an unknown word, or a checksum
    mismatch.
    """
    words = mnemonic_str.split(" ")
    if len(words) not in [12, 15, 18, 21, 24]:
        raise ValueError("Invalid mnemonic length")

    word_indices = {word: i for i, word in enumerate(bip39_word_list().splitlines())}
    bit_array = BitArray()
    for word in words:
        if word not in word_indices:
            raise ValueError(f"'{word}' is not in the mnemonic dictionary; may be misspelled")
        bit_array.append(BitArray(uint=word_indices[word], length=11))

    checksum_length: int = len(words) // 3
    entropy_length: int = len(words) * 11 - checksum_length
    assert len(bit_array) == len(words) * 11
    assert entropy_length % 32 == 0

    entropy_bytes = bit_array[:entropy_length].bytes
    checksum_bits = bit_array[entropy_length:]
    expected_checksum = BitArray(std_hash(entropy_bytes))[:checksum_length]
    assert len(checksum_bits) == checksum_length
    if expected_checksum != checksum_bits:
        raise ValueError("Invalid order of mnemonic words")
    return entropy_bytes
def mnemonic_to_seed(mnemonic: str) -> bytes:
    """
    Uses BIP39 standard to derive a seed from entropy bytes.
    """
    # BIP39: PBKDF2-HMAC-SHA512, 2048 rounds, salt "mnemonic" (+ empty passphrase),
    # both salt and mnemonic NFKD-normalized.
    salt = unicodedata.normalize("NFKD", "mnemonic").encode("utf-8")
    normalized_mnemonic = unicodedata.normalize("NFKD", mnemonic).encode("utf-8")
    seed = pbkdf2_hmac("sha512", normalized_mnemonic, salt, 2048)
    assert len(seed) == 64
    return seed
def default_keychain_user() -> str:
    """Return the version-scoped keychain "user" namespace (e.g. user-flax-1.8)."""
    return DEFAULT_USER
def default_keychain_service() -> str:
    """Return the version-scoped keychain service name (e.g. flax-user-flax-1.8)."""
    return DEFAULT_SERVICE
def get_private_key_user(user: str, index: int) -> str:
    """
    Returns the keychain user string for a key index.
    """
    return "-".join(("wallet", user, str(index)))
@final
@streamable
@dataclass(frozen=True)
class KeyDataSecrets(Streamable):
    # Secret material for one key.  The three fields are redundant encodings
    # of the same secret and are cross-checked in __post_init__.
    mnemonic: List[str]
    entropy: bytes
    private_key: PrivateKey

    def __post_init__(self) -> None:
        # This is redundant if `from_*` methods are used but its to make sure there can't be an `KeyDataSecrets`
        # instance with an attribute mismatch for calculated cached values. Should be ok since we don't handle a lot of
        # keys here.
        mnemonic_str = self.mnemonic_str()
        try:
            bytes_from_mnemonic(mnemonic_str)
        except Exception as e:
            raise KeychainKeyDataMismatch("mnemonic") from e
        if bytes_from_mnemonic(mnemonic_str) != self.entropy:
            raise KeychainKeyDataMismatch("entropy")
        if AugSchemeMPL.key_gen(mnemonic_to_seed(mnemonic_str)) != self.private_key:
            raise KeychainKeyDataMismatch("private_key")

    @classmethod
    def from_mnemonic(cls, mnemonic: str) -> KeyDataSecrets:
        """Build all three encodings from a mnemonic phrase."""
        return cls(
            mnemonic=mnemonic.split(),
            entropy=bytes_from_mnemonic(mnemonic),
            private_key=AugSchemeMPL.key_gen(mnemonic_to_seed(mnemonic)),
        )

    @classmethod
    def from_entropy(cls, entropy: bytes) -> KeyDataSecrets:
        """Build from raw entropy bytes."""
        return cls.from_mnemonic(bytes_to_mnemonic(entropy))

    @classmethod
    def generate(cls) -> KeyDataSecrets:
        """Build from freshly generated OS randomness."""
        return cls.from_mnemonic(generate_mnemonic())

    def mnemonic_str(self) -> str:
        """Return the mnemonic as a single space-separated string."""
        return " ".join(self.mnemonic)
@final
@streamable
@dataclass(frozen=True)
class KeyData(Streamable):
    # Public portion of a stored key; `secrets` is None when the key was
    # loaded without secret material (include_secrets=False).
    fingerprint: uint32
    public_key: G1Element
    label: Optional[str]
    secrets: Optional[KeyDataSecrets]

    def __post_init__(self) -> None:
        # This is redundant if `from_*` methods are used but its to make sure there can't be an `KeyData` instance with
        # an attribute mismatch for calculated cached values. Should be ok since we don't handle a lot of keys here.
        if self.secrets is not None and self.public_key != self.private_key.get_g1():
            raise KeychainKeyDataMismatch("public_key")
        if self.public_key.get_fingerprint() != self.fingerprint:
            raise KeychainKeyDataMismatch("fingerprint")

    @classmethod
    def from_mnemonic(cls, mnemonic: str, label: Optional[str] = None) -> KeyData:
        """Derive the full key data (including secrets) from a mnemonic."""
        private_key = AugSchemeMPL.key_gen(mnemonic_to_seed(mnemonic))
        return cls(
            fingerprint=private_key.get_g1().get_fingerprint(),
            public_key=private_key.get_g1(),
            label=label,
            secrets=KeyDataSecrets.from_mnemonic(mnemonic),
        )

    @classmethod
    def from_entropy(cls, entropy: bytes, label: Optional[str] = None) -> KeyData:
        """Derive the full key data from raw entropy bytes."""
        return cls.from_mnemonic(bytes_to_mnemonic(entropy), label)

    @classmethod
    def generate(cls, label: Optional[str] = None) -> KeyData:
        """Create key data from freshly generated OS randomness."""
        return cls.from_mnemonic(generate_mnemonic(), label)

    @property
    def mnemonic(self) -> List[str]:
        # Raises KeychainSecretsMissing when loaded without secrets.
        if self.secrets is None:
            raise KeychainSecretsMissing()
        return self.secrets.mnemonic

    def mnemonic_str(self) -> str:
        """Return the mnemonic as a single space-separated string."""
        if self.secrets is None:
            raise KeychainSecretsMissing()
        return self.secrets.mnemonic_str()

    @property
    def entropy(self) -> bytes:
        # Raises KeychainSecretsMissing when loaded without secrets.
        if self.secrets is None:
            raise KeychainSecretsMissing()
        return self.secrets.entropy

    @property
    def private_key(self) -> PrivateKey:
        # Raises KeychainSecretsMissing when loaded without secrets.
        if self.secrets is None:
            raise KeychainSecretsMissing()
        return self.secrets.private_key
class Keychain:
    """
    The keychain stores two types of keys: private keys, which are PrivateKeys from blspy,
    and private key seeds, which are bytes objects that are used as a seed to construct
    PrivateKeys. Private key seeds are converted to mnemonics when shown to users.

    Both types of keys are stored as hex strings in the python keyring, and the implementation of
    the keyring depends on OS. Both types of keys can be added, and get_private_keys returns a
    list of all keys.
    """

    def __init__(self, user: Optional[str] = None, service: Optional[str] = None, force_legacy: bool = False):
        # user/service default to version-scoped identifiers so keys from
        # different key-format versions do not collide in the OS keyring.
        self.user = user if user is not None else default_keychain_user()
        self.service = service if service is not None else default_keychain_service()

        keyring_wrapper: Optional[KeyringWrapper] = (
            KeyringWrapper.get_legacy_instance() if force_legacy else KeyringWrapper.get_shared_instance()
        )
        # Raises KeychainNotSet when no keyring backend is available.
        if keyring_wrapper is None:
            raise KeychainNotSet(f"KeyringWrapper not set: force_legacy={force_legacy}")
        self.keyring_wrapper = keyring_wrapper
    def _get_key_data(self, index: int, include_secrets: bool = True) -> KeyData:
        """
        Returns the parsed keychain contents for a specific 'user' (key index). The content
        is represented by the class `KeyData`.

        Raises KeychainUserNotFound when nothing is stored at ``index``.
        """
        user = get_private_key_user(self.user, index)
        read_str = self.keyring_wrapper.get_passphrase(self.service, user)
        if read_str is None or len(read_str) == 0:
            raise KeychainUserNotFound(self.service, user)
        str_bytes = bytes.fromhex(read_str)

        # Stored layout: G1Element public-key bytes followed by 32 entropy bytes.
        public_key = G1Element.from_bytes(str_bytes[: G1Element.SIZE])
        fingerprint = public_key.get_fingerprint()
        entropy = str_bytes[G1Element.SIZE : G1Element.SIZE + 32]

        return KeyData(
            fingerprint=fingerprint,
            public_key=public_key,
            label=self.keyring_wrapper.get_label(fingerprint),
            secrets=KeyDataSecrets.from_entropy(entropy) if include_secrets else None,
        )
def _get_free_private_key_index(self) -> int:
"""
Get the index of the first free spot in the keychain.
"""
index = 0
while True:
try:
self._get_key_data(index)
index += 1
except KeychainUserNotFound:
return index
    def add_private_key(self, mnemonic: str, label: Optional[str] = None) -> PrivateKey:
        """
        Adds a private key to the keychain, with the given entropy and passphrase. The
        keychain itself will store the public key, and the entropy bytes,
        but not the passphrase.

        Raises KeychainFingerprintExists when a key with the same fingerprint
        is already stored.
        """
        seed = mnemonic_to_seed(mnemonic)
        entropy = bytes_from_mnemonic(mnemonic)
        index = self._get_free_private_key_index()
        key = AugSchemeMPL.key_gen(seed)
        fingerprint = key.get_g1().get_fingerprint()

        if fingerprint in [pk.get_fingerprint() for pk in self.get_all_public_keys()]:
            # Prevents duplicate add
            raise KeychainFingerprintExists(fingerprint)

        # Try to set the label first, it may fail if the label is invalid or already exists.
        # This can probably just be moved into `FileKeyring.set_passphrase` after the legacy keyring stuff was dropped.
        if label is not None:
            self.keyring_wrapper.set_label(fingerprint, label)

        try:
            # Stored payload is hex(public key bytes) + hex(entropy).
            self.keyring_wrapper.set_passphrase(
                self.service,
                get_private_key_user(self.user, index),
                bytes(key.get_g1()).hex() + entropy.hex(),
            )
        except Exception:
            # Roll back the label so a failed store doesn't leave it orphaned.
            if label is not None:
                self.keyring_wrapper.delete_label(fingerprint)
            raise

        return key
    def set_label(self, fingerprint: int, label: str) -> None:
        """
        Assigns the given label to the first key with the given fingerprint.

        Raises KeychainFingerprintNotFound when no key matches.
        """
        self.get_key(fingerprint)  # raise if the fingerprint doesn't exist
        self.keyring_wrapper.set_label(fingerprint, label)
    def delete_label(self, fingerprint: int) -> None:
        """
        Removes the label assigned to the key with the given fingerprint.

        NOTE(review): unlike set_label, this does not first verify that the
        fingerprint exists in the keychain.
        """
        self.keyring_wrapper.delete_label(fingerprint)
def get_first_private_key(self) -> Optional[Tuple[PrivateKey, bytes]]:
"""
Returns the first key in the keychain that has one of the passed in passphrases.
"""
for index in range(MAX_KEYS + 1):
try:
key_data = self._get_key_data(index)
return key_data.private_key, key_data.entropy
except KeychainUserNotFound:
pass
return None
def get_private_key_by_fingerprint(self, fingerprint: int) -> Optional[Tuple[PrivateKey, bytes]]:
"""
Return first private key which have the given public key fingerprint.
"""
for index in range(MAX_KEYS + 1):
try:
key_data = self._get_key_data(index)
if key_data.fingerprint == fingerprint:
return key_data.private_key, key_data.entropy
except KeychainUserNotFound:
pass
return None
def get_all_private_keys(self) -> List[Tuple[PrivateKey, bytes]]:
    """
    Returns every retrievable private key, each paired with its entropy
    bytes (i.e. the mnemonic), in key-index order.
    """
    collected: List[Tuple[PrivateKey, bytes]] = []
    for slot in range(MAX_KEYS + 1):
        try:
            key_data = self._get_key_data(slot)
        except KeychainUserNotFound:
            continue
        collected.append((key_data.private_key, key_data.entropy))
    return collected
def get_key(self, fingerprint: int, include_secrets: bool = False) -> KeyData:
    """
    Returns the KeyData of the first key whose public key has the given
    fingerprint.

    Raises KeychainFingerprintNotFound when no stored key matches.
    """
    for slot in range(MAX_KEYS + 1):
        try:
            key_data = self._get_key_data(slot, include_secrets)
        except KeychainUserNotFound:
            continue
        if key_data.public_key.get_fingerprint() == fingerprint:
            return key_data
    raise KeychainFingerprintNotFound(fingerprint)
def get_keys(self, include_secrets: bool = False) -> List[KeyData]:
    """
    Returns the KeyData of every retrievable key, in key-index order.
    """
    found: List[KeyData] = []
    for slot in range(MAX_KEYS + 1):
        try:
            found.append(self._get_key_data(slot, include_secrets))
        except KeychainUserNotFound:
            continue
    return found
def get_all_public_keys(self) -> List[G1Element]:
    """
    Returns the public key (G1Element) of every stored private key.
    """
    return [private_key.get_g1() for private_key, _entropy in self.get_all_private_keys()]
def get_first_public_key(self) -> Optional[G1Element]:
    """
    Returns the public key of the first stored private key, or None when
    the keychain is empty.
    """
    first = self.get_first_private_key()
    if first is None:
        return None
    private_key, _entropy = first
    return private_key.get_g1()
def delete_key_by_fingerprint(self, fingerprint: int) -> int:
    """
    Deletes every stored key whose fingerprint matches and returns how many
    passphrase entries were removed.

    Label and passphrase deletion failures are deliberately swallowed so a
    partial failure does not abort the scan of remaining key slots.
    """
    removed = 0
    for slot in range(MAX_KEYS + 1):
        try:
            key_data = self._get_key_data(slot, include_secrets=False)
        except KeychainUserNotFound:
            continue
        if key_data.fingerprint != fingerprint:
            continue
        # Best-effort label removal: legacy keyrings don't support labels.
        try:
            self.keyring_wrapper.delete_label(key_data.fingerprint)
        except (KeychainException, NotImplementedError):
            pass
        try:
            self.keyring_wrapper.delete_passphrase(self.service, get_private_key_user(self.user, slot))
            removed += 1
        except Exception:
            pass
    return removed
def delete_keys(self, keys_to_delete: List[Tuple[PrivateKey, bytes]]):
    """
    Deletes every key in the list; raises ValueError when any of the
    requested fingerprints could not be found.
    """
    fingerprints = {sk.get_g1().get_fingerprint() for sk, _entropy in keys_to_delete}
    not_found = len(fingerprints)
    while fingerprints:
        if self.delete_key_by_fingerprint(fingerprints.pop()) > 0:
            not_found -= 1
    if not_found > 0:
        raise ValueError(f"{not_found} keys could not be found for deletion")
def delete_all_keys(self) -> None:
    """
    Deletes every key stored in the keychain.
    """
    for slot in range(MAX_KEYS + 1):
        try:
            key_data = self._get_key_data(slot)
        except KeychainUserNotFound:
            continue
        self.delete_key_by_fingerprint(key_data.fingerprint)
@staticmethod
def is_keyring_locked() -> bool:
    """
    Returns whether the keyring is in a locked state. If the keyring doesn't
    have a master passphrase set, or if a master passphrase is set and the
    cached passphrase is valid, the keyring is "unlocked".
    """
    # No master passphrase at all -> nothing to unlock.
    if not Keychain.has_master_passphrase():
        return False
    # A valid cached passphrase also counts as unlocked.
    if Keychain.has_cached_passphrase() and Keychain.master_passphrase_is_valid(
        Keychain.get_cached_master_passphrase()
    ):
        return False
    # A master passphrase is set and no valid cached passphrase exists.
    return True
@staticmethod
def needs_migration() -> bool:
    """
    Returns a bool indicating whether the underlying keyring needs to be migrated to the new
    format for passphrase support.

    True while the shared KeyringWrapper is still reading from a legacy backend.
    """
    return KeyringWrapper.get_shared_instance().using_legacy_keyring()
@staticmethod
def handle_migration_completed():
    """
    When migration completes outside of the current process, we rely on a notification to inform
    the current process that it needs to reset/refresh its keyring. This allows us to stop using
    the legacy keyring in an already-running daemon if migration is completed using the CLI.
    """
    # Rebuilds the shared wrapper's backends and cached passphrase.
    KeyringWrapper.get_shared_instance().refresh_keyrings()
@staticmethod
def migrate_legacy_keyring(
    passphrase: Optional[str] = None,
    passphrase_hint: Optional[str] = None,
    save_passphrase: bool = False,
    cleanup_legacy_keyring: bool = False,
) -> None:
    """
    Begins legacy keyring migration in a non-interactive manner.

    When a non-empty passphrase is supplied, it is cached (not yet written
    to disk) so the migrated keyring will be encrypted with it.
    """
    if passphrase:  # None and "" both mean "no passphrase supplied"
        KeyringWrapper.get_shared_instance().set_master_passphrase(
            current_passphrase=None,
            new_passphrase=passphrase,
            write_to_keyring=False,
            passphrase_hint=passphrase_hint,
            save_passphrase=save_passphrase,
        )

    KeyringWrapper.get_shared_instance().migrate_legacy_keyring(cleanup_legacy_keyring=cleanup_legacy_keyring)
@staticmethod
def passphrase_is_optional() -> bool:
    """
    Returns whether a user-supplied passphrase is optional, as specified by the passphrase requirements.
    """
    # Defaults to False (passphrase required) when the requirements omit the flag.
    return passphrase_requirements().get("is_optional", False)
@staticmethod
def minimum_passphrase_length() -> int:
    """
    Returns the minimum passphrase length, as specified by the passphrase requirements.
    """
    # Defaults to 0 (no minimum) when the requirements omit the value.
    return passphrase_requirements().get("min_length", 0)
@staticmethod
def passphrase_meets_requirements(passphrase: Optional[str]) -> bool:
    """
    Returns whether the provided passphrase satisfies the passphrase requirements.
    """
    missing = passphrase is None or passphrase == ""

    # An absent passphrase is acceptable only when passphrases are optional.
    if missing and Keychain.passphrase_is_optional():
        return True

    # Otherwise the passphrase must exist and meet the minimum length
    # (an empty string passes too when the minimum length is 0).
    return passphrase is not None and len(passphrase) >= Keychain.minimum_passphrase_length()
@staticmethod
def has_master_passphrase() -> bool:
    """
    Returns a bool indicating whether the underlying keyring data
    is secured by a passphrase.
    """
    return KeyringWrapper.get_shared_instance().has_master_passphrase()
@staticmethod
def master_passphrase_is_valid(passphrase: str, force_reload: bool = False) -> bool:
    """
    Checks whether the provided passphrase can unlock the keyring. If force_reload
    is true, the keyring payload will be re-read from the backing file. If false,
    the passphrase will be checked against the in-memory payload.
    """
    return KeyringWrapper.get_shared_instance().master_passphrase_is_valid(passphrase, force_reload=force_reload)
@staticmethod
def has_cached_passphrase() -> bool:
    """
    Returns whether the master passphrase has been cached (it may need to be validated)
    """
    # Delegates to the shared KeyringWrapper's cache state.
    return KeyringWrapper.get_shared_instance().has_cached_master_passphrase()
@staticmethod
def get_cached_master_passphrase() -> str:
    """
    Returns the cached master passphrase.
    """
    # The wrapper returns a (passphrase, is_validated) tuple; only the
    # passphrase itself is exposed here.
    return KeyringWrapper.get_shared_instance().get_cached_master_passphrase()[0]
@staticmethod
def set_cached_master_passphrase(passphrase: Optional[str]) -> None:
    """
    Caches the provided master passphrase (without marking it validated).
    """
    KeyringWrapper.get_shared_instance().set_cached_master_passphrase(passphrase)
@staticmethod
def set_master_passphrase(
    current_passphrase: Optional[str],
    new_passphrase: str,
    *,
    passphrase_hint: Optional[str] = None,
    save_passphrase: bool = False,
) -> None:
    """
    Encrypts the keyring contents to new passphrase, provided that the current
    passphrase can decrypt the contents.

    :param current_passphrase: the passphrase currently protecting the keyring (may be None)
    :param new_passphrase: the passphrase to re-encrypt the keyring with
    :param passphrase_hint: optional hint stored alongside the passphrase
    :param save_passphrase: when True, also save the passphrase to the OS credential store
    """
    KeyringWrapper.get_shared_instance().set_master_passphrase(
        current_passphrase,
        new_passphrase,
        passphrase_hint=passphrase_hint,
        save_passphrase=save_passphrase,
    )
@staticmethod
def remove_master_passphrase(current_passphrase: Optional[str]) -> None:
    """
    Removes the user-provided master passphrase, and replaces it with the default
    master passphrase. The keyring contents will remain encrypted, but to the
    default passphrase.
    """
    KeyringWrapper.get_shared_instance().remove_master_passphrase(current_passphrase)
@staticmethod
def get_master_passphrase_hint() -> Optional[str]:
    """
    Returns the passphrase hint from the keyring (None when unsupported or unset).
    """
    return KeyringWrapper.get_shared_instance().get_master_passphrase_hint()
@staticmethod
def set_master_passphrase_hint(current_passphrase: str, passphrase_hint: Optional[str]) -> None:
    """
    Convenience method for setting/removing the passphrase hint. Requires the current
    passphrase, as the passphrase hint is written as part of a passphrase update.
    """
    # Re-sets the passphrase to itself; only the hint changes.
    Keychain.set_master_passphrase(current_passphrase, current_passphrase, passphrase_hint=passphrase_hint)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/lock.py | flax/util/lock.py | from __future__ import annotations
from dataclasses import dataclass
from pathlib import Path
from types import TracebackType
from typing import Optional, Type
from filelock import BaseFileLock, FileLock, Timeout
from typing_extensions import final
class LockfileError(Exception):
    """Raised when the lock file cannot be acquired (e.g. on timeout)."""

    pass
@final
@dataclass(frozen=True)
class Lockfile:
    """
    Context-manager wrapper around a file lock placed next to the guarded path.

    Acquisition failures (filelock.Timeout) are re-raised as LockfileError.
    """

    _lock: BaseFileLock
    timeout: float
    poll_interval: float

    @classmethod
    def create(cls, path: Path, timeout: float = -1, poll_interval: float = 0.05) -> Lockfile:
        """Build a Lockfile guarding *path* via a sibling "<name>.lock" file."""
        path.parent.mkdir(parents=True, exist_ok=True)
        lock_path = path.with_name(path.name + ".lock")
        return cls(_lock=FileLock(lock_path), timeout=timeout, poll_interval=poll_interval)

    def __enter__(self) -> Lockfile:
        self.acquire(timeout=self.timeout, poll_interval=self.poll_interval)
        return self

    def __exit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc: Optional[BaseException],
        traceback: Optional[TracebackType],
    ) -> None:
        self.release()

    def acquire(self, timeout: float, poll_interval: float) -> None:
        """Acquire the underlying lock, translating Timeout into LockfileError."""
        try:
            self._lock.acquire(timeout=timeout, poll_interval=poll_interval)
        except Timeout as timeout_error:
            raise LockfileError(timeout_error) from timeout_error

    def release(self) -> None:
        """Release the underlying lock."""
        self._lock.release()
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/chain_utils.py | flax/util/chain_utils.py | from typing import List
from clvm.casts import int_from_bytes
from flax.types.blockchain_format.coin import Coin
from flax.types.blockchain_format.program import SerializedProgram
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.condition_opcodes import ConditionOpcode
from flax.util.condition_tools import (
conditions_dict_for_solution,
created_outputs_for_conditions_dict,
)
def additions_for_solution(
    coin_name: bytes32, puzzle_reveal: SerializedProgram, solution: SerializedProgram, max_cost: int
) -> List[Coin]:
    """
    Checks the conditions created by CoinSpend and returns the list of all coins created.
    Returns an empty list when the conditions cannot be computed.
    """
    error, conditions, _cost = conditions_dict_for_solution(puzzle_reveal, solution, max_cost)
    if error or conditions is None:
        return []
    return created_outputs_for_conditions_dict(conditions, coin_name)
def fee_for_solution(puzzle_reveal: SerializedProgram, solution: SerializedProgram, max_cost: int) -> int:
    """
    Sums the RESERVE_FEE condition amounts of the spend. Returns 0 when the
    conditions cannot be computed or no fee conditions are present.
    """
    error, conditions, _cost = conditions_dict_for_solution(puzzle_reveal, solution, max_cost)
    if error or conditions is None:
        return 0
    # Each RESERVE_FEE condition carries its amount as the first var.
    return sum(int_from_bytes(cvp.vars[0]) for cvp in conditions.get(ConditionOpcode.RESERVE_FEE, []))
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/keyring_wrapper.py | flax/util/keyring_wrapper.py | from blspy import PrivateKey # pyright: reportMissingImports=false
from flax.util.default_root import DEFAULT_KEYS_ROOT_PATH
from flax.util.file_keyring import FileKeyring
from flax.util.misc import prompt_yes_no
from keyrings.cryptfile.cryptfile import CryptFileKeyring # pyright: reportMissingImports=false
from keyring.backends.macOS import Keyring as MacKeyring
from keyring.backends.Windows import WinVaultKeyring as WinKeyring
from keyring.errors import KeyringError, PasswordDeleteError
from pathlib import Path
from sys import platform
from typing import Any, List, Optional, Tuple, Type, Union
# We want to protect the keyring, even if a user-specified master passphrase isn't provided
#
# WARNING: Changing the default passphrase will prevent passphrase-less users from accessing
# their existing keys. Using a new default passphrase requires migrating existing users to
# the new passphrase.
DEFAULT_PASSPHRASE_IF_NO_MASTER_PASSPHRASE = "$ flax passphrase set # all the cool kids are doing it!"

# Service/user names under which the master passphrase is saved in the OS credential store.
MASTER_PASSPHRASE_SERVICE_NAME = "Flax Passphrase"
MASTER_PASSPHRASE_USER_NAME = "Flax Passphrase"

# Backends that may hold pre-migration keys.
LegacyKeyring = Union[MacKeyring, WinKeyring, CryptFileKeyring]
# OS-native stores capable of saving the master passphrase (no Linux equivalent).
OSPassphraseStore = Union[MacKeyring, WinKeyring]
def get_legacy_keyring_instance() -> Optional[LegacyKeyring]:
    """Return the platform's legacy keyring backend, or None on unsupported platforms."""
    if platform == "darwin":
        return MacKeyring()
    if platform in ("win32", "cygwin"):
        return WinKeyring()
    if platform == "linux":
        legacy: CryptFileKeyring = CryptFileKeyring()
        # keyring_key presumably must be set before CryptFileKeyring can
        # decrypt its file — value kept identical to the original setup.
        legacy.keyring_key = "your keyring password"
        return legacy
    return None
def get_os_passphrase_store() -> Optional[OSPassphraseStore]:
    """Return an OS-native credential store for the master passphrase, if one exists."""
    if platform == "darwin":
        return MacKeyring()
    if platform in ("win32", "cygwin"):
        return WinKeyring()
    return None
def check_legacy_keyring_keys_present(keyring: LegacyKeyring) -> bool:
    """Return True when any flax key entry exists in the given legacy keyring."""
    from keyring.credentials import Credential  # noqa: F401 (kept for module import side effect)
    from flax.util.keychain import default_keychain_user, default_keychain_service, get_private_key_user, MAX_KEYS

    keychain_user: str = default_keychain_user()
    keychain_service: str = default_keychain_service()
    return any(
        keyring.get_credential(keychain_service, get_private_key_user(keychain_user, index)) is not None
        for index in range(0, MAX_KEYS)
    )
def warn_if_macos_errSecInteractionNotAllowed(error: KeyringError) -> bool:
    """
    Check if the macOS Keychain error is errSecInteractionNotAllowed. This commonly
    occurs when the keychain is accessed while headless (such as remoting into a Mac
    via SSH). Because macOS Keychain operations may require prompting for login creds,
    a connection to the WindowServer is required. Returns True if the error was
    handled.
    """
    if "-25308" not in str(error):
        return False
    print(
        "WARNING: Unable to access the macOS Keychain (-25308 errSecInteractionNotAllowed). "
        "Are you logged-in remotely?"
    )
    return True
class KeyringWrapper:
    """
    KeyringWrapper provides an abstraction that the Keychain class can use
    without requiring knowledge of the keyring backend. During initialization,
    a keyring backend is selected based on the OS.

    The wrapper is implemented as a singleton, as it may need to manage state
    related to the master passphrase and handle migration from the legacy
    CryptFileKeyring implementation.
    """

    # Static members
    __shared_instance = None
    __keys_root_path: Path = DEFAULT_KEYS_ROOT_PATH

    # Instance members
    keys_root_path: Path
    keyring: Union[Any, FileKeyring] = None
    cached_passphrase: Optional[str] = None
    cached_passphrase_is_validated: bool = False
    legacy_keyring = None

    def __init__(self, keys_root_path: Path = DEFAULT_KEYS_ROOT_PATH, force_legacy: bool = False):
        """
        Initializes the keyring backend based on the OS. For Linux, we previously
        used CryptFileKeyring. We now use our own FileKeyring backend and migrate
        the data from the legacy CryptFileKeyring (on write).

        Raises KeychainNotSet when neither a current nor a legacy backend could
        be initialized.
        """
        from flax.util.errors import KeychainNotSet

        self.keys_root_path = keys_root_path
        if force_legacy:
            legacy_keyring = get_legacy_keyring_instance()
            # get_legacy_keyring_instance() returns None on unsupported platforms;
            # guard before probing it for keys (previously this path raised
            # AttributeError instead of the intended KeychainNotSet).
            if legacy_keyring is not None and check_legacy_keyring_keys_present(legacy_keyring):
                self.legacy_keyring = legacy_keyring
        else:
            self.refresh_keyrings()

        if self.keyring is None and self.legacy_keyring is None:
            raise KeychainNotSet(
                f"Unable to initialize keyring backend: keys_root_path={keys_root_path}, force_legacy={force_legacy}"
            )

    def refresh_keyrings(self):
        """(Re)create the primary backend, legacy backend, and cached passphrase."""
        self.keyring = None
        self.keyring = self._configure_backend()
        # Configure the legacy keyring if keyring passphrases are supported to support migration (if necessary)
        self.legacy_keyring = self._configure_legacy_backend()
        # Initialize the cached_passphrase
        self.cached_passphrase = self._get_initial_cached_passphrase()

    def _configure_backend(self) -> FileKeyring:
        """Create the FileKeyring backend; must only run once per instance."""
        if self.keyring:
            raise Exception("KeyringWrapper has already been instantiated")
        return FileKeyring.create(keys_root_path=self.keys_root_path)

    def _configure_legacy_backend(self) -> Optional[LegacyKeyring]:
        # Annotation fixed to Optional: this returns None when no legacy
        # keyring with keys is found.
        # If keyring.yaml isn't found or is empty, check if we're using
        # CryptFileKeyring, Mac Keychain, or Windows Credential Manager
        filekeyring = self.keyring if type(self.keyring) == FileKeyring else None
        if filekeyring and not filekeyring.has_content():
            keyring: Optional[LegacyKeyring] = get_legacy_keyring_instance()
            if keyring is not None and check_legacy_keyring_keys_present(keyring):
                return keyring
        return None

    def _get_initial_cached_passphrase(self) -> str:
        """
        Grab the saved passphrase from the OS credential store (if available), otherwise
        use the default passphrase
        """
        from flax.util.keychain import supports_os_passphrase_storage

        passphrase: Optional[str] = None

        if supports_os_passphrase_storage():
            passphrase = self.get_master_passphrase_from_credential_store()

        if passphrase is None:
            passphrase = DEFAULT_PASSPHRASE_IF_NO_MASTER_PASSPHRASE

        return passphrase

    @staticmethod
    def set_keys_root_path(keys_root_path: Path):
        """
        Used to set the keys_root_path prior to instantiating the __shared_instance
        """
        KeyringWrapper.__keys_root_path = keys_root_path

    @staticmethod
    def get_shared_instance(create_if_necessary=True):
        """Return the singleton wrapper, lazily constructing it when allowed."""
        if not KeyringWrapper.__shared_instance and create_if_necessary:
            KeyringWrapper.__shared_instance = KeyringWrapper(keys_root_path=KeyringWrapper.__keys_root_path)

        return KeyringWrapper.__shared_instance

    @staticmethod
    def cleanup_shared_instance():
        """Drop the singleton; the next get_shared_instance() builds a fresh one."""
        KeyringWrapper.__shared_instance = None

    @staticmethod
    def get_legacy_instance() -> Optional["KeyringWrapper"]:
        """Return a wrapper bound only to the legacy keyring (for migration tooling)."""
        return KeyringWrapper(force_legacy=True)

    def get_keyring(self):
        """
        Return the current keyring backend. The legacy keyring is preferred if it's in use
        """
        return self.keyring if not self.using_legacy_keyring() else self.legacy_keyring

    def using_legacy_keyring(self) -> bool:
        return self.legacy_keyring is not None

    # Master passphrase support

    def keyring_supports_master_passphrase(self) -> bool:
        return type(self.get_keyring()) in [FileKeyring]

    def get_cached_master_passphrase(self) -> Tuple[Optional[str], bool]:
        """
        Returns a tuple including the currently cached passphrase and a bool
        indicating whether the passphrase has been previously validated.
        """
        return self.cached_passphrase, self.cached_passphrase_is_validated

    def set_cached_master_passphrase(self, passphrase: Optional[str], validated=False) -> None:
        """
        Cache the provided passphrase and optionally indicate whether the passphrase
        has been validated.
        """
        self.cached_passphrase = passphrase
        self.cached_passphrase_is_validated = validated

    def has_cached_master_passphrase(self) -> bool:
        """Returns whether a non-empty master passphrase is currently cached."""
        # Bug fix: get_cached_master_passphrase() returns a (passphrase, validated)
        # tuple. The previous code tested the tuple itself, which is never None and
        # always has len() == 2, so this method unconditionally returned True.
        passphrase, _ = self.get_cached_master_passphrase()
        return passphrase is not None and len(passphrase) > 0

    def has_master_passphrase(self) -> bool:
        """
        Returns a bool indicating whether the underlying keyring data
        is secured by a master passphrase.
        """
        return self.keyring_supports_master_passphrase() and self.keyring.has_content()

    def master_passphrase_is_valid(self, passphrase: str, force_reload: bool = False) -> bool:
        return self.keyring.check_passphrase(passphrase, force_reload=force_reload)

    def set_master_passphrase(
        self,
        current_passphrase: Optional[str],
        new_passphrase: str,
        *,
        write_to_keyring: bool = True,
        passphrase_hint: Optional[str] = None,
        save_passphrase: bool = False,
    ) -> None:
        """
        Sets a new master passphrase for the keyring

        Raises KeychainCurrentPassphraseIsInvalid when current_passphrase doesn't
        unlock the keyring, and KeychainRequiresMigration when writing while the
        legacy keyring is still in use.
        """
        from flax.util.errors import KeychainRequiresMigration, KeychainCurrentPassphraseIsInvalid
        from flax.util.keychain import supports_os_passphrase_storage

        # Require a valid current_passphrase
        if (
            self.has_master_passphrase()
            and current_passphrase is not None
            and not self.master_passphrase_is_valid(current_passphrase)
        ):
            raise KeychainCurrentPassphraseIsInvalid()

        self.set_cached_master_passphrase(new_passphrase, validated=True)

        self.keyring.set_passphrase_hint(passphrase_hint)

        if write_to_keyring:
            if self.using_legacy_keyring():
                raise KeychainRequiresMigration()
            # We're reencrypting the keyring contents using the new passphrase. Ensure that the
            # payload has been decrypted by calling load_keyring with the current passphrase.
            self.keyring.load_keyring(passphrase=current_passphrase)
            self.keyring.write_keyring(fresh_salt=True)  # Create a new salt since we're changing the passphrase

        if supports_os_passphrase_storage():
            if save_passphrase:
                self.save_master_passphrase_to_credential_store(new_passphrase)
            else:
                self.remove_master_passphrase_from_credential_store()

    def remove_master_passphrase(self, current_passphrase: Optional[str]) -> None:
        """
        Remove the user-specific master passphrase. We still keep the keyring contents encrypted
        using the default passphrase.
        """
        self.set_master_passphrase(current_passphrase, DEFAULT_PASSPHRASE_IF_NO_MASTER_PASSPHRASE)

    def save_master_passphrase_to_credential_store(self, passphrase: str) -> None:
        """Persist the master passphrase to the OS-native credential store, if any."""
        passphrase_store: Optional[OSPassphraseStore] = get_os_passphrase_store()
        if passphrase_store is not None:
            try:
                passphrase_store.set_password(MASTER_PASSPHRASE_SERVICE_NAME, MASTER_PASSPHRASE_USER_NAME, passphrase)
            except KeyringError as e:
                # Headless macOS sessions can't access the Keychain; warn instead of failing.
                if not warn_if_macos_errSecInteractionNotAllowed(e):
                    raise
        return None

    def remove_master_passphrase_from_credential_store(self) -> None:
        """Delete the saved master passphrase from the OS-native credential store, if any."""
        passphrase_store: Optional[OSPassphraseStore] = get_os_passphrase_store()
        if passphrase_store is not None:
            try:
                passphrase_store.delete_password(MASTER_PASSPHRASE_SERVICE_NAME, MASTER_PASSPHRASE_USER_NAME)
            except PasswordDeleteError:
                # Only re-raise when a credential actually exists but couldn't be deleted.
                if (
                    passphrase_store.get_credential(MASTER_PASSPHRASE_SERVICE_NAME, MASTER_PASSPHRASE_USER_NAME)
                    is not None
                ):
                    raise
            except KeyringError as e:
                if not warn_if_macos_errSecInteractionNotAllowed(e):
                    raise
        return None

    def get_master_passphrase_from_credential_store(self) -> Optional[str]:
        """Fetch the saved master passphrase from the OS-native credential store, if any."""
        passphrase_store: Optional[OSPassphraseStore] = get_os_passphrase_store()
        if passphrase_store is not None:
            try:
                return passphrase_store.get_password(MASTER_PASSPHRASE_SERVICE_NAME, MASTER_PASSPHRASE_USER_NAME)
            except KeyringError as e:
                if not warn_if_macos_errSecInteractionNotAllowed(e):
                    raise
        return None

    def get_master_passphrase_hint(self) -> Optional[str]:
        """Return the stored passphrase hint, or None when unsupported."""
        if self.keyring_supports_master_passphrase():
            return self.keyring.get_passphrase_hint()
        return None

    # Legacy keyring migration

    class MigrationResults:
        """Bundle describing what was migrated, used for verification and cleanup."""

        def __init__(
            self,
            original_private_keys: List[Tuple[PrivateKey, bytes]],
            legacy_keyring: LegacyKeyring,
            keychain_service: str,
            keychain_users: List[str],
        ):
            self.original_private_keys = original_private_keys
            self.legacy_keyring = legacy_keyring
            self.keychain_service = keychain_service
            self.keychain_users = keychain_users

    def confirm_migration(self) -> bool:
        """
        Before beginning migration, we'll notify the user that the legacy keyring needs to be
        migrated and warn about backing up the mnemonic seeds.

        If a master passphrase hasn't been explicitly set yet, we'll attempt to prompt and set
        the passphrase prior to beginning migration.
        """
        master_passphrase, _ = self.get_cached_master_passphrase()
        if master_passphrase == DEFAULT_PASSPHRASE_IF_NO_MASTER_PASSPHRASE:
            print(
                "\nYour existing keys will be migrated to a new keyring that is optionally secured by a master "
                "passphrase."
            )
            print(
                "Would you like to set a master passphrase now? Use 'flax passphrase set' to change the passphrase.\n"
            )

            response = prompt_yes_no("Set keyring master passphrase?")
            if response:
                from flax.cmds.passphrase_funcs import prompt_for_new_passphrase

                # Prompt for a master passphrase and cache it
                new_passphrase, save_passphrase = prompt_for_new_passphrase()
                self.set_master_passphrase(
                    current_passphrase=None,
                    new_passphrase=new_passphrase,
                    write_to_keyring=False,
                    save_passphrase=save_passphrase,
                )
            else:
                print(
                    "Will skip setting a master passphrase. Use 'flax passphrase set' to set the master passphrase.\n"
                )
        else:
            import colorama

            colorama.init()

            print("\nYour existing keys will be migrated to a new keyring that is secured by your master passphrase")
            print(colorama.Fore.YELLOW + colorama.Style.BRIGHT + "WARNING: " + colorama.Style.RESET_ALL, end="")
            print(
                "It is strongly recommended that you ensure you have a copy of the mnemonic seed for each of your "
                "keys prior to beginning migration\n"
            )

        return prompt_yes_no("Begin keyring migration?")

    def migrate_legacy_keys(self) -> MigrationResults:
        """Copy all key entries from the legacy keyring into the new keyring."""
        from flax.util.keychain import get_private_key_user, Keychain, MAX_KEYS

        print("Migrating contents from legacy keyring")

        keychain: Keychain = Keychain()
        # Obtain contents from the legacy keyring. When using the Keychain interface
        # to read, the legacy keyring will be preferred over the new keyring.
        original_private_keys = keychain.get_all_private_keys()
        service = keychain.service
        user_passphrase_pairs = []
        index = 0
        user = get_private_key_user(keychain.user, index)
        while index <= MAX_KEYS:
            # Build up a list of user/passphrase tuples from the legacy keyring contents
            if user is not None:
                passphrase = self.get_passphrase(service, user)

            if passphrase is not None:
                user_passphrase_pairs.append((user, passphrase))

            index += 1
            user = get_private_key_user(keychain.user, index)

        # Write the keys directly to the new keyring (self.keyring)
        for (user, passphrase) in user_passphrase_pairs:
            self.keyring.set_password(service, user, passphrase)

        return KeyringWrapper.MigrationResults(
            original_private_keys, self.legacy_keyring, service, [user for (user, _) in user_passphrase_pairs]
        )

    def verify_migration_results(self, migration_results: MigrationResults) -> bool:
        """Compare pre- and post-migration key sets; restore the legacy keyring on mismatch."""
        from flax.util.keychain import Keychain

        # Stop using the legacy keyring. This will direct subsequent reads to the new keyring.
        self.legacy_keyring = None
        success: bool = False

        print("Verifying migration results...", end="")

        # Compare the original keyring contents with the new
        try:
            keychain: Keychain = Keychain()
            original_private_keys = migration_results.original_private_keys
            post_migration_private_keys = keychain.get_all_private_keys()

            # Sort the key collections prior to comparing
            original_private_keys.sort(key=lambda e: str(e[0]))
            post_migration_private_keys.sort(key=lambda e: str(e[0]))

            if post_migration_private_keys == original_private_keys:
                success = True
                print(" Verified")
            else:
                print(" Failed")
                raise ValueError("Migrated keys don't match original keys")
        except Exception as e:
            print(f"\nMigration failed: {e}")
            print("Leaving legacy keyring intact")
            self.legacy_keyring = migration_results.legacy_keyring  # Restore the legacy keyring
            raise

        return success

    def confirm_legacy_keyring_cleanup(self, migration_results) -> bool:
        """
        Ask the user whether we should remove keys from the legacy keyring. In the case
        of CryptFileKeyring, we can't just delete the file because other python processes
        might use the same keyring file.
        """
        keyring_name: str = ""
        legacy_keyring_type: Type = type(migration_results.legacy_keyring)

        if legacy_keyring_type is CryptFileKeyring:
            keyring_name = str(migration_results.legacy_keyring.file_path)
        elif legacy_keyring_type is MacKeyring:
            keyring_name = "macOS Keychain"
        elif legacy_keyring_type is WinKeyring:
            keyring_name = "Windows Credential Manager"

        prompt = "Remove keys from old keyring (recommended)"
        if len(keyring_name) > 0:
            prompt += f" ({keyring_name})?"
        else:
            prompt += "?"

        return prompt_yes_no(prompt)

    def cleanup_legacy_keyring(self, migration_results: MigrationResults):
        """Delete the migrated entries from the legacy keyring."""
        for user in migration_results.keychain_users:
            migration_results.legacy_keyring.delete_password(migration_results.keychain_service, user)

    def migrate_legacy_keyring(self, cleanup_legacy_keyring: bool = False):
        """Non-interactive migration: copy, verify, and optionally clean up."""
        results = self.migrate_legacy_keys()
        success = self.verify_migration_results(results)

        if success and cleanup_legacy_keyring:
            self.cleanup_legacy_keyring(results)

    async def migrate_legacy_keyring_interactive(self) -> bool:
        """
        Handle importing keys from the legacy keyring into the new keyring.

        Prior to beginning, we'll ensure that we at least suggest setting a master passphrase
        and backing up mnemonic seeds. After importing keys from the legacy keyring, we'll
        perform a before/after comparison of the keyring contents, and on success we'll prompt
        to cleanup the legacy keyring.
        """
        from flax.cmds.passphrase_funcs import async_update_daemon_migration_completed_if_running

        # Let the user know about the migration.
        if not self.confirm_migration():
            print("Migration aborted, can't run any flax commands.")
            return False

        try:
            results = self.migrate_legacy_keys()
            success = self.verify_migration_results(results)

            if success:
                print(f"Keyring migration completed successfully ({str(self.keyring.keyring_path)})\n")
        except Exception as e:
            print(f"\nMigration failed: {e}")
            print("Leaving legacy keyring intact")
            return False

        # Ask if we should clean up the legacy keyring
        if self.confirm_legacy_keyring_cleanup(results):
            self.cleanup_legacy_keyring(results)
            print("Removed keys from old keyring")
        else:
            print("Keys in old keyring left intact")

        # Notify the daemon (if running) that migration has completed
        await async_update_daemon_migration_completed_if_running()
        return True

    # Keyring interface

    def get_passphrase(self, service: str, user: str) -> str:
        """Read a stored passphrase; prefers the legacy keyring while it's in use."""
        # Continue reading from the legacy keyring until we want to write something,
        # at which point we'll migrate the legacy contents to the new keyring
        if self.using_legacy_keyring():
            passphrase = self.legacy_keyring.get_password(service, user)  # type: ignore
            # Legacy backends may return raw bytes; normalize to a hex string.
            return passphrase.hex() if type(passphrase) == bytes else passphrase

        return self.get_keyring().get_password(service, user)

    def set_passphrase(self, service: str, user: str, passphrase: str):
        self.get_keyring().set_password(service, user, passphrase)

    def delete_passphrase(self, service: str, user: str):
        self.get_keyring().delete_password(service, user)

    def get_label(self, fingerprint: int) -> Optional[str]:
        if self.using_legacy_keyring():
            return None  # Legacy keyring doesn't support key labels
        return self.keyring.get_label(fingerprint)

    def set_label(self, fingerprint: int, label: str) -> None:
        if self.using_legacy_keyring():
            raise NotImplementedError("Legacy keyring doesn't support key labels")
        self.keyring.set_label(fingerprint, label)

    def delete_label(self, fingerprint: int) -> None:
        if self.using_legacy_keyring():
            raise NotImplementedError("Legacy keyring doesn't support key labels")
        self.keyring.delete_label(fingerprint)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/merkle_set.py | flax/util/merkle_set.py | from __future__ import annotations
from abc import ABCMeta, abstractmethod
from hashlib import sha256
from typing import Any, Dict, List, Tuple
from flax.types.blockchain_format.sized_bytes import bytes32
"""
A simple, confidence-inspiring Merkle Set standard
Advantages of this standard:
Low CPU requirements
Small proofs of inclusion/exclusion
Reasonably simple implementation
The main tricks in this standard are:
Skips repeated hashing of exactly two things even when they share prefix bits
Proofs support proving inclusion/exclusion for a large number of values in
a single string. They're a serialization of a subset of the tree.
Proof format:
multiproof: subtree
subtree: middle or terminal or truncated or empty
middle: MIDDLE 1 subtree subtree
terminal: TERMINAL 1 hash 32
# If the sibling is empty truncated implies more than two children.
truncated: TRUNCATED 1 hash 32
empty: EMPTY 1
EMPTY: \x00
TERMINAL: \x01
MIDDLE: \x02
TRUNCATED: \x03
"""
# Node-type prefix bytes used in serialized proofs (see the format described above).
EMPTY = bytes([0])
TERMINAL = bytes([1])
MIDDLE = bytes([2])
TRUNCATED = bytes([3])

# Hash of an empty subtree: 32 zero bytes.
BLANK = bytes32([0] * 32)

# Cache of sha256 contexts keyed by the 2-byte (left type, right type) prefix;
# populated by init_prehashed() below.
prehashed: Dict[bytes, Any] = {}
def init_prehashed():
    """Populate `prehashed` with sha256 contexts for each 2-byte node-type prefix pair."""
    for left in [EMPTY, TERMINAL, MIDDLE]:
        for right in [EMPTY, TERMINAL, MIDDLE]:
            # 30 zero bytes of padding followed by the two node-type bytes.
            prehashed[left + right] = sha256(bytes([0] * 30) + left + right)


init_prehashed()
def hashdown(mystr: bytes) -> bytes:
    """Hash a 66-byte (tag+hash, tag+hash) pair down to a 32-byte node hash."""
    assert len(mystr) == 66
    left_tag = mystr[0:1]
    right_tag = mystr[33:34]
    # Resume from the cached context for this tag pair, then feed both hashes.
    ctx = prehashed[bytes(left_tag + right_tag)].copy()
    ctx.update(mystr[1:33] + mystr[34:])
    return ctx.digest()[:32]
def compress_root(mystr: bytes) -> bytes32:
    """Collapse a serialized (tag + hash) root into a bare 32-byte root hash."""
    assert len(mystr) == 33
    tag = mystr[0:1]
    if tag == MIDDLE:
        # An interior root already carries its final hash.
        return bytes32(mystr[1:])
    if tag == EMPTY:
        assert mystr[1:] == BLANK
        return BLANK
    # A terminal (single-element) root gets hashed together with its tag.
    return bytes32(sha256(mystr).digest()[:32])
def get_bit(mybytes: bytes, pos: int) -> int:
    """Return bit number `pos` (MSB-first) of a 32-byte string."""
    assert len(mybytes) == 32
    byte_index, bit_index = divmod(pos, 8)
    return (mybytes[byte_index] >> (7 - bit_index)) & 1
class Node(metaclass=ABCMeta):
    """Abstract interface shared by every merkle-tree node type."""

    # 32-byte hash of the subtree rooted at this node.
    hash: bytes

    @abstractmethod
    def get_hash(self) -> bytes:
        """Return the node-type tag byte followed by the 32-byte hash."""
        pass

    @abstractmethod
    def is_empty(self) -> bool:
        pass

    @abstractmethod
    def is_terminal(self) -> bool:
        pass

    @abstractmethod
    def is_double(self) -> bool:
        """Return True when exactly two terminals sit below this node."""
        pass

    @abstractmethod
    def add(self, toadd: bytes, depth: int) -> "Node":
        """Return a node with `toadd` included; trees are persistent."""
        pass

    @abstractmethod
    def is_included(self, tocheck: bytes, depth: int, p: List[bytes]) -> bool:
        """Append proof chunks to `p` and return membership of `tocheck`."""
        pass

    @abstractmethod
    def other_included(self, tocheck: bytes, depth: int, p: List[bytes], collapse: bool):
        """Append this sibling subtree's contribution to an in-progress proof."""
        pass

    @abstractmethod
    def _audit(self, hashes: List[bytes], bits: List[int]):
        pass
class MerkleSet:
    """Set of 32-byte hashes stored as a canonical bit-prefix merkle tree."""

    root: Node

    def __init__(self, root: Node = None):
        # NOTE(review): default of None means the annotation should be
        # Optional[Node]; Optional is not imported in this module.
        if root is None:
            self.root = _empty
        else:
            self.root = root

    def get_root(self) -> bytes32:
        """Return the 32-byte root hash of the whole set."""
        return compress_root(self.root.get_hash())

    def add_already_hashed(self, toadd: bytes):
        """Insert a value that has already been hashed down to 32 bytes."""
        self.root = self.root.add(toadd, 0)

    def is_included_already_hashed(self, tocheck: bytes) -> Tuple[bool, bytes]:
        """Return (membership, serialized inclusion/exclusion proof)."""
        proof: List = []
        r = self.root.is_included(tocheck, 0, proof)
        return r, b"".join(proof)

    def _audit(self, hashes: List[bytes]):
        # Sanity check: an in-order walk must yield sorted leaf hashes.
        # NOTE(review): the `hashes` parameter is unused here.
        newhashes: List = []
        self.root._audit(newhashes, [])
        assert newhashes == sorted(newhashes)
class EmptyNode(Node):
    """Node representing an empty subtree; its hash is the all-zero BLANK."""

    def __init__(self):
        self.hash = BLANK

    def get_hash(self) -> bytes:
        return EMPTY + BLANK

    def is_empty(self) -> bool:
        return True

    def is_terminal(self) -> bool:
        return False

    def is_double(self) -> bool:
        # Canonical trees never ask an empty node whether it is a double.
        raise SetError()

    def add(self, toadd: bytes, depth: int) -> Node:
        return TerminalNode(toadd)

    def is_included(self, tocheck: bytes, depth: int, p: List[bytes]) -> bool:
        p.append(EMPTY)
        return False

    def other_included(self, tocheck: bytes, depth: int, p: List[bytes], collapse: bool):
        p.append(EMPTY)

    def _audit(self, hashes: List[bytes], bits: List[int]):
        pass


# Shared singleton; all empty subtrees reference this one instance.
_empty = EmptyNode()
def _make_middle(children: Any, depth: int) -> Node:
    """Pair two subtrees into a MiddleNode, inserting chain nodes while the
    children's hashes still share prefix bits at `depth`."""
    bit0 = get_bit(children[0].hash, depth)
    bit1 = get_bit(children[1].hash, depth)
    if bit0 != bit1:
        return MiddleNode(children)
    # Both children continue on the same side: recurse one level deeper and
    # pair the result with an empty sibling on the other side.
    branches: List[Node] = [_empty, _empty]
    branches[bit0] = _make_middle(children, depth + 1)
    return MiddleNode(branches)
class TerminalNode(Node):
    """Leaf node holding a single 32-byte value."""

    def __init__(self, hash: bytes, bits: List[int] = None):
        # NOTE(review): `bits` defaults to None, so its annotation should be
        # Optional[List[int]] (Optional is not imported in this module).
        assert len(hash) == 32
        self.hash = hash
        if bits is not None:
            self._audit([], bits)

    def get_hash(self) -> bytes:
        return TERMINAL + self.hash

    def is_empty(self) -> bool:
        return False

    def is_terminal(self) -> bool:
        return True

    def is_double(self) -> bool:
        # Only middle nodes can be doubles; callers must not ask a leaf.
        raise SetError()

    def add(self, toadd: bytes, depth: int) -> Node:
        """Return a subtree containing both this value and `toadd`, ordered."""
        if toadd == self.hash:
            return self
        if toadd > self.hash:
            return _make_middle([self, TerminalNode(toadd)], depth)
        else:
            return _make_middle([TerminalNode(toadd), self], depth)

    def is_included(self, tocheck: bytes, depth: int, p: List[bytes]) -> bool:
        p.append(TERMINAL + self.hash)
        return tocheck == self.hash

    def other_included(self, tocheck: bytes, depth: int, p: List[bytes], collapse: bool):
        p.append(TERMINAL + self.hash)

    def _audit(self, hashes: List[bytes], bits: List[int]):
        hashes.append(self.hash)
        # Verify this leaf actually lives on the path described by `bits`.
        for pos, v in enumerate(bits):
            assert get_bit(self.hash, pos) == v
class MiddleNode(Node):
    """Interior node with two children; collapses double-terminal subtrees."""

    def __init__(self, children: List[Node]):
        self.children = children
        # A "double" subtree (exactly two terminals below) keeps the same
        # hash no matter how deep it hangs under empty siblings, so reuse it.
        if children[0].is_empty() and children[1].is_double():
            self.hash = children[1].hash
        elif children[1].is_empty() and children[0].is_double():
            self.hash = children[0].hash
        else:
            # Canonical-form checks: no pointless empty pairings, and two
            # terminal siblings must be strictly ordered.
            if children[0].is_empty() and (children[1].is_empty() or children[1].is_terminal()):
                raise SetError()
            if children[1].is_empty() and children[0].is_terminal():
                raise SetError
            if children[0].is_terminal() and children[1].is_terminal() and children[0].hash >= children[1].hash:
                raise SetError
            self.hash = hashdown(children[0].get_hash() + children[1].get_hash())

    def get_hash(self) -> bytes:
        return MIDDLE + self.hash

    def is_empty(self) -> bool:
        return False

    def is_terminal(self) -> bool:
        return False

    def is_double(self) -> bool:
        # Walk past empty siblings; a double has two terminal children.
        if self.children[0].is_empty():
            return self.children[1].is_double()
        if self.children[1].is_empty():
            return self.children[0].is_double()
        return self.children[0].is_terminal() and self.children[1].is_terminal()

    def add(self, toadd: bytes, depth: int) -> Node:
        bit = get_bit(toadd, depth)
        child = self.children[bit]
        newchild = child.add(toadd, depth + 1)
        if newchild is child:
            # Value already present; keep sharing this subtree unchanged.
            return self
        newvals = [x for x in self.children]
        newvals[bit] = newchild
        return MiddleNode(newvals)

    def is_included(self, tocheck: bytes, depth: int, p: List[bytes]) -> bool:
        p.append(MIDDLE)
        if get_bit(tocheck, depth) == 0:
            r = self.children[0].is_included(tocheck, depth + 1, p)
            self.children[1].other_included(tocheck, depth + 1, p, not self.children[0].is_empty())
            return r
        else:
            self.children[0].other_included(tocheck, depth + 1, p, not self.children[1].is_empty())
            return self.children[1].is_included(tocheck, depth + 1, p)

    def other_included(self, tocheck: bytes, depth: int, p: List[bytes], collapse: bool):
        # Either truncate this sibling down to its hash, or expand a double.
        if collapse or not self.is_double():
            p.append(TRUNCATED + self.hash)
        else:
            self.is_included(tocheck, depth, p)

    def _audit(self, hashes: List[bytes], bits: List[int]):
        self.children[0]._audit(hashes, bits + [0])
        self.children[1]._audit(hashes, bits + [1])
class TruncatedNode(Node):
    """Pruned subtree inside a deserialized proof; only its hash is known."""

    def __init__(self, hash: bytes):
        self.hash = hash

    def get_hash(self) -> bytes:
        return MIDDLE + self.hash

    def is_empty(self) -> bool:
        return False

    def is_terminal(self) -> bool:
        return False

    def is_double(self) -> bool:
        return False

    def add(self, toadd: bytes, depth: int) -> Node:
        # Cannot insert below a pruned subtree; keep it unchanged.
        return self

    def is_included(self, tocheck: bytes, depth: int, p: List[bytes]) -> bool:
        # Membership cannot be decided inside a pruned region.
        raise SetError()

    def other_included(self, tocheck: bytes, depth: int, p: List[bytes], collapse: bool):
        p.append(TRUNCATED + self.hash)

    def _audit(self, hashes: List[bytes], bits: List[int]):
        pass
class SetError(Exception):
    """Raised when a merkle set or serialized proof is malformed."""

    pass
def confirm_included_already_hashed(root: bytes32, val: bytes, proof: bytes) -> bool:
    """Verify `proof` shows the pre-hashed `val` IS in the set with root `root`."""
    return _confirm(root, val, proof, True)
def confirm_not_included_already_hashed(root: bytes32, val: bytes, proof: bytes) -> bool:
    """Verify `proof` shows the pre-hashed `val` is NOT in the set with root `root`."""
    return _confirm(root, val, proof, False)
def _confirm(root: bytes32, val: bytes, proof: bytes, expected: bool) -> bool:
    """Deserialize `proof`, check it commits to `root`, then compare the
    claimed membership of `val` against `expected`. Malformed proofs fail."""
    try:
        tree = deserialize_proof(proof)
        if tree.get_root() != root:
            return False
        included, _unused_proof = tree.is_included_already_hashed(val)
    except SetError:
        return False
    return included == expected
def deserialize_proof(proof: bytes) -> MerkleSet:
    """Parse a serialized multiproof into a (partial) MerkleSet.

    Raises:
        SetError: if the blob is malformed, truncated, or has trailing bytes.
    """
    try:
        root, pos = _deserialize(proof, 0, [])
    except IndexError as e:
        # A short read inside _deserialize means the proof was truncated;
        # chain the cause explicitly instead of relying on implicit context.
        raise SetError() from e
    # Reject proofs with trailing garbage after the parsed tree.
    if pos != len(proof):
        raise SetError()
    return MerkleSet(root)
def _deserialize(proof: bytes, pos: int, bits: List[int]) -> Tuple[Node, int]:
    """Recursively parse one subtree starting at `pos`; return (node, next_pos)."""
    t = proof[pos : pos + 1]  # flake8: noqa
    if t == EMPTY:
        return _empty, pos + 1
    if t == TERMINAL:
        # `bits` records the path so TerminalNode can audit its placement.
        return TerminalNode(proof[pos + 1 : pos + 33], bits), pos + 33  # flake8: noqa
    if t == TRUNCATED:
        return TruncatedNode(proof[pos + 1 : pos + 33]), pos + 33  # flake8: noqa
    if t != MIDDLE:
        raise SetError()
    v0, pos = _deserialize(proof, pos + 1, bits + [0])
    v1, pos = _deserialize(proof, pos, bits + [1])
    return MiddleNode([v0, v1]), pos
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/validate_alert.py | flax/util/validate_alert.py | from __future__ import annotations
import json
from pathlib import Path
from blspy import AugSchemeMPL, PublicKeyMPL, SignatureMPL
from flax.util.byte_types import hexstr_to_bytes
from flax.util.hash import std_hash
def validate_alert_file(file_path: Path, pubkey: str) -> bool:
    """Read the alert file at `file_path` and verify its signature with `pubkey`."""
    contents = file_path.read_text()
    return validate_alert(contents, pubkey)
def validate_alert(text: str, pubkey: str) -> bool:
    """Check that the alert JSON in `text` carries a valid BLS signature
    over its "data" payload from the hex-encoded public key `pubkey`."""
    parsed = json.loads(text)
    message = bytes(parsed["data"], "UTF-8")
    sig = SignatureMPL.from_bytes(hexstr_to_bytes(parsed["signature"]))
    pk = PublicKeyMPL.from_bytes(hexstr_to_bytes(pubkey))
    return AugSchemeMPL.verify(pk, message, sig)
def create_alert_file(alert_file_path: Path, key, genesis_challenge_preimage: str):
    """Write a signed 'ready' alert file containing the genesis challenge
    (the hash of `genesis_challenge_preimage`) and its preimage."""
    genesis_challenge = std_hash(bytes(genesis_challenge_preimage, "UTF-8"))
    payload: str = json.dumps(
        {
            "ready": True,
            "genesis_challenge": genesis_challenge.hex(),
            "genesis_challenge_preimage": genesis_challenge_preimage,
        }
    )
    signature = AugSchemeMPL.sign(key, bytes(payload, "utf-8"))
    alert_file_path.write_text(json.dumps({"data": payload, "signature": f"{signature}"}))
def create_not_ready_alert_file(alert_file_path: Path, key):
    """Write a signed alert file marking the network as not ready."""
    payload: str = json.dumps({"ready": False})
    signature = AugSchemeMPL.sign(key, bytes(payload, "utf-8"))
    alert_file_path.write_text(json.dumps({"data": payload, "signature": f"{signature}"}))
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/struct_stream.py | flax/util/struct_stream.py | from __future__ import annotations
from typing import BinaryIO, SupportsInt, Type, TypeVar, Union
from typing_extensions import Protocol, SupportsIndex
_T_StructStream = TypeVar("_T_StructStream", bound="StructStream")
# https://github.com/python/typeshed/blob/c2182fdd3e572a1220c70ad9c28fd908b70fb19b/stdlib/_typeshed/__init__.pyi#L68-L69
class SupportsTrunc(Protocol):
    """Structural type for objects convertible to int via __trunc__()."""

    def __trunc__(self) -> int:
        ...
def parse_metadata_from_name(cls: Type[_T_StructStream]) -> Type[_T_StructStream]:
    """Class decorator: derive SIZE/BITS/SIGNED and value bounds from the name.

    The class must be named like ``int32`` or ``uint128``; e.g. ``uint8``
    yields BITS=8, SIGNED=False, SIZE=1, MINIMUM=0, MAXIMUM_EXCLUSIVE=256.

    Raises:
        ValueError: if the name does not match the `[u]int<bits>` pattern or
            the bit size is non-positive or not a multiple of 8.
    """
    # TODO: turn this around to calculate the PACK from the size and signedness
    name_signedness, _, name_bit_size = cls.__name__.partition("int")
    cls.SIGNED = False if name_signedness == "u" else True
    try:
        cls.BITS = int(name_bit_size)
    except ValueError as e:
        raise ValueError(f"expected integer suffix but got: {name_bit_size!r}") from e
    if cls.BITS <= 0:
        # Fixed grammar of the error message ("must greater" -> "must be greater").
        raise ValueError(f"bit size must be greater than zero but got: {cls.BITS}")
    # Reject names like "xint8" that happen to partition cleanly on "int".
    expected_name = f"{'' if cls.SIGNED else 'u'}int{cls.BITS}"
    if cls.__name__ != expected_name:
        raise ValueError(f"expected class name is {expected_name} but got: {cls.__name__}")
    cls.SIZE, remainder = divmod(cls.BITS, 8)
    if remainder != 0:
        # There may be a good use case for removing this but until the details are
        # thought through we should avoid such cases.
        raise ValueError(f"cls.BITS must be a multiple of 8: {cls.BITS}")
    # Two's-complement bounds; MAXIMUM_EXCLUSIVE is one past the largest value.
    if cls.SIGNED:
        cls.MAXIMUM_EXCLUSIVE = 2 ** (cls.BITS - 1)
        cls.MINIMUM = -(2 ** (cls.BITS - 1))
    else:
        cls.MAXIMUM_EXCLUSIVE = 2**cls.BITS
        cls.MINIMUM = 0
    return cls
class StructStream(int):
    """int subclass with fixed-width big-endian (de)serialization.

    Subclasses are named like ``uint32``; parse_metadata_from_name fills in
    SIZE/BITS/SIGNED and the valid value bounds below.
    """

    SIZE = 0
    BITS = 0
    SIGNED = False
    MAXIMUM_EXCLUSIVE = 0
    MINIMUM = 0

    """
    Create a class that can parse and stream itself based on a struct.pack template string.
    """

    # This is just a partial exposure of the underlying int constructor. Liskov...
    # https://github.com/python/typeshed/blob/5d07ebc864577c04366fcc46b84479dbec033921/stdlib/builtins.pyi#L181-L185
    def __init__(self, value: Union[str, bytes, SupportsInt, SupportsIndex, SupportsTrunc]) -> None:
        # v is unused here and that is ok since .__new__() seems to have already
        # processed the parameter when creating the instance of the class. We have no
        # additional special action to take here beyond verifying that the newly
        # created instance satisfies the bounds limitations of the particular subclass.
        super().__init__()
        if not (self.MINIMUM <= self < self.MAXIMUM_EXCLUSIVE):
            raise ValueError(f"Value {self} does not fit into {type(self).__name__}")

    @classmethod
    def parse(cls: Type[_T_StructStream], f: BinaryIO) -> _T_StructStream:
        """Read exactly SIZE bytes from `f` and decode them."""
        read_bytes = f.read(cls.SIZE)
        return cls.from_bytes(read_bytes)

    def stream(self, f: BinaryIO) -> None:
        """Write the fixed-width big-endian encoding of this value to `f`."""
        f.write(bytes(self))

    @classmethod
    def from_bytes(cls: Type[_T_StructStream], blob: bytes) -> _T_StructStream:  # type: ignore[override]
        """Decode a value from exactly SIZE big-endian bytes."""
        if len(blob) != cls.SIZE:
            raise ValueError(f"{cls.__name__}.from_bytes() requires {cls.SIZE} bytes but got: {len(blob)}")
        return cls(int.from_bytes(blob, "big", signed=cls.SIGNED))

    def __bytes__(self) -> bytes:
        return super().to_bytes(length=self.SIZE, byteorder="big", signed=self.SIGNED)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/flax_logging.py | flax/util/flax_logging.py | import logging
from pathlib import Path
from typing import Any, Dict, List, Optional
import colorlog
from concurrent_log_handler import ConcurrentRotatingFileHandler
from logging.handlers import SysLogHandler
from flax.cmds.init_funcs import flax_full_version_str
from flax.util.path import path_from_root
from flax.util.default_root import DEFAULT_ROOT_PATH
default_log_level = "WARNING"
def get_beta_logging_config() -> Dict[str, Any]:
    """Return the fixed logging config used for beta-test log capture."""
    return {
        # NOTE(review): the full version string is used as a directory name
        # under the beta root — confirm this is the intended layout.
        "log_filename": f"{flax_full_version_str()}/flax-blockchain/beta.log",
        "log_level": "DEBUG",
        "log_stdout": False,
        "log_maxfilesrotation": 100,
        "log_maxbytesrotation": 100 * 1024 * 1024,
        "log_use_gzip": True,
    }
def get_file_log_handler(
    formatter: logging.Formatter, root_path: Path, logging_config: Dict[str, object]
) -> ConcurrentRotatingFileHandler:
    """Build a size-rotating (optionally gzipping) file handler under root_path.

    Rotation count, max size, and gzip use come from `logging_config` with
    conservative defaults; the log directory is created if missing.
    """
    log_path = path_from_root(root_path, str(logging_config.get("log_filename", "log/debug.log")))
    log_path.parent.mkdir(parents=True, exist_ok=True)
    maxrotation = logging_config.get("log_maxfilesrotation", 7)
    maxbytesrotation = logging_config.get("log_maxbytesrotation", 50 * 1024 * 1024)
    use_gzip = logging_config.get("log_use_gzip", False)
    handler = ConcurrentRotatingFileHandler(
        log_path, "a", maxBytes=maxbytesrotation, backupCount=maxrotation, use_gzip=use_gzip
    )
    handler.setFormatter(formatter)
    return handler
def initialize_logging(service_name: str, logging_config: Dict, root_path: Path, beta_root_path: Optional[Path] = None):
    """Attach stdout/file (and optional syslog / beta) handlers to the root logger.

    Exactly one of stdout or rotating-file logging is chosen by
    `logging_config["log_stdout"]`; syslog and the beta file handler are
    added on top when configured.
    """
    log_level = logging_config.get("log_level", default_log_level)
    # Pad logger names so log columns line up regardless of service name length.
    file_name_length = 33 - len(service_name)
    log_date_format = "%Y-%m-%dT%H:%M:%S"
    file_log_formatter = logging.Formatter(
        fmt=f"%(asctime)s.%(msecs)03d {service_name} %(name)-{file_name_length}s: %(levelname)-8s %(message)s",
        datefmt=log_date_format,
    )
    handlers: List[logging.Handler] = []
    if logging_config["log_stdout"]:
        stdout_handler = colorlog.StreamHandler()
        stdout_handler.setFormatter(
            colorlog.ColoredFormatter(
                f"%(asctime)s.%(msecs)03d {service_name} %(name)-{file_name_length}s: "
                f"%(log_color)s%(levelname)-8s%(reset)s %(message)s",
                datefmt=log_date_format,
                reset=True,
            )
        )
        handlers.append(stdout_handler)
    else:
        handlers.append(get_file_log_handler(file_log_formatter, root_path, logging_config))
    if logging_config.get("log_syslog", False):
        log_syslog_host = logging_config.get("log_syslog_host", "localhost")
        log_syslog_port = logging_config.get("log_syslog_port", 514)
        log_syslog_handler = SysLogHandler(address=(log_syslog_host, log_syslog_port))
        log_syslog_handler.setFormatter(logging.Formatter(fmt=f"{service_name} %(message)s", datefmt=log_date_format))
        handlers.append(log_syslog_handler)
    if beta_root_path is not None:
        # Beta mode adds a second DEBUG-level rotating file under the beta path.
        handlers.append(get_file_log_handler(file_log_formatter, beta_root_path, get_beta_logging_config()))
    root_logger = logging.getLogger()
    log_level_exceptions = {}
    for handler in handlers:
        try:
            handler.setLevel(log_level)
        except Exception as e:
            # Invalid level name: fall back now, report after all handlers attach
            # (so the error itself gets logged through them).
            handler.setLevel(default_log_level)
            log_level_exceptions[handler] = e
        root_logger.addHandler(handler)
    for handler, exception in log_level_exceptions.items():
        root_logger.error(
            f"Handler {handler}: Invalid log level '{log_level}' found in {service_name} config. "
            f"Defaulting to: {default_log_level}. Error: {exception}"
        )
    # Adjust the root logger to the smallest used log level since its default level is WARNING which would overwrite
    # the potentially smaller log levels of specific handlers.
    root_logger.setLevel(min(handler.level for handler in handlers))
    if root_logger.level <= logging.DEBUG:
        logging.getLogger("aiosqlite").setLevel(logging.INFO)  # Too much logging on debug level
def initialize_service_logging(service_name: str, config: Dict[str, Any]) -> None:
    """Wire up logging for one service using its section of the global config."""
    if service_name == "daemon":
        # TODO: Maybe introduce a separate `daemon` section in the config instead of having `daemon_port`, `logging`
        # and the daemon related stuff as top level entries.
        logging_section = config["logging"]
    else:
        logging_section = config[service_name]["logging"]
    beta_section = config.get("beta", {})
    beta_path = beta_section.get("path") if beta_section.get("enabled", False) else None
    initialize_logging(
        service_name=service_name,
        logging_config=logging_section,
        root_path=DEFAULT_ROOT_PATH,
        beta_root_path=beta_path,
    )
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/ssl_check.py | flax/util/ssl_check.py | import os
import stat
import sys
from flax.util.config import load_config, traverse_dict
from flax.util.permissions import octal_mode_string, verify_file_permissions
from logging import Logger
from pathlib import Path
from typing import Dict, List, Optional, Set, Tuple
# Target modes applied when fixing files: world-readable certs, owner-only keys.
DEFAULT_PERMISSIONS_CERT_FILE: int = 0o644
DEFAULT_PERMISSIONS_KEY_FILE: int = 0o600

# Masks containing permission bits we don't allow
RESTRICT_MASK_CERT_FILE: int = stat.S_IWGRP | stat.S_IXGRP | stat.S_IWOTH | stat.S_IXOTH  # 0o033
RESTRICT_MASK_KEY_FILE: int = (
    stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH
)  # 0o077

# Colon-separated config.yaml paths (see traverse_dict) of every certificate file.
CERT_CONFIG_KEY_PATHS = [
    "flax_ssl_ca:crt",
    "daemon_ssl:private_crt",
    "farmer:ssl:private_crt",
    "farmer:ssl:public_crt",
    "full_node:ssl:private_crt",
    "full_node:ssl:public_crt",
    "data_layer:ssl:private_crt",
    "data_layer:ssl:public_crt",
    "harvester:flax_ssl_ca:crt",
    "harvester:private_ssl_ca:crt",
    "harvester:ssl:private_crt",
    "introducer:ssl:public_crt",
    "private_ssl_ca:crt",
    "timelord:ssl:private_crt",
    "timelord:ssl:public_crt",
    "ui:daemon_ssl:private_crt",
    "wallet:ssl:private_crt",
    "wallet:ssl:public_crt",
]

# Colon-separated config.yaml paths of every private key file.
KEY_CONFIG_KEY_PATHS = [
    "flax_ssl_ca:key",
    "daemon_ssl:private_key",
    "farmer:ssl:private_key",
    "farmer:ssl:public_key",
    "full_node:ssl:private_key",
    "full_node:ssl:public_key",
    "harvester:flax_ssl_ca:key",
    "harvester:private_ssl_ca:key",
    "harvester:ssl:private_key",
    "introducer:ssl:public_key",
    "private_ssl_ca:key",
    "timelord:ssl:private_key",
    "timelord:ssl:public_key",
    "ui:daemon_ssl:private_key",
    "wallet:ssl:private_key",
    "wallet:ssl:public_key",
]

# Set to keep track of which files we've already warned about
warned_ssl_files: Set[Path] = set()
def get_all_ssl_file_paths(root_path: Path) -> Tuple[List[Path], List[Path]]:
    """Lookup config values and append to a list of files whose permissions we need to check"""
    from flax.ssl.create_ssl import get_mozilla_ca_crt

    all_certs: List[Path] = []
    all_keys: List[Path] = []
    try:
        config: Dict = load_config(root_path, "config.yaml", exit_on_error=False, fill_missing_services=True)
        for paths, parsed_list in [(CERT_CONFIG_KEY_PATHS, all_certs), (KEY_CONFIG_KEY_PATHS, all_keys)]:
            for path in paths:
                try:
                    file = root_path / Path(traverse_dict(config, path))
                    parsed_list.append(file)
                except Exception as e:
                    # A missing config entry only skips that one file.
                    print(
                        f"Failed to lookup config value for {path}: {e}"
                    )  # lgtm [py/clear-text-logging-sensitive-data]
        # Check the Mozilla Root CAs as well
        all_certs.append(Path(get_mozilla_ca_crt()))
    except (FileNotFoundError, ValueError):
        # No config at all (fresh install) or unparsable config: nothing to check.
        pass
    return all_certs, all_keys
def get_ssl_perm_warning(path: Path, actual_mode: int, expected_mode: int) -> str:
    """Format the warning shown for an SSL file with overly-open permissions."""
    actual = octal_mode_string(actual_mode)
    expected = octal_mode_string(expected_mode)
    return (
        f"Permissions {actual} for "
        f"'{path}' are too open. "  # lgtm [py/clear-text-logging-sensitive-data]
        f"Expected {expected}"
    )
def verify_ssl_certs_and_keys(
    cert_paths: List[Path], key_paths: List[Path], log: Optional[Logger] = None
) -> List[Tuple[Path, int, int]]:
    """Check that file permissions are properly set for the provided SSL cert and key files"""
    if sys.platform == "win32" or sys.platform == "cygwin":
        # TODO: ACLs for SSL certs/keys on Windows
        return []
    # Each entry is (path, actual_mode, expected_mode) for a too-open file.
    invalid_files_and_modes: List[Tuple[Path, int, int]] = []

    def verify_paths(paths: List[Path], restrict_mask: int, expected_permissions: int):
        nonlocal invalid_files_and_modes
        for path in paths:
            try:
                # Check that the file permissions are not too permissive
                is_valid, actual_permissions = verify_file_permissions(path, restrict_mask)
                if not is_valid:
                    if log is not None:
                        log.error(get_ssl_perm_warning(path, actual_permissions, expected_permissions))
                    warned_ssl_files.add(path)
                    invalid_files_and_modes.append((path, actual_permissions, expected_permissions))
            except FileNotFoundError:
                # permissions can't be dangerously wrong on nonexistent files
                pass
            except Exception as e:
                print(f"Unable to check permissions for {path}: {e}")  # lgtm [py/clear-text-logging-sensitive-data]

    verify_paths(cert_paths, RESTRICT_MASK_CERT_FILE, DEFAULT_PERMISSIONS_CERT_FILE)
    verify_paths(key_paths, RESTRICT_MASK_KEY_FILE, DEFAULT_PERMISSIONS_KEY_FILE)
    return invalid_files_and_modes
def check_ssl(root_path: Path) -> None:
    """
    Sanity checks on the SSL configuration. Checks that file permissions are properly
    set on the keys and certs, warning and exiting if permissions are incorrect.
    """
    if sys.platform == "win32" or sys.platform == "cygwin":
        # TODO: ACLs for SSL certs/keys on Windows
        return None
    certs_to_check, keys_to_check = get_all_ssl_file_paths(root_path)
    invalid_files = verify_ssl_certs_and_keys(certs_to_check, keys_to_check)
    if len(invalid_files):
        print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
        print("@ WARNING: UNPROTECTED SSL FILE! @")
        print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
        for path, actual_permissions, expected_permissions in invalid_files:
            print(
                get_ssl_perm_warning(path, actual_permissions, expected_permissions)
            )  # lgtm [py/clear-text-logging-sensitive-data]
        print("One or more SSL files were found with permission issues.")
        print("Run `flax init --fix-ssl-permissions` to fix issues.")
def check_and_fix_permissions_for_ssl_file(file: Path, mask: int, updated_mode: int) -> Tuple[bool, bool]:
    """Check file permissions and attempt to fix them if found to be too open"""
    if sys.platform == "win32" or sys.platform == "cygwin":
        # TODO: ACLs for SSL certs/keys on Windows
        return True, False
    valid: bool = True
    updated: bool = False
    # Check that the file permissions are not too permissive
    try:
        (good_perms, mode) = verify_file_permissions(file, mask)
        if not good_perms:
            valid = False
            print(
                f"Attempting to set permissions {octal_mode_string(updated_mode)} on "
                f"{file}"  # lgtm [py/clear-text-logging-sensitive-data]
            )
            os.chmod(str(file), updated_mode)
            updated = True
    except Exception as e:
        # chmod or stat failed (e.g. permission denied); report and mark invalid.
        print(f"Failed to change permissions on {file}: {e}")  # lgtm [py/clear-text-logging-sensitive-data]
        valid = False
    return valid, updated
def fix_ssl(root_path: Path) -> None:
    """Attempts to fix SSL cert/key file permissions that are too open"""
    if sys.platform == "win32" or sys.platform == "cygwin":
        # TODO: ACLs for SSL certs/keys on Windows
        return None
    updated: bool = False
    encountered_error: bool = False
    certs_to_check, keys_to_check = get_all_ssl_file_paths(root_path)
    files_to_fix = verify_ssl_certs_and_keys(certs_to_check, keys_to_check)
    # NOTE(review): verify_ssl_certs_and_keys returns (path, actual_mode,
    # expected_mode) tuples, but the second element is forwarded here as the
    # `mask` argument of check_and_fix_permissions_for_ssl_file. A non-zero
    # actual mode presumably always trips the mask check so the file still
    # gets chmod'ed to the expected mode — confirm this is intentional.
    for (file, mask, updated_mode) in files_to_fix:
        # Check that permissions are correct, and if not, attempt to fix
        (valid, fixed) = check_and_fix_permissions_for_ssl_file(file, mask, updated_mode)
        if fixed:
            updated = True
        if not valid and not fixed:
            encountered_error = True
    if encountered_error:
        print("One or more errors were encountered while updating SSL file permissions...")
    elif updated:
        print("Finished updating SSL file permissions")
    else:
        print("SSL file permissions are correct")
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/files.py | flax/util/files.py | import asyncio
import logging
import os
import shutil
from pathlib import Path
from typing import Union
from aiofiles import tempfile
from typing_extensions import Literal
log = logging.getLogger(__name__)
def move_file(src: Path, dst: Path):
    """
    Attempts to move the file at src to dst, falling back to a copy if the move fails.
    """
    # Make sure the destination's parent directory exists (owner-only perms).
    os.makedirs(dst.parent, mode=0o700, exist_ok=True)
    src_str = os.fspath(src)
    dst_str = os.fspath(dst)
    try:
        # Fast path: atomic rename/replace on the same filesystem.
        os.replace(src_str, dst_str)
    except Exception as e:
        log.debug(f"Failed to move {src} to {dst} using os.replace, reattempting with shutil.move: {e}")
        try:
            # If that fails, use the more robust shutil.move(), though it may internally initiate a copy
            shutil.move(src_str, dst_str)
        except Exception:
            log.exception(f"Failed to move {src} to {dst} using shutil.move")
            raise
async def move_file_async(src: Path, dst: Path, *, reattempts: int = 6, reattempt_delay: float = 0.5):
    """
    Attempts to move the file at src to dst, making multiple attempts if the move fails.
    """
    remaining_attempts: int = reattempts
    while True:
        try:
            move_file(src, dst)
        except Exception:
            if remaining_attempts > 0:
                log.debug(f"Failed to move {src} to {dst}, retrying in {reattempt_delay} seconds")
                remaining_attempts -= 1
                await asyncio.sleep(reattempt_delay)
            else:
                # Attempts exhausted; fall through to the existence check below.
                break
        else:
            break
    if not dst.exists():
        raise FileNotFoundError(f"Failed to move {src} to {dst}")
    else:
        log.debug(f"Moved {src} to {dst}")
async def write_file_async(file_path: Path, data: Union[str, bytes], *, file_mode: int = 0o600, dir_mode: int = 0o700):
    """
    Writes the provided data to a temporary file and then moves it to the final destination.
    """
    # Create the parent directory if necessary
    os.makedirs(file_path.parent, mode=dir_mode, exist_ok=True)
    # Pick text vs binary mode based on the payload type.
    mode: Literal["w+", "w+b"] = "w+" if type(data) == str else "w+b"
    # NOTE(review): annotated as Path but assigned f.name below, which is
    # presumably a str — confirm against aiofiles' NamedTemporaryFile.
    temp_file_path: Path
    async with tempfile.NamedTemporaryFile(dir=file_path.parent, mode=mode, delete=False) as f:
        temp_file_path = f.name
        await f.write(data)
        await f.flush()
        # Force the data to disk before the rename so a crash can't leave
        # an empty destination file.
        os.fsync(f.fileno())
    try:
        await move_file_async(temp_file_path, file_path)
    except Exception:
        log.exception(f"Failed to move temp file {temp_file_path} to {file_path}")
    else:
        # Tighten permissions only after the file has landed successfully.
        os.chmod(file_path, file_mode)
    finally:
        # We expect the file replace/move to have succeeded, but cleanup the temp file just in case
        try:
            if Path(temp_file_path).exists():
                os.remove(temp_file_path)
        except Exception:
            log.exception(f"Failed to remove temp file {temp_file_path}")
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/streamable.py | flax/util/streamable.py | from __future__ import annotations
import dataclasses
import io
import os
import pprint
import traceback
from enum import Enum
from typing import (
Any,
BinaryIO,
Callable,
ClassVar,
Collection,
Dict,
List,
Optional,
Tuple,
Type,
TypeVar,
Union,
get_type_hints,
)
from blspy import G1Element, G2Element, PrivateKey
from typing_extensions import Literal, get_args, get_origin
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.util.byte_types import hexstr_to_bytes
from flax.util.hash import std_hash
from flax.util.ints import uint32
pp = pprint.PrettyPrinter(indent=1, width=120, compact=True)
class StreamableError(Exception):
    """Base class for all streamable (de)serialization errors."""

    pass
class UnsupportedType(StreamableError):
    """Raised when a field's type has no streamable (de)serialization support."""

    pass
class DefinitionError(StreamableError):
    """Raised when the streamable decorator is applied to an invalid class."""

    def __init__(self, message: str, cls: Type[object]):
        super().__init__(
            f"{message} Correct usage is:\n\n"
            f"@streamable\n@dataclass(frozen=True)\nclass {cls.__name__}(Streamable):\n ..."
        )
class ParameterMissingError(StreamableError):
    """Raised when required dataclass fields are absent from an input dict."""

    def __init__(self, cls: type, missing: List[str]):
        super().__init__(
            f"{len(missing)} field{'s' if len(missing) != 1 else ''} missing for {cls.__name__}: {', '.join(missing)}"
        )
class InvalidTypeError(StreamableError):
    """Raised when a value has the wrong runtime type for its field."""

    def __init__(self, expected: type, actual: type):
        super().__init__(
            f"Invalid type: Expected {expected.__name__}, Actual: {actual.__name__}",
        )
class InvalidSizeError(StreamableError):
    """Raised when a fixed-length collection has the wrong number of elements."""

    def __init__(self, expected: int, actual: int):
        super().__init__(
            f"Invalid size: Expected {expected}, Actual: {actual}",
        )
class ConversionError(StreamableError):
    """Raised when converting a value to its target type fails; wraps the cause."""

    def __init__(self, value: object, to_type: type, exception: Exception):
        super().__init__(
            f"Failed to convert {value!r} from type {type(value).__name__} to {to_type.__name__}: "
            + "".join(traceback.format_exception_only(type(exception), value=exception)).strip()
        )
# TODO: Remove hack, this allows streaming these objects from binary
size_hints = {
    "PrivateKey": PrivateKey.PRIVATE_KEY_SIZE,
    "G1Element": G1Element.SIZE,
    "G2Element": G2Element.SIZE,
    "ConditionOpcode": 1,
}

# Type names handled via their from_bytes(_unchecked) constructors rather
# than the generic fixed-size parsing path.
unhashable_types = [
    "PrivateKey",
    "G1Element",
    "G2Element",
    "Program",
    "SerializedProgram",
]

# Any Streamable subclass; used to type alternate-constructor helpers.
_T_Streamable = TypeVar("_T_Streamable", bound="Streamable")

# Signatures of the per-field helper callables cached on each Field.
ParseFunctionType = Callable[[BinaryIO], object]
StreamFunctionType = Callable[[object, BinaryIO], None]
ConvertFunctionType = Callable[[object], object]
@dataclasses.dataclass(frozen=True)
class Field:
    """Precomputed per-field metadata: type info plus cached helper callables."""

    name: str
    type: Type[object]
    # True when the dataclass field has a default or default_factory.
    has_default: bool
    stream_function: StreamFunctionType
    parse_function: ParseFunctionType
    convert_function: ConvertFunctionType
    post_init_function: ConvertFunctionType


StreamableFields = Tuple[Field, ...]
def create_fields(cls: Type[object]) -> StreamableFields:
    """Resolve a dataclass's type hints and build one Field (with cached
    stream/parse/convert/post-init callables) per declared field."""
    hints = get_type_hints(cls)
    fields = []
    for field in dataclasses.fields(cls):
        hint = hints[field.name]
        fields.append(
            Field(
                name=field.name,
                type=hint,
                has_default=field.default is not dataclasses.MISSING
                or field.default_factory is not dataclasses.MISSING,
                stream_function=function_to_stream_one_item(hint),
                parse_function=function_to_parse_one_item(hint),
                convert_function=function_to_convert_one_item(hint),
                post_init_function=function_to_post_init_process_one_item(hint),
            )
        )
    return tuple(fields)
def is_type_List(f_type: object) -> bool:
    """Return True for `list` itself or any parameterized List[...] type."""
    return f_type == list or get_origin(f_type) == list
def is_type_SpecificOptional(f_type: object) -> bool:
    """
    Returns true for types such as Optional[T], but not Optional, or T.

    Compares the union's second member to NoneType directly instead of
    instantiating it: calling `get_args(f_type)[1]()` just to test for None
    raised TypeError for union members without a zero-argument constructor.
    """
    return get_origin(f_type) == Union and get_args(f_type)[1] is type(None)
def is_type_Tuple(f_type: object) -> bool:
    """Return True for `tuple` itself or any parameterized Tuple[...] type."""
    return f_type == tuple or get_origin(f_type) == tuple
def convert_optional(convert_func: ConvertFunctionType, item: Any) -> Any:
    """Apply `convert_func` to `item`, passing None through untouched."""
    return None if item is None else convert_func(item)
def convert_tuple(convert_funcs: List[ConvertFunctionType], items: Collection[Any]) -> Tuple[Any, ...]:
    """Convert each member of a fixed-length sequence with its matching
    converter; reject non-sequences and length mismatches."""
    if not isinstance(items, (list, tuple)):
        raise InvalidTypeError(tuple, type(items))
    if len(items) != len(convert_funcs):
        raise InvalidSizeError(len(convert_funcs), len(items))
    return tuple(fn(entry) for fn, entry in zip(convert_funcs, items))
def convert_list(convert_func: ConvertFunctionType, items: List[Any]) -> List[Any]:
    """Apply `convert_func` to each element of `items`; reject non-lists."""
    if isinstance(items, list):
        return [convert_func(element) for element in items]
    raise InvalidTypeError(list, type(items))
def convert_hex_string(item: str) -> bytes:
    """Convert a hex string to bytes via hexstr_to_bytes, wrapping any
    conversion failure in ConversionError."""
    if not isinstance(item, str):
        raise InvalidTypeError(str, type(item))
    try:
        return hexstr_to_bytes(item)
    except Exception as e:
        raise ConversionError(item, bytes, e) from e
def convert_byte_type(f_type: Type[Any], item: Any) -> Any:
    """Coerce `item` into the sized-bytes type `f_type`.

    Instances of the target type pass through unchanged; non-bytes input is
    first decoded as a hex string. Construction failures become ConversionError.
    """
    if isinstance(item, f_type):
        return item
    raw = item if isinstance(item, bytes) else convert_hex_string(item)
    try:
        return f_type(raw)
    except Exception as e:
        raise ConversionError(raw, f_type, e) from e
def convert_unhashable_type(f_type: Type[Any], item: Any) -> Any:
    """Coerce `item` into an unhashable (e.g. BLS) type via its from_bytes hook.

    Prefers `from_bytes_unchecked` when the type provides it; non-bytes input
    is hex-decoded first. Any failure is wrapped in ConversionError.
    """
    if isinstance(item, f_type):
        return item
    raw = item if isinstance(item, bytes) else convert_hex_string(item)
    try:
        if hasattr(f_type, "from_bytes_unchecked"):
            return f_type.from_bytes_unchecked(raw)
        return f_type.from_bytes(raw)
    except Exception as e:
        raise ConversionError(raw, f_type, e) from e
def convert_primitive(f_type: Type[Any], item: Any) -> Any:
    """Cast `item` to the primitive `f_type`, wrapping failures in ConversionError."""
    if not isinstance(item, f_type):
        try:
            item = f_type(item)
        except Exception as e:
            raise ConversionError(item, f_type, e) from e
    return item
def streamable_from_dict(klass: Type[_T_Streamable], item: Any) -> _T_Streamable:
    """
    Converts a dictionary based on a dataclass, into an instance of that dataclass.
    Recursively goes through lists, optionals, and dictionaries.
    """
    if isinstance(item, klass):
        # Already an instance of the target class; nothing to convert.
        return item
    if not isinstance(item, dict):
        raise InvalidTypeError(dict, type(item))
    fields = klass.streamable_fields()
    try:
        # Convert only the keys present in the dict; defaulted fields may be omitted.
        return klass(**{field.name: field.convert_function(item[field.name]) for field in fields if field.name in item})
    except TypeError as e:
        # A TypeError from the constructor usually means required fields were
        # missing from the input dict; if so, report them by name.
        missing_fields = [field.name for field in fields if field.name not in item and not field.has_default]
        if len(missing_fields) > 0:
            raise ParameterMissingError(klass, missing_fields) from e
        raise
def function_to_convert_one_item(f_type: Type[Any]) -> ConvertFunctionType:
    """Return a converter that coerces a JSON-style value into `f_type`.

    Containers (Optional/Tuple/List) are handled recursively; then custom
    `from_json_dict` types, byte types, unhashable (BLS) types, and finally
    primitives. The branch order is significant: earlier checks take priority.
    """
    if is_type_SpecificOptional(f_type):
        convert_inner_func = function_to_convert_one_item(get_args(f_type)[0])
        return lambda item: convert_optional(convert_inner_func, item)
    elif is_type_Tuple(f_type):
        args = get_args(f_type)
        convert_inner_tuple_funcs = []
        for arg in args:
            convert_inner_tuple_funcs.append(function_to_convert_one_item(arg))
        # Ignoring for now as the proper solution isn't obvious
        return lambda items: convert_tuple(convert_inner_tuple_funcs, items)  # type: ignore[arg-type]
    elif is_type_List(f_type):
        inner_type = get_args(f_type)[0]
        convert_inner_func = function_to_convert_one_item(inner_type)
        # Ignoring for now as the proper solution isn't obvious
        return lambda items: convert_list(convert_inner_func, items)  # type: ignore[arg-type]
    elif hasattr(f_type, "from_json_dict"):
        return lambda item: f_type.from_json_dict(item)
    elif issubclass(f_type, bytes):
        # Type is bytes, data is a hex string or bytes
        return lambda item: convert_byte_type(f_type, item)
    elif f_type.__name__ in unhashable_types:
        # Type is unhashable (bls type), so cast from hex string
        return lambda item: convert_unhashable_type(f_type, item)
    else:
        # Type is a primitive, cast with correct class
        return lambda item: convert_primitive(f_type, item)
def post_init_process_item(f_type: Type[Any], item: Any) -> object:
    """Coerce a constructor argument to `f_type`, trying several strategies.

    Order: direct construction, then `from_bytes_unchecked`/`from_bytes` on the
    raw value, then on `bytes(value)`. Raises InvalidTypeError when the result
    still does not have the expected type.
    """
    if not isinstance(item, f_type):
        try:
            item = f_type(item)
        except (TypeError, AttributeError, ValueError):
            if hasattr(f_type, "from_bytes_unchecked"):
                from_bytes_method: Callable[[bytes], Any] = f_type.from_bytes_unchecked
            else:
                from_bytes_method = f_type.from_bytes
            try:
                item = from_bytes_method(item)
            except Exception:
                # Last resort: convert through the buffer protocol first.
                item = from_bytes_method(bytes(item))
    if not isinstance(item, f_type):
        raise InvalidTypeError(f_type, type(item))
    return item
def function_to_post_init_process_one_item(f_type: Type[object]) -> ConvertFunctionType:
    """Return the validator/coercer used by __post_init__ for one field type.

    Optional/Tuple/List are handled recursively; every other type is delegated
    to post_init_process_item.
    """
    if is_type_SpecificOptional(f_type):
        process_inner_func = function_to_post_init_process_one_item(get_args(f_type)[0])
        return lambda item: convert_optional(process_inner_func, item)
    if is_type_Tuple(f_type):
        args = get_args(f_type)
        process_inner_tuple_funcs = []
        for arg in args:
            process_inner_tuple_funcs.append(function_to_post_init_process_one_item(arg))
        return lambda items: convert_tuple(process_inner_tuple_funcs, items)  # type: ignore[arg-type]
    if is_type_List(f_type):
        inner_type = get_args(f_type)[0]
        process_inner_func = function_to_post_init_process_one_item(inner_type)
        return lambda items: convert_list(process_inner_func, items)  # type: ignore[arg-type]
    return lambda item: post_init_process_item(f_type, item)
def recurse_jsonify(d: Any) -> Any:
    """
    Makes bytes objects and unhashable types into strings with 0x, and makes large ints into
    strings.
    """
    if dataclasses.is_dataclass(d):
        # Dataclass -> dict of its jsonified fields.
        new_dict = {}
        for field in dataclasses.fields(d):
            new_dict[field.name] = recurse_jsonify(getattr(d, field.name))
        return new_dict
    elif isinstance(d, list) or isinstance(d, tuple):
        # Both lists and tuples jsonify to JSON arrays.
        new_list = []
        for item in d:
            new_list.append(recurse_jsonify(item))
        return new_list
    elif isinstance(d, dict):
        new_dict = {}
        for name, val in d.items():
            new_dict[name] = recurse_jsonify(val)
        return new_dict
    elif type(d).__name__ in unhashable_types or issubclass(type(d), bytes):
        # Byte-like values render as 0x-prefixed hex strings.
        return f"0x{bytes(d).hex()}"
    elif isinstance(d, Enum):
        return d.name
    elif isinstance(d, bool):
        # bool must be tested before int: bool is an int subclass.
        return d
    elif isinstance(d, int):
        return int(d)
    elif d is None or type(d) == str:
        return d
    elif hasattr(d, "to_json_dict"):
        ret: Union[List[Any], Dict[str, Any], str, None, int] = d.to_json_dict()
        return ret
    raise UnsupportedType(f"failed to jsonify {d} (type: {type(d)})")
def parse_bool(f: BinaryIO) -> bool:
    """Read a single byte and decode it as a bool (0x00 -> False, 0x01 -> True)."""
    raw = f.read(1)
    assert raw is not None and len(raw) == 1  # Checks for EOF
    if raw == b"\x00":
        return False
    if raw == b"\x01":
        return True
    raise ValueError("Bool byte must be 0 or 1")
def parse_uint32(f: BinaryIO, byteorder: Literal["little", "big"] = "big") -> uint32:
    """Read exactly four bytes and decode them as a uint32 in the given byte order."""
    raw = f.read(4)
    assert raw is not None and len(raw) == 4  # Checks for EOF
    return uint32(int.from_bytes(raw, byteorder))
def write_uint32(f: BinaryIO, value: uint32, byteorder: Literal["little", "big"] = "big") -> None:
    """Serialize a uint32 as exactly four bytes in the given byte order."""
    encoded = value.to_bytes(4, byteorder)
    f.write(encoded)
def parse_optional(f: BinaryIO, parse_inner_type_f: ParseFunctionType) -> Optional[object]:
    """Read a one-byte presence flag, then parse the inner value if the flag is 0x01."""
    flag = f.read(1)
    assert flag is not None and len(flag) == 1  # Checks for EOF
    if flag == b"\x01":
        return parse_inner_type_f(f)
    if flag == b"\x00":
        return None
    raise ValueError("Optional must be 0 or 1")
def parse_rust(f: BinaryIO, f_type: Type[Any]) -> Any:
    """Delegate parsing to the type's Rust-backed `parse_rust` and advance the stream.

    Requires a BytesIO stream so the remaining buffer can be handed over
    without copying; the parser reports how many bytes it consumed.
    """
    assert isinstance(f, io.BytesIO)
    remaining = f.getbuffer()[f.tell() :]
    value, consumed = f_type.parse_rust(remaining)
    f.seek(consumed, os.SEEK_CUR)
    return value
def parse_bytes(f: BinaryIO) -> bytes:
    """Read a 4-byte big-endian length prefix followed by exactly that many bytes."""
    size = parse_uint32(f)
    payload = f.read(size)
    assert payload is not None and len(payload) == size
    return payload
def parse_list(f: BinaryIO, parse_inner_type_f: ParseFunctionType) -> List[object]:
    """Read a 4-byte element count, then parse that many items with the inner parser."""
    count = parse_uint32(f)
    return [parse_inner_type_f(f) for _ in range(count)]
def parse_tuple(f: BinaryIO, list_parse_inner_type_f: List[ParseFunctionType]) -> Tuple[object, ...]:
    """Parse one value per supplied parser, in order, and return them as a tuple."""
    return tuple(parse_one(f) for parse_one in list_parse_inner_type_f)
def parse_size_hints(f: BinaryIO, f_type: Type[Any], bytes_to_read: int, unchecked: bool) -> Any:
    """Read a fixed-size field and hand it to the type's from_bytes constructor.

    `unchecked` selects `from_bytes_unchecked`, which skips validation.
    """
    raw = f.read(bytes_to_read)
    assert raw is not None and len(raw) == bytes_to_read
    constructor = f_type.from_bytes_unchecked if unchecked else f_type.from_bytes
    return constructor(raw)
def parse_str(f: BinaryIO) -> str:
    """Read a 4-byte length prefix followed by that many UTF-8 bytes."""
    size = parse_uint32(f)
    encoded = f.read(size)
    assert encoded is not None and len(encoded) == size  # Checks for EOF
    return encoded.decode("utf-8")
def function_to_parse_one_item(f_type: Type[Any]) -> ParseFunctionType:
    """
    This function returns a function taking one argument `f: BinaryIO` that parses
    and returns a value of the given type.
    """
    inner_type: Type[Any]
    # The checks below are ordered by priority: bool first, then Optional,
    # then custom parse hooks, then raw bytes, containers, size-hinted types
    # and finally str.
    if f_type is bool:
        return parse_bool
    if is_type_SpecificOptional(f_type):
        inner_type = get_args(f_type)[0]
        parse_inner_type_f = function_to_parse_one_item(inner_type)
        return lambda f: parse_optional(f, parse_inner_type_f)
    if hasattr(f_type, "parse_rust"):
        return lambda f: parse_rust(f, f_type)
    if hasattr(f_type, "parse"):
        # Ignoring for now as the proper solution isn't obvious
        return f_type.parse  # type: ignore[no-any-return]
    if f_type == bytes:
        return parse_bytes
    if is_type_List(f_type):
        inner_type = get_args(f_type)[0]
        parse_inner_type_f = function_to_parse_one_item(inner_type)
        return lambda f: parse_list(f, parse_inner_type_f)
    if is_type_Tuple(f_type):
        inner_types = get_args(f_type)
        list_parse_inner_type_f = [function_to_parse_one_item(_) for _ in inner_types]
        return lambda f: parse_tuple(f, list_parse_inner_type_f)
    if hasattr(f_type, "from_bytes_unchecked") and f_type.__name__ in size_hints:
        bytes_to_read = size_hints[f_type.__name__]
        return lambda f: parse_size_hints(f, f_type, bytes_to_read, unchecked=True)
    if hasattr(f_type, "from_bytes") and f_type.__name__ in size_hints:
        bytes_to_read = size_hints[f_type.__name__]
        return lambda f: parse_size_hints(f, f_type, bytes_to_read, unchecked=False)
    if f_type is str:
        return parse_str
    raise UnsupportedType(f"Type {f_type} does not have parse")
def stream_optional(stream_inner_type_func: StreamFunctionType, item: Any, f: BinaryIO) -> None:
    """Write a presence byte (0x00/0x01) and, when present, the inner value."""
    if item is None:
        f.write(b"\x00")
        return
    f.write(b"\x01")
    stream_inner_type_func(item, f)
def stream_bytes(item: Any, f: BinaryIO) -> None:
    """Write a 4-byte big-endian length prefix followed by the raw bytes."""
    length = uint32(len(item))
    write_uint32(f, length)
    f.write(item)
def stream_list(stream_inner_type_func: StreamFunctionType, item: Any, f: BinaryIO) -> None:
    """Write a 4-byte element count, then stream each element in order."""
    write_uint32(f, uint32(len(item)))
    for entry in item:
        stream_inner_type_func(entry, f)
def stream_tuple(stream_inner_type_funcs: List[StreamFunctionType], item: Any, f: BinaryIO) -> None:
    """Stream each tuple element with its matching writer; no length prefix is written."""
    assert len(stream_inner_type_funcs) == len(item)
    for write_one, element in zip(stream_inner_type_funcs, item):
        write_one(element, f)
def stream_str(item: Any, f: BinaryIO) -> None:
    """Write a 4-byte length prefix, then the UTF-8 encoding of the string."""
    encoded = item.encode("utf-8")
    write_uint32(f, uint32(len(encoded)))
    f.write(encoded)
def stream_bool(item: Any, f: BinaryIO) -> None:
    """Write the bool as a single byte: 0x01 for True, 0x00 for False."""
    encoded = int(item).to_bytes(1, "big")
    f.write(encoded)
def stream_streamable(item: object, f: BinaryIO) -> None:
    """Delegate serialization to the item's own `stream` method."""
    stream_method = getattr(item, "stream")
    stream_method(f)
def stream_byte_convertible(item: object, f: BinaryIO) -> None:
    """Serialize by writing the item's `__bytes__` representation."""
    to_bytes = getattr(item, "__bytes__")
    f.write(to_bytes())
def function_to_stream_one_item(f_type: Type[Any]) -> StreamFunctionType:
    """Return a writer `(item, f) -> None` that serializes values of `f_type`.

    Branch order is significant: Optional first, then raw bytes, then custom
    `stream`/`__bytes__` hooks, then containers, and finally str and bool.
    """
    inner_type: Type[Any]
    if is_type_SpecificOptional(f_type):
        inner_type = get_args(f_type)[0]
        stream_inner_type_func = function_to_stream_one_item(inner_type)
        return lambda item, f: stream_optional(stream_inner_type_func, item, f)
    elif f_type == bytes:
        return stream_bytes
    elif hasattr(f_type, "stream"):
        return stream_streamable
    elif hasattr(f_type, "__bytes__"):
        return stream_byte_convertible
    elif is_type_List(f_type):
        inner_type = get_args(f_type)[0]
        stream_inner_type_func = function_to_stream_one_item(inner_type)
        return lambda item, f: stream_list(stream_inner_type_func, item, f)
    elif is_type_Tuple(f_type):
        inner_types = get_args(f_type)
        stream_inner_type_funcs = []
        for i in range(len(inner_types)):
            stream_inner_type_funcs.append(function_to_stream_one_item(inner_types[i]))
        return lambda item, f: stream_tuple(stream_inner_type_funcs, item, f)
    elif f_type is str:
        return stream_str
    elif f_type is bool:
        return stream_bool
    else:
        raise UnsupportedType(f"can't stream {f_type}")
def streamable(cls: Type[_T_Streamable]) -> Type[_T_Streamable]:
    """
    This decorator forces correct streamable protocol syntax/usage and populates the caches for types hints and
    (de)serialization methods for all members of the class. The correct usage is:

    @streamable
    @dataclass(frozen=True)
    class Example(Streamable):
        ...

    The order how the decorator are applied and the inheritance from Streamable are forced. The explicit inheritance is
    required because mypy doesn't analyse the type returned by decorators, so we can't just inherit from inside the
    decorator. The dataclass decorator is required to fetch type hints, let mypy validate constructor calls and restrict
    direct modification of objects by `frozen=True`.
    """
    if not dataclasses.is_dataclass(cls):
        raise DefinitionError("@dataclass(frozen=True) required first.", cls)
    try:
        # Ignore mypy here because we especially want to access a not available member to test if
        # the dataclass is frozen.
        object.__new__(cls)._streamable_test_if_dataclass_frozen_ = None  # type: ignore[attr-defined]
    except dataclasses.FrozenInstanceError:
        pass
    else:
        # The attribute assignment succeeded, i.e. the dataclass is mutable.
        raise DefinitionError("dataclass needs to be frozen.", cls)
    if not issubclass(cls, Streamable):
        raise DefinitionError("Streamable inheritance required.", cls)
    # Precompute and cache the per-field (de)serialization functions.
    cls._streamable_fields = create_fields(cls)
    return cls
class Streamable:
    """
    This class defines a simple serialization format, and adds methods to parse from/to bytes and json. It also
    validates and parses all fields at construction in `__post_init__` to make sure all fields have the correct type
    and can be streamed/parsed properly.

    The available primitives are:
    * Sized ints serialized in big endian format, e.g. uint64
    * Sized bytes serialized in big endian format, e.g. bytes32
    * BLS public keys serialized in bls format (48 bytes)
    * BLS signatures serialized in bls format (96 bytes)
    * bool serialized into 1 byte (0x01 or 0x00)
    * bytes serialized as a 4 byte size prefix and then the bytes.
    * ConditionOpcode is serialized as a 1 byte value.
    * str serialized as a 4 byte size prefix and then the utf-8 representation in bytes.

    An item is one of:
    * primitive
    * Tuple[item1, .. itemx]
    * List[item1, .. itemx]
    * Optional[item]
    * Custom item

    A streamable must be a Tuple at the root level (although a dataclass is used here instead).
    Iters are serialized in the following way:

    1. A tuple of x items is serialized by appending the serialization of each item.
    2. A List is serialized into a 4 byte size prefix (number of items) and the serialization of each item.
    3. An Optional is serialized into a 1 byte prefix of 0x00 or 0x01, and if it's one, it's followed by the
    serialization of the item.
    4. A Custom item is serialized by calling the .parse method, passing in the stream of bytes into it. An example is
    a CLVM program.

    All of the constituents must have parse/from_bytes, and stream/__bytes__ and therefore
    be of fixed size. For example, int cannot be a constituent since it is not a fixed size,
    whereas uint32 can be.

    Furthermore, a get_hash() member is added, which performs a serialization and a sha256.

    This class is used for deterministic serialization and hashing, for consensus critical
    objects such as the block header.

    Make sure to use the streamable decorator when inheriting from the Streamable class to prepare the streaming caches.
    """

    # Populated by the @streamable decorator via create_fields().
    _streamable_fields: ClassVar[StreamableFields]

    @classmethod
    def streamable_fields(cls) -> StreamableFields:
        """Return the cached per-field (de)serialization metadata for this class."""
        return cls._streamable_fields

    def __post_init__(self) -> None:
        # Validate/coerce every field right after dataclass construction.
        data = self.__dict__
        try:
            for field in self._streamable_fields:
                object.__setattr__(self, field.name, field.post_init_function(data[field.name]))
        except TypeError as e:
            # Surface missing constructor arguments as a precise error.
            missing_fields = [field.name for field in self._streamable_fields if field.name not in data]
            if len(missing_fields) > 0:
                raise ParameterMissingError(type(self), missing_fields) from e
            raise

    @classmethod
    def parse(cls: Type[_T_Streamable], f: BinaryIO) -> _T_Streamable:
        """Deserialize an instance from a binary stream, field by field."""
        # Create the object without calling __init__() to avoid unnecessary post-init checks in strictdataclass
        obj: _T_Streamable = object.__new__(cls)
        for field in cls._streamable_fields:
            object.__setattr__(obj, field.name, field.parse_function(f))
        return obj

    def stream(self, f: BinaryIO) -> None:
        """Serialize every field, in declaration order, into the binary stream."""
        for field in self._streamable_fields:
            field.stream_function(getattr(self, field.name), f)

    def get_hash(self) -> bytes32:
        """Return the sha256 hash of the canonical serialization."""
        return std_hash(bytes(self), skip_bytes_conversion=True)

    @classmethod
    def from_bytes(cls: Type[_T_Streamable], blob: bytes) -> _T_Streamable:
        """Deserialize from bytes, requiring the whole blob to be consumed."""
        f = io.BytesIO(blob)
        parsed = cls.parse(f)
        assert f.read() == b""
        return parsed

    def __bytes__(self: Any) -> bytes:
        """Return the canonical binary serialization of this instance."""
        f = io.BytesIO()
        self.stream(f)
        return bytes(f.getvalue())

    def __str__(self: Any) -> str:
        return pp.pformat(recurse_jsonify(self))

    def __repr__(self: Any) -> str:
        return pp.pformat(recurse_jsonify(self))

    def to_json_dict(self) -> Dict[str, Any]:
        """Convert to a JSON-compatible dict (hex strings for byte-like fields)."""
        ret: Dict[str, Any] = recurse_jsonify(self)
        return ret

    @classmethod
    def from_json_dict(cls: Type[_T_Streamable], json_dict: Dict[str, Any]) -> _T_Streamable:
        """Build and validate an instance from a JSON-style dict."""
        return streamable_from_dict(cls, json_dict)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/misc.py | flax/util/misc.py | from __future__ import annotations
import dataclasses
from pathlib import Path
from typing import Any, Dict, Sequence, Union
from flax.util.errors import InvalidPathError
from flax.util.ints import uint16
from flax.util.streamable import Streamable, recurse_jsonify, streamable
@streamable
@dataclasses.dataclass(frozen=True)
class VersionedBlob(Streamable):
    """A serializable opaque payload tagged with a format version number."""

    # Version number accompanying the serialized blob.
    version: uint16
    blob: bytes
def format_bytes(bytes: int) -> str:
    """Render a byte count as a human-readable size string, starting at MiB.

    Non-int input and negative counts render as "Invalid"; counts beyond the
    YiB range keep the YiB label with a large magnitude.
    """
    if not isinstance(bytes, int) or bytes < 0:
        return "Invalid"
    units = ("MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB")
    scale = 1024
    amount = bytes / scale
    index = 0
    amount /= scale
    while amount >= scale and index < len(units) - 1:
        amount /= scale
        index += 1
    return f"{amount:.3f} {units[index]}"
def format_minutes(minutes: int) -> str:
    """Render a duration in minutes as a coarse one- or two-unit English phrase.

    Examples: "Now", "5 minutes", "1 hour and 1 minute", "2 weeks and 3 days".
    Non-int input renders as "Invalid"; non-positive (but non-zero) input as
    "Unknown".
    """
    if not isinstance(minutes, int):
        return "Invalid"
    if minutes == 0:
        return "Now"
    hour_minutes = 60
    day_minutes = 24 * hour_minutes
    week_minutes = 7 * day_minutes
    months_minutes = 43800
    year_minutes = 12 * months_minutes

    def pluralize(unit: str, count: int) -> str:
        suffix = "s" if count > 1 else ""
        return f"{count} {unit}{suffix}"

    def with_remainder(unit: str, count: int, unit_minutes: int, next_unit: str, next_unit_minutes: int) -> str:
        phrase = pluralize(unit, count)
        remainder = minutes % unit_minutes
        if remainder >= next_unit_minutes:
            phrase = phrase + " and " + pluralize(next_unit, int(remainder / next_unit_minutes))
        return phrase

    years = int(minutes / year_minutes)
    if years > 0:
        return with_remainder("year", years, year_minutes, "month", months_minutes)
    months = int(minutes / months_minutes)
    if months > 0:
        return with_remainder("month", months, months_minutes, "week", week_minutes)
    weeks = int(minutes / week_minutes)
    if weeks > 0:
        return with_remainder("week", weeks, week_minutes, "day", day_minutes)
    days = int(minutes / day_minutes)
    if days > 0:
        return with_remainder("day", days, day_minutes, "hour", hour_minutes)
    hours = int(minutes / hour_minutes)
    if hours > 0:
        return with_remainder("hour", hours, hour_minutes, "minute", 1)
    if minutes > 0:
        return pluralize("minute", minutes)
    return "Unknown"
def prompt_yes_no(prompt: str) -> bool:
    """Ask a yes/no question on stdin until the answer starts with 'y' or 'n'."""
    while True:
        answer = str(input(prompt + " (y/n): ")).lower().strip()
        first = answer[:1]
        if first == "y":
            return True
        if first == "n":
            return False
def get_list_or_len(list_in: Sequence[object], length: bool) -> Union[int, Sequence[object]]:
    """Return just the length of the sequence when `length` is true, else the sequence itself."""
    if length:
        return len(list_in)
    return list_in
def dataclass_to_json_dict(instance: Any) -> Dict[str, Any]:
    """Recursively convert a dataclass instance into a JSON-compatible dict."""
    result: Dict[str, Any] = recurse_jsonify(instance)
    return result
def validate_directory_writable(path: Path) -> None:
    """Raise InvalidPathError unless `path` is an existing, writable directory.

    Probes writability by creating and removing a throwaway ".write_test" file
    inside the directory.

    Raises:
        InvalidPathError: if the directory does not exist or is not writable.
    """
    write_test_path = path / ".write_test"
    try:
        with write_test_path.open("w"):
            pass
        write_test_path.unlink()
    except FileNotFoundError as e:
        # The directory (or one of its parents) does not exist.
        raise InvalidPathError(path, "Directory doesn't exist") from e
    except OSError as e:
        # Permission errors and other filesystem failures.
        raise InvalidPathError(path, "Directory not writable") from e
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/errors.py | flax/util/errors.py | from __future__ import annotations
from enum import Enum
from pathlib import Path
from typing import Any, List, Optional
class Err(Enum):
    """Error codes shared by the validation, mempool, and networking layers.

    Per the comments below, negative values are temporary/network conditions
    that should not get a peer blacklisted, while positive values mark
    permanent validation failures.
    """

    # temporary errors. Don't blacklist
    DOES_NOT_EXTEND = -1
    BAD_HEADER_SIGNATURE = -2
    MISSING_FROM_STORAGE = -3
    INVALID_PROTOCOL_MESSAGE = -4  # We WILL ban for a protocol violation.
    SELF_CONNECTION = -5
    INVALID_HANDSHAKE = -6
    INVALID_ACK = -7
    INCOMPATIBLE_PROTOCOL_VERSION = -8
    DUPLICATE_CONNECTION = -9
    BLOCK_NOT_IN_BLOCKCHAIN = -10
    NO_PROOF_OF_SPACE_FOUND = -11
    PEERS_DONT_HAVE_BLOCK = -12
    MAX_INBOUND_CONNECTIONS_REACHED = -13

    UNKNOWN = 1

    # permanent errors. Block is un-salvageable garbage.
    INVALID_BLOCK_SOLUTION = 2
    INVALID_COIN_SOLUTION = 3
    DUPLICATE_OUTPUT = 4
    DOUBLE_SPEND = 5
    UNKNOWN_UNSPENT = 6
    BAD_AGGREGATE_SIGNATURE = 7
    WRONG_PUZZLE_HASH = 8
    BAD_FARMER_COIN_AMOUNT = 9
    INVALID_CONDITION = 10
    ASSERT_MY_COIN_ID_FAILED = 11
    ASSERT_ANNOUNCE_CONSUMED_FAILED = 12
    ASSERT_HEIGHT_RELATIVE_FAILED = 13
    ASSERT_HEIGHT_ABSOLUTE_FAILED = 14
    ASSERT_SECONDS_ABSOLUTE_FAILED = 15
    COIN_AMOUNT_EXCEEDS_MAXIMUM = 16
    SEXP_ERROR = 17
    INVALID_FEE_LOW_FEE = 18
    MEMPOOL_CONFLICT = 19
    MINTING_COIN = 20
    EXTENDS_UNKNOWN_BLOCK = 21
    COINBASE_NOT_YET_SPENDABLE = 22
    BLOCK_COST_EXCEEDS_MAX = 23
    BAD_ADDITION_ROOT = 24
    BAD_REMOVAL_ROOT = 25
    INVALID_POSPACE_HASH = 26
    INVALID_COINBASE_SIGNATURE = 27
    INVALID_PLOT_SIGNATURE = 28
    TIMESTAMP_TOO_FAR_IN_PAST = 29
    TIMESTAMP_TOO_FAR_IN_FUTURE = 30
    INVALID_TRANSACTIONS_FILTER_HASH = 31
    INVALID_POSPACE_CHALLENGE = 32
    INVALID_POSPACE = 33
    INVALID_HEIGHT = 34
    INVALID_COINBASE_AMOUNT = 35
    INVALID_MERKLE_ROOT = 36
    INVALID_BLOCK_FEE_AMOUNT = 37
    INVALID_WEIGHT = 38
    INVALID_TOTAL_ITERS = 39
    BLOCK_IS_NOT_FINISHED = 40
    INVALID_NUM_ITERATIONS = 41
    INVALID_POT = 42
    INVALID_POT_CHALLENGE = 43
    INVALID_TRANSACTIONS_GENERATOR_HASH = 44
    INVALID_POOL_TARGET = 45
    INVALID_COINBASE_PARENT = 46
    INVALID_FEES_COIN_PARENT = 47
    RESERVE_FEE_CONDITION_FAILED = 48
    NOT_BLOCK_BUT_HAS_DATA = 49
    IS_TRANSACTION_BLOCK_BUT_NO_DATA = 50
    INVALID_PREV_BLOCK_HASH = 51
    INVALID_TRANSACTIONS_INFO_HASH = 52
    INVALID_FOLIAGE_BLOCK_HASH = 53
    INVALID_REWARD_COINS = 54
    INVALID_BLOCK_COST = 55
    NO_END_OF_SLOT_INFO = 56
    INVALID_PREV_CHALLENGE_SLOT_HASH = 57
    INVALID_SUB_EPOCH_SUMMARY_HASH = 58
    NO_SUB_EPOCH_SUMMARY_HASH = 59
    SHOULD_NOT_MAKE_CHALLENGE_BLOCK = 60
    SHOULD_MAKE_CHALLENGE_BLOCK = 61
    INVALID_CHALLENGE_CHAIN_DATA = 62
    # NOTE(review): codes 63 and 64 are unassigned here — presumably retired;
    # confirm before reusing them.
    INVALID_CC_EOS_VDF = 65
    INVALID_RC_EOS_VDF = 66
    INVALID_CHALLENGE_SLOT_HASH_RC = 67
    INVALID_PRIOR_POINT_RC = 68
    INVALID_DEFICIT = 69
    INVALID_SUB_EPOCH_SUMMARY = 70
    INVALID_PREV_SUB_EPOCH_SUMMARY_HASH = 71
    INVALID_REWARD_CHAIN_HASH = 72
    INVALID_SUB_EPOCH_OVERFLOW = 73
    INVALID_NEW_DIFFICULTY = 74
    INVALID_NEW_SUB_SLOT_ITERS = 75
    INVALID_CC_SP_VDF = 76
    INVALID_RC_SP_VDF = 77
    INVALID_CC_SIGNATURE = 78
    INVALID_RC_SIGNATURE = 79
    CANNOT_MAKE_CC_BLOCK = 80
    INVALID_RC_SP_PREV_IP = 81
    INVALID_RC_IP_PREV_IP = 82
    INVALID_IS_TRANSACTION_BLOCK = 83
    INVALID_URSB_HASH = 84
    OLD_POOL_TARGET = 85
    INVALID_POOL_SIGNATURE = 86
    INVALID_FOLIAGE_BLOCK_PRESENCE = 87
    INVALID_CC_IP_VDF = 88
    INVALID_RC_IP_VDF = 89
    IP_SHOULD_BE_NONE = 90
    INVALID_REWARD_BLOCK_HASH = 91
    INVALID_MADE_NON_OVERFLOW_INFUSIONS = 92
    NO_OVERFLOWS_IN_FIRST_SUB_SLOT_NEW_EPOCH = 93
    MEMPOOL_NOT_INITIALIZED = 94
    SHOULD_NOT_HAVE_ICC = 95
    SHOULD_HAVE_ICC = 96
    INVALID_ICC_VDF = 97
    INVALID_ICC_HASH_CC = 98
    INVALID_ICC_HASH_RC = 99
    INVALID_ICC_EOS_VDF = 100
    INVALID_SP_INDEX = 101
    TOO_MANY_BLOCKS = 102
    INVALID_CC_CHALLENGE = 103
    INVALID_PREFARM = 104
    ASSERT_SECONDS_RELATIVE_FAILED = 105
    BAD_COINBASE_SIGNATURE = 106
    # INITIAL_TRANSACTION_FREEZE = 107 # removed
    NO_TRANSACTIONS_WHILE_SYNCING = 108
    ALREADY_INCLUDING_TRANSACTION = 109
    INCOMPATIBLE_NETWORK_ID = 110
    PRE_SOFT_FORK_MAX_GENERATOR_SIZE = 111  # Size in bytes
    INVALID_REQUIRED_ITERS = 112
    TOO_MANY_GENERATOR_REFS = 113  # Number of uint32 entries in the List
    ASSERT_MY_PARENT_ID_FAILED = 114
    ASSERT_MY_PUZZLEHASH_FAILED = 115
    ASSERT_MY_AMOUNT_FAILED = 116
    GENERATOR_RUNTIME_ERROR = 117
    INVALID_COST_RESULT = 118
    INVALID_TRANSACTIONS_GENERATOR_REFS_ROOT = 119
    FUTURE_GENERATOR_REFS = 120  # All refs must be to blocks in the past
    GENERATOR_REF_HAS_NO_GENERATOR = 121
    DOUBLE_SPEND_IN_FORK = 122
    INVALID_FEE_TOO_CLOSE_TO_ZERO = 123
    COIN_AMOUNT_NEGATIVE = 124
    INTERNAL_PROTOCOL_ERROR = 125
    INVALID_SPEND_BUNDLE = 126
    FAILED_GETTING_GENERATOR_MULTIPROCESSING = 127
class ValidationError(Exception):
    """Raised when validation fails with a specific Err code and optional message."""

    def __init__(self, code: Err, error_msg: str = ""):
        self.code = code
        self.error_msg = error_msg


class ConsensusError(Exception):
    """Raised on consensus-rule failures; carries the Err code plus detail values.

    `errors` previously defaulted to a shared mutable `[]` (the classic mutable
    default-argument pitfall); a None sentinel now produces a fresh list per
    instance. The `code` attribute is stored for parity with ProtocolError.
    """

    def __init__(self, code: Err, errors: Optional[List[Any]] = None):
        super(ConsensusError, self).__init__(f"Error code: {code.name}")
        self.code = code
        self.errors = errors if errors is not None else []


class ProtocolError(Exception):
    """Raised on peer-protocol violations; carries the Err code plus detail values.

    Uses a None sentinel instead of a shared mutable `[]` default.
    """

    def __init__(self, code: Err, errors: Optional[List[Any]] = None):
        super(ProtocolError, self).__init__(f"Error code: {code.name}")
        self.code = code
        self.errors = errors if errors is not None else []
##
# Keychain errors
##
class KeychainException(Exception):
    """Base class for all keychain-related errors."""

    pass


class KeychainKeyDataMismatch(KeychainException):
    """Raised when stored KeyData does not match for the given data type."""

    def __init__(self, data_type: str):
        super().__init__(f"KeyData mismatch for: {data_type}")


class KeychainIsLocked(KeychainException):
    """Raised when an operation requires an unlocked keychain."""

    pass


class KeychainSecretsMissing(KeychainException):
    """Raised when the keychain is missing expected secret material."""

    pass


class KeychainRequiresMigration(KeychainException):
    """Raised when the keychain is in a legacy format that must be migrated."""

    def __init__(self) -> None:
        super().__init__("Keychain requires migration")


class KeychainCurrentPassphraseIsInvalid(KeychainException):
    """Raised when the supplied current passphrase does not unlock the keychain."""

    def __init__(self) -> None:
        super().__init__("Invalid current passphrase")


class KeychainMaxUnlockAttempts(KeychainException):
    """Raised after the maximum number of failed passphrase attempts."""

    def __init__(self) -> None:
        super().__init__("maximum passphrase attempts reached")


class KeychainNotSet(KeychainException):
    """Raised when no keychain backend has been configured."""

    pass


class KeychainIsEmpty(KeychainException):
    """Raised when the keychain contains no keys."""

    pass


class KeychainKeyNotFound(KeychainException):
    """Raised when a requested key is absent from the keychain."""

    pass


class KeychainMalformedRequest(KeychainException):
    """Raised when a keychain service request cannot be parsed."""

    pass


class KeychainMalformedResponse(KeychainException):
    """Raised when a keychain service response cannot be parsed."""

    pass


class KeychainProxyConnectionFailure(KeychainException):
    """Raised when the keychain service cannot be reached."""

    def __init__(self) -> None:
        super().__init__("Failed to connect to keychain service")


class KeychainLockTimeout(KeychainException):
    """Raised when acquiring the keychain lock times out."""

    pass


class KeychainProxyConnectionTimeout(KeychainException):
    """Raised when reconnecting to the keychain service times out."""

    def __init__(self) -> None:
        super().__init__("Could not reconnect to keychain service in 30 seconds.")


class KeychainUserNotFound(KeychainException):
    """Raised when no entry exists for the given service/user pair."""

    def __init__(self, service: str, user: str) -> None:
        super().__init__(f"user {user!r} not found for service {service!r}")


class KeychainFingerprintError(KeychainException):
    """Base class for errors concerning a specific key fingerprint."""

    def __init__(self, fingerprint: int, message: str) -> None:
        self.fingerprint = fingerprint
        super().__init__(f"fingerprint {str(fingerprint)!r} {message}")


class KeychainFingerprintNotFound(KeychainFingerprintError):
    """Raised when the given fingerprint does not exist in the keychain."""

    def __init__(self, fingerprint: int) -> None:
        super().__init__(fingerprint, "not found")


class KeychainFingerprintExists(KeychainFingerprintError):
    """Raised when adding a key whose fingerprint already exists."""

    def __init__(self, fingerprint: int) -> None:
        super().__init__(fingerprint, "already exists")


class KeychainLabelError(KeychainException):
    """Base class for errors concerning a key label."""

    def __init__(self, label: str, error: str):
        super().__init__(error)
        self.label = label


class KeychainLabelInvalid(KeychainLabelError):
    """Raised when a key label fails validation."""

    pass


class KeychainLabelExists(KeychainLabelError):
    """Raised when a label is already assigned to another fingerprint."""

    def __init__(self, label: str, fingerprint: int) -> None:
        super().__init__(label, f"label {label!r} already exists for fingerprint {str(fingerprint)!r}")
        self.fingerprint = fingerprint
##
# Miscellaneous errors
##
class InvalidPathError(Exception):
    """Raised when a filesystem path is missing, unwritable, or otherwise unusable."""

    def __init__(self, path: Path, error_message: str):
        super().__init__(f"{error_message}: {str(path)!r}")
        self.path = path
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/memory_profiler.py | flax/util/memory_profiler.py | from __future__ import annotations
import asyncio
import logging
import pathlib
import tracemalloc
from datetime import datetime
from typing import Dict, List, Optional, Set
from flax.util.path import path_from_root
async def mem_profile_task(root_path: pathlib.Path, service: str, log: logging.Logger) -> None:
    """Periodically dump tracemalloc heap snapshots for `service` until cancelled.

    One snapshot per minute is written under a timestamped directory inside the
    service's root path; cancellation of the task stops tracing via `finally`.

    Fix: the log calls previously pre-formatted their messages ("%" operator /
    f-string); logging's lazy %-style arguments defer formatting until the
    record is actually emitted.
    """
    profile_dir = path_from_root(root_path, f"memory-profile-{service}") / datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
    # NOTE(review): the ":" characters in the timestamp are not legal in
    # Windows file names — confirm this task only runs on POSIX systems.
    log.info("Starting memory profiler. saving to %s", profile_dir)
    profile_dir.mkdir(parents=True, exist_ok=True)
    try:
        # Keep up to 30 stack frames per allocation for the call-tree analysis.
        tracemalloc.start(30)
        counter = 0
        while True:
            # this will throw CancelledError when we're exiting
            await asyncio.sleep(60)
            snapshot = tracemalloc.take_snapshot()
            snapshot.dump(str(profile_dir / f"heap-{counter:05d}.profile"))
            log.info(
                "Heap usage: %0.3f MB profile %05d",
                tracemalloc.get_traced_memory()[0] / 1000000,
                counter,
            )
            counter += 1
    finally:
        tracemalloc.stop()
if __name__ == "__main__":
    # Stand-alone analysis mode: post-process snapshots written by mem_profile_task.
    import sys
    from functools import lru_cache
    from subprocess import check_call
    from sys import stdout
    from colorama import Back, Fore, Style, init

    # Monotonically increasing id used to name graphviz nodes in analyze_slot.
    g_next_id: int = 0

    profile_dir = pathlib.Path(sys.argv[1])
    # strip=False keeps ANSI color codes even when stdout is not a tty.
    init(strip=False)
    def print_timeline() -> None:
        """Print one colored line per snapshot showing total heap usage over time.

        Iterates heap-00000.profile, heap-00001.profile, ... until a load
        fails; the first missing file ends the loop via the broad except.
        """
        counter = 0
        try:
            while True:
                snapshot = tracemalloc.Snapshot.load(str(profile_dir / f"heap-{counter:05d}.profile"))
                # the total memory usage in MB
                total = sum(st.size for st in snapshot.statistics("filename")) / 1000000.0
                # Threshold-based coloring: red for the heaviest snapshots,
                # green for the lightest.
                if total > 150:
                    color = Fore.RED + Style.BRIGHT
                elif total > 120:
                    color = Fore.MAGENTA + Style.BRIGHT
                elif total > 90:
                    color = Fore.YELLOW + Style.BRIGHT
                elif total > 60:
                    color = Style.BRIGHT
                elif total < 30:
                    color = Fore.GREEN
                else:
                    color = ""
                # One bar segment per 20 MB.
                quantized = int(total // 20)
                print(f"{counter:05d}: {color}{total:3.0f} MB {Back.WHITE} {' ' * quantized}{Style.RESET_ALL}")
                counter += 1
        except Exception as e:
            # Expected once we run out of snapshot files.
            print(e)
def color(pct: float) -> str:
return f"{int((100.-pct)//10)+1}"
def fontcolor(pct: float) -> str:
if pct > 80 or pct < 20:
return "white"
else:
return "black"
    @lru_cache(maxsize=10000)
    def resolve_function(file: str, line: int) -> str:
        """Best-effort mapping of (file, line) to the enclosing function's name.

        Scans upward from `line` for a `def`; falls back to the file's
        basename when no def is found, and to "<unknown>" on any error.
        """
        try:
            with open(file, "r") as f:
                all_lines: List[str] = []
                for row in f:
                    all_lines.append(row)
            # line numbers start at 1
            while line > 0:
                line -= 1
                current = all_lines[line]
                if " def " in current or current.startswith("def "):
                    return current.split("def ")[1].split("(")[0]
            return file.rsplit("/", 1)[1]
        except Exception:
            return "<unknown>"
    def analyze_slot(slot: int) -> None:
        """Build a graphviz call-tree (.dot and .png) for one heap snapshot.

        Aggregates allocation tracebacks per function, emits nodes colored and
        labeled by their share of total allocated bytes, and edges labeled with
        each caller->callee share. Requires the `dot` binary on PATH for the
        png rendering step.
        """
        file = str(profile_dir / f"heap-{slot:05d}.profile")
        output_file = str(profile_dir / f"heap-{slot:05d}")
        print(f"generating call tree for slot {slot}")

        class CallInfo:
            # Aggregated bytes and call count for one caller->callee edge.
            size: int
            calls: int

            def add(self, size: int) -> None:
                self.size += size
                self.calls += 1

            def __init__(self, size: int) -> None:
                self.size = size
                self.calls = 1

        class Frame:
            # Aggregated allocation stats for a single function (one graph node).
            count: int
            size: int
            callers: Dict[str, CallInfo]
            fun_id: int

            def __init__(self, size: int) -> None:
                global g_next_id
                self.count = 1
                self.size = size
                self.callers = {}
                # Unique graphviz node id, taken from the module-level counter.
                self.fun_id = g_next_id
                g_next_id += 1

        all_frames: Dict[str, Frame] = {}
        total_size = 0
        calls = 0
        snapshot = tracemalloc.Snapshot.load(file)
        for trace in snapshot.traces:
            prev_fun: Optional[str] = None
            total_size += trace.size
            calls += 1
            # Progress indicator, updated every 256 traces.
            if ((calls - 1) & 255) == 0:
                stdout.write(f"\rtotal size: {total_size/1000000:0.3f} MB ({calls} allocs) ")
            # to support recursive functions, make sure we only visit each frame
            # once during traversal
            visited: Set[str] = set()
            for frame in trace.traceback:
                fun = resolve_function(frame.filename, frame.lineno)
                if fun in visited:
                    prev_fun = fun
                    continue
                visited.add(fun)
                if fun in all_frames:
                    all_frames[fun].count += 1
                    all_frames[fun].size += trace.size
                    if prev_fun:
                        if prev_fun in all_frames[fun].callers:
                            all_frames[fun].callers[prev_fun].add(trace.size)
                        else:
                            all_frames[fun].callers[prev_fun] = CallInfo(trace.size)
                else:
                    all_frames[fun] = Frame(trace.size)
                    if prev_fun:
                        all_frames[fun].callers[prev_fun] = CallInfo(trace.size)
                prev_fun = fun

        print(f"\nwriting {output_file + '.dot'}")
        with open(output_file + ".dot", "w") as f:
            f.write(
                "digraph {\n"
                'node [fontsize=11, colorscheme=rdylgn10, style=filled, fontname="Arial"]\n'
                'edge [fontsize=11, colorscheme=rdylgn10, fontname="Arial"]\n'
            )
            filter_frames = set()
            for name, fr in all_frames.items():
                # frames that are less than 0.1% of the total allocations are
                # filtered
                if fr.size / total_size < 0.001:
                    filter_frames.add(name)
                    continue
                percent = fr.size * 100 / total_size
                f.write(
                    f'frame_{fr.fun_id} [shape=box, label="{name}()\\l'
                    f"{percent:0.2f}%\\n"
                    f"{fr.size/1000000:0.3f}MB\\n"
                    f'{fr.count}x\\n",'
                    f"fillcolor={color(percent)}, "
                    f"color={color(percent)}, "
                    f"fontcolor={fontcolor(percent)}]\n"
                )

            # print all edges (calls)
            for name, fr in all_frames.items():
                if name in filter_frames:
                    continue
                for caller, ci in fr.callers.items():
                    caller_info = all_frames.get(caller)
                    if caller_info is None:
                        continue
                    if caller in filter_frames:
                        continue
                    percent = ci.size * 100 / total_size
                    # filter edges that are too insignificant
                    if percent < 0.01:
                        continue
                    caller_frame = all_frames.get(caller)
                    assert caller_frame
                    caller_id = caller_frame.fun_id
                    f.write(
                        f"frame_{caller_id} -> frame_{fr.fun_id} "
                        f'[label="{percent:0.2f}%\\n{ci.calls}x",'
                        f"penwidth={0.3+(ci.size*6/total_size):0.2f},"
                        f"color={color(percent)}]\n"
                    )
            f.write("}\n")

        print(f"writing {output_file}.png")
        with open(output_file + ".png", "wb") as f2:
            check_call(["dot", "-Tpng", output_file + ".dot"], stdout=f2)
    # Command-line dispatch: one argument -> timeline overview,
    # two arguments -> single-slot call-tree graph.
    if len(sys.argv) == 2:
        print_timeline()
    elif len(sys.argv) == 3:
        slot = int(sys.argv[2])
        analyze_slot(slot)
    else:
        print(
            """USAGE:
memory_profiler.py <profile-directory>
Analyze memory usage at 1 minute interval from the profiles in the specified
directory. Print colored timeline to stdout
memory_profiler.py <profile-directory> <slot>
Analyze a single slot from the profile directory
"""
        )
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/recursive_replace.py | flax/util/recursive_replace.py | from __future__ import annotations
from dataclasses import replace
from typing import Any
def recursive_replace(root_obj: Any, replace_str: str, replace_with: Any) -> Any:
    """Return a copy of *root_obj* with the attribute at dotted path *replace_str* set to *replace_with*.

    Every dataclass along the path is copied via dataclasses.replace, so the
    original object tree is left untouched.
    """
    attrs = replace_str.split(".")
    # Walk down the path, remembering each intermediate object (excluding the leaf).
    chain = [root_obj]
    for attr in attrs[:-1]:
        chain.append(getattr(chain[-1], attr))
    # Rebuild from the innermost object outward, replacing one field per level.
    rebuilt = replace_with
    for obj, attr in zip(reversed(chain), reversed(attrs)):
        rebuilt = replace(obj, **{attr: rebuilt})
    return rebuilt
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/config.py | flax/util/config.py | from __future__ import annotations
import argparse
import contextlib
import copy
import logging
import os
import shutil
import sys
import tempfile
import time
import traceback
from pathlib import Path
from typing import Any, Callable, Dict, Iterator, Optional, Union
import pkg_resources
import yaml
from typing_extensions import Literal
from flax.util.lock import Lockfile
PEER_DB_PATH_KEY_DEPRECATED = "peer_db_path" # replaced by "peers_file_path"
WALLET_PEERS_PATH_KEY_DEPRECATED = "wallet_peers_path" # replaced by "wallet_peers_file_path"
log = logging.getLogger(__name__)
def initial_config_file(filename: Union[str, Path]) -> str:
    """Return the packaged default contents for config file *filename*.

    Reads the bundled resource named "initial-<filename>" (e.g.
    "initial-config.yaml") from this package and decodes it as text.
    """
    # Fix: the f-string previously interpolated nothing ("initial-(unknown)"),
    # leaving the filename parameter unused — that name can never match a
    # packaged resource.
    return pkg_resources.resource_string(__name__, f"initial-{filename}").decode()
def create_default_flax_config(root_path: Path, filenames=("config.yaml",)) -> None:
    """Write the packaged default config files under root_path/config, atomically.

    Args:
        root_path: flax root directory; files are placed in its "config" subdirectory.
        filenames: iterable of config file names to create.
    """
    # Fix: the default was a mutable list literal; use an immutable tuple so a
    # shared default can never be mutated across calls. Callers passing lists
    # are unaffected.
    for filename in filenames:
        default_config_file_data: str = initial_config_file(filename)
        path: Path = config_path_for_filename(root_path, filename)
        # Stage into a per-process temp file, then atomically move into place
        # so concurrent readers never observe a partially written config.
        tmp_path: Path = path.with_suffix("." + str(os.getpid()))
        path.parent.mkdir(parents=True, exist_ok=True)
        with open(tmp_path, "w") as f:
            f.write(default_config_file_data)
        try:
            os.replace(str(tmp_path), str(path))
        except PermissionError:
            # os.replace may raise PermissionError (notably on Windows);
            # shutil.move is not atomic but still completes the write.
            shutil.move(str(tmp_path), str(path))
def config_path_for_filename(root_path: Path, filename: Union[str, Path]) -> Path:
    """Resolve *filename* to its on-disk config location.

    Absolute filenames are returned unchanged; relative ones live in the
    "config" subdirectory under *root_path*.
    """
    candidate = Path(filename)
    return candidate if candidate.is_absolute() else root_path / "config" / filename
@contextlib.contextmanager
def lock_config(root_path: Path, filename: Union[str, Path]) -> Iterator[None]:
    """Hold an exclusive lock file for *filename*'s config file while the context is open."""
    # TODO: This is presently used in some tests to lock the saving of the
    # configuration file without having loaded it right there. This usage
    # should probably be removed and this function made private.
    config_path = config_path_for_filename(root_path, filename)
    with Lockfile.create(config_path):
        yield
@contextlib.contextmanager
def lock_and_load_config(
    root_path: Path,
    filename: Union[str, Path],
    fill_missing_services: bool = False,
) -> Iterator[Dict[str, Any]]:
    """Acquire the config lock, load the config, and yield it while the lock is still held.

    The yielded dict can be mutated and written back (see save_config, which
    expects to be called under the lock) without racing other processes.
    """
    with lock_config(root_path=root_path, filename=filename):
        config = _load_config_maybe_locked(
            root_path=root_path,
            filename=filename,
            acquire_lock=False,  # the surrounding context already holds the lock
            fill_missing_services=fill_missing_services,
        )
        yield config
def save_config(root_path: Path, filename: Union[str, Path], config_data: Any):
    """Serialize *config_data* as YAML and atomically install it as the config file.

    Must be called while holding the config lock for *filename*.
    """
    destination = config_path_for_filename(root_path, filename)
    # Stage the file in a temporary directory on the same filesystem so the
    # final rename is atomic.
    with tempfile.TemporaryDirectory(dir=destination.parent) as staging_dir:
        staged = Path(staging_dir) / Path(filename)
        with open(staged, "w") as out:
            yaml.safe_dump(config_data, out)
        try:
            os.replace(str(staged), destination)
        except PermissionError:
            # os.replace may be refused (notably on Windows); fall back to a
            # non-atomic move that still completes the write.
            shutil.move(str(staged), str(destination))
def load_config(
    root_path: Path,
    filename: Union[str, Path],
    sub_config: Optional[str] = None,
    exit_on_error: bool = True,
    fill_missing_services: bool = False,
) -> Dict:
    """Load a YAML config file, taking the config lock while reading.

    Args:
        root_path: flax root directory containing the "config" subdirectory.
        filename: config file name, or an absolute path.
        sub_config: if given, return only this top-level section.
        exit_on_error: when the file is missing, print a hint and sys.exit(-1);
            when False, raise ValueError instead.
        fill_missing_services: merge in default sections for known services
            absent from the loaded config.
    """
    return _load_config_maybe_locked(
        root_path=root_path,
        filename=filename,
        sub_config=sub_config,
        exit_on_error=exit_on_error,
        acquire_lock=True,
        fill_missing_services=fill_missing_services,
    )
def _load_config_maybe_locked(
    root_path: Path,
    filename: Union[str, Path],
    sub_config: Optional[str] = None,
    exit_on_error: bool = True,
    acquire_lock: bool = True,
    fill_missing_services: bool = False,
) -> Dict:
    """Load and parse a YAML config file, retrying on transient read/parse failures.

    Raises ValueError (missing file, exit_on_error=False) or RuntimeError
    (all retries exhausted). May call sys.exit(-1) when exit_on_error=True.
    """
    # This must be called under an acquired config lock, or acquire_lock should be True
    path = config_path_for_filename(root_path, filename)
    if not path.is_file():
        if not exit_on_error:
            raise ValueError("Config not found")
        print(f"can't find {path}")
        print("** please run `flax init` to migrate or create new config files **")
        # TODO: fix this hack
        sys.exit(-1)
    # This loop should not be necessary due to the config lock, but it's kept here just in case
    for i in range(10):
        try:
            with contextlib.ExitStack() as exit_stack:
                if acquire_lock:
                    exit_stack.enter_context(lock_config(root_path, filename))
                with open(path, "r") as opened_config_file:
                    r = yaml.safe_load(opened_config_file)
                if r is None:
                    # An empty/truncated file parses to None; back off (linearly
                    # growing sleep) and retry.
                    log.error(f"yaml.safe_load returned None: {path}")
                    time.sleep(i * 0.1)
                    continue
                if fill_missing_services:
                    r.update(load_defaults_for_missing_services(config=r, config_name=path.name))
                if sub_config is not None:
                    r = r.get(sub_config)
                return r
        except Exception as e:
            tb = traceback.format_exc()
            log.error(f"Error loading file: {tb} {e} Retrying {i}")
            time.sleep(i * 0.1)
    raise RuntimeError("Was not able to read config file successfully")
def load_config_cli(
    root_path: Path,
    filename: str,
    sub_config: Optional[str] = None,
    fill_missing_services: bool = False,
) -> Dict:
    """
    Loads configuration from the specified filename, in the config directory,
    and then overrides any properties using the passed in command line arguments.
    Nested properties in the config file can be used in the command line with ".",
    for example --farmer_peer.host. Does not support lists.
    """
    config = load_config(root_path, filename, sub_config, fill_missing_services=fill_missing_services)
    flattened_props = flatten_properties(config)
    parser = argparse.ArgumentParser()
    # Expose every non-list flattened key as a CLI flag whose type matches the
    # value found in the file; booleans accept yes/no-style strings via str2bool.
    for prop_name, value in flattened_props.items():
        if type(value) is list:
            continue
        prop_type: Callable = str2bool if type(value) is bool else type(value)  # type: ignore
        parser.add_argument(f"--{prop_name}", type=prop_type, dest=prop_name)
    # argparse leaves unsupplied flags as None, so only user-provided values override.
    for key, value in vars(parser.parse_args()).items():
        if value is not None:
            flattened_props[key] = value
    return unflatten_properties(flattened_props)
def flatten_properties(config: Dict) -> Dict:
    """Collapse nested dicts into a single-level dict keyed by dot-separated paths."""
    flat: Dict = {}
    for key, value in config.items():
        # NB: exact-type check (not isinstance) so dict subclasses are treated
        # as leaf values, matching the inverse in unflatten_properties.
        if type(value) is dict:
            for sub_key, sub_value in flatten_properties(value).items():
                flat[f"{key}.{sub_key}"] = sub_value
        else:
            flat[key] = value
    return flat
def unflatten_properties(config: Dict) -> Dict:
    """Inverse of flatten_properties: expand dot-separated keys back into nested dicts."""
    nested: Dict = {}
    for flat_key, value in config.items():
        if "." not in flat_key:
            nested[flat_key] = value
        else:
            add_property(nested, flat_key, value)
    return nested
def add_property(d: Dict, partial_key: str, value: Any):
    """Set *value* in nested dict *d* at dot-separated path *partial_key*, creating intermediate dicts."""
    if "." not in partial_key:  # reached the leaf key
        d[partial_key] = value
        return
    head, tail = partial_key.split(".", maxsplit=1)
    child = d.setdefault(head, {})
    if "." in tail:
        add_property(child, tail, value)
    else:
        child[tail] = value
def str2bool(v: Union[str, bool]) -> bool:
    """Parse a human-friendly boolean string; pass booleans through unchanged.

    Source from https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse

    Raises:
        argparse.ArgumentTypeError: for unrecognized values.
    """
    if isinstance(v, bool):
        return v
    # Fix: the tuples previously also listed "True"/"False", which v.lower()
    # can never produce — those entries were dead.
    normalized = v.lower()
    if normalized in ("yes", "true", "t", "y", "1"):
        return True
    elif normalized in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("Boolean value expected.")
def traverse_dict(d: Dict, key_path: str) -> Any:
    """
    Traverse nested dictionaries to find the element pointed-to by key_path.
    Key path components are separated by a ':' e.g.
        "root:child:a"

    Raises:
        TypeError: when asked to traverse into a non-dict value.
        KeyError: when a path component is missing (or maps to None).
    """
    if type(d) is not dict:
        raise TypeError(f"unable to traverse into non-dict value with key path: {key_path}")

    # Extract one path component at a time. Fix: the previous emptiness check
    # on the split result was unreachable — str.split always returns at least
    # one element — so it has been removed.
    components = key_path.split(":", maxsplit=1)
    key = components[0]
    remaining_key_path = components[1] if len(components) > 1 else None

    val: Any = d.get(key, None)
    if val is None:
        raise KeyError(f"value not found for key: {key}")
    if remaining_key_path is not None:
        return traverse_dict(val, remaining_key_path)
    return val
method_strings = Literal["default", "python_default", "fork", "forkserver", "spawn"]
method_values = Optional[Literal["fork", "forkserver", "spawn"]]

# Maps each accepted config string to the value handed to multiprocessing
# (None meaning: leave the interpreter's default start method in place).
start_methods: Dict[method_strings, method_values] = {
    "default": None,
    "python_default": None,
    "fork": "fork",
    "forkserver": "forkserver",
    "spawn": "spawn",
}


def process_config_start_method(
    config: Dict[str, Any],
    log: logging.Logger = logging.getLogger(__name__),
) -> method_values:
    """Resolve the "multiprocessing_start_method" config entry to a start method value.

    Unknown or missing values fall back to "default" (i.e. None), with a
    warning for unrecognized strings.

    Fix: the parameter previously defaulted to the ``logging.Logger`` *class*
    (``log=logging.Logger``), so calling without an explicit logger would crash
    on ``log.warning(...)``; it now defaults to a real module logger.
    """
    from_config: object = config.get("multiprocessing_start_method")

    choice: method_strings
    if from_config is None:
        # handle not only the key being missing, but also set to None
        choice = "default"
    elif from_config not in start_methods.keys():
        start_methods_string = ", ".join(option for option in start_methods.keys())
        log.warning(f"Configured start method {from_config!r} not available in: {start_methods_string}")
        choice = "default"
    else:
        # mypy doesn't realize that by the time we get here from_config must be one of
        # the keys in `start_methods` due to the above `not in` condition.
        choice = from_config  # type: ignore[assignment]

    processed_method = start_methods[choice]
    log.info(f"Selected multiprocessing start method: {choice}")
    return processed_method
def override_config(config: Dict[str, Any], config_overrides: Optional[Dict[str, Any]]):
    """Return a deep copy of *config* with dotted-path overrides applied on top."""
    merged = copy.deepcopy(config)
    if config_overrides:
        for dotted_key, override_value in config_overrides.items():
            merged = merged  # no-op; keeps override application explicit below
            add_property(merged, dotted_key, override_value)
    return merged
def selected_network_address_prefix(config: Dict[str, Any]) -> str:
    """Return the address prefix configured for the currently selected network."""
    network_name = config["selected_network"]
    return config["network_overrides"]["config"][network_name]["address_prefix"]
def load_defaults_for_missing_services(config: Dict[str, Any], config_name: str) -> Dict[str, Any]:
    """Build default config sections for known services missing from *config*.

    Returns only the defaulted sections (callers merge them in themselves).
    The "logging" and "selected_network" sub-sections are copied from the live
    config when present so the defaults stay consistent with it.
    """
    # Services whose sections may be absent in older configs.
    services = ["data_layer"]
    missing_services = [service for service in services if service not in config]
    defaulted = {}
    if len(missing_services) > 0:
        # Parse the packaged default config once and pull each missing section from it.
        marshalled_default_config: str = initial_config_file(config_name)
        unmarshalled_default_config = yaml.safe_load(marshalled_default_config)
        for service in missing_services:
            defaulted[service] = unmarshalled_default_config[service]
            if "logging" in defaulted[service]:
                to_be_referenced = config.get("logging")
                if to_be_referenced is not None:
                    defaulted[service]["logging"] = to_be_referenced
            if "selected_network" in defaulted[service]:
                to_be_referenced = config.get("selected_network")
                if to_be_referenced is not None:
                    # joining to hopefully force a new string of the same value
                    defaulted[service]["selected_network"] = "".join(to_be_referenced)
    return defaulted
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/create_alert_file.py | flax/util/create_alert_file.py | from __future__ import annotations
from pathlib import Path
from typing import List
from blspy import AugSchemeMPL
from flax.util.ints import uint32
from flax.util.keychain import Keychain
from flax.util.validate_alert import create_alert_file, create_not_ready_alert_file, validate_alert_file
# Interactive helper for producing a signed alert file.
# Flow: ask ready/not-ready -> pick a root key from the keychain -> derive an
# HD child key -> choose an output path -> (if ready) collect the alert
# payload, sign it, write the file and validate the signature.
bitcoin_hash = None
bram_message = None
status = None
# "not ready" produces a placeholder alert file instead of a signed payload.
while True:
    status_input = input("What is the status of this alert? (ready/not ready)").lower()
    if status_input == "ready":
        status = True
        break
    elif status_input == "not ready":
        status = False
        break
    else:
        print("Unknown input")
keychain: Keychain = Keychain()
print("\n___________ SELECT KEY ____________")
private_keys = keychain.get_all_private_keys()
if len(private_keys) == 0:
    print("There are no saved private keys.")
    quit()
print("Showing all private keys:")
for sk, seed in private_keys:
    print("\nFingerprint:", sk.get_g1().get_fingerprint())
selected_key = None
# Let the user pick a root key by its public-key fingerprint.
while True:
    user_input = input("\nEnter fingerprint of the key you want to use, or enter Q to quit: ").lower()
    if user_input == "q":
        quit()
    for sk, seed in private_keys:
        fingerprint = sk.get_g1().get_fingerprint()
        pub = sk.get_g1()
        if int(user_input) == fingerprint:
            print(f"Selected: {fingerprint}")
            selected_key = sk
            break
    if selected_key is not None:
        break
print("\n___________ HD PATH ____________")
while True:
    hd_path = input("Enter the HD path in the form 'm/12381/8444/n/n', or enter Q to quit: ").lower()
    if hd_path == "q":
        quit()
    verify = input(f"Is this correct path: {hd_path}? (y/n) ").lower()
    if verify == "y":
        break
k = Keychain()
private_keys = k.get_all_private_keys()
# Path components after the leading "m" are the child indices to derive.
path: List[uint32] = [uint32(int(i)) for i in hd_path.split("/") if i != "m"]
# Derive HD key using path form input
for c in path:
    selected_key = AugSchemeMPL.derive_child_sk(selected_key, c)
print("Public key:", selected_key.get_g1())
# get file path
file_path = None
while True:
    file_path = input("Enter the path where you want to save signed alert file, or q to quit: ")
    if file_path == "q" or file_path == "Q":
        quit()
    file_path = file_path.strip()
    y_n = input(f"Is this correct path (y/n)?: {file_path} ").lower()
    if y_n == "y":
        break
f_path: Path = Path(file_path)
if status is True:
    print("")
    print("___________ BITCOIN BLOCK HASH ____________")
    while True:
        bitcoin_hash = input("Insert Bitcoin block hash: ")
        print(f"Bitcoin block hash = {bitcoin_hash}")
        y_n = input("Does this look good (y/n): ").lower()
        if y_n == "y":
            break
    print("")
    print("___________ BRAM MESSAGE ____________")
    while True:
        bram_message = input("Insert message from Bram: ")
        print(f"Bram message = {bram_message}")
        y_n = input("Does this look good (y/n): ").lower()
        if y_n == "y":
            break
    # The signed preimage ties the alert to an external, independently
    # verifiable event (a Bitcoin block hash) plus a human-written message.
    genesis_challenge_preimage = f"bitcoin_hash:{bitcoin_hash},bram_message:{bram_message}"
    create_alert_file(f_path, selected_key, genesis_challenge_preimage)
    print(f"Alert written to file {f_path}")
    pubkey = f"{bytes(selected_key.get_g1()).hex()}"
    # Round-trip check: the file we just wrote must validate against our pubkey.
    validated = validate_alert_file(f_path, pubkey)
    if validated:
        print(f"Signature has passed validation for pubkey: {pubkey}")
    else:
        print(f"Signature has failed validation for pubkey: {pubkey}")
        assert False
else:
    create_not_ready_alert_file(f_path, selected_key)
    print(f"Alert written to file {f_path}")
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/api_decorators.py | flax/util/api_decorators.py | from __future__ import annotations
import functools
import logging
from dataclasses import dataclass, field
from inspect import signature
from typing import TYPE_CHECKING, Any, Callable, Coroutine, List, Optional, Union, get_type_hints
from flax.protocols.protocol_message_types import ProtocolMessageTypes
from flax.server.outbound_message import Message
from flax.util.streamable import Streamable, _T_Streamable
log = logging.getLogger(__name__)
if TYPE_CHECKING:
from flax.server.ws_connection import WSFlaxConnection
converted_api_f_type = Union[
Callable[[Union[bytes, _T_Streamable]], Coroutine[Any, Any, Optional[Message]]],
Callable[[Union[bytes, _T_Streamable], WSFlaxConnection], Coroutine[Any, Any, Optional[Message]]],
]
initial_api_f_type = Union[
Callable[[Any, _T_Streamable], Coroutine[Any, Any, Optional[Message]]],
Callable[[Any, _T_Streamable, WSFlaxConnection], Coroutine[Any, Any, Optional[Message]]],
]
metadata_attribute_name = "_flax_api_metadata"
@dataclass
class ApiMetadata:
    """Metadata flags attached to API handler functions by the decorators in this module."""
    # Set by @api_request: marks the function as an API message handler.
    api_function: bool = False
    # Set by @peer_required.
    peer_required: bool = False
    # Set by @bytes_required: the wrapper also passes the raw serialized
    # message as an extra "<param>_bytes" argument.
    bytes_required: bool = False
    # Set by @execute_task.
    execute_task: bool = False
    # Set by @reply_type: protocol message types expected as replies.
    reply_type: List[ProtocolMessageTypes] = field(default_factory=list)
    # Set by @api_request: the annotated type of the handler's message parameter.
    message_class: Optional[Any] = None
def get_metadata(function: Callable[..., Any]) -> ApiMetadata:
    """Return the metadata attached to *function*, or a fresh default instance if absent."""
    attached: Optional[ApiMetadata] = getattr(function, metadata_attribute_name, None)
    return ApiMetadata() if attached is None else attached
def set_default_and_get_metadata(function: Callable[..., Any]) -> ApiMetadata:
    """Return *function*'s attached metadata, attaching a new default instance first if needed."""
    attached: Optional[ApiMetadata] = getattr(function, metadata_attribute_name, None)
    if attached is not None:
        return attached
    fresh = ApiMetadata()
    setattr(function, metadata_attribute_name, fresh)
    return fresh
def api_request(f: initial_api_f_type) -> converted_api_f_type:  # type: ignore
    """Decorator turning a typed API handler into one that also accepts serialized bytes.

    Each non-bytes parameter of *f* may be passed either as an already-parsed
    Streamable instance or as raw bytes (parsed here via the annotation's
    from_bytes). When the handler is marked bytes_required, the raw
    serialization is additionally supplied as "<param>_bytes". Also records
    api_function and message_class in the handler's metadata.
    """
    @functools.wraps(f)
    def f_substitute(*args, **kwargs) -> Any:  # type: ignore
        binding = sig.bind(*args, **kwargs)
        binding.apply_defaults()
        inter = dict(binding.arguments)
        # Converts each parameter from a Python dictionary, into an instance of the object
        # specified by the type annotation (signature) of the function that is being called (f)
        # The method can also be called with the target type instead of a dictionary.
        for param_name, param_class in non_bytes_parameter_annotations.items():
            original = inter[param_name]
            if isinstance(original, Streamable):
                if metadata.bytes_required:
                    inter[f"{param_name}_bytes"] = bytes(original)
            elif isinstance(original, bytes):
                if metadata.bytes_required:
                    inter[f"{param_name}_bytes"] = original
                inter[param_name] = param_class.from_bytes(original)
        return f(**inter)  # type: ignore
    # Annotated parameters that should be parsed (everything except self/return
    # and parameters already typed as bytes).
    non_bytes_parameter_annotations = {
        name: hint for name, hint in get_type_hints(f).items() if name not in {"self", "return"} if hint is not bytes
    }
    sig = signature(f)
    # Note that `functools.wraps()` is copying over the metadata attribute from `f()`
    # onto `f_substitute()`.
    metadata = set_default_and_get_metadata(function=f_substitute)
    metadata.api_function = True
    # It would be good to better identify the single parameter of interest.
    metadata.message_class = [
        hint for name, hint in get_type_hints(f).items() if name not in {"self", "peer", "return"}
    ][-1]
    return f_substitute
def peer_required(func: Callable[..., Any]) -> Callable[..., Any]:
    """Mark *func*'s API metadata as requiring the peer connection argument."""
    meta = set_default_and_get_metadata(function=func)
    meta.peer_required = True
    return func
def bytes_required(func: Callable[..., Any]) -> Callable[..., Any]:
    """Mark *func*'s API metadata as wanting the raw serialized message bytes."""
    meta = set_default_and_get_metadata(function=func)
    meta.bytes_required = True
    return func
def execute_task(func: Callable[..., Any]) -> Callable[..., Any]:
    """Set the execute_task flag on *func*'s API metadata."""
    meta = set_default_and_get_metadata(function=func)
    meta.execute_task = True
    return func
def reply_type(prot_type: List[ProtocolMessageTypes]) -> Callable[..., Any]:
    """Decorator factory recording *prot_type* as the message types expected in reply."""
    def wrap(func: Callable[..., Any]) -> Callable[..., Any]:
        meta = set_default_and_get_metadata(function=func)
        meta.reply_type.extend(prot_type)
        return func
    return wrap
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/util/db_synchronous.py | flax/util/db_synchronous.py | from __future__ import annotations
def db_synchronous_on(setting: str) -> str:
    """Translate the config db-sync *setting* into an SQLite synchronous mode string."""
    known_modes = {"on": "NORMAL", "off": "OFF", "full": "FULL"}
    # Anything unrecognized defaults to synchronous=FULL mode. This can be
    # made more sophisticated in the future; there are still material
    # performance improvements to be had in cases where the risks are low,
    # e.g.
    #   type = GetDriveTypeW(db_path)
    #   if type == DRIVE_FIXED or type == DRIVE_RAMDISK:
    #       return "OFF"
    return known_modes.get(setting, "FULL")
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.