text
stringlengths 0
5.92k
|
|---|
"""The setup script.""" import json import os import sysconfig from importlib.util import find_spec from pathlib import Path from setuptools import find_namespace_packages, find_packages, setup from setuptools.command.build_py import build_py with open("README.md", "rb") as readme_file: readme = readme_file.read().decode("UTF-8") with open(f"src{os.sep}taipy{os.sep}version.json") as version_file: version = json.load(version_file) version_string = f'{version.get("major", 0)}.{version.get("minor", 0)}.{version.get("patch", 0)}' if vext := version.get("ext"): version_string = f"{version_string}.{vext}" requirements = [ "backports.zoneinfo>=0.2.1,<0.3;python_version<'3.9'", "cookiecutter>=2.1.1,<2.2", "taipy-gui@git+https://git@github.com/Avaiga/taipy-gui.git@develop", "taipy-rest@git+https://git@github.com/Avaiga/taipy-rest.git@develop", "taipy-templates@git+https://git@github.com/Avaiga/taipy-templates.git@develop", ] test_requirements = ["pytest>=3.8"] extras_require = { "ngrok": ["pyngrok>=5.1,<6.0"], "image": [ "python-magic>=0.4.24,<0.5;platform_system!='Windows'", "python-magic-bin>=0.4.14,<0.5;platform_system=='Windows'", ], "rdp": ["rdp>=0.8"], "arrow": ["pyarrow>=10.0.1,<11.0"], "mssql": ["pyodbc>=4"], } def _build_webapp(): already_exists = Path("./src/taipy/gui_core/lib/taipy-gui-core.js").exists() if not already_exists: # default site-packages path is from the current python interpreter site_packages_path = sysconfig.get_path("purelib") # taipy-gui should be available through setup_requires option # taipy-gui at this step is installed in a backend site-packages separated from the one being used by pip if find_spec("taipy") and find_spec("taipy.gui"): import taipy site_packages_path = Path(taipy.__file__).absolute().parent.parent # Specify the correct path to taipy-gui in gui/.env file env_file_path = Path(__file__).absolute().parent / "frontend" / "taipy" / ".env" if not os.path.exists(env_file_path): with open(env_file_path, "w") as env_file: 
env_file.write(f"TAIPY_GUI_DIR={site_packages_path}\n") os.system("cd frontend/taipy && npm ci && npm run build") class NPMInstall(build_py): def run(self): _build_webapp() build_py.run(self) setup( author="Avaiga", author_email="dev@taipy.io", python_requires=">=3.8", classifiers=[ "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Natural Language :: English", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", ], description="A 360° open-source platform from Python pilots to production-ready web apps.", install_requires=requirements, entry_points={ "console_scripts": [ "taipy = taipy._entrypoint:_entrypoint", ] }, license="Apache License 2.0", long_description=readme, long_description_content_type="text/markdown", keywords="taipy", name="taipy", package_dir={"": "src"}, packages=find_namespace_packages(where="src") + find_packages(include=["taipy"]), include_package_data=True, test_suite="tests", url="https://github.com/avaiga/taipy", version=version_string, zip_safe=False, extras_require=extras_require, cmdclass={"build_py": NPMInstall}, )
|
import re
import sys

# Rewrite relative <img src="..."> paths in README.md to absolute
# raw.githubusercontent.com URLs for the given repository and branch.
repo_name = sys.argv[1]
branch_name = sys.argv[2]

# Regex pattern <img\s+([^>]*?)(?<!['"])(?<!\/)src\s*=\s*(['"])(?!http|\/)(.*?)\2([^>]*?)>
pattern = re.compile("<img\\s+([^>]*?)(?<!['\"])(?<!\\/)src\\s*=\\s*(['\"])(?!http|\\/)(.*?)\\2([^>]*?)>")
replacement = r'<img \1src="https://raw.githubusercontent.com/Avaiga/{repo_name}/{branch_name}/\3"\4>'

with open("README.md") as readme_file:
    original_text = readme_file.read()

rewritten_text = re.sub(
    pattern,
    replacement.format(repo_name=repo_name, branch_name=branch_name),
    original_text,
)

with open("README.md", "w") as readme_file:
    readme_file.write(rewritten_text)
|
# ############################################################
# Generate Python interface definition files
# ############################################################
from src.taipy.gui.config import Config
import json
import os
import typing as t

# ############################################################
# Generate gui pyi file (gui/gui.pyi)
# ############################################################
gui_py_file = "./src/taipy/gui/gui.py"
gui_pyi_file = gui_py_file + "i"

os.system(f"pipenv run stubgen {gui_py_file} --no-import --parse-only --export-less -o ./")

# Expand the Gui.run() keyword arguments from the Config annotations.
gui_config = "".join(
    f", {k}: {v.__name__} = ..."
    if "<class" in str(v)
    else f", {k}: {str(v).replace('typing', 't').replace('src.taipy.gui.config.', '')} = ..."
    for k, v in Config.__annotations__.items()
)

replaced_content = ""
with open(gui_pyi_file, "r") as file:
    for line in file:
        if "def run(" in line:
            # Replace the generic stub signature with the expanded config kwargs.
            line = line.replace(
                ", run_server: bool = ..., run_in_thread: bool = ..., async_mode: str = ..., **kwargs", gui_config
            )
        replaced_content = replaced_content + line

with open(gui_pyi_file, "w") as write_file:
    write_file.write(replaced_content)

# ############################################################
# Generate Page Builder pyi file (gui/builder/__init__.pyi)
# ############################################################
builder_py_file = "./src/taipy/gui/builder/__init__.py"
builder_pyi_file = builder_py_file + "i"

with open("./src/taipy/gui/viselements.json", "r") as file:
    viselements = json.load(file)
with open("./tools/builder/block.txt", "r") as file:
    block_template = file.read()
with open("./tools/builder/control.txt", "r") as file:
    control_template = file.read()

os.system(f"pipenv run stubgen {builder_py_file} --no-import --parse-only --export-less -o ./")

with open(builder_pyi_file, "a") as file:
    file.write("from ._element import _Element, _Block\n")


def get_properties(element, viselements) -> t.List[t.Dict[str, t.Any]]:
    """Return the properties of a visual element, including inherited ones.

    Bug fix: work on a copy of element["properties"]. The original code did
    `properties = element["properties"]` followed by `properties += ...`,
    which appended every inherited property into the shared viselements JSON
    structure in place, polluting subsequent lookups of the same element.
    """
    properties = list(element["properties"])
    if "inherits" not in element:
        return properties
    for inherit in element["inherits"]:
        # An inherited element may live in any of the three element groups.
        inherit_element = next((e for e in viselements["undocumented"] if e[0] == inherit), None)
        if inherit_element is None:
            inherit_element = next((e for e in viselements["blocks"] if e[0] == inherit), None)
        if inherit_element is None:
            inherit_element = next((e for e in viselements["controls"] if e[0] == inherit), None)
        if inherit_element is None:
            raise RuntimeError(f"Can't find element with name {inherit}")
        properties += get_properties(inherit_element[1], viselements)
    return properties


def build_doc(element: t.Dict[str, t.Any]):
    """Format one property dict as a docstring argument line (empty if undocumented)."""
    if "doc" not in element:
        return ""
    doc = str(element["doc"]).replace("\n", f'\n{16*" "}')
    return f"{element['name']} ({element['type']}): {doc} {'(default: '+element['default_value'] + ')' if 'default_value' in element else ''}"  # noqa: E501


def _append_element_stubs(elements, template, doc_separator):
    """Append one stub class per element to the builder .pyi file.

    The controls and blocks loops were identical except for the template and
    the separator joining the documented arguments, so they share this helper.
    """
    for element in elements:
        name = element[0]
        property_list = []
        property_names = []
        # Deduplicate by name and skip dynamic "name[...]" property patterns.
        for property in get_properties(element[1], viselements):
            if property["name"] not in property_names and "[" not in property["name"]:
                property_list.append(property)
                property_names.append(property["name"])
        properties = ", ".join([f"{p} = ..." for p in property_names])
        doc_arguments = doc_separator.join([build_doc(p) for p in property_list])
        # append properties to __init__.pyi
        with open(builder_pyi_file, "a") as file:
            file.write(
                template.replace("{{name}}", name)
                .replace("{{properties}}", properties)
                .replace("{{doc_arguments}}", doc_arguments)
            )


_append_element_stubs(viselements["controls"], control_template, f"\n{12*' '}")
# NOTE(review): unlike the controls separator, the blocks separator has no
# leading "\n"; preserved as-is, but it looks like a missing newline — confirm
# against the generated builder/__init__.pyi.
_append_element_stubs(viselements["blocks"], block_template, f"{8*' '}")

os.system(f"pipenv run isort {gui_pyi_file}")
os.system(f"pipenv run black {gui_pyi_file}")
os.system(f"pipenv run isort {builder_pyi_file}")
os.system(f"pipenv run black {builder_pyi_file}")
|
import pytest

# Command-line options for e2e testing: (flag, default, help text).
_E2E_OPTIONS = (
    ("--e2e-base-url", "/", "base url for e2e testing"),
    ("--e2e-port", "5000", "port for e2e testing"),
)


def pytest_addoption(parser):
    """Register the e2e command-line options on the pytest parser."""
    for flag, default, help_text in _E2E_OPTIONS:
        parser.addoption(flag, action="store", default=default, help=help_text)


@pytest.fixture(scope="session")
def e2e_base_url(request):
    """Base URL passed on the command line (defaults to "/")."""
    return request.config.getoption("--e2e-base-url")


@pytest.fixture(scope="session")
def e2e_port(request):
    """Port passed on the command line (defaults to "5000")."""
    return request.config.getoption("--e2e-port")
|
from unittest import mock

from src.taipy._run import _run
from taipy.core import Core
from taipy.gui import Gui
from taipy.rest import Rest

# NOTE: with stacked @mock.patch decorators the mock of the BOTTOM decorator is
# injected as the FIRST parameter; the parameter lists below follow that rule.


@mock.patch("taipy.gui.Gui.run")
def test_run_pass_with_gui(gui_run):
    _run(Gui())
    gui_run.assert_called_once()


@mock.patch("taipy.core.Core.run")
def test_run_pass_with_core(core_run):
    _run(Core())
    core_run.assert_called_once()


@mock.patch("taipy.rest.Rest.run")
@mock.patch("taipy.core.Core.run")
def test_run_pass_with_rest(core_run, rest_run):
    # Fix: the parameters were swapped w.r.t. the decorator order (the bottom
    # patch, Core.run, is injected first), so each name now refers to the mock
    # it claims to — consistent with test_run_pass_with_core_and_rest below.
    _run(Rest())
    rest_run.assert_called_once()
    core_run.assert_called_once()


@mock.patch("taipy.rest.Rest.run")
@mock.patch("taipy.core.Core.run")
def test_run_pass_with_core_and_rest(core_run, rest_run):
    _run(Core(), Rest())
    core_run.assert_called_once()
    rest_run.assert_called_once()


@mock.patch("taipy.gui.Gui.run")
@mock.patch("taipy.rest.Rest.run")
@mock.patch("taipy.core.Core.run")
def test_run_pass_with_gui_and_rest(core_run, rest_run, gui_run):
    _run(Gui(), Rest())
    gui_run.assert_called_once()
    core_run.assert_called_once()
    # Rest is subsumed by the Gui server in this combination.
    rest_run.assert_not_called()


@mock.patch("taipy.gui.Gui.run")
@mock.patch("taipy.core.Core.run")
def test_run_pass_with_gui_and_core(core_run, gui_run):
    _run(Gui(), Core())
    gui_run.assert_called_once()
    core_run.assert_called_once()
|
"""Unit test package for taipy."""
|
from unittest.mock import patch

import pytest

from src.taipy.core import Core
from src.taipy.core._orchestrator._dispatcher import _DevelopmentJobDispatcher, _StandaloneJobDispatcher
from src.taipy.core._orchestrator._orchestrator import _Orchestrator
from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory
from src.taipy.core.config.job_config import JobConfig
from src.taipy.core.exceptions.exceptions import CoreServiceIsAlreadyRunning
from taipy.config import Config
from taipy.config.exceptions.exceptions import ConfigurationUpdateBlocked


class TestCore:
    def test_run_core_trigger_config_check(self, caplog):
        """Running Core with an invalid config must exit and log a checker error."""
        Config.configure_data_node(id="d0", storage_type="toto")
        with patch("sys.argv", ["prog"]):
            with pytest.raises(SystemExit):
                core = Core()
                core.run()
        expected_error_message = (
            "`storage_type` field of DataNodeConfig `d0` must be either csv, sql_table,"
            " sql, mongo_collection, pickle, excel, generic, json, parquet, or in_memory."
            ' Current value of property `storage_type` is "toto".'
        )
        assert expected_error_message in caplog.text
        core.stop()

    def test_run_core_as_a_service_development_mode(self):
        """run() must wire up the orchestrator and a development dispatcher."""
        _OrchestratorFactory._dispatcher = None
        with patch("sys.argv", ["prog"]):
            core = Core()

            # Nothing is wired up before run().
            assert core._orchestrator is None
            assert core._dispatcher is None
            assert _OrchestratorFactory._dispatcher is None

            core.run()

            assert core._orchestrator is not None
            assert core._orchestrator == _Orchestrator
            assert _OrchestratorFactory._orchestrator is not None
            assert _OrchestratorFactory._orchestrator == _Orchestrator
            assert core._dispatcher is not None
            assert isinstance(core._dispatcher, _DevelopmentJobDispatcher)
            assert isinstance(_OrchestratorFactory._dispatcher, _DevelopmentJobDispatcher)
            core.stop()

    def test_run_core_as_a_service_standalone_mode(self):
        """In standalone mode run() must start a standalone dispatcher."""
        _OrchestratorFactory._dispatcher = None
        with patch("sys.argv", ["prog"]):
            core = Core()

            assert core._orchestrator is None
            assert core._dispatcher is None
            assert _OrchestratorFactory._dispatcher is None

            Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
            core.run()

            assert core._orchestrator is not None
            assert core._orchestrator == _Orchestrator
            assert _OrchestratorFactory._orchestrator is not None
            assert _OrchestratorFactory._orchestrator == _Orchestrator
            assert core._dispatcher is not None
            assert isinstance(core._dispatcher, _StandaloneJobDispatcher)
            assert isinstance(_OrchestratorFactory._dispatcher, _StandaloneJobDispatcher)
            assert core._dispatcher.is_running()
            assert _OrchestratorFactory._dispatcher.is_running()
            core.stop()

    def test_core_service_can_only_be_run_once(self):
        """A second run() must fail from ANY instance until the service stops."""
        with patch("sys.argv", ["prog"]):
            core_instance_1 = Core()
            core_instance_2 = Core()

            core_instance_1.run()
            with pytest.raises(CoreServiceIsAlreadyRunning):
                core_instance_1.run()
            with pytest.raises(CoreServiceIsAlreadyRunning):
                core_instance_2.run()

            # Stop the Core service and run it again should work
            core_instance_1.stop()
            core_instance_1.run()

            core_instance_1.stop()
            core_instance_2.run()
            core_instance_2.stop()

    def test_block_config_update_when_core_service_is_running_development_mode(self):
        """Config changes are rejected while the service runs (development mode)."""
        _OrchestratorFactory._dispatcher = None
        with patch("sys.argv", ["prog"]):
            core = Core()
            core.run()
            with pytest.raises(ConfigurationUpdateBlocked):
                Config.configure_data_node(id="i1")
            core.stop()

    def test_block_config_update_when_core_service_is_running_standalone_mode(self):
        """Config changes are rejected while the service runs (standalone mode)."""
        _OrchestratorFactory._dispatcher = None
        with patch("sys.argv", ["prog"]):
            core = Core()
            Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
            core.run()
            with pytest.raises(ConfigurationUpdateBlocked):
                Config.configure_data_node(id="i1")
            core.stop()
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
import json
import os
from datetime import datetime, timedelta

import pytest

from src.taipy.core._repository._decoder import _Decoder
from src.taipy.core._repository._encoder import _Encoder


@pytest.fixture(scope="function", autouse=True)
def create_and_delete_json_file():
    """Serialize a sample payload through _Encoder before each test; clean up after."""
    payload = {
        "name": "testing",
        "date": datetime(1991, 1, 1),
        "default_data": "data for testing encoder",
        "validity_period": timedelta(days=1),
    }
    with open("data.json", "w") as f:
        json.dump(payload, f, ensure_ascii=False, indent=4, cls=_Encoder)
    yield
    os.unlink("data.json")


def test_json_encoder():
    """_Encoder must tag datetimes with a __type__/__value__ envelope."""
    with open("data.json") as json_file:
        data = json.load(json_file)
    assert data["name"] == "testing"
    assert data["default_data"] == "data for testing encoder"
    encoded_date = data["date"]
    assert encoded_date == {
        "__type__": "Datetime",
        "__value__": "1991-01-01T00:00:00",
    }
    assert encoded_date.get("__type__") == "Datetime"
    assert encoded_date.get("__value__") == "1991-01-01T00:00:00"


def test_json_decoder():
    """_Decoder must rebuild a real datetime from the encoded envelope."""
    with open("data.json") as json_file:
        data = json.load(json_file, cls=_Decoder)
    assert data["name"] == "testing"
    assert data["default_data"] == "data for testing encoder"
    assert data["date"] == datetime(1991, 1, 1)
|
import src.taipy.core.taipy as tp
from src.taipy.core.config import Config


def test_no_special_characters():
    """A plain ASCII scenario name round-trips unchanged."""
    cfg = Config.configure_scenario("scenario_1")
    scenario = tp.create_scenario(cfg, name="martin")
    assert scenario.name == "martin"
    all_scenarios = tp.get_scenarios()
    assert len(all_scenarios) == 1
    assert all_scenarios[0].name == "martin"


def test_many_special_characters():
    """A name full of punctuation and extended Latin characters round-trips too."""
    cfg = Config.configure_scenario("scenario_1")
    special_characters = (
        "!#$%&'()*+,-./:;<=>?@[]^_`\\{"
        "»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖ"
        "רÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñò"
        "óôõö÷øùúûüýþÿĀāĂ㥹ĆćĈĉĊċČčĎ"
        "ďĐđĒēĔĕĖėĘęĚěĜĝĞğĠġĢģĤĥĦħĨĩĪ"
        "īĬĭĮįİIJijĴĵĶķĸĹĺĻļĽľĿŀŁłŃńŅņŇ"
        "ňʼnŊŋŌōŎŏŐőŒœŔŕŖŗŘřŚśŜŝŞşŠšŢţ"
        "ŤťŦŧŨũŪūŬŭŮůŰűŲųŴŵŶŷŸŹźŻżŽžſ"
    )
    scenario = tp.create_scenario(cfg, name=special_characters)
    assert scenario.name == special_characters
    all_scenarios = tp.get_scenarios()
    assert len(all_scenarios) == 1
    assert all_scenarios[0].name == special_characters
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
import json
import os
import pathlib
import shutil

import pytest

from src.taipy.core.exceptions.exceptions import InvalidExportPath
from taipy.config.config import Config

from .mocks import MockConverter, MockFSRepository, MockModel, MockObj, MockSQLRepository

# Every test runs once against the filesystem-backed repository and once
# against the SQL-backed one; the list was previously duplicated verbatim on
# every @pytest.mark.parametrize.
_REPOSITORIES = [
    (MockFSRepository, {"model_type": MockModel, "dir_name": "mock_model", "converter": MockConverter}),
    (MockSQLRepository, {"model_type": MockModel, "converter": MockConverter}),
]


class TestRepositoriesStorage:
    @pytest.mark.parametrize("mock_repo,params", _REPOSITORIES)
    def test_save_and_fetch_model(self, mock_repo, params, init_sql_repo):
        repository = mock_repo(**params)
        obj = MockObj("uuid", "foo")
        repository._save(obj)
        assert obj == repository._load(obj.id)

    @pytest.mark.parametrize("mock_repo,params", _REPOSITORIES)
    def test_exists(self, mock_repo, params, init_sql_repo):
        repository = mock_repo(**params)
        obj = MockObj("uuid", "foo")
        repository._save(obj)
        assert repository._exists(obj.id)
        assert not repository._exists("not-existed-model")

    @pytest.mark.parametrize("mock_repo,params", _REPOSITORIES)
    def test_get_all(self, mock_repo, params, init_sql_repo):
        repository = mock_repo(**params)
        repository._delete_all()
        saved = []
        for i in range(5):
            obj = MockObj(f"uuid-{i}", f"Foo{i}")
            saved.append(obj)
            repository._save(obj)
        loaded = repository._load_all()
        assert len(loaded) == 5
        for obj in loaded:
            assert isinstance(obj, MockObj)
        assert sorted(saved, key=lambda o: o.id) == sorted(loaded, key=lambda o: o.id)

    @pytest.mark.parametrize("mock_repo,params", _REPOSITORIES)
    def test_delete_all(self, mock_repo, params, init_sql_repo):
        repository = mock_repo(**params)
        repository._delete_all()
        for i in range(5):
            repository._save(MockObj(f"uuid-{i}", f"Foo{i}"))
        assert len(repository._load_all()) == 5
        repository._delete_all()
        assert len(repository._load_all()) == 0

    @pytest.mark.parametrize("mock_repo,params", _REPOSITORIES)
    def test_delete_many(self, mock_repo, params, init_sql_repo):
        repository = mock_repo(**params)
        repository._delete_all()
        for i in range(5):
            repository._save(MockObj(f"uuid-{i}", f"Foo{i}"))
        assert len(repository._load_all()) == 5
        repository._delete_many(["uuid-0", "uuid-1"])
        assert len(repository._load_all()) == 3

    @pytest.mark.parametrize("mock_repo,params", _REPOSITORIES)
    def test_search(self, mock_repo, params, init_sql_repo):
        repository = mock_repo(**params)
        repository._delete_all()
        obj = MockObj("uuid", "foo")
        repository._save(obj)
        assert repository._search("name", "bar") == []
        assert repository._search("name", "foo") == [obj]

    @pytest.mark.parametrize("mock_repo,params", _REPOSITORIES)
    @pytest.mark.parametrize("export_path", ["tmp"])
    def test_export(self, mock_repo, params, export_path, init_sql_repo):
        repository = mock_repo(**params)
        repository._save(MockObj("uuid", "foo"))

        repository._export("uuid", export_path)
        exported_file_path = os.path.join(export_path, "mock_model/uuid.json")
        assert pathlib.Path(exported_file_path).exists()
        with open(exported_file_path, "r") as exported_file:
            exported_data = json.load(exported_file)
        assert exported_data["id"] == "uuid"
        assert exported_data["name"] == "foo"

        # Export to same location again should work
        repository._export("uuid", export_path)
        assert pathlib.Path(exported_file_path).exists()

        # Exporting into the storage folder itself is rejected (FS backend only).
        if mock_repo == MockFSRepository:
            with pytest.raises(InvalidExportPath):
                repository._export("uuid", Config.core.storage_folder)

        shutil.rmtree(export_path, ignore_errors=True)
|
import dataclasses
import pathlib
from dataclasses import dataclass
from typing import Any, Dict, Optional

from sqlalchemy import Column, String, Table
from sqlalchemy.dialects import sqlite
from sqlalchemy.orm import declarative_base, registry
from sqlalchemy.schema import CreateTable

from src.taipy.core._repository._abstract_converter import _AbstractConverter
from src.taipy.core._repository._filesystem_repository import _FileSystemRepository
from src.taipy.core._repository._sql_repository import _SQLRepository
from src.taipy.core._version._version_manager import _VersionManager
from taipy.config.config import Config


class Base:
    __allow_unmapped__ = True


Base = declarative_base(cls=Base)  # type: ignore
mapper_registry = registry()


@dataclass
class MockObj:
    # NOTE(review): no fields are annotated on this class, so @dataclass
    # generates an __eq__ that compares empty field tuples — any two MockObj
    # instances compare equal. Confirm whether the repository tests rely on
    # that before tightening it.
    def __init__(self, id: str, name: str, version: Optional[str] = None) -> None:
        self.id = id
        self.name = name
        # Fall back to the latest registered version when none is supplied.
        self._version = version if version else _VersionManager._get_latest_version()


@dataclass
class MockModel(Base):  # type: ignore
    """SQLAlchemy-mapped model mirroring MockObj for persistence tests."""

    __table__ = Table(
        "mock_model",
        mapper_registry.metadata,
        Column("id", String(200), primary_key=True),
        Column("name", String(200)),
        Column("version", String(200)),
    )
    id: str
    name: str
    version: str

    def to_dict(self):
        return dataclasses.asdict(self)

    @staticmethod
    def from_dict(data: Dict[str, Any]):
        return MockModel(id=data["id"], name=data["name"], version=data["version"])

    def _to_entity(self):
        return MockObj(id=self.id, name=self.name, version=self.version)

    @classmethod
    def _from_entity(cls, entity: MockObj):
        return MockModel(id=entity.id, name=entity.name, version=entity._version)

    def to_list(self):
        return [self.id, self.name, self.version]


class MockConverter(_AbstractConverter):
    """Converts between MockObj entities and MockModel rows."""

    @classmethod
    def _entity_to_model(cls, entity):
        return MockModel(id=entity.id, name=entity.name, version=entity._version)

    @classmethod
    def _model_to_entity(cls, model):
        return MockObj(id=model.id, name=model.name, version=model.version)


class MockFSRepository(_FileSystemRepository):
    """Filesystem-backed repository rooted at the configured storage folder."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    @property
    def _storage_folder(self) -> pathlib.Path:
        return pathlib.Path(Config.core.storage_folder)  # type: ignore


class MockSQLRepository(_SQLRepository):
    """SQL-backed repository that creates its table on construction."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.db.execute(str(CreateTable(MockModel.__table__, if_not_exists=True).compile(dialect=sqlite.dialect())))
|
import pytest

from taipy.config.config import Config


def test_job_config():
    # Default execution mode before any explicit configuration.
    assert Config.job_config.mode == "development"

    configured = Config.configure_job_executions(mode="standalone", max_nb_of_workers=2)
    assert configured.mode == "standalone"
    assert configured.max_nb_of_workers == 2
    assert Config.job_config.mode == "standalone"
    assert Config.job_config.max_nb_of_workers == 2

    # Arbitrary keyword arguments become job-config properties.
    Config.configure_job_executions(foo="bar")
    assert Config.job_config.foo == "bar"


def test_clean_config():
    job_config = Config.configure_job_executions(mode="standalone", max_nb_of_workers=2, prop="foo")
    assert Config.job_config is job_config

    job_config._clean()

    # Check if the instance before and after _clean() is the same
    assert Config.job_config is job_config
    assert job_config.mode == "development"
    assert job_config._config == {"max_nb_of_workers": 1}
    assert job_config.properties == {}
|
from taipy.config.config import Config


def migrate_pickle_path(dn):
    """Sample migration function: repoint a pickle data node's path."""
    dn.path = "s1.pkl"


def migrate_skippable(task):
    """Sample migration function: mark a task as skippable."""
    task.skippable = True


def test_migration_config():
    assert Config.migration_functions.migration_fcts == {}

    first_dn = Config.configure_data_node("data_nodes1", "pickle")
    migration_cfg = Config.add_migration_function(
        target_version="1.0",
        config=first_dn,
        migration_fct=migrate_pickle_path,
    )
    assert migration_cfg.migration_fcts == {"1.0": {"data_nodes1": migrate_pickle_path}}
    assert migration_cfg.properties == {}

    # Adding a second function for the same target version merges, not replaces.
    second_dn = Config.configure_data_node("data_nodes2", "pickle")
    migration_cfg = Config.add_migration_function(
        target_version="1.0",
        config=second_dn,
        migration_fct=migrate_pickle_path,
    )
    assert migration_cfg.migration_fcts == {
        "1.0": {"data_nodes1": migrate_pickle_path, "data_nodes2": migrate_pickle_path}
    }


def test_clean_config():
    assert Config.migration_functions.migration_fcts == {}

    dn_cfg = Config.configure_data_node("data_nodes1", "pickle")
    migration_cfg = Config.add_migration_function(
        target_version="1.0",
        config=dn_cfg,
        migration_fct=migrate_pickle_path,
    )
    assert migration_cfg.migration_fcts == {"1.0": {"data_nodes1": migrate_pickle_path}}
    assert migration_cfg.properties == {}

    # _clean() empties the section in place.
    migration_cfg._clean()
    assert migration_cfg.migration_fcts == {}
    assert migration_cfg._properties == {}
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
from src.taipy.core.config import CoreSection
from src.taipy.core.config.data_node_config import DataNodeConfig
from src.taipy.core.config.job_config import JobConfig
from src.taipy.core.config.migration_config import MigrationConfig
from src.taipy.core.config.scenario_config import ScenarioConfig
from src.taipy.core.config.task_config import TaskConfig
from taipy.config._config import _Config
from taipy.config.common.scope import Scope
from taipy.config.config import Config
from taipy.config.global_app.global_app_config import GlobalAppConfig


def _test_default_job_config(job_config: JobConfig):
    """Assert a JobConfig carries only its documented defaults."""
    assert job_config is not None
    assert job_config.mode == JobConfig._DEFAULT_MODE


def _test_default_core_section(core_section: CoreSection):
    """Assert a CoreSection carries only its documented defaults."""
    assert core_section is not None
    assert core_section.mode == CoreSection._DEFAULT_MODE
    assert core_section.version_number == ""
    assert not core_section.force
    assert core_section.root_folder == "./taipy/"
    assert core_section.storage_folder == ".data/"
    assert core_section.repository_type == "filesystem"
    assert core_section.repository_properties == {}
    assert len(core_section.properties) == 0


def _test_default_data_node_config(dn_config: DataNodeConfig):
    """Assert a DataNodeConfig carries only its documented defaults."""
    assert dn_config is not None
    assert dn_config.id is not None
    assert dn_config.storage_type == "pickle"
    assert dn_config.scope == Scope.SCENARIO
    assert dn_config.validity_period is None
    assert len(dn_config.properties) == 0  # type: ignore


def _test_default_task_config(task_config: TaskConfig):
    """Assert a TaskConfig carries only its documented defaults."""
    assert task_config is not None
    assert task_config.id is not None
    assert task_config.input_configs == []
    assert task_config.output_configs == []
    assert task_config.function is None
    assert not task_config.skippable
    assert len(task_config.properties) == 0  # type: ignore


def _test_default_scenario_config(scenario_config: ScenarioConfig):
    """Assert a ScenarioConfig carries only its documented defaults."""
    assert scenario_config is not None
    assert scenario_config.id is not None
    assert scenario_config.tasks == []
    assert scenario_config.task_configs == []
    assert scenario_config.additional_data_nodes == []
    assert scenario_config.additional_data_node_configs == []
    assert scenario_config.data_nodes == []
    assert scenario_config.data_node_configs == []
    assert scenario_config.sequences == {}
    assert len(scenario_config.properties) == 0  # type: ignore


def _test_default_version_migration_config(version_migration_config: MigrationConfig):
    """Assert a MigrationConfig carries only its documented defaults."""
    assert version_migration_config is not None
    assert version_migration_config.migration_fcts == {}
    assert len(version_migration_config.properties) == 0  # type: ignore


def _test_default_global_app_config(global_config: GlobalAppConfig):
    """Assert a GlobalAppConfig carries only its documented defaults."""
    assert global_config is not None
    assert not global_config.notification
    assert len(global_config.properties) == 0


def test_default_configuration():
    """All sections of the default configuration expose only default values."""
    default_config = Config._default_config

    assert default_config._global_config is not None
    _test_default_global_app_config(default_config._global_config)
    _test_default_global_app_config(Config.global_config)
    _test_default_global_app_config(GlobalAppConfig().default_config())

    assert default_config._unique_sections is not None
    assert len(default_config._unique_sections) == 3
    assert len(default_config._sections) == 3

    _test_default_job_config(default_config._unique_sections[JobConfig.name])
    _test_default_job_config(Config.job_config)
    _test_default_job_config(JobConfig().default_config())

    _test_default_version_migration_config(default_config._unique_sections[MigrationConfig.name])
    _test_default_version_migration_config(Config.migration_functions)
    _test_default_version_migration_config(MigrationConfig.default_config())

    _test_default_core_section(default_config._unique_sections[CoreSection.name])
    _test_default_core_section(Config.core)
    _test_default_core_section(CoreSection().default_config())

    _test_default_data_node_config(default_config._sections[DataNodeConfig.name][_Config.DEFAULT_KEY])
    _test_default_data_node_config(Config.data_nodes[_Config.DEFAULT_KEY])
    _test_default_data_node_config(DataNodeConfig.default_config())
    assert len(default_config._sections[DataNodeConfig.name]) == 1
    assert len(Config.data_nodes) == 1

    _test_default_task_config(default_config._sections[TaskConfig.name][_Config.DEFAULT_KEY])
    _test_default_task_config(Config.tasks[_Config.DEFAULT_KEY])
    _test_default_task_config(TaskConfig.default_config())
    assert len(default_config._sections[TaskConfig.name]) == 1
    assert len(Config.tasks) == 1

    _test_default_scenario_config(default_config._sections[ScenarioConfig.name][_Config.DEFAULT_KEY])
    # Fix: a stray no-op expression statement `Config.scenarios[_Config.DEFAULT_KEY]`
    # preceding the identical call below was removed (dead code).
    _test_default_scenario_config(Config.scenarios[_Config.DEFAULT_KEY])
    _test_default_scenario_config(ScenarioConfig.default_config())
    assert len(default_config._sections[ScenarioConfig.name]) == 1
    assert len(Config.scenarios) == 1
|
from unittest.mock import patch

import pytest

from src.taipy.core._init_version import _read_version
from src.taipy.core.config.core_section import CoreSection
from src.taipy.core.exceptions import ConfigCoreVersionMismatched
from taipy.config.config import Config
from tests.core.utils.named_temporary_file import NamedTemporaryFile

_MOCK_CORE_VERSION = "3.1.1"


@pytest.fixture(scope="function", autouse=True)
def mock_core_version():
    """Pin the core version to a known value for every test in this module."""
    with patch("src.taipy.core.config.core_section._read_version") as mock_read_version:
        mock_read_version.return_value = _MOCK_CORE_VERSION
        CoreSection._CURRENT_CORE_VERSION = _MOCK_CORE_VERSION
        Config.unique_sections[CoreSection.name] = CoreSection.default_config()
        Config._default_config._unique_sections[CoreSection.name] = CoreSection.default_config()
        yield


@pytest.fixture(scope="session", autouse=True)
def reset_core_version():
    """Restore the real core version once the test session ends."""
    yield
    CoreSection._CURRENT_CORE_VERSION = _read_version()


class TestCoreVersionInCoreSectionConfig:
    """Loading/overriding a config must reject core versions with a different minor."""

    major, minor, patch = _MOCK_CORE_VERSION.split(".")

    # (core_version found in the TOML file, is it accepted?)
    core_version_is_compatible = [
        # Current version and dev version should be compatible
        (f"{major}.{minor}.{patch}", True),
        (f"{major}.{minor}.{patch}.dev0", True),
        # Future versions with same major and minor should be compatible
        (f"{major}.{minor}.{int(patch) + 1}", True),
        (f"{major}.{minor}.{int(patch) + 1}.dev0", True),
        # Past versions with same major and minor should be compatible
        (f"{major}.{minor}.{int(patch) - 1}", True),
        (f"{major}.{minor}.{int(patch) - 1}.dev0", True),
        # Future versions with different minor number should be incompatible
        (f"{major}.{int(minor) + 1}.{patch}", False),
        (f"{major}.{int(minor) + 1}.{patch}.dev0", False),
        # Past versions with different minor number should be incompatible
        (f"{major}.{int(minor) - 1}.{patch}", False),
        (f"{major}.{int(minor) - 1}.{patch}.dev0", False),
    ]

    @staticmethod
    def _config_file(core_version=None) -> NamedTemporaryFile:
        """Build the shared TOML config; omit the core_version entry when None.

        Previously the same TOML literal was duplicated in three tests.
        """
        core_version_line = f'core_version = "{core_version}"\n' if core_version is not None else ""
        return NamedTemporaryFile(
            f"""
[TAIPY]

[JOB]
mode = "standalone"
max_nb_of_workers = "2:int"

[CORE]
root_folder = "./taipy/"
storage_folder = ".data/"
repository_type = "filesystem"
read_entity_retry = "0:int"
mode = "development"
version_number = ""
force = "False:bool"
{core_version_line}
[VERSION_MIGRATION.migration_fcts]
"""
        )

    @pytest.mark.parametrize("core_version, is_compatible", core_version_is_compatible)
    def test_load_configuration_file(self, core_version, is_compatible):
        file_config = self._config_file(core_version)
        if is_compatible:
            Config.load(file_config.filename)
            assert Config.unique_sections[CoreSection.name]._core_version == _MOCK_CORE_VERSION
        else:
            with pytest.raises(ConfigCoreVersionMismatched):
                Config.load(file_config.filename)

    @pytest.mark.parametrize("core_version, is_compatible", core_version_is_compatible)
    def test_override_configuration_file(self, core_version, is_compatible):
        file_config = self._config_file(core_version)
        if is_compatible:
            Config.override(file_config.filename)
            assert Config.unique_sections[CoreSection.name]._core_version == _MOCK_CORE_VERSION
        else:
            with pytest.raises(ConfigCoreVersionMismatched):
                Config.override(file_config.filename)

    def test_load_configuration_file_without_core_section(self):
        # No core_version in the file: the current core version is kept.
        file_config = self._config_file()
        Config.load(file_config.filename)
        assert Config.unique_sections[CoreSection.name]._core_version == _MOCK_CORE_VERSION
|
from datetime import timedelta

from taipy.config import Config
from taipy.config.common.scope import Scope


class TestConfig:
    """Smoke tests: each ``configure_*_data_node`` helper registers one config.

    Every test asserts ``len(Config.data_nodes) == 2`` — presumably the default
    data node config plus the one just created (TODO confirm against the
    module-level fixtures).
    """

    def test_configure_csv_data_node(self):
        a, b, c, d, e, f = "foo", "path", True, "numpy", Scope.SCENARIO, timedelta(1)
        Config.configure_csv_data_node(a, b, c, d, e, f)
        assert len(Config.data_nodes) == 2

    def test_configure_excel_data_node(self):
        a, b, c, d, e, f, g = "foo", "path", True, "Sheet1", "numpy", Scope.SCENARIO, timedelta(1)
        Config.configure_excel_data_node(a, b, c, d, e, f, g)
        assert len(Config.data_nodes) == 2

    def test_configure_generic_data_node(self):
        a, b, c, d, e, f, g, h = "foo", print, print, tuple([]), tuple([]), Scope.SCENARIO, timedelta(1), "qux"
        Config.configure_generic_data_node(a, b, c, d, e, f, g, property=h)
        assert len(Config.data_nodes) == 2

    def test_configure_in_memory_data_node(self):
        a, b, c, d, e = "foo", 0, Scope.SCENARIO, timedelta(1), "qux"
        Config.configure_in_memory_data_node(a, b, c, d, property=e)
        assert len(Config.data_nodes) == 2

    def test_configure_pickle_data_node(self):
        a, b, c, d, e = "foo", 0, Scope.SCENARIO, timedelta(1), "path"
        Config.configure_pickle_data_node(a, b, c, d, path=e)
        assert len(Config.data_nodes) == 2

    def test_configure_json_data_node(self):
        a, dp, ec, dc, sc, f, p = "foo", "path", "ec", "dc", Scope.SCENARIO, timedelta(1), "qux"
        Config.configure_json_data_node(a, dp, ec, dc, sc, f, path=p)
        assert len(Config.data_nodes) == 2

    def test_configure_sql_table_data_node(self):
        a, b, c, d, e, f, g, h, i, extra_args, exposed_type, scope, vp, k = (
            "foo",
            "user",
            "pwd",
            "db",
            "engine",
            "table_name",
            "port",
            "host",
            "driver",
            {"foo": "bar"},
            "exposed_type",
            Scope.SCENARIO,
            timedelta(1),
            "qux",
        )
        Config.configure_sql_table_data_node(a, b, c, d, e, f, g, h, i, extra_args, exposed_type, scope, vp, property=k)
        assert len(Config.data_nodes) == 2

    def test_configure_sql_data_node(self):
        # BUG FIX: the original unpacking bound `k` twice (once for the driver
        # value and once for "qux"), so the "driver" string was silently lost
        # and "qux" was passed as the driver argument. The driver variable is
        # now named `drv`.
        a, b, c, d, e, f, g, h, i, j, drv, extra_args, exposed_type, scope, vp, k = (
            "foo",
            "user",
            "pwd",
            "db",
            "engine",
            "read_query",
            "write_query_builder",
            "append_query_builder",
            "port",
            "host",
            "driver",
            {"foo": "bar"},
            "exposed_type",
            Scope.SCENARIO,
            timedelta(1),
            "qux",
        )
        Config.configure_sql_data_node(
            a, b, c, d, e, f, g, h, i, j, drv, extra_args, exposed_type, scope, vp, property=k
        )
        assert len(Config.data_nodes) == 2

    def test_configure_mongo_data_node(self):
        a, b, c, d, e, f, g, h, extra_args, scope, vp, k = (
            "foo",
            "db_name",
            "collection_name",
            None,
            "user",
            "pwd",
            "host",
            "port",
            {"foo": "bar"},
            Scope.SCENARIO,
            timedelta(1),
            "qux",
        )
        Config.configure_mongo_collection_data_node(a, b, c, d, e, f, g, h, extra_args, scope, vp, property=k)
        assert len(Config.data_nodes) == 2
|
from unittest.mock import patch

from src.taipy.core import Core
from src.taipy.core._version._version_manager_factory import _VersionManagerFactory
from taipy.config import Config
from tests.core.utils.named_temporary_file import NamedTemporaryFile


def test_core_section():
    """Config.core reflects, in order: defaults, configure_core, TOML file, CLI args."""
    # 1. Defaults: development mode with an auto-generated development version.
    with patch("sys.argv", ["prog"]):
        core = Core()
        core.run()
        assert Config.core.mode == "development"
        assert Config.core.version_number == _VersionManagerFactory._build_manager()._get_development_version()
        assert not Config.core.force
        core.stop()

    # 2. Programmatic configuration overrides the defaults.
    with patch("sys.argv", ["prog"]):
        Config.configure_core(mode="experiment", version_number="test_num", force=True)
        core = Core()
        core.run()
        assert Config.core.mode == "experiment"
        assert Config.core.version_number == "test_num"
        assert Config.core.force
        core.stop()

    # 3. A loaded TOML file overrides the programmatic configuration.
    toml_config = NamedTemporaryFile(
        content="""
[TAIPY]

[CORE]
mode = "production"
version_number = "test_num_2"
force = "true:bool"
"""
    )
    Config.load(toml_config.filename)
    with patch("sys.argv", ["prog"]):
        core = Core()
        core.run()
        assert Config.core.mode == "production"
        assert Config.core.version_number == "test_num_2"
        assert Config.core.force
        core.stop()

    # 4. CLI arguments override everything else.
    with patch("sys.argv", ["prog", "--experiment", "test_num_3", "--no-taipy-force"]):
        core = Core()
        core.run()
        assert Config.core.mode == "experiment"
        assert Config.core.version_number == "test_num_3"
        assert not Config.core.force
        core.stop()


def test_clean_config():
    """_clean() resets the core section to defaults without replacing the instance."""
    core_config = Config.configure_core(mode="experiment", version_number="test_num", force=True)

    assert Config.core is core_config

    core_config._clean()

    # Check if the instance before and after _clean() is the same
    assert Config.core is core_config
    assert core_config.mode == "development"
    assert core_config.version_number == ""
    assert core_config.force is False
    assert core_config.properties == {}
|
from unittest.mock import patch

import pytest

from src.taipy.core import Core
from src.taipy.core._version._version_manager import _VersionManager
from src.taipy.core.config import MigrationConfig
from taipy.config.config import Config


def mock_func():
    # Dummy callable used as a migration function in the tests below.
    pass


def test_check_if_entity_property_key_used_is_predefined(caplog):
    """The checker must reject `_entity_owner` as a MigrationConfig property key."""
    # A clean migration config passes the check without logging anything.
    with patch("sys.argv", ["prog", "--production", "1.0"]):
        core = Core()
        core.run()
        assert caplog.text == ""
        core.stop()
    caplog.clear()

    # `_entity_owner` is reserved: even a None value must abort the run.
    Config.unique_sections[MigrationConfig.name]._properties["_entity_owner"] = None
    with patch("sys.argv", ["prog", "--production", "1.0"]):
        with pytest.raises(SystemExit):
            core = Core()
            core.run()
        core.stop()
    assert (
        "Properties of MigrationConfig `VERSION_MIGRATION` cannot have `_entity_owner` as its property."
        in caplog.text
    )
    caplog.clear()

    # With a non-None value, the error message also reports the current value.
    Config.unique_sections[MigrationConfig.name]._properties["_entity_owner"] = "entity_owner"
    with patch("sys.argv", ["prog", "--production", "1.0"]):
        with pytest.raises(SystemExit):
            core = Core()
            core.run()
        core.stop()
    expected_error_message = (
        "Properties of MigrationConfig `VERSION_MIGRATION` cannot have `_entity_owner` as its property."
        ' Current value of property `_entity_owner` is "entity_owner".'
    )
    assert expected_error_message in caplog.text


def test_check_valid_version(caplog):
    """A migration target version must be a production version."""
    data_nodes1 = Config.configure_data_node("data_nodes1", "pickle")
    # "2.0" is not a production version while running with --production 1.0.
    Config.add_migration_function("2.0", data_nodes1, mock_func)
    with patch("sys.argv", ["prog", "--production", "1.0"]):
        with pytest.raises(SystemExit):
            core = Core()
            core.run()
        core.stop()
    assert "The target version for a migration function must be a production version." in caplog.text
    caplog.clear()
    Config.unblock_update()

    # Running with --production 2.0 makes "2.0" a valid target.
    with patch("sys.argv", ["prog", "--production", "2.0"]):
        core = Core()
        core.run()
        assert caplog.text == ""
        core.stop()


def test_check_callable_function(caplog):
    """Migration functions must be callables, not arbitrary values."""
    data_nodes1 = Config.configure_data_node("data_nodes1", "pickle")

    # An int is rejected.
    Config.add_migration_function("1.0", data_nodes1, 1)
    with patch("sys.argv", ["prog", "--production", "1.0"]):
        with pytest.raises(SystemExit):
            core = Core()
            core.run()
        core.stop()
    expected_error_message = (
        "The migration function of config `data_nodes1` from version 1.0 must be populated with"
        " Callable value. Current value of property `migration_fcts` is 1."
    )
    assert expected_error_message in caplog.text
    caplog.clear()
    Config.unblock_update()

    # A string is rejected too.
    Config.add_migration_function("1.0", data_nodes1, "bar")
    with patch("sys.argv", ["prog", "--production", "1.0"]):
        with pytest.raises(SystemExit):
            core = Core()
            core.run()
        core.stop()
    expected_error_message = (
        "The migration function of config `data_nodes1` from version 1.0 must be populated with"
        ' Callable value. Current value of property `migration_fcts` is "bar".'
    )
    assert expected_error_message in caplog.text
    caplog.clear()
    Config.unblock_update()

    # A real callable passes.
    Config.add_migration_function("1.0", data_nodes1, mock_func)
    with patch("sys.argv", ["prog", "--production", "1.0"]):
        core = Core()
        core.run()
        core.stop()


def test_check_migration_from_productions_to_productions_exist(caplog):
    """Missing migration paths between consecutive production versions are reported (as warnings, not errors)."""
    _VersionManager._set_production_version("1.0", True)
    _VersionManager._set_production_version("1.1", True)
    _VersionManager._set_production_version("1.2", True)

    with patch("sys.argv", ["prog", "--production", "1.0"]):
        core = Core()
        core.run()
        core.stop()
    assert 'There is no migration function from production version "1.0" to version "1.1".' in caplog.text
    assert 'There is no migration function from production version "1.1" to version "1.2".' in caplog.text
    caplog.clear()
    Config.unblock_update()

    # Adding the 1.1 -> 1.2 migration removes that warning; the 1.0 -> 1.1 one remains.
    Config.add_migration_function("1.2", "data_nodes1", mock_func)
    with patch("sys.argv", ["prog", "--production", "1.0"]):
        core = Core()
        core.run()
        core.stop()
    assert 'There is no migration function from production version "1.0" to version "1.1".' in caplog.text
|
import pytest

from taipy.config.checker.issue_collector import IssueCollector
from taipy.config.config import Config


class TestConfigIdChecker:
    """Reusing the same config_id across section types must be reported as an error."""

    def test_check_standalone_mode(self, caplog):
        # An empty configuration is valid.
        Config._collector = IssueCollector()
        Config.check()
        assert len(Config._collector.errors) == 0

        # "foo" and "bar" are each used by a single section type: still valid.
        Config.configure_data_node(id="foo", storage_type="in_memory")
        Config.configure_scenario(id="bar", task_configs=[], additional_data_node_configs=[])
        Config._collector = IssueCollector()
        Config.check()
        assert len(Config._collector.errors) == 0

        # Reusing "bar" for a data node clashes with the scenario above.
        # (A stray `task_configs=[]` argument, copy-pasted from the scenario
        # call, was removed from this configure_data_node call.)
        Config.configure_data_node(id="bar")
        with pytest.raises(SystemExit):
            Config._collector = IssueCollector()
            Config.check()
        assert len(Config._collector.errors) == 1
        expected_error_message = (
            "`bar` is used as the config_id of multiple configurations ['DATA_NODE', 'SCENARIO']"
            ' Current value of property `config_id` is "bar".'
        )
        assert expected_error_message in caplog.text

        # A third section type using "bar" extends the reported list.
        Config.configure_task(id="bar", function=print)
        with pytest.raises(SystemExit):
            Config._collector = IssueCollector()
            Config.check()
        assert len(Config._collector.errors) == 1
        expected_error_message = (
            "`bar` is used as the config_id of multiple configurations ['DATA_NODE', 'TASK', 'SCENARIO']"
            ' Current value of property `config_id` is "bar".'
        )
        assert expected_error_message in caplog.text

        # A second clash ("foo" in DATA_NODE and TASK) yields a second error.
        Config.configure_task(id="foo", function=print)
        with pytest.raises(SystemExit):
            Config._collector = IssueCollector()
            Config.check()
        assert len(Config._collector.errors) == 2
        expected_error_message = (
            "`foo` is used as the config_id of multiple configurations ['DATA_NODE', 'TASK']"
            ' Current value of property `config_id` is "foo".'
        )
        assert expected_error_message in caplog.text
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
import pytest

from src.taipy.core.config.job_config import JobConfig
from taipy.config.checker.issue_collector import IssueCollector
from taipy.config.config import Config


class TestJobConfigChecker:
    """In-memory data nodes are only allowed while the job mode is development."""

    def test_check_standalone_mode(self, caplog):
        def run_check():
            # Run a fresh config check with an empty issue collector.
            Config._collector = IssueCollector()
            Config.check()

        # A pristine configuration raises no errors.
        run_check()
        assert len(Config._collector.errors) == 0

        # An in-memory data node is fine while the job mode is development.
        Config.configure_data_node(id="foo", storage_type="in_memory")
        Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE, max_nb_of_workers=2)
        run_check()
        assert len(Config._collector.errors) == 0

        # Switching to standalone mode makes the in-memory node invalid,
        # whatever the worker count.
        Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=1)
        with pytest.raises(SystemExit):
            run_check()
        assert len(Config._collector.errors) == 1

        Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
        with pytest.raises(SystemExit):
            run_check()
        assert len(Config._collector.errors) == 1

        expected_error_message = (
            "DataNode `foo`: In-memory storage type can ONLY be used in development mode. Current"
            ' value of property `storage_type` is "in_memory".'
        )
        assert expected_error_message in caplog.text
|
from src.taipy.core.config.checkers._core_section_checker import _CoreSectionChecker
from src.taipy.core.config.core_section import CoreSection
from taipy.config import Config
from taipy.config.checker.issue_collector import IssueCollector


class TestCoreSectionChecker:
    """The repository_type value of the core section is validated with warnings."""

    # Register a fake repository type so the checker accepts it in these tests.
    _CoreSectionChecker._ACCEPTED_REPOSITORY_TYPES.update(["mock_repo_type"])

    @staticmethod
    def _check_with_repository(repository_type):
        """Configure the core section and run a fresh config check."""
        Config.configure_core(repository_type=repository_type)
        Config._collector = IssueCollector()
        Config.check()

    def test_check_valid_repository(self):
        # Every accepted repository type passes without warnings.
        for repo_type in ("mock_repo_type", "filesystem", "sql"):
            self._check_with_repository(repo_type)
            assert len(Config._collector.warnings) == 0

    def test_check_repository_type_value_wrong_str(self):
        self._check_with_repository("any")
        assert len(Config._collector.warnings) == 1
        warning = Config._collector.warnings[0]
        assert warning.field == CoreSection._REPOSITORY_TYPE_KEY
        assert warning.value == "any"

    def test_check_repository_type_value_wrong_type(self):
        self._check_with_repository(1)
        assert len(Config._collector.warnings) == 1
        warning = Config._collector.warnings[0]
        assert warning.field == CoreSection._REPOSITORY_TYPE_KEY
        assert warning.value == 1
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
from queue import SimpleQueue

from src.taipy.core import taipy as tp
from src.taipy.core.notification.core_event_consumer import CoreEventConsumerBase
from src.taipy.core.notification.event import Event, EventEntityType, EventOperation
from src.taipy.core.notification.notifier import Notifier
from taipy.config import Config, Frequency
from tests.core.utils import assert_true_after_time


class AllCoreEventConsumerProcessor(CoreEventConsumerBase):
    """Consumer that counts every event, overall and per entity type / operation."""

    def __init__(self, registration_id: str, queue: SimpleQueue):
        # Total number of processed events.
        self.event_collected = 0
        # Counters keyed by entity type and by operation.
        self.event_entity_type_collected: dict = {}
        self.event_operation_collected: dict = {}
        super().__init__(registration_id, queue)

    def process_event(self, event: Event):
        self.event_collected += 1
        self.event_entity_type_collected[event.entity_type] = (
            self.event_entity_type_collected.get(event.entity_type, 0) + 1
        )
        self.event_operation_collected[event.operation] = self.event_operation_collected.get(event.operation, 0) + 1


class ScenarioCoreEventConsumerProcessor(CoreEventConsumerBase):
    """Consumer meant for a SCENARIO-only registration; counts events per operation."""

    def __init__(self, registration_id: str, queue: SimpleQueue):
        self.scenario_event_collected = 0
        self.event_operation_collected: dict = {}
        super().__init__(registration_id, queue)

    def process_event(self, event: Event):
        self.scenario_event_collected += 1
        self.event_operation_collected[event.operation] = self.event_operation_collected.get(event.operation, 0) + 1


class TaskCreationCoreEventConsumerProcessor(CoreEventConsumerBase):
    """Consumer meant for a TASK/CREATION-only registration; counts every event it sees."""

    def __init__(self, registration_id: str, queue: SimpleQueue):
        self.task_event_collected = 0
        self.creation_event_operation_collected = 0
        super().__init__(registration_id, queue)

    def process_event(self, event: Event):
        self.task_event_collected += 1
        self.creation_event_operation_collected += 1


def test_core_event_consumer():
    """Each consumer only receives the events matching its registration topic."""
    # Three consumers with increasingly narrow registrations: everything,
    # scenario events only, and task creation events only.
    register_id_0, register_queue_0 = Notifier.register()
    all_evt_csumer_0 = AllCoreEventConsumerProcessor(register_id_0, register_queue_0)

    register_id_1, register_queue_1 = Notifier.register(entity_type=EventEntityType.SCENARIO)
    sc_evt_csumer_1 = ScenarioCoreEventConsumerProcessor(register_id_1, register_queue_1)

    register_id_2, register_queue_2 = Notifier.register(
        entity_type=EventEntityType.TASK, operation=EventOperation.CREATION
    )
    task_creation_evt_csumer_2 = TaskCreationCoreEventConsumerProcessor(register_id_2, register_queue_2)

    all_evt_csumer_0.start()
    sc_evt_csumer_1.start()
    task_creation_evt_csumer_2.start()

    dn_config = Config.configure_data_node("dn_config")
    task_config = Config.configure_task("task_config", print, [dn_config])
    scenario_config = Config.configure_scenario(
        "scenario_config", [task_config], frequency=Frequency.DAILY, sequences={"seq": [task_config]}
    )

    # Create a scenario trigger 5 creation events
    # (the asserts below imply one per entity type: cycle, scenario, sequence,
    # task, data node — TODO confirm).
    scenario = tp.create_scenario(scenario_config)
    assert_true_after_time(lambda: all_evt_csumer_0.event_collected == 5, time=10)
    assert_true_after_time(lambda: len(all_evt_csumer_0.event_entity_type_collected) == 5, time=10)
    assert_true_after_time(lambda: all_evt_csumer_0.event_operation_collected[EventOperation.CREATION] == 5, time=10)

    assert_true_after_time(lambda: sc_evt_csumer_1.scenario_event_collected == 1, time=10)
    assert_true_after_time(lambda: sc_evt_csumer_1.event_operation_collected[EventOperation.CREATION] == 1, time=10)
    assert_true_after_time(lambda: len(sc_evt_csumer_1.event_operation_collected) == 1, time=10)

    assert_true_after_time(lambda: task_creation_evt_csumer_2.task_event_collected == 1, time=10)
    assert_true_after_time(lambda: task_creation_evt_csumer_2.creation_event_operation_collected == 1, time=10)

    # Delete a scenario trigger 5 update events
    tp.delete(scenario.id)
    assert_true_after_time(lambda: all_evt_csumer_0.event_collected == 10, time=10)
    assert_true_after_time(lambda: len(all_evt_csumer_0.event_entity_type_collected) == 5, time=10)
    assert_true_after_time(lambda: all_evt_csumer_0.event_operation_collected[EventOperation.DELETION] == 5, time=10)

    assert_true_after_time(lambda: sc_evt_csumer_1.scenario_event_collected == 2, time=10)
    assert_true_after_time(lambda: sc_evt_csumer_1.event_operation_collected[EventOperation.DELETION] == 1, time=10)
    assert_true_after_time(lambda: len(sc_evt_csumer_1.event_operation_collected) == 2, time=10)

    # The task consumer only listens for CREATION, so deletion changes nothing.
    assert_true_after_time(lambda: task_creation_evt_csumer_2.task_event_collected == 1, time=10)
    assert_true_after_time(lambda: task_creation_evt_csumer_2.creation_event_operation_collected == 1, time=10)

    all_evt_csumer_0.stop()
    sc_evt_csumer_1.stop()
    task_creation_evt_csumer_2.stop()
|
from queue import SimpleQueue

from src.taipy.core.notification import EventEntityType, EventOperation
from src.taipy.core.notification._registration import _Registration
from src.taipy.core.notification._topic import _Topic


def test_create_registration():
    """A _Registration gets a prefixed id, an empty queue, and a topic built from its arguments."""

    def assert_common(registration):
        # Invariants shared by every registration, whatever its topic.
        assert isinstance(registration.registration_id, str)
        assert registration.registration_id.startswith(_Registration._ID_PREFIX)
        assert isinstance(registration.queue, SimpleQueue)
        assert registration.queue.qsize() == 0
        assert isinstance(registration.topic, _Topic)

    # No arguments: every topic field stays unset.
    registration_0 = _Registration()
    assert_common(registration_0)
    assert registration_0.topic.entity_type is None
    assert registration_0.topic.entity_id is None
    assert registration_0.topic.operation is None
    assert registration_0.topic.attribute_name is None

    # Entity type, id and operation are carried over to the topic.
    registration_1 = _Registration(
        entity_type=EventEntityType.SCENARIO, entity_id="SCENARIO_scenario_id", operation=EventOperation.CREATION
    )
    assert_common(registration_1)
    assert registration_1.topic.entity_type == EventEntityType.SCENARIO
    assert registration_1.topic.entity_id == "SCENARIO_scenario_id"
    assert registration_1.topic.operation == EventOperation.CREATION
    assert registration_1.topic.attribute_name is None

    # An attribute name may also be attached to the topic.
    registration_2 = _Registration(
        entity_type=EventEntityType.SEQUENCE,
        entity_id="SEQUENCE_scenario_id",
        operation=EventOperation.UPDATE,
        attribute_name="tasks",
    )
    assert_common(registration_2)
    assert registration_2.topic.entity_type == EventEntityType.SEQUENCE
    assert registration_2.topic.entity_id == "SEQUENCE_scenario_id"
    assert registration_2.topic.operation == EventOperation.UPDATE
    assert registration_2.topic.attribute_name == "tasks"
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
import os

import pytest

from src.taipy.core.cycle._cycle_fs_repository import _CycleFSRepository
from src.taipy.core.cycle._cycle_sql_repository import _CycleSQLRepository
from src.taipy.core.cycle.cycle import Cycle, CycleId
from src.taipy.core.exceptions import ModelNotFound


class TestCycleRepositories:
    """Exercise the filesystem and SQL cycle repositories through their shared interface.

    The `cycle` and `init_sql_repo` arguments are pytest fixtures supplied by the
    test suite's conftest.
    """

    @pytest.mark.parametrize("repo", [_CycleFSRepository, _CycleSQLRepository])
    def test_save_and_load(self, cycle, repo, init_sql_repo):
        repository = repo()
        repository._save(cycle)

        obj = repository._load(cycle.id)
        assert isinstance(obj, Cycle)

    @pytest.mark.parametrize("repo", [_CycleFSRepository, _CycleSQLRepository])
    def test_exists(self, cycle, repo, init_sql_repo):
        repository = repo()
        repository._save(cycle)

        assert repository._exists(cycle.id)
        assert not repository._exists("not-existed-cycle")

    @pytest.mark.parametrize("repo", [_CycleFSRepository, _CycleSQLRepository])
    def test_load_all(self, cycle, repo, init_sql_repo):
        repository = repo()
        # Saving under 10 distinct ids yields 10 stored entities.
        for i in range(10):
            cycle.id = CycleId(f"cycle-{i}")
            repository._save(cycle)
        data_nodes = repository._load_all()

        assert len(data_nodes) == 10

    @pytest.mark.parametrize("repo", [_CycleFSRepository, _CycleSQLRepository])
    def test_load_all_with_filters(self, cycle, repo, init_sql_repo):
        repository = repo()
        for i in range(10):
            cycle.id = CycleId(f"cycle-{i}")
            cycle._name = f"cycle-{i}"
            repository._save(cycle)
        objs = repository._load_all(filters=[{"id": "cycle-2"}])

        assert len(objs) == 1

    # NOTE(review): unlike the other tests, this one only covers the SQL
    # repository — confirm whether _CycleFSRepository should be parametrized
    # here as well.
    @pytest.mark.parametrize("repo", [_CycleSQLRepository])
    def test_delete(self, cycle, repo, init_sql_repo):
        repository = repo()
        repository._save(cycle)

        repository._delete(cycle.id)

        with pytest.raises(ModelNotFound):
            repository._load(cycle.id)

    @pytest.mark.parametrize("repo", [_CycleFSRepository, _CycleSQLRepository])
    def test_delete_all(self, cycle, repo, init_sql_repo):
        repository = repo()

        for i in range(10):
            cycle.id = CycleId(f"cycle-{i}")
            repository._save(cycle)

        assert len(repository._load_all()) == 10

        repository._delete_all()
        assert len(repository._load_all()) == 0

    @pytest.mark.parametrize("repo", [_CycleFSRepository, _CycleSQLRepository])
    def test_delete_many(self, cycle, repo, init_sql_repo):
        repository = repo()
        for i in range(10):
            cycle.id = CycleId(f"cycle-{i}")
            repository._save(cycle)

        objs = repository._load_all()
        assert len(objs) == 10

        # Deleting 3 of the 10 saved cycles leaves 7.
        ids = [x.id for x in objs[:3]]
        repository._delete_many(ids)
        assert len(repository._load_all()) == 7

    @pytest.mark.parametrize("repo", [_CycleFSRepository, _CycleSQLRepository])
    def test_search(self, cycle, repo, init_sql_repo):
        repository = repo()
        for i in range(10):
            cycle.id = CycleId(f"cycle-{i}")
            cycle.name = f"cycle-{i}"
            repository._save(cycle)

        assert len(repository._load_all()) == 10

        objs = repository._search("name", "cycle-2")
        assert len(objs) == 1
        assert isinstance(objs[0], Cycle)

    @pytest.mark.parametrize("repo", [_CycleFSRepository, _CycleSQLRepository])
    def test_export(self, tmpdir, cycle, repo, init_sql_repo):
        repository = repo()
        repository._save(cycle)

        repository._export(cycle.id, tmpdir.strpath)
        # The FS repository exports into its own directory; the SQL repository
        # exports under "<tmpdir>/cycle".
        dir_path = repository.dir_path if repo == _CycleFSRepository else os.path.join(tmpdir.strpath, "cycle")

        assert os.path.exists(os.path.join(dir_path, f"{cycle.id}.json"))
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
import dataclasses
import pathlib
from dataclasses import dataclass
from typing import Any, Dict, Iterable, List, Optional, Union

from src.taipy.core._manager._manager import _Manager
from src.taipy.core._repository._abstract_converter import _AbstractConverter
from src.taipy.core._repository._abstract_repository import _AbstractRepository
from src.taipy.core._repository._filesystem_repository import _FileSystemRepository
from src.taipy.core._version._version_manager import _VersionManager
from taipy.config.config import Config


@dataclass
class MockModel:
    """Serializable counterpart of MockEntity."""

    id: str
    name: str
    version: str

    def to_dict(self):
        return dataclasses.asdict(self)

    @staticmethod
    def from_dict(data: Dict[str, Any]):
        return MockModel(id=data["id"], name=data["name"], version=data["version"])


class MockEntity:
    """Minimal domain entity used to exercise the generic manager/repository stack.

    Note: this class was previously decorated with ``@dataclass`` while declaring
    no fields, so the generated ``__eq__`` compared empty field tuples — any two
    instances compared equal and the equality assertions in TestManager were
    vacuous. The decorator is removed and a meaningful ``__eq__`` is defined.
    """

    def __init__(self, id: str, name: str, version: Optional[str] = None) -> None:
        self.id = id
        self.name = name
        if version:
            self._version = version
        else:
            # Default to the currently active version, like real core entities.
            self._version = _VersionManager._get_latest_version()

    def __eq__(self, other):
        return (
            isinstance(other, MockEntity)
            and self.id == other.id
            and self.name == other.name
            and self._version == other._version
        )


class MockConverter(_AbstractConverter):
    """Maps MockEntity to/from MockModel for the repository layer."""

    @classmethod
    def _entity_to_model(cls, entity: MockEntity) -> MockModel:
        return MockModel(id=entity.id, name=entity.name, version=entity._version)

    @classmethod
    def _model_to_entity(cls, model: MockModel) -> MockEntity:
        return MockEntity(id=model.id, name=model.name, version=model.version)


class MockRepository(_AbstractRepository):  # type: ignore
    """Thin delegation wrapper around a _FileSystemRepository."""

    def __init__(self, **kwargs):
        self.repo = _FileSystemRepository(**kwargs, converter=MockConverter)

    def _to_model(self, obj: MockEntity):
        return MockModel(obj.id, obj.name, obj._version)

    def _from_model(self, model: MockModel):
        return MockEntity(model.id, model.name, model.version)

    def _load(self, entity_id: str) -> MockEntity:
        return self.repo._load(entity_id)

    def _load_all(self, filters: Optional[List[Dict]] = None) -> List[MockEntity]:
        return self.repo._load_all(filters)

    def _save(self, entity: MockEntity):
        return self.repo._save(entity)

    def _exists(self, entity_id: str) -> bool:
        return self.repo._exists(entity_id)

    def _delete(self, entity_id: str):
        return self.repo._delete(entity_id)

    def _delete_all(self):
        return self.repo._delete_all()

    def _delete_many(self, ids: Iterable[str]):
        return self.repo._delete_many(ids)

    def _delete_by(self, attribute: str, value: str):
        return self.repo._delete_by(attribute, value)

    def _search(self, attribute: str, value: Any, filters: Optional[List[Dict]] = None) -> List[MockEntity]:
        return self.repo._search(attribute, value, filters)

    def _export(self, entity_id: str, folder_path: Union[str, pathlib.Path]):
        # Fixed: the delegate was called as ``self.repo._export(self, entity_id,
        # folder_path)``, passing this wrapper as an extra positional argument.
        return self.repo._export(entity_id, folder_path)

    @property
    def _storage_folder(self) -> pathlib.Path:
        return pathlib.Path(Config.core.storage_folder)  # type: ignore


class MockManager(_Manager[MockEntity]):
    _ENTITY_NAME = MockEntity.__name__
    _repository = MockRepository(model_type=MockModel, dir_name="foo")


class TestManager:
    def test_save_and_fetch_model(self):
        m = MockEntity("uuid", "foo")
        MockManager._set(m)

        fetched_model = MockManager._get(m.id)
        assert m == fetched_model

    def test_exists(self):
        m = MockEntity("uuid", "foo")
        MockManager._set(m)
        assert MockManager._exists(m.id)

    def test_get(self):
        m = MockEntity("uuid", "foo")
        MockManager._set(m)
        assert MockManager._get(m.id) == m

    def test_get_all(self):
        MockManager._delete_all()
        for i in range(5):
            MockManager._set(MockEntity(f"uuid-{i}", f"Foo{i}"))
        assert len(MockManager._get_all()) == 5

    def test_delete(self):
        m = MockEntity("uuid", "foo")
        MockManager._set(m)
        MockManager._delete(m.id)
        assert MockManager._get(m.id) is None

    def test_delete_all(self):
        for i in range(5):
            MockManager._set(MockEntity(f"uuid-{i}", f"Foo{i}"))
        MockManager._delete_all()
        assert MockManager._get_all() == []

    def test_delete_many(self):
        for i in range(5):
            MockManager._set(MockEntity(f"uuid-{i}", f"Foo{i}"))
        MockManager._delete_many(["uuid-0", "uuid-1"])
        assert len(MockManager._get_all()) == 3

    def test_is_editable(self):
        m = MockEntity("uuid", "Foo")
        MockManager._set(m)
        assert MockManager._is_editable(m)

    def test_is_readable(self):
        m = MockEntity("uuid", "Foo")
        MockManager._set(m)
        assert MockManager._is_readable(m)
|
class NotifyMock:
    """A callable test double for job-status notification callbacks.

    Shared by sequence-level and scenario-level submission tests; "entity"
    stands for either a scenario or a sequence. Each invocation checks that
    the notified entity is the expected one and that the job status follows
    the pending -> running -> finished progression.
    """

    # Expected job-status predicate for each of the first three calls.
    _EXPECTED_STATUS = {0: "is_pending", 1: "is_running", 2: "is_finished"}

    def __init__(self, entity):
        self.scenario = entity
        self.nb_called = 0
        self.__name__ = "NotifyMock"

    def __call__(self, entity, job):
        assert entity == self.scenario
        status_check = self._EXPECTED_STATUS.get(self.nb_called)
        if status_check is not None:
            assert getattr(job, status_check)()
        self.nb_called += 1

    def assert_called_3_times(self):
        assert self.nb_called == 3

    def assert_not_called(self):
        assert self.nb_called == 0

    def reset(self):
        """Forget all previous calls."""
        self.nb_called = 0
|
def assert_true_after_time(assertion, msg=None, time=120):
    """Poll *assertion* until it returns truthy or *time* seconds elapse.

    Exceptions raised by *assertion* during polling are printed and retried;
    once the deadline passes, ``assert assertion()`` runs one final time so a
    persistent failure surfaces as a normal AssertionError.

    :param assertion: zero-argument callable returning a truthy value on success.
    :param msg: optional message printed before the final (failing) assertion.
    :param time: timeout in seconds. (Name kept for backward compatibility,
        which is why the ``time`` module functions are imported locally.)
    """
    from time import monotonic, sleep

    # monotonic() is immune to wall-clock adjustments, unlike datetime.now().
    deadline = monotonic() + time
    while True:
        # Check before sleeping so an already-true assertion returns immediately.
        try:
            if assertion():
                return
        except BaseException as e:
            # Best-effort polling: any failure is reported and retried until
            # the deadline; the final assert below gives the real diagnostic.
            print("Raise : ", e)
        if monotonic() >= deadline:
            break
        sleep(1)  # Limit CPU usage
    if msg:
        print(msg)
    assert assertion()
|
import os
import tempfile


class NamedTemporaryFile:
    """A named temporary file that survives closing and is removed on GC.

    Unlike ``tempfile.NamedTemporaryFile``, the file is created with
    ``delete=False`` so it can be re-opened by name (e.g. handed to code under
    test) and is only unlinked when this wrapper is garbage-collected.
    """

    def __init__(self, content=None):
        # Create-and-close immediately; only the path is kept.
        with tempfile.NamedTemporaryFile("w", delete=False) as fd:
            if content:
                fd.write(content)
            self.filename = fd.name

    def read(self):
        """Return the current content of the file."""
        with open(self.filename, "r") as fp:
            return fp.read()

    def __del__(self):
        # Best-effort cleanup. Fixed: __del__ previously raised if the file had
        # already been removed (FileNotFoundError) or if __init__ failed before
        # ``filename`` was assigned (AttributeError) — __del__ must never raise.
        try:
            os.unlink(self.filename)
        except (FileNotFoundError, AttributeError):
            pass
|
from unittest import mock

import pytest

from src.taipy.core import taipy
from src.taipy.core._entity._labeled import _Labeled
from taipy.config import Config, Frequency, Scope


class MockOwner:
    # Stand-in for an owning entity: only the get_label() contract is needed.
    label = "owner_label"

    def get_label(self):
        return self.label


def test_get_label():
    """Exercise _Labeled label resolution, adding one attribute at a time."""
    labeled_entity = _Labeled()

    # The public accessors are abstract on the bare base class...
    with pytest.raises(NotImplementedError):
        labeled_entity.get_label()
    with pytest.raises(NotImplementedError):
        labeled_entity.get_simple_label()

    # ...and the protected helpers need at least an ``id`` attribute.
    with pytest.raises(AttributeError):
        labeled_entity._get_label()
    with pytest.raises(AttributeError):
        labeled_entity._get_simple_label()

    # Lowest-priority fallback: the entity id.
    labeled_entity.id = "id"
    assert labeled_entity._get_label() == "id"
    assert labeled_entity._get_simple_label() == "id"

    # config_id takes precedence over id.
    labeled_entity.config_id = "the config id"
    assert labeled_entity._get_label() == "the config id"
    assert labeled_entity._get_simple_label() == "the config id"

    # The "name" property takes precedence over config_id.
    labeled_entity._properties = {"name": "a name"}
    assert labeled_entity._get_label() == "a name"
    assert labeled_entity._get_simple_label() == "a name"

    # With an owner, the full label is prefixed by the owner's label while the
    # simple label stays unchanged.
    labeled_entity.owner_id = "owner_id"
    with mock.patch("src.taipy.core.get") as get_mck:
        get_mck.return_value = MockOwner()
        assert labeled_entity._get_label() == "owner_label > a name"
        assert labeled_entity._get_simple_label() == "a name"

    # An explicit "label" property wins over everything, including the owner
    # prefix (no "owner_label > " below, and the owner is not even fetched).
    labeled_entity._properties["label"] = "a wonderful label"
    assert labeled_entity._get_label() == "a wonderful label"
    assert labeled_entity._get_simple_label() == "a wonderful label"


def mult(n1, n2):
    # Trivial task function; only used to build task configurations.
    return n1 * n2


def test_get_label_complex_case():
    """Check labels across a full scenario graph mixing GLOBAL/CYCLE/SCENARIO scopes."""
    dn1_cfg = Config.configure_data_node("dn1", scope=Scope.GLOBAL)
    dn2_cfg = Config.configure_data_node("dn2", scope=Scope.CYCLE)
    dn3_cfg = Config.configure_data_node("dn3", scope=Scope.CYCLE)
    dn4_cfg = Config.configure_data_node("dn4", scope=Scope.SCENARIO)
    dn5_cfg = Config.configure_data_node("dn5", scope=Scope.SCENARIO)
    tA_cfg = Config.configure_task("t_A_C", mult, [dn1_cfg, dn2_cfg], dn3_cfg)
    tB_cfg = Config.configure_task("t_B_S", mult, [dn3_cfg, dn4_cfg], dn5_cfg)
    scenario_cfg = Config.configure_scenario("scenario_cfg", [tA_cfg, tB_cfg], [], Frequency.DAILY)
    scenario_cfg.add_sequences(
        {
            "sequence_C": [tA_cfg],
            "sequence_S": [tA_cfg, tB_cfg],
        }
    )

    scenario = taipy.create_scenario(scenario_cfg, name="My Name")
    cycle = scenario.cycle
    cycle.name = "Today"
    sequence_C = scenario.sequence_C
    sequence_S = scenario.sequence_S
    tA = scenario.t_A_C
    tB = scenario.t_B_S
    dn1 = scenario.dn1
    dn2 = scenario.dn2
    dn3 = scenario.dn3
    dn4 = scenario.dn4
    dn5 = scenario.dn5

    assert cycle.get_label() == scenario.cycle.name
    assert cycle.get_simple_label() == scenario.cycle.name
    assert scenario.get_label() == "Today > My Name"
    assert scenario.get_simple_label() == "My Name"
    assert sequence_C.get_label() == "Today > My Name > sequence_C"
    assert sequence_C.get_simple_label() == "sequence_C"
    assert sequence_S.get_label() == "Today > My Name > sequence_S"
    assert sequence_S.get_simple_label() == "sequence_S"
    # Tasks/data nodes are prefixed by their owner chain, which follows scope:
    # GLOBAL -> no prefix, CYCLE -> cycle label, SCENARIO -> cycle > scenario.
    assert tA.get_label() == "Today > t_A_C"
    assert tA.get_simple_label() == "t_A_C"
    assert tB.get_label() == "Today > My Name > t_B_S"
    assert tB.get_simple_label() == "t_B_S"
    assert dn1.get_label() == "dn1"
    assert dn1.get_simple_label() == "dn1"
    assert dn2.get_label() == "Today > dn2"
    assert dn2.get_simple_label() == "dn2"
    assert dn3.get_label() == "Today > dn3"
    assert dn3.get_simple_label() == "dn3"
    assert dn4.get_label() == "Today > My Name > dn4"
    assert dn4.get_simple_label() == "dn4"
    assert dn5.get_label() == "Today > My Name > dn5"
    assert dn5.get_simple_label() == "dn5"
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
from src.taipy.core._entity._entity_ids import _EntityIds


class TestEntityIds:
    def test_add_two_entity_ids(self):
        """In-place merge (+=) unions every id set without rebinding the target."""
        first = _EntityIds()
        second = _EntityIds()
        first_address = id(first)

        # Populate every id category with an overlapping pair of ids:
        # first gets {_1, _2}, second gets {_2, _3}.
        categories = ("data_node", "task", "job", "sequence", "scenario", "cycle")
        for category in categories:
            getattr(first, f"{category}_ids").update([f"{category}_id_1", f"{category}_id_2"])
            getattr(second, f"{category}_ids").update([f"{category}_id_2", f"{category}_id_3"])

        first += second

        # += operator should not change the address of the left-hand operand.
        assert id(first) == first_address

        # Each category is now the union {_1, _2, _3}.
        for category in categories:
            expected = {f"{category}_id_1", f"{category}_id_2", f"{category}_id_3"}
            assert getattr(first, f"{category}_ids") == expected
|
import pytest

from src.taipy.core.common._utils import _retry_read_entity
from taipy.config import Config


def test_retry_decorator(mocker):
    """The decorated function is retried ``read_entity_retry`` extra times."""
    func = mocker.Mock(side_effect=Exception())

    @_retry_read_entity((Exception,))
    def decorated_func():
        func()

    with pytest.raises(Exception):
        decorated_func()
    # Called once in the normal flow and no retry.
    # The Config.core.read_entity_retry is set to 0 at conftest.py
    assert Config.core.read_entity_retry == 0
    assert func.call_count == 1

    func.reset_mock()

    Config.core.read_entity_retry = 3
    with pytest.raises(Exception):
        decorated_func()
    # Called once in the normal flow and 3 more times on the retry flow.
    assert func.call_count == 4


def test_retry_decorator_exception_not_in_list(mocker):
    """Exceptions outside the registered list do not trigger retries."""
    func = mocker.Mock(side_effect=KeyError())
    Config.core.read_entity_retry = 3

    @_retry_read_entity((Exception,))
    def decorated_func():
        func()

    with pytest.raises(KeyError):
        decorated_func()

    # Called only once because KeyError is not in the exceptions list.
    # Fixed: this was ``assert func.called == 1`` — ``Mock.called`` is a
    # boolean and ``True == 1``, so the assertion passed regardless of the
    # actual call count. If the stricter check below fails, the decorator
    # retries KeyError and the expectation must be revisited.
    assert func.call_count == 1
|
from src.taipy.core.common.warn_if_inputs_not_ready import _warn_if_inputs_not_ready
from src.taipy.core.data._data_manager_factory import _DataManagerFactory
from taipy.config import Config


def _never_written_message(input_dn):
    """The exact warning text emitted for a data node that was never written."""
    return (
        f"{input_dn.id} cannot be read because it has never been written. Hint: The data node may refer to a wrong "
        f"path : {input_dn.path} "
    )


def test_warn_inputs_all_not_ready(caplog):
    one = Config.configure_data_node("one")
    two = Config.configure_data_node("two")
    three = Config.configure_data_node("three")
    data_nodes = _DataManagerFactory._build_manager()._bulk_get_or_create({one, two, three}).values()

    _warn_if_inputs_not_ready(data_nodes)

    stdout = caplog.text
    # Every data node is unwritten, so each one must be warned about.
    assert all(_never_written_message(input_dn) in stdout for input_dn in data_nodes)


def test_warn_inputs_all_ready(caplog):
    one = Config.configure_data_node("one", default_data=1)
    two = Config.configure_data_node("two", default_data=2)
    three = Config.configure_data_node("three", default_data=3)
    data_nodes = _DataManagerFactory._build_manager()._bulk_get_or_create({one, two, three}).values()

    _warn_if_inputs_not_ready(data_nodes)

    stdout = caplog.text
    # All data nodes carry default data, so no warning may be emitted.
    assert all(_never_written_message(input_dn) not in stdout for input_dn in data_nodes)


def test_warn_inputs_one_ready(caplog):
    one = Config.configure_data_node("one", default_data=1)
    two = Config.configure_data_node("two")
    three = Config.configure_data_node("three")
    data_nodes = _DataManagerFactory._build_manager()._bulk_get_or_create({one, two, three})

    _warn_if_inputs_not_ready(data_nodes.values())

    stdout = caplog.text
    # Only the unwritten nodes ("two" and "three") are warned about.
    assert all(_never_written_message(input_dn) in stdout for input_dn in [data_nodes[two], data_nodes[three]])
    assert all(_never_written_message(input_dn) not in stdout for input_dn in [data_nodes[one]])


def test_submit_task_with_input_dn_wrong_file_path(caplog):
    csv_dn_cfg = Config.configure_csv_data_node("wrong_csv_file_path", default_path="wrong_path.csv")
    excel_dn_cfg = Config.configure_excel_data_node("wrong_excel_file_path", default_path="wrong_path.xlsx")
    json_dn_cfg = Config.configure_json_data_node("wrong_json_file_path", default_path="wrong_path.json")
    pickle_dn_cfg = Config.configure_pickle_data_node("wrong_pickle_file_path", default_path="wrong_path.pickle")
    parquet_dn_cfg = Config.configure_parquet_data_node("wrong_parquet_file_path", default_path="wrong_path.parquet")
    input_dn_cfgs = [csv_dn_cfg, excel_dn_cfg, json_dn_cfg, pickle_dn_cfg, parquet_dn_cfg]

    dn_manager = _DataManagerFactory._build_manager()
    dns = [dn_manager._bulk_get_or_create([input_dn_cfg])[input_dn_cfg] for input_dn_cfg in input_dn_cfgs]

    _warn_if_inputs_not_ready(dns)

    stdout = caplog.text
    # File-backed nodes pointing at a missing file are reported as never written.
    assert all(_never_written_message(input_dn) in stdout for input_dn in dns)
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
import os

import pytest

from src.taipy.core.exceptions import ModelNotFound
from src.taipy.core.scenario._scenario_fs_repository import _ScenarioFSRepository
from src.taipy.core.scenario._scenario_sql_repository import _ScenarioSQLRepository
from src.taipy.core.scenario.scenario import Scenario, ScenarioId


class TestScenarioFSRepository:
    # CRUD tests for the scenario repositories.
    # NOTE(review): despite the FS-specific class name, every test here is
    # parametrized over both the filesystem and the SQL repositories.

    @pytest.mark.parametrize("repo", [_ScenarioFSRepository, _ScenarioSQLRepository])
    def test_save_and_load(self, scenario, repo, init_sql_repo):
        # A saved scenario can be loaded back as a Scenario instance.
        repository = repo()
        repository._save(scenario)

        obj = repository._load(scenario.id)
        assert isinstance(obj, Scenario)

    @pytest.mark.parametrize("repo", [_ScenarioFSRepository, _ScenarioSQLRepository])
    def test_exists(self, scenario, repo, init_sql_repo):
        repository = repo()
        repository._save(scenario)

        assert repository._exists(scenario.id)
        assert not repository._exists("not-existed-scenario")

    @pytest.mark.parametrize("repo", [_ScenarioFSRepository, _ScenarioSQLRepository])
    def test_load_all(self, scenario, repo, init_sql_repo):
        # Saving the same fixture under ten distinct ids yields ten entities.
        repository = repo()
        for i in range(10):
            scenario.id = ScenarioId(f"scenario-{i}")
            repository._save(scenario)
        data_nodes = repository._load_all()

        assert len(data_nodes) == 10

    @pytest.mark.parametrize("repo", [_ScenarioFSRepository, _ScenarioSQLRepository])
    def test_load_all_with_filters(self, scenario, repo, init_sql_repo):
        repository = repo()
        for i in range(10):
            scenario.id = ScenarioId(f"scenario-{i}")
            repository._save(scenario)
        objs = repository._load_all(filters=[{"id": "scenario-2"}])

        assert len(objs) == 1

    @pytest.mark.parametrize("repo", [_ScenarioFSRepository, _ScenarioSQLRepository])
    def test_delete(self, scenario, repo, init_sql_repo):
        repository = repo()
        repository._save(scenario)

        repository._delete(scenario.id)

        # Loading a deleted entity raises rather than returning None.
        with pytest.raises(ModelNotFound):
            repository._load(scenario.id)

    @pytest.mark.parametrize("repo", [_ScenarioFSRepository, _ScenarioSQLRepository])
    def test_delete_all(self, scenario, repo, init_sql_repo):
        repository = repo()
        for i in range(10):
            scenario.id = ScenarioId(f"scenario-{i}")
            repository._save(scenario)

        assert len(repository._load_all()) == 10

        repository._delete_all()
        assert len(repository._load_all()) == 0

    @pytest.mark.parametrize("repo", [_ScenarioFSRepository, _ScenarioSQLRepository])
    def test_delete_many(self, scenario, repo, init_sql_repo):
        repository = repo()
        for i in range(10):
            scenario.id = ScenarioId(f"scenario-{i}")
            repository._save(scenario)
        objs = repository._load_all()

        assert len(objs) == 10

        ids = [x.id for x in objs[:3]]
        repository._delete_many(ids)
        assert len(repository._load_all()) == 7

    @pytest.mark.parametrize("repo", [_ScenarioFSRepository, _ScenarioSQLRepository])
    def test_delete_by(self, scenario, repo, init_sql_repo):
        repository = repo()
        # Versions produced by (i+1) // 5: four entities get "0.0", five get
        # "1.0" and one gets "2.0"; deleting "1.0" therefore leaves five.
        for i in range(10):
            scenario.id = ScenarioId(f"scenario-{i}")
            scenario._version = f"{(i+1) // 5}.0"
            repository._save(scenario)
        objs = repository._load_all()

        assert len(objs) == 10

        repository._delete_by("version", "1.0")
        assert len(repository._load_all()) == 5

    @pytest.mark.parametrize("repo", [_ScenarioFSRepository, _ScenarioSQLRepository])
    def test_search(self, scenario, repo, init_sql_repo):
        repository = repo()
        for i in range(10):
            scenario.id = ScenarioId(f"scenario-{i}")
            repository._save(scenario)

        assert len(repository._load_all()) == 10

        objs = repository._search("id", "scenario-2")
        assert len(objs) == 1
        assert isinstance(objs[0], Scenario)

        # NOTE(review): this filtered search expects a hit, so the scenario
        # fixture presumably carries version "random_version_number" —
        # verify against conftest.
        objs = repository._search("id", "scenario-2", filters=[{"version": "random_version_number"}])
        assert len(objs) == 1
        assert isinstance(objs[0], Scenario)

        assert repository._search("id", "scenario-2", filters=[{"version": "non_existed_version"}]) == []

    @pytest.mark.parametrize("repo", [_ScenarioFSRepository, _ScenarioSQLRepository])
    def test_export(self, tmpdir, scenario, repo, init_sql_repo):
        repository = repo()
        repository._save(scenario)

        repository._export(scenario.id, tmpdir.strpath)
        # The FS repository exports into its own dir_path; the SQL repository
        # exports into a "scenario" sub-folder of the target directory.
        dir_path = repository.dir_path if repo == _ScenarioFSRepository else os.path.join(tmpdir.strpath, "scenario")

        assert os.path.exists(os.path.join(dir_path, f"{scenario.id}.json"))
|
from src.taipy.core._version._version import _Version
from taipy.config.config import Config


def test_create_version():
    """A freshly built _Version keeps the given id and a non-empty config."""
    version_entity = _Version("foo", config=Config.configure_data_node("dn"))

    assert version_entity.id == "foo"
    assert version_entity.config is not None
|
import os

import pytest

from src.taipy.core._version._version import _Version
from src.taipy.core._version._version_fs_repository import _VersionFSRepository
from src.taipy.core._version._version_sql_repository import _VersionSQLRepository
from src.taipy.core.exceptions import ModelNotFound


class TestVersionFSRepository:
    # CRUD tests for the version repositories.
    # NOTE(review): despite the FS-specific class name, every test here is
    # parametrized over both the filesystem and the SQL repositories.

    @pytest.mark.parametrize("repo", [_VersionFSRepository, _VersionSQLRepository])
    def test_save_and_load(self, _version, repo, init_sql_repo):
        # A saved version can be loaded back as a _Version instance.
        repository = repo()
        repository._save(_version)

        obj = repository._load(_version.id)
        assert isinstance(obj, _Version)

    @pytest.mark.parametrize("repo", [_VersionFSRepository, _VersionSQLRepository])
    def test_exists(self, _version, repo, init_sql_repo):
        repository = repo()
        repository._save(_version)

        assert repository._exists(_version.id)
        assert not repository._exists("not-existed-version")

    @pytest.mark.parametrize("repo", [_VersionFSRepository, _VersionSQLRepository])
    def test_load_all(self, _version, repo, init_sql_repo):
        # Saving the same fixture under ten distinct ids yields ten entities.
        repository = repo()
        for i in range(10):
            _version.id = f"_version_{i}"
            repository._save(_version)
        data_nodes = repository._load_all()

        assert len(data_nodes) == 10

    @pytest.mark.parametrize("repo", [_VersionFSRepository, _VersionSQLRepository])
    def test_load_all_with_filters(self, _version, repo, init_sql_repo):
        repository = repo()
        for i in range(10):
            _version.id = f"_version_{i}"
            _version.name = f"_version_{i}"
            repository._save(_version)
        objs = repository._load_all(filters=[{"id": "_version_2"}])

        assert len(objs) == 1

    @pytest.mark.parametrize("repo", [_VersionFSRepository, _VersionSQLRepository])
    def test_delete(self, _version, repo, init_sql_repo):
        repository = repo()
        repository._save(_version)

        repository._delete(_version.id)

        # Loading a deleted entity raises rather than returning None.
        with pytest.raises(ModelNotFound):
            repository._load(_version.id)

    @pytest.mark.parametrize("repo", [_VersionFSRepository, _VersionSQLRepository])
    def test_delete_all(self, _version, repo, init_sql_repo):
        repository = repo()
        for i in range(10):
            _version.id = f"_version_{i}"
            repository._save(_version)

        assert len(repository._load_all()) == 10

        repository._delete_all()
        assert len(repository._load_all()) == 0

    @pytest.mark.parametrize("repo", [_VersionFSRepository, _VersionSQLRepository])
    def test_delete_many(self, _version, repo, init_sql_repo):
        repository = repo()
        for i in range(10):
            _version.id = f"_version_{i}"
            repository._save(_version)
        objs = repository._load_all()

        assert len(objs) == 10

        ids = [x.id for x in objs[:3]]
        repository._delete_many(ids)
        assert len(repository._load_all()) == 7

    @pytest.mark.parametrize("repo", [_VersionFSRepository, _VersionSQLRepository])
    def test_search(self, _version, repo, init_sql_repo):
        repository = repo()
        for i in range(10):
            _version.id = f"_version_{i}"
            _version.name = f"_version_{i}"
            repository._save(_version)

        assert len(repository._load_all()) == 10

        objs = repository._search("id", "_version_2")
        assert len(objs) == 1
        assert isinstance(objs[0], _Version)

    @pytest.mark.parametrize("repo", [_VersionFSRepository, _VersionSQLRepository])
    def test_export(self, tmpdir, _version, repo, init_sql_repo):
        repository = repo()
        repository._save(_version)

        repository._export(_version.id, tmpdir.strpath)
        # The FS repository exports into its own dir_path; the SQL repository
        # exports into a "version" sub-folder of the target directory.
        dir_path = repository.dir_path if repo == _VersionFSRepository else os.path.join(tmpdir.strpath, "version")

        assert os.path.exists(os.path.join(dir_path, f"{_version.id}.json"))
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
import pytest

from src.taipy.core._version._version import _Version
from src.taipy.core._version._version_manager import _VersionManager
from taipy.config.config import Config


def test_save_and_get_version_entity(tmpdir):
    """A version created through the manager is retrievable and stored once."""
    # Point the repository at a temporary directory so the test is isolated.
    _VersionManager._repository.base_path = tmpdir
    assert len(_VersionManager._get_all()) == 0

    reference = _Version(id="foo", config=Config._applied_config)
    _VersionManager._get_or_create(id="foo", force=False)

    fetched = _VersionManager._get(reference.id)
    assert fetched.id == reference.id
    # Configurations are compared through their serialized representation.
    assert Config._serializer._str(fetched.config) == Config._serializer._str(reference.config)

    assert len(_VersionManager._get_all()) == 1
    assert _VersionManager._get(reference.id) == reference
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
from unittest import mock

import pytest

from src.taipy.core._orchestrator._dispatcher import _DevelopmentJobDispatcher, _JobDispatcher, _StandaloneJobDispatcher
from src.taipy.core._orchestrator._orchestrator import _Orchestrator
from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory
from src.taipy.core.config.job_config import JobConfig
from src.taipy.core.exceptions.exceptions import OrchestratorNotBuilt
from taipy.config import Config


def test_build_orchestrator():
    # Start from a clean factory state.
    _OrchestratorFactory._orchestrator = None
    _OrchestratorFactory._dispatcher = None

    assert _OrchestratorFactory._orchestrator is None
    assert _OrchestratorFactory._dispatcher is None

    orchestrator = _OrchestratorFactory._build_orchestrator()
    assert orchestrator == _Orchestrator
    assert _OrchestratorFactory._orchestrator == _Orchestrator

    dispatcher = _OrchestratorFactory._build_dispatcher()
    assert isinstance(dispatcher, _JobDispatcher)
    assert isinstance(_OrchestratorFactory._dispatcher, _JobDispatcher)

    _OrchestratorFactory._orchestrator = None
    assert _OrchestratorFactory._orchestrator is None
    assert _OrchestratorFactory._dispatcher is not None

    # Rebuilding the orchestrator while a dispatcher already exists must
    # re-initialize the orchestrator but must NOT rebuild the dispatcher.
    with mock.patch(
        "src.taipy.core._orchestrator._orchestrator_factory._OrchestratorFactory._build_dispatcher"
    ) as build_dispatcher, mock.patch(
        "src.taipy.core._orchestrator._orchestrator._Orchestrator.initialize"
    ) as initialize:
        orchestrator = _OrchestratorFactory._build_orchestrator()
        assert orchestrator == _Orchestrator
        assert _OrchestratorFactory._orchestrator == _Orchestrator
        build_dispatcher.assert_not_called()
        initialize.assert_called_once()


def test_build_development_dispatcher():
    Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE)
    _OrchestratorFactory._orchestrator = None
    _OrchestratorFactory._dispatcher = None

    assert _OrchestratorFactory._orchestrator is None
    assert _OrchestratorFactory._dispatcher is None

    # A dispatcher cannot be built before the orchestrator exists.
    with pytest.raises(OrchestratorNotBuilt):
        _OrchestratorFactory._build_dispatcher()

    _OrchestratorFactory._build_orchestrator()
    assert _OrchestratorFactory._orchestrator is not None
    assert _OrchestratorFactory._dispatcher is None

    _OrchestratorFactory._build_dispatcher()
    assert isinstance(_OrchestratorFactory._dispatcher, _DevelopmentJobDispatcher)


def test_build_standalone_dispatcher():
    Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
    _OrchestratorFactory._build_dispatcher()

    assert isinstance(_OrchestratorFactory._dispatcher, _StandaloneJobDispatcher)
    assert not isinstance(_OrchestratorFactory._dispatcher, _DevelopmentJobDispatcher)
    assert _OrchestratorFactory._dispatcher.is_running()
    assert _OrchestratorFactory._dispatcher._nb_available_workers == 2

    # Without force_restart the existing dispatcher (and its mutated worker
    # count) is kept...
    _OrchestratorFactory._dispatcher._nb_available_workers = 1
    _OrchestratorFactory._build_dispatcher(force_restart=False)
    assert _OrchestratorFactory._dispatcher.is_running()
    assert _OrchestratorFactory._dispatcher._nb_available_workers == 1

    # ...while force_restart builds a fresh dispatcher with the configured
    # number of workers.
    _OrchestratorFactory._build_dispatcher(force_restart=True)
    assert _OrchestratorFactory._dispatcher.is_running()
    assert _OrchestratorFactory._dispatcher._nb_available_workers == 2
|
"""Tests for the development and standalone job dispatchers.

NOTE(review): relies on conftest fixtures to reset Config/managers between
tests, and on `tests.core.utils.assert_true_after_time` to poll for
asynchronous conditions - confirm in conftest.
"""
import multiprocessing
from concurrent.futures import ProcessPoolExecutor
from functools import partial
from unittest import mock
from unittest.mock import MagicMock

from pytest import raises

from src.taipy.core import DataNodeId, JobId, TaskId
from src.taipy.core._orchestrator._dispatcher._development_job_dispatcher import _DevelopmentJobDispatcher
from src.taipy.core._orchestrator._dispatcher._standalone_job_dispatcher import _StandaloneJobDispatcher
from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory
from src.taipy.core.config.job_config import JobConfig
from src.taipy.core.data._data_manager import _DataManager
from src.taipy.core.job.job import Job
from src.taipy.core.submission._submission_manager_factory import _SubmissionManagerFactory
from src.taipy.core.task.task import Task
from taipy.config.config import Config
from tests.core.utils import assert_true_after_time


def execute(lock):
    """Task function that blocks until *lock* is released (used to occupy a worker)."""
    with lock:
        ...
    return None


def _error():
    """Task function that always raises, to exercise failure handling."""
    raise RuntimeError("Something bad has happened")


def test_build_development_job_dispatcher():
    """Development dispatcher is synchronous: single worker, no start/stop lifecycle."""
    Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE)
    _OrchestratorFactory._build_dispatcher()
    dispatcher = _OrchestratorFactory._dispatcher
    assert isinstance(dispatcher, _DevelopmentJobDispatcher)
    assert dispatcher._nb_available_workers == 1

    # The development dispatcher has no thread to start or stop.
    with raises(NotImplementedError):
        assert dispatcher.start()
    assert dispatcher.is_running()
    with raises(NotImplementedError):
        dispatcher.stop()


def test_build_standalone_job_dispatcher():
    """Standalone dispatcher uses a process pool and supports stop/join."""
    Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
    _OrchestratorFactory._build_dispatcher()
    dispatcher = _OrchestratorFactory._dispatcher
    assert not isinstance(dispatcher, _DevelopmentJobDispatcher)
    assert isinstance(dispatcher, _StandaloneJobDispatcher)
    assert isinstance(dispatcher._executor, ProcessPoolExecutor)
    assert dispatcher._nb_available_workers == 2
    assert_true_after_time(dispatcher.is_running)
    dispatcher.stop()
    dispatcher.join()
    assert_true_after_time(lambda: not dispatcher.is_running())


def test_can_execute_2_workers():
    """With 2 workers, a third dispatch is refused until a worker frees up."""
    Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
    # A managed lock is picklable, so the subprocess task can block on it.
    m = multiprocessing.Manager()
    lock = m.Lock()
    task_id = TaskId("task_id1")
    output = list(_DataManager._bulk_get_or_create([Config.configure_data_node("input1", default_data=21)]).values())
    _OrchestratorFactory._build_dispatcher()
    task = Task(
        config_id="name",
        properties={},
        input=[],
        function=partial(execute, lock),
        output=output,
        id=task_id,
    )
    job_id = JobId("id1")
    job = Job(job_id, task, "submit_id", task.id)
    dispatcher = _StandaloneJobDispatcher(_OrchestratorFactory._orchestrator)
    # While we hold the lock, both dispatched jobs stay blocked in their workers.
    with lock:
        assert dispatcher._can_execute()
        dispatcher._dispatch(job)
        assert dispatcher._can_execute()
        dispatcher._dispatch(job)
        assert not dispatcher._can_execute()
    # Releasing the lock lets the jobs finish and the workers become available again.
    assert_true_after_time(lambda: dispatcher._can_execute())


def test_can_execute_synchronous():
    """Development dispatcher always reports capacity (it runs jobs inline)."""
    Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE)
    _OrchestratorFactory._build_dispatcher()
    task_id = TaskId("task_id1")
    task = Task(config_id="name", properties={}, input=[], function=print, output=[], id=task_id)
    submission = _SubmissionManagerFactory._build_manager()._create(task_id, task._ID_PREFIX)
    job_id = JobId("id1")
    job = Job(job_id, task, submission.id, task.id)

    dispatcher = _OrchestratorFactory._dispatcher

    assert dispatcher._can_execute()
    dispatcher._dispatch(job)
    assert dispatcher._can_execute()


def test_exception_in_user_function():
    """A raising task function marks the job failed and records the stacktrace."""
    Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE)
    _OrchestratorFactory._build_dispatcher()
    task_id = TaskId("task_id1")
    job_id = JobId("id1")
    task = Task(config_id="name", properties={}, input=[], function=_error, output=[], id=task_id)
    submission = _SubmissionManagerFactory._build_manager()._create(task_id, task._ID_PREFIX)
    job = Job(job_id, task, submission.id, task.id)

    dispatcher = _OrchestratorFactory._dispatcher
    dispatcher._dispatch(job)
    assert job.is_failed()
    assert 'RuntimeError("Something bad has happened")' in str(job.stacktrace[0])


def test_exception_in_writing_data():
    """A failure while writing an output data node also marks the job failed."""
    Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE)
    _OrchestratorFactory._build_dispatcher()
    task_id = TaskId("task_id1")
    job_id = JobId("id1")
    # Mocked output node whose write() raises.
    output = MagicMock()
    output.id = DataNodeId("output_id")
    output.config_id = "my_raising_datanode"
    output._is_in_cache = False
    output.write.side_effect = ValueError()
    task = Task(config_id="name", properties={}, input=[], function=print, output=[output], id=task_id)
    submission = _SubmissionManagerFactory._build_manager()._create(task_id, task._ID_PREFIX)
    job = Job(job_id, task, submission.id, task.id)
    dispatcher = _OrchestratorFactory._dispatcher

    with mock.patch("src.taipy.core.data._data_manager._DataManager._get") as get:
        get.return_value = output
        dispatcher._dispatch(job)
        assert job.is_failed()
        assert "node" in job.stacktrace[0]
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
"""Backward-compatibility tests for deserializing raw _TaskModel payloads."""
from src.taipy.core.data import InMemoryDataNode
from src.taipy.core.data._data_manager_factory import _DataManagerFactory
from src.taipy.core.task._task_model import _TaskModel
from taipy.config.common.scope import Scope


def _raw_task_model(**overrides):
    """Return a baseline serialized task-model dict, with *overrides* applied."""
    payload = {
        "id": "id",
        "config_id": "config_id",
        "owner_id": "owner_id",
        "parent_ids": ["parent_id"],
        "input_ids": ["input_id"],
        "function_name": "function_name",
        "function_module": "function_module",
        "output_ids": ["output_id"],
        "version": "latest",
        "skippable": False,
    }
    payload.update(overrides)
    return payload


def test_none_properties_attribute_compatible():
    """A legacy payload without a "properties" entry loads with empty properties."""
    legacy = _raw_task_model(parent_id="owner_id")
    del legacy["owner_id"]  # legacy payloads carried "parent_id" instead of "owner_id"
    model = _TaskModel.from_dict(legacy)
    assert len(model.properties) == 0


def test_skippable_compatibility_with_non_existing_output():
    """skippable stays False when the referenced output data node does not exist."""
    model = _TaskModel.from_dict(_raw_task_model())
    assert not model.skippable


def test_skippable_compatibility_with_no_output():
    """skippable stays False when the task declares no outputs at all."""
    model = _TaskModel.from_dict(_raw_task_model(output_ids=[]))
    assert not model.skippable


def test_skippable_compatibility_with_one_output():
    """skippable is preserved when its single output data node exists."""
    _DataManagerFactory._build_manager()._set(InMemoryDataNode("cfg_id", Scope.SCENARIO, id="dn_id"))
    model = _TaskModel.from_dict(_raw_task_model(output_ids=["dn_id"], skippable=True))
    assert model.skippable


def test_skippable_compatibility_with_many_outputs():
    """skippable is preserved when every declared output data node exists."""
    manager = _DataManagerFactory._build_manager()
    manager._set(InMemoryDataNode("cfg_id", Scope.SCENARIO, id="dn_id"))
    manager._set(InMemoryDataNode("cfg_id_2", Scope.SCENARIO, id="dn_2_id"))
    model = _TaskModel.from_dict(_raw_task_model(output_ids=["dn_id", "dn_2_id"], skippable=True))
    assert model.skippable
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
from src.taipy.core.sequence._sequence_converter import _SequenceConverter from src.taipy.core.sequence.sequence import Sequence from src.taipy.core.task.task import Task def test_entity_to_model(sequence): sequence_model_1 = _SequenceConverter._entity_to_model(sequence) expected_sequence_model_1 = { "id": "sequence_id", "owner_id": "owner_id", "parent_ids": ["parent_id_1", "parent_id_2"], "properties": {}, "tasks": [], "subscribers": [], "version": "random_version_number", } sequence_model_1["parent_ids"] = sorted(sequence_model_1["parent_ids"]) assert sequence_model_1 == expected_sequence_model_1 task_1 = Task("task_1", {}, print) task_2 = Task("task_2", {}, print) sequence_2 = Sequence( {"name": "sequence_2"}, [task_1, task_2], "SEQUENCE_sq_1_SCENARIO_sc", "SCENARIO_sc", ["SCENARIO_sc"], [], "random_version", ) sequence_model_2 = _SequenceConverter._entity_to_model(sequence_2) expected_sequence_model_2 = { "id": "SEQUENCE_sq_1_SCENARIO_sc", "owner_id": "SCENARIO_sc", "parent_ids": ["SCENARIO_sc"], "properties": {"name": "sequence_2"}, "tasks": [task_1.id, task_2.id], "subscribers": [], "version": "random_version", } assert sequence_model_2 == expected_sequence_model_2
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
"""Fake DataNode test doubles used by the data-node test suite."""
from src.taipy.core.data.data_node import DataNode
from src.taipy.core.data.in_memory import InMemoryDataNode
from taipy.config.common.scope import Scope


class FakeDataNode(InMemoryDataNode):
    """In-memory node that counts read/write calls instead of storing data."""

    # Call counters; NOTE(review): declared on the class, so they are shared
    # until an instance first rebinds them via `+=` - confirm this is intended.
    read_has_been_called = 0
    write_has_been_called = 0

    def __init__(self, config_id, **kwargs):
        scope = kwargs.pop("scope", Scope.SCENARIO)
        super().__init__(config_id=config_id, scope=scope, **kwargs)

    def _read(self, query=None):
        self.read_has_been_called += 1

    def _write(self, data):
        self.write_has_been_called += 1

    @classmethod
    def storage_type(cls) -> str:
        return "fake_inmemory"

    write = DataNode.write  # Make sure that the writing behavior comes from DataNode


class FakeDataframeDataNode(DataNode):
    """Node whose _read returns the dataframe it was constructed with."""

    COLUMN_NAME_1 = "a"
    COLUMN_NAME_2 = "b"

    def __init__(self, config_id, default_data_frame, **kwargs):
        super().__init__(config_id, **kwargs)
        self.data = default_data_frame

    def _read(self):
        return self.data

    @classmethod
    def storage_type(cls) -> str:
        return "fake_df_dn"


class FakeNumpyarrayDataNode(DataNode):
    """Node whose _read returns the numpy array it was constructed with."""

    def __init__(self, config_id, default_array, **kwargs):
        super().__init__(config_id, **kwargs)
        self.data = default_array

    def _read(self):
        return self.data

    @classmethod
    def storage_type(cls) -> str:
        return "fake_np_dn"


class FakeListDataNode(DataNode):
    """Node whose _read returns a fixed list of ten Row objects."""

    class Row:
        # Minimal record type: a single `value` attribute.
        def __init__(self, value):
            self.value = value

    def __init__(self, config_id, **kwargs):
        super().__init__(config_id, **kwargs)
        self.data = [self.Row(i) for i in range(10)]

    def _read(self):
        return self.data

    @classmethod
    def storage_type(cls) -> str:
        return "fake_list_dn"


class CustomClass:
    """Simple two-attribute record used as custom row data."""

    def __init__(self, a, b):
        self.a = a
        self.b = b


class FakeCustomDataNode(DataNode):
    """Node whose _read returns ten CustomClass rows.

    NOTE(review): unlike its siblings this class does not override
    storage_type() - confirm that is deliberate.
    """

    def __init__(self, config_id, **kwargs):
        super().__init__(config_id, **kwargs)
        self.data = [CustomClass(i, i * 2) for i in range(10)]

    def _read(self):
        return self.data


class FakeMultiSheetExcelDataFrameDataNode(DataNode):
    """Node emulating a two-sheet Excel source backed by one dataframe."""

    def __init__(self, config_id, default_data_frame, **kwargs):
        super().__init__(config_id, **kwargs)
        self.data = {
            "Sheet1": default_data_frame,
            "Sheet2": default_data_frame,
        }

    def _read(self):
        return self.data


class FakeMultiSheetExcelCustomDataNode(DataNode):
    """Node emulating a two-sheet Excel source backed by CustomClass rows."""

    def __init__(self, config_id, **kwargs):
        super().__init__(config_id, **kwargs)
        self.data = {
            "Sheet1": [CustomClass(i, i * 2) for i in range(10)],
            "Sheet2": [CustomClass(i, i * 2) for i in range(10)],
        }

    def _read(self):
        return self.data
|
"""CRUD and query tests run against both data-node repository back ends."""
import os

import pytest

from src.taipy.core.data._data_fs_repository import _DataFSRepository
from src.taipy.core.data._data_sql_repository import _DataSQLRepository
from src.taipy.core.data.data_node import DataNode, DataNodeId
from src.taipy.core.exceptions import ModelNotFound


class TestDataNodeRepository:
    """Parametrized over the filesystem and SQL repositories.

    ``data_node`` and ``init_sql_repo`` are conftest fixtures: ``data_node``
    supplies a ready-made entity and ``init_sql_repo`` presumably points the
    SQL repository at a fresh database - confirm in conftest.
    """

    @pytest.mark.parametrize("repo", [_DataFSRepository, _DataSQLRepository])
    def test_save_and_load(self, data_node, repo, init_sql_repo):
        repository = repo()
        repository._save(data_node)
        obj = repository._load(data_node.id)
        assert isinstance(obj, DataNode)

    @pytest.mark.parametrize("repo", [_DataFSRepository, _DataSQLRepository])
    def test_exists(self, data_node, repo, init_sql_repo):
        repository = repo()
        repository._save(data_node)
        assert repository._exists(data_node.id)
        assert not repository._exists("not-existed-data-node")

    @pytest.mark.parametrize("repo", [_DataFSRepository, _DataSQLRepository])
    def test_load_all(self, data_node, repo, init_sql_repo):
        repository = repo()
        # Re-saving the same entity under ten distinct ids yields ten records.
        for i in range(10):
            data_node.id = DataNodeId(f"data_node-{i}")
            repository._save(data_node)
        data_nodes = repository._load_all()
        assert len(data_nodes) == 10

    @pytest.mark.parametrize("repo", [_DataFSRepository, _DataSQLRepository])
    def test_load_all_with_filters(self, data_node, repo, init_sql_repo):
        repository = repo()
        for i in range(10):
            data_node.id = DataNodeId(f"data_node-{i}")
            data_node.owner_id = f"task-{i}"
            repository._save(data_node)
        objs = repository._load_all(filters=[{"owner_id": "task-2"}])
        assert len(objs) == 1

    @pytest.mark.parametrize("repo", [_DataFSRepository, _DataSQLRepository])
    def test_delete(self, data_node, repo, init_sql_repo):
        repository = repo()
        repository._save(data_node)
        repository._delete(data_node.id)
        # Loading a deleted entity must raise, not return a stale record.
        with pytest.raises(ModelNotFound):
            repository._load(data_node.id)

    @pytest.mark.parametrize("repo", [_DataFSRepository, _DataSQLRepository])
    def test_delete_all(self, data_node, repo, init_sql_repo):
        repository = repo()
        for i in range(10):
            data_node.id = DataNodeId(f"data_node-{i}")
            repository._save(data_node)
        assert len(repository._load_all()) == 10
        repository._delete_all()
        assert len(repository._load_all()) == 0

    @pytest.mark.parametrize("repo", [_DataFSRepository, _DataSQLRepository])
    def test_delete_many(self, data_node, repo, init_sql_repo):
        repository = repo()
        for i in range(10):
            data_node.id = DataNodeId(f"data_node-{i}")
            repository._save(data_node)
        objs = repository._load_all()
        assert len(objs) == 10
        ids = [x.id for x in objs[:3]]
        repository._delete_many(ids)
        assert len(repository._load_all()) == 7

    @pytest.mark.parametrize("repo", [_DataFSRepository, _DataSQLRepository])
    def test_delete_by(self, data_node, repo, init_sql_repo):
        repository = repo()

        # Create 5 entities with version 1.0 and 5 entities with version 2.0.
        # FIX: the original expression `(i+1) // 5` actually produced a 4/5/1
        # split over versions 0.0/1.0/2.0, contradicting this comment;
        # `i // 5 + 1` yields the intended 5/5 split and keeps the assertions
        # below valid.
        for i in range(10):
            data_node.id = DataNodeId(f"data_node-{i}")
            data_node._version = f"{i // 5 + 1}.0"
            repository._save(data_node)

        objs = repository._load_all()
        assert len(objs) == 10
        repository._delete_by("version", "1.0")
        assert len(repository._load_all()) == 5

    @pytest.mark.parametrize("repo", [_DataFSRepository, _DataSQLRepository])
    def test_search(self, data_node, repo, init_sql_repo):
        repository = repo()
        for i in range(10):
            data_node.id = DataNodeId(f"data_node-{i}")
            data_node.owner_id = f"task-{i}"
            repository._save(data_node)
        assert len(repository._load_all()) == 10

        objs = repository._search("owner_id", "task-2")
        assert len(objs) == 1
        assert isinstance(objs[0], DataNode)

        # "random_version_number" is presumably the fixture's version - confirm
        # against the `data_node` conftest fixture.
        objs = repository._search("owner_id", "task-2", filters=[{"version": "random_version_number"}])
        assert len(objs) == 1
        assert isinstance(objs[0], DataNode)

        assert repository._search("owner_id", "task-2", filters=[{"version": "non_existed_version"}]) == []

    @pytest.mark.parametrize("repo", [_DataFSRepository, _DataSQLRepository])
    def test_export(self, tmpdir, data_node, repo, init_sql_repo):
        repository = repo()
        repository._save(data_node)
        repository._export(data_node.id, tmpdir.strpath)
        # The FS repository exports into its own directory; the SQL one into tmpdir.
        dir_path = repository.dir_path if repo == _DataFSRepository else os.path.join(tmpdir.strpath, "data_node")
        assert os.path.exists(os.path.join(dir_path, f"{data_node.id}.json"))
|
import pytest from src.taipy.core.data.data_node_id import DataNodeId from src.taipy.core.data.in_memory import InMemoryDataNode from src.taipy.core.exceptions.exceptions import NoData from taipy.config.common.scope import Scope from taipy.config.exceptions.exceptions import InvalidConfigurationId class TestInMemoryDataNodeEntity: def test_create(self): dn = InMemoryDataNode( "foobar_bazy", Scope.SCENARIO, DataNodeId("id_uio"), "owner_id", properties={"default_data": "In memory Data Node", "name": "my name"}, ) assert isinstance(dn, InMemoryDataNode) assert dn.storage_type() == "in_memory" assert dn.config_id == "foobar_bazy" assert dn.scope == Scope.SCENARIO assert dn.id == "id_uio" assert dn.name == "my name" assert dn.owner_id == "owner_id" assert dn.last_edit_date is not None assert dn.job_ids == [] assert dn.is_ready_for_reading assert dn.read() == "In memory Data Node" dn_2 = InMemoryDataNode("foo", Scope.SCENARIO) assert dn_2.last_edit_date is None assert not dn_2.is_ready_for_reading with pytest.raises(InvalidConfigurationId): InMemoryDataNode("foo bar", Scope.SCENARIO, DataNodeId("dn_id")) def test_get_user_properties(self): dn = InMemoryDataNode("foo", Scope.SCENARIO, properties={"default_data": 1, "foo": "bar"}) assert dn._get_user_properties() == {"foo": "bar"} def test_read_and_write(self): no_data_dn = InMemoryDataNode("foo", Scope.SCENARIO, DataNodeId("dn_id")) with pytest.raises(NoData): assert no_data_dn.read() is None no_data_dn.read_or_raise() in_mem_dn = InMemoryDataNode("foo", Scope.SCENARIO, properties={"default_data": "bar"}) assert isinstance(in_mem_dn.read(), str) assert in_mem_dn.read() == "bar" in_mem_dn.properties["default_data"] = "baz" # this modifies the default data value but not the data itself assert in_mem_dn.read() == "bar" in_mem_dn.write("qux") assert in_mem_dn.read() == "qux" in_mem_dn.write(1998) assert isinstance(in_mem_dn.read(), int) assert in_mem_dn.read() == 1998
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
"""Tests for the submission manager (create/get/get_all/get_latest/delete).

NOTE(review): `scenario` is a conftest fixture; a conftest reset between tests
is presumably relied upon - confirm.
"""
from datetime import datetime
from time import sleep

from src.taipy.core._version._version_manager_factory import _VersionManagerFactory
from src.taipy.core.submission._submission_manager_factory import _SubmissionManagerFactory
from src.taipy.core.submission.submission import Submission
from src.taipy.core.submission.submission_status import SubmissionStatus
from src.taipy.core.task.task import Task


def test_create_submission(scenario):
    """A freshly created submission starts empty in SUBMITTED status."""
    submission_1 = _SubmissionManagerFactory._build_manager()._create(scenario.id, scenario._ID_PREFIX)

    assert submission_1.id is not None
    assert submission_1.entity_id == scenario.id
    assert submission_1.jobs == []
    assert isinstance(submission_1.creation_date, datetime)
    assert submission_1._submission_status == SubmissionStatus.SUBMITTED


def test_get_submission():
    """_get returns None for unknown ids and a field-equal copy for known ones."""
    submission_manager = _SubmissionManagerFactory._build_manager()

    assert submission_manager._get("random_submission_id") is None

    submission_1 = submission_manager._create("entity_id", "ENTITY_TYPE")
    submission_2 = submission_manager._get(submission_1.id)

    assert submission_1.id == submission_2.id
    assert submission_1.entity_id == submission_2.entity_id == "entity_id"
    assert submission_1.jobs == submission_2.jobs
    assert submission_1.creation_date == submission_2.creation_date
    assert submission_1.submission_status == submission_2.submission_status


def test_get_all_submission():
    """_get_all is scoped to the current version unless one is passed explicitly."""
    submission_manager = _SubmissionManagerFactory._build_manager()
    version_manager = _VersionManagerFactory._build_manager()

    # One submission in the latest version, ten in each of two named versions.
    submission_manager._set(Submission("entity_id", "submission_id", version=version_manager._get_latest_version()))
    for version_name in ["abc", "xyz"]:
        for i in range(10):
            submission_manager._set(
                Submission("entity_id", f"submission_{version_name}_{i}", version=f"{version_name}")
            )

    # Only the current (latest) version is visible by default.
    assert len(submission_manager._get_all()) == 1

    version_manager._set_experiment_version("xyz")
    version_manager._set_experiment_version("abc")
    # Now "abc" is current; both named versions remain queryable explicitly.
    assert len(submission_manager._get_all()) == 10
    assert len(submission_manager._get_all("abc")) == 10
    assert len(submission_manager._get_all("xyz")) == 10


def test_get_latest_submission():
    """_get_latest tracks, per entity, the most recently created submission."""
    task_1 = Task("task_config_1", {}, print, id="task_id_1")
    task_2 = Task("task_config_2", {}, print, id="task_id_2")

    submission_manager = _SubmissionManagerFactory._build_manager()
    submission_1 = submission_manager._create(task_1.id, task_1._ID_PREFIX)
    assert submission_manager._get_latest(task_1) == submission_1
    assert submission_manager._get_latest(task_2) is None

    sleep(0.01)  # "latest" is time-based; Windows clock precision needs this gap
    submission_2 = submission_manager._create(task_2.id, task_2._ID_PREFIX)
    assert submission_manager._get_latest(task_1) == submission_1
    assert submission_manager._get_latest(task_2) == submission_2

    sleep(0.01)  # "latest" is time-based; Windows clock precision needs this gap
    submission_3 = submission_manager._create(task_1.id, task_1._ID_PREFIX)
    assert submission_manager._get_latest(task_1) == submission_3
    assert submission_manager._get_latest(task_2) == submission_2

    sleep(0.01)  # "latest" is time-based; Windows clock precision needs this gap
    submission_4 = submission_manager._create(task_2.id, task_2._ID_PREFIX)
    assert submission_manager._get_latest(task_1) == submission_3
    assert submission_manager._get_latest(task_2) == submission_4


def test_delete_submission():
    """_delete removes a single submission; _delete_all removes everything."""
    submission_manager = _SubmissionManagerFactory._build_manager()

    submission = Submission("entity_id", "submission_id")
    submission_manager._set(submission)

    for i in range(10):
        submission_manager._set(Submission("entity_id", f"submission_{i}"))

    assert len(submission_manager._get_all()) == 11
    assert isinstance(submission_manager._get(submission.id), Submission)

    submission_manager._delete(submission.id)
    assert len(submission_manager._get_all()) == 10
    assert submission_manager._get(submission.id) is None

    submission_manager._delete_all()
    assert len(submission_manager._get_all()) == 0
|
"""Submission-manager tests against the SQL repository back end.

Mirrors the filesystem-repository test module; each test takes the
`init_sql_repo` conftest fixture (presumably switching the repository to a
fresh SQL database - confirm in conftest) and resets the managers first.
NOTE(review): `_SQLConnection` is imported but unused here - possibly needed
for its import side effects; left untouched.
"""
from datetime import datetime
from time import sleep

from src.taipy.core import Task
from src.taipy.core._repository.db._sql_connection import _SQLConnection
from src.taipy.core._version._version_manager_factory import _VersionManagerFactory
from src.taipy.core.submission._submission_manager_factory import _SubmissionManagerFactory
from src.taipy.core.submission.submission import Submission
from src.taipy.core.submission.submission_status import SubmissionStatus


def init_managers():
    # Wipe version and submission state so each test starts from scratch.
    _VersionManagerFactory._build_manager()._delete_all()
    _SubmissionManagerFactory._build_manager()._delete_all()


def test_create_submission(scenario, init_sql_repo):
    """A freshly created submission starts empty in SUBMITTED status."""
    init_managers()

    submission_1 = _SubmissionManagerFactory._build_manager()._create(scenario.id, scenario._ID_PREFIX)

    assert submission_1.id is not None
    assert submission_1.entity_id == scenario.id
    assert submission_1.jobs == []
    assert isinstance(submission_1.creation_date, datetime)
    assert submission_1._submission_status == SubmissionStatus.SUBMITTED


def test_get_submission(init_sql_repo):
    """_get returns a field-equal copy of a stored submission."""
    init_managers()

    submission_manager = _SubmissionManagerFactory._build_manager()

    submission_1 = submission_manager._create("entity_id", "ENTITY_TYPE")
    submission_2 = submission_manager._get(submission_1.id)

    assert submission_1.id == submission_2.id
    assert submission_1.entity_id == submission_2.entity_id == "entity_id"
    assert submission_1.jobs == submission_2.jobs
    assert submission_1.creation_date == submission_2.creation_date
    assert submission_1.submission_status == submission_2.submission_status


def test_get_all_submission(init_sql_repo):
    """_get_all is scoped to the current version unless one is passed explicitly."""
    init_managers()

    submission_manager = _SubmissionManagerFactory._build_manager()
    version_manager = _VersionManagerFactory._build_manager()

    # One submission in the latest version, ten in each of two named versions.
    submission_manager._set(Submission("entity_id", "submission_id", version=version_manager._get_latest_version()))
    for version_name in ["abc", "xyz"]:
        for i in range(10):
            submission_manager._set(
                Submission("entity_id", f"submission_{version_name}_{i}", version=f"{version_name}")
            )

    assert len(submission_manager._get_all()) == 1

    version_manager._set_experiment_version("xyz")
    version_manager._set_experiment_version("abc")
    assert len(submission_manager._get_all()) == 10
    assert len(submission_manager._get_all("abc")) == 10
    assert len(submission_manager._get_all("xyz")) == 10


def test_get_latest_submission(init_sql_repo):
    """_get_latest tracks, per entity, the most recently created submission."""
    init_managers()

    task_1 = Task("task_config_1", {}, print, id="task_id_1")
    task_2 = Task("task_config_2", {}, print, id="task_id_2")

    submission_manager = _SubmissionManagerFactory._build_manager()
    submission_1 = submission_manager._create(task_1.id, task_1._ID_PREFIX)
    assert submission_manager._get_latest(task_1) == submission_1
    assert submission_manager._get_latest(task_2) is None

    sleep(0.01)  # "latest" is time-based; Windows clock precision needs this gap
    submission_2 = submission_manager._create(task_2.id, task_2._ID_PREFIX)
    assert submission_manager._get_latest(task_1) == submission_1
    assert submission_manager._get_latest(task_2) == submission_2

    sleep(0.01)  # "latest" is time-based; Windows clock precision needs this gap
    submission_3 = submission_manager._create(task_1.id, task_1._ID_PREFIX)
    assert submission_manager._get_latest(task_1) == submission_3
    assert submission_manager._get_latest(task_2) == submission_2

    sleep(0.01)  # "latest" is time-based; Windows clock precision needs this gap
    submission_4 = submission_manager._create(task_2.id, task_2._ID_PREFIX)
    assert submission_manager._get_latest(task_1) == submission_3
    assert submission_manager._get_latest(task_2) == submission_4


def test_delete_submission(init_sql_repo):
    """_delete removes a single submission; _delete_all removes everything."""
    init_managers()

    submission_manager = _SubmissionManagerFactory._build_manager()

    submission = Submission("entity_id", "submission_id")
    submission_manager._set(submission)

    for i in range(10):
        submission_manager._set(Submission("entity_id", f"submission_{i}"))

    assert len(submission_manager._get_all()) == 11
    assert isinstance(submission_manager._get(submission.id), Submission)

    submission_manager._delete(submission.id)
    assert len(submission_manager._get_all()) == 10
    assert submission_manager._get(submission.id) is None

    submission_manager._delete_all()
    assert len(submission_manager._get_all()) == 0
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
import os import pathlib from unittest import TestCase, mock from src.taipy.logger._taipy_logger import _TaipyLogger class TestTaipyLogger(TestCase): def test_taipy_logger(self): _TaipyLogger._get_logger().info("baz") _TaipyLogger._get_logger().debug("qux") def test_taipy_logger_configured_by_file(self): path = os.path.join(pathlib.Path(__file__).parent.resolve(), "logger.conf") with mock.patch.dict(os.environ, {"TAIPY_LOGGER_CONFIG_PATH": path}): _TaipyLogger._get_logger().info("baz") _TaipyLogger._get_logger().debug("qux")
|
import os import pytest from src.taipy.config.config import Config from src.taipy.config.exceptions.exceptions import ConfigurationUpdateBlocked from tests.config.utils.named_temporary_file import NamedTemporaryFile config_from_filename = NamedTemporaryFile( """ [TAIPY] custom_property_not_overwritten = true custom_property_overwritten = 10 """ ) config_from_environment = NamedTemporaryFile( """ [TAIPY] custom_property_overwritten = 11 """ ) def test_load_from_environment_overwrite_load_from_filename(): os.environ[Config._ENVIRONMENT_VARIABLE_NAME_WITH_CONFIG_PATH] = config_from_environment.filename Config.load(config_from_filename.filename) assert Config.global_config.custom_property_not_overwritten is True assert Config.global_config.custom_property_overwritten == 11 os.environ.pop(Config._ENVIRONMENT_VARIABLE_NAME_WITH_CONFIG_PATH) def test_block_load_from_environment_overwrite_load_from_filename(): Config.load(config_from_filename.filename) assert Config.global_config.custom_property_not_overwritten is True assert Config.global_config.custom_property_overwritten == 10 Config.block_update() with pytest.raises(ConfigurationUpdateBlocked): os.environ[Config._ENVIRONMENT_VARIABLE_NAME_WITH_CONFIG_PATH] = config_from_environment.filename Config.load(config_from_filename.filename) os.environ.pop(Config._ENVIRONMENT_VARIABLE_NAME_WITH_CONFIG_PATH) assert Config.global_config.custom_property_not_overwritten is True assert Config.global_config.custom_property_overwritten == 10 # The Config.load is failed to override
|
import pytest

from src.taipy.config._config import _Config
from src.taipy.config._config_comparator._config_comparator import _ConfigComparator
from src.taipy.config._serializer._toml_serializer import _TomlSerializer
from src.taipy.config.checker.issue_collector import IssueCollector
from src.taipy.config.config import Config
from src.taipy.config.section import Section
from tests.config.utils.section_for_tests import SectionForTest
from tests.config.utils.unique_section_for_tests import UniqueSectionForTest


@pytest.fixture(scope="function", autouse=True)
def reset():
    """Before every test: wipe the Config singleton, then re-register the test sections."""
    reset_configuration_singleton()
    register_test_sections()


def reset_configuration_singleton():
    # Unblock first: a previous test may have called Config.block_update(),
    # which would make the assignments below raise.
    Config.unblock_update()

    # Rebuild every layered config and its collaborators from scratch.
    Config._default_config = _Config()._default_config()
    Config._python_config = _Config()
    Config._file_config = _Config()
    Config._env_file_config = _Config()
    Config._applied_config = _Config()
    Config._collector = IssueCollector()
    Config._serializer = _TomlSerializer()
    Config._comparator = _ConfigComparator()


def register_test_sections():
    # Install the default unique section and its `configure` entry point.
    Config._register_default(UniqueSectionForTest("default_attribute"))
    Config.configure_unique_section_for_tests = UniqueSectionForTest._configure
    Config.unique_section_name = Config.unique_sections[UniqueSectionForTest.name]

    # Install the default (keyed) section and its `configure` entry point.
    Config._register_default(SectionForTest(Section._DEFAULT_KEY, "default_attribute", prop="default_prop", prop_int=0))
    Config.configure_section_for_tests = SectionForTest._configure
    Config.section_name = Config.sections[SectionForTest.name]
|
import os
from unittest import mock

import pytest

from src.taipy.config.exceptions.exceptions import InvalidConfigurationId
from tests.config.utils.section_for_tests import SectionForTest
from tests.config.utils.unique_section_for_tests import UniqueSectionForTest


class WrongUniqueSection(UniqueSectionForTest):
    # Invalid on purpose: ids may not start with a digit.
    name = "1wrong_id"


class WrongSection(SectionForTest):
    name = "correct_name"


def test_section_uses_valid_id():
    """Sections must reject ids that are not valid identifiers."""
    # The unique section's class-level name itself is invalid.
    with pytest.raises(InvalidConfigurationId):
        WrongUniqueSection(attribute="foo")

    # Spaces, leading digits and special characters are all rejected.
    for invalid_id in ("wrong id", "1wrong_id", "wrong_@id"):
        with pytest.raises(InvalidConfigurationId):
            WrongSection(invalid_id, attribute="foo")


def test_templated_properties_are_replaced():
    """ENV[...] templates resolve from environment variables, honoring the cast suffix."""
    with mock.patch.dict(os.environ, {"foo": "bar", "baz": "1"}):
        unique_section = UniqueSectionForTest(attribute="attribute", tpl_property="ENV[foo]")
        assert unique_section.tpl_property == "bar"

        section = SectionForTest(id="my_id", attribute="attribute", tpl_property="ENV[baz]:int")
        assert section.tpl_property == 1
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
from src.taipy.config.config import Config
from src.taipy.config.global_app.global_app_config import GlobalAppConfig
from src.taipy.config.section import Section
from tests.config.utils.section_for_tests import SectionForTest
from tests.config.utils.unique_section_for_tests import UniqueSectionForTest


def _test_default_global_app_config(global_config: GlobalAppConfig):
    """A pristine global config has no notification flag and no properties."""
    assert global_config is not None
    assert not global_config.notification
    assert len(global_config.properties) == 0


def test_default_configuration():
    """The default config holds exactly one unique section and one keyed section."""
    default_config = Config._default_config

    unique_sections = default_config._unique_sections
    assert unique_sections is not None
    assert len(unique_sections) == 1
    assert unique_sections[UniqueSectionForTest.name] is not None
    assert unique_sections[UniqueSectionForTest.name].attribute == "default_attribute"

    assert default_config._sections is not None
    assert len(default_config._sections) == 1

    # The same defaults must be visible through every access path.
    _test_default_global_app_config(default_config._global_config)
    _test_default_global_app_config(Config.global_config)
    _test_default_global_app_config(GlobalAppConfig().default_config())


def test_register_default_configuration():
    """Registering a default section twice replaces the first registration entirely."""
    Config._register_default(SectionForTest(Section._DEFAULT_KEY, "default_attribute", prop1="prop1"))
    # Replace the first default section
    Config._register_default(SectionForTest(Section._DEFAULT_KEY, "default_attribute", prop2="prop2"))

    default_section = Config.sections[SectionForTest.name][Section._DEFAULT_KEY]
    assert len(default_section.properties) == 1
    assert default_section.prop2 == "prop2"
    assert default_section.prop1 is None
|
import pytest

from src.taipy.config.config import Config
from src.taipy.config.exceptions.exceptions import LoadingError
from tests.config.utils.named_temporary_file import NamedTemporaryFile


def test_node_can_not_appear_twice():
    """A TOML table repeated in the same file must abort the load."""
    duplicated_node_file = NamedTemporaryFile(
        """
[unique_section_name]
attribute = "my_attribute"
[unique_section_name]
attribute = "other_attribute"
"""
    )

    with pytest.raises(LoadingError, match="Can not load configuration"):
        Config.load(duplicated_node_file.filename)


def test_skip_configuration_outside_nodes():
    """Top-level keys outside any section node are silently ignored."""
    stray_key_file = NamedTemporaryFile(
        """
foo = "bar"
"""
    )

    Config.load(stray_key_file.filename)

    assert Config.global_config.foo is None
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
from src.taipy.config._config import _Config
from src.taipy.config.checker._checker import _Checker


class TestDefaultConfigChecker:
    def test_check_default_config(self):
        """The default configuration must produce no issue at any level."""
        collector = _Checker._check(_Config._default_config())

        assert len(collector._warnings) == 0
        assert len(collector._infos) == 0
        assert len(collector._errors) == 0
|
from src.taipy.config.checker.issue import Issue
from src.taipy.config.checker.issue_collector import IssueCollector


class TestIssueCollector:
    """Checks the collector's per-level lists and the severity ordering of `all`.

    `all` exposes issues ordered by severity — errors first, then warnings,
    then infos — with insertion order preserved within a level.
    """

    def test_add_error(self):
        # Adding errors grows only `errors` (and `all`), preserving order.
        collector = IssueCollector()
        assert len(collector.errors) == 0
        assert len(collector.warnings) == 0
        assert len(collector.infos) == 0
        assert len(collector.all) == 0
        collector._add_error("field", "value", "message", "checker")
        assert len(collector.errors) == 1
        assert len(collector.warnings) == 0
        assert len(collector.infos) == 0
        assert len(collector.all) == 1
        assert collector.all[0] == Issue(IssueCollector._ERROR_LEVEL, "field", "value", "message", "checker")
        collector._add_error("field", "value", "message", "checker")
        assert len(collector.errors) == 2
        assert len(collector.warnings) == 0
        assert len(collector.infos) == 0
        assert len(collector.all) == 2
        assert collector.all[0] == Issue(IssueCollector._ERROR_LEVEL, "field", "value", "message", "checker")
        assert collector.all[1] == Issue(IssueCollector._ERROR_LEVEL, "field", "value", "message", "checker")

    def test_add_warning(self):
        # Adding warnings grows only `warnings` (and `all`), preserving order.
        collector = IssueCollector()
        assert len(collector.errors) == 0
        assert len(collector.warnings) == 0
        assert len(collector.infos) == 0
        assert len(collector.all) == 0
        collector._add_warning("field", "value", "message", "checker")
        assert len(collector.errors) == 0
        assert len(collector.warnings) == 1
        assert len(collector.infos) == 0
        assert len(collector.all) == 1
        assert collector.all[0] == Issue(IssueCollector._WARNING_LEVEL, "field", "value", "message", "checker")
        collector._add_warning("field", "value", "message", "checker")
        assert len(collector.errors) == 0
        assert len(collector.warnings) == 2
        assert len(collector.infos) == 0
        assert len(collector.all) == 2
        assert collector.all[0] == Issue(IssueCollector._WARNING_LEVEL, "field", "value", "message", "checker")
        assert collector.all[1] == Issue(IssueCollector._WARNING_LEVEL, "field", "value", "message", "checker")

    def test_add_info(self):
        # Adding infos grows only `infos` (and `all`), preserving order.
        collector = IssueCollector()
        assert len(collector.errors) == 0
        assert len(collector.warnings) == 0
        assert len(collector.infos) == 0
        assert len(collector.all) == 0
        collector._add_info("field", "value", "message", "checker")
        assert len(collector.errors) == 0
        assert len(collector.warnings) == 0
        assert len(collector.infos) == 1
        assert len(collector.all) == 1
        assert collector.all[0] == Issue(IssueCollector._INFO_LEVEL, "field", "value", "message", "checker")
        collector._add_info("field", "value", "message", "checker")
        assert len(collector.errors) == 0
        assert len(collector.warnings) == 0
        assert len(collector.infos) == 2
        assert len(collector.all) == 2
        assert collector.all[0] == Issue(IssueCollector._INFO_LEVEL, "field", "value", "message", "checker")
        assert collector.all[1] == Issue(IssueCollector._INFO_LEVEL, "field", "value", "message", "checker")

    def test_all(self):
        # Mixed levels: `all` re-sorts on every addition — a new warning jumps
        # ahead of existing infos, a new error jumps ahead of everything.
        collector = IssueCollector()
        collector._add_info("foo", "bar", "baz", "qux")
        assert collector.all[0] == Issue(IssueCollector._INFO_LEVEL, "foo", "bar", "baz", "qux")
        collector._add_warning("foo2", "bar2", "baz2", "qux2")
        assert collector.all[0] == Issue(IssueCollector._WARNING_LEVEL, "foo2", "bar2", "baz2", "qux2")
        assert collector.all[1] == Issue(IssueCollector._INFO_LEVEL, "foo", "bar", "baz", "qux")
        collector._add_warning("foo3", "bar3", "baz3", "qux3")
        assert collector.all[0] == Issue(IssueCollector._WARNING_LEVEL, "foo2", "bar2", "baz2", "qux2")
        assert collector.all[1] == Issue(IssueCollector._WARNING_LEVEL, "foo3", "bar3", "baz3", "qux3")
        assert collector.all[2] == Issue(IssueCollector._INFO_LEVEL, "foo", "bar", "baz", "qux")
        collector._add_info("field", "value", "message", "checker")
        collector._add_error("field", "value", "message", "checker")
        assert collector.all[0] == Issue(IssueCollector._ERROR_LEVEL, "field", "value", "message", "checker")
        assert collector.all[1] == Issue(IssueCollector._WARNING_LEVEL, "foo2", "bar2", "baz2", "qux2")
        assert collector.all[2] == Issue(IssueCollector._WARNING_LEVEL, "foo3", "bar3", "baz3", "qux3")
        assert collector.all[3] == Issue(IssueCollector._INFO_LEVEL, "foo", "bar", "baz", "qux")
        assert collector.all[4] == Issue(IssueCollector._INFO_LEVEL, "field", "value", "message", "checker")
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
import os
from unittest import mock
from unittest.mock import MagicMock

from src.taipy.config import Config
from src.taipy.config.checker._checker import _Checker
from src.taipy.config.checker.issue_collector import IssueCollector
from tests.config.utils.checker_for_tests import CheckerForTest


def test_register_checker():
    """A checker registered through `add_checker` is invoked by `Config.check()`."""
    custom_checker = CheckerForTest
    custom_checker._check = MagicMock()

    _Checker.add_checker(custom_checker)
    Config.check()

    custom_checker._check.assert_called_once()
|
import logging
from unittest import mock

from src.taipy.config._config import _Config
from src.taipy.config.checker._checkers._config_checker import _ConfigChecker
from src.taipy.config.checker.issue import Issue
from src.taipy.config.checker.issue_collector import IssueCollector


class MyCustomChecker(_ConfigChecker):
    """Concrete subclass used to verify the checker name recorded on each Issue."""

    def _check(self) -> IssueCollector:  # type: ignore
        pass


def test__error():
    # Silence Logger.error: _error logs in addition to collecting the issue.
    with mock.patch.object(logging.Logger, "error"):
        collector = IssueCollector()
        assert len(collector.all) == 0
        # The issue's last field is the reporting checker's class name.
        _ConfigChecker(_Config(), collector)._error("field", 17, "my message")
        assert len(collector.all) == 1
        assert len(collector.errors) == 1
        assert len(collector.warnings) == 0
        assert len(collector.infos) == 0
        assert collector.errors[0] == Issue(IssueCollector._ERROR_LEVEL, "field", 17, "my message", "_ConfigChecker")

        MyCustomChecker(_Config(), collector)._error("foo", "bar", "baz")
        assert len(collector.all) == 2
        assert len(collector.errors) == 2
        assert len(collector.warnings) == 0
        assert len(collector.infos) == 0
        assert collector.errors[0] == Issue(IssueCollector._ERROR_LEVEL, "field", 17, "my message", "_ConfigChecker")
        assert collector.errors[1] == Issue(IssueCollector._ERROR_LEVEL, "foo", "bar", "baz", "MyCustomChecker")


def test__warning():
    # Same contract as _error but feeding the warnings list.
    collector = IssueCollector()
    assert len(collector.all) == 0
    _ConfigChecker(_Config(), collector)._warning("field", 17, "my message")
    assert len(collector.all) == 1
    assert len(collector.warnings) == 1
    assert len(collector.errors) == 0
    assert len(collector.infos) == 0
    assert collector.warnings[0] == Issue(IssueCollector._WARNING_LEVEL, "field", 17, "my message", "_ConfigChecker")

    MyCustomChecker(_Config(), collector)._warning("foo", "bar", "baz")
    assert len(collector.all) == 2
    assert len(collector.warnings) == 2
    assert len(collector.errors) == 0
    assert len(collector.infos) == 0
    assert collector.warnings[0] == Issue(IssueCollector._WARNING_LEVEL, "field", 17, "my message", "_ConfigChecker")
    assert collector.warnings[1] == Issue(IssueCollector._WARNING_LEVEL, "foo", "bar", "baz", "MyCustomChecker")


def test__info():
    # Same contract as _error but feeding the infos list.
    collector = IssueCollector()
    assert len(collector.all) == 0
    _ConfigChecker(_Config(), collector)._info("field", 17, "my message")
    assert len(collector.all) == 1
    assert len(collector.infos) == 1
    assert len(collector.errors) == 0
    assert len(collector.warnings) == 0
    assert collector.infos[0] == Issue(IssueCollector._INFO_LEVEL, "field", 17, "my message", "_ConfigChecker")

    MyCustomChecker(_Config(), collector)._info("foo", "bar", "baz")
    assert len(collector.all) == 2
    assert len(collector.infos) == 2
    assert len(collector.errors) == 0
    assert len(collector.warnings) == 0
    assert collector.infos[0] == Issue(IssueCollector._INFO_LEVEL, "field", 17, "my message", "_ConfigChecker")
    assert collector.infos[1] == Issue(IssueCollector._INFO_LEVEL, "foo", "bar", "baz", "MyCustomChecker")
|
from src.taipy.config import IssueCollector
from src.taipy.config.checker._checkers._config_checker import _ConfigChecker


class CheckerForTest(_ConfigChecker):
    """Minimal concrete checker: performs no check, returns its (empty) collector."""

    def _check(self) -> IssueCollector:
        return self._collector
|
from copy import copy
from typing import Any, Dict, List, Optional

from src.taipy.config import Config, Section
from src.taipy.config._config import _Config
from src.taipy.config.common._config_blocker import _ConfigBlocker

from .section_for_tests import SectionForTest


class SectionOfSectionsListForTest(Section):
    """Test `Section` whose `sections_list` field references other sections by id."""

    name = "list_section_name"  # registration key in Config.sections
    _MY_ATTRIBUTE_KEY = "attribute"
    _SECTIONS_LIST_KEY = "sections_list"

    def __init__(self, id: str, attribute: Any = None, sections_list: Optional[List] = None, **properties):
        self._attribute = attribute
        self._sections_list = sections_list if sections_list else []
        super().__init__(id, **properties)

    def __copy__(self):
        # Shallow-copies the list and the properties so the copies diverge.
        return SectionOfSectionsListForTest(
            self.id, self._attribute, copy(self._sections_list), **copy(self._properties)
        )

    @property
    def attribute(self):
        # Resolve any "ENV[...]" template before returning the value.
        return self._replace_templates(self._attribute)

    @attribute.setter  # type: ignore
    @_ConfigBlocker._check()
    def attribute(self, val):
        self._attribute = val

    @property
    def sections_list(self):
        # Defensive copy: callers must not mutate internal state.
        return list(self._sections_list)

    @sections_list.setter  # type: ignore
    @_ConfigBlocker._check()
    def sections_list(self, val):
        self._sections_list = val

    def _clean(self):
        """Reset the section to an empty state."""
        self._attribute = None
        self._sections_list = []
        self._properties.clear()

    def _to_dict(self):
        """Serialize to a plain dict, omitting unset/empty fields."""
        as_dict = {}
        if self._attribute is not None:
            as_dict[self._MY_ATTRIBUTE_KEY] = self._attribute
        if self._sections_list:
            as_dict[self._SECTIONS_LIST_KEY] = self._sections_list
        as_dict.update(self._properties)
        return as_dict

    @classmethod
    def _from_dict(cls, as_dict: Dict[str, Any], id: str, config: Optional[_Config] = None):
        """Deserialize, resolving known section ids in `sections_list` to their objects.

        Ids not found among the registered SectionForTest configs are kept as-is.
        """
        as_dict.pop(cls._ID_KEY, id)
        attribute = as_dict.pop(cls._MY_ATTRIBUTE_KEY, None)
        section_configs = config._sections.get(SectionForTest.name, None) or []  # type: ignore
        sections_list = []
        if inputs_as_str := as_dict.pop(cls._SECTIONS_LIST_KEY, None):
            for section_id in inputs_as_str:
                if section_id in section_configs:
                    sections_list.append(section_configs[section_id])
                else:
                    sections_list.append(section_id)
        return SectionOfSectionsListForTest(id=id, attribute=attribute, sections_list=sections_list, **as_dict)

    def _update(self, as_dict: Dict[str, Any], default_section=None):
        """Merge `as_dict` into this section, falling back to `default_section` values."""
        self._attribute = as_dict.pop(self._MY_ATTRIBUTE_KEY, self._attribute)
        if self._attribute is None and default_section:
            self._attribute = default_section._attribute
        self._sections_list = as_dict.pop(self._SECTIONS_LIST_KEY, self._sections_list)
        if self._sections_list is None and default_section:
            self._sections_list = default_section._sections_list
        self._properties.update(as_dict)
        if default_section:
            self._properties = {**default_section.properties, **self._properties}

    @staticmethod
    def _configure(id: str, attribute: str, sections_list: Optional[List] = None, **properties):
        """Create, register and return a section (mirrors the Config.configure_* helpers)."""
        section = SectionOfSectionsListForTest(id, attribute, sections_list, **properties)
        Config._register(section)
        return Config.sections[SectionOfSectionsListForTest.name][id]
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
import os
import tempfile


class NamedTemporaryFile:
    """A named temporary file that survives closing and is removed on garbage collection.

    Unlike `tempfile.NamedTemporaryFile(delete=True)`, the file stays on disk
    after `__init__` returns (the handle is closed), so tests can pass
    `self.filename` to code that re-opens it — including on Windows.
    """

    def __init__(self, content=None):
        # delete=False: keep the file after the handle closes; cleanup is ours.
        with tempfile.NamedTemporaryFile("w", delete=False) as fd:
            if content:
                fd.write(content)
            self.filename = fd.name

    def read(self):
        """Return the current content of the file."""
        with open(self.filename, "r") as fp:
            return fp.read()

    def __del__(self):
        # BUG FIX: the unlink used to be unguarded, so a test that removed the
        # file itself (or a double collection during interpreter shutdown)
        # raised FileNotFoundError inside __del__.
        try:
            os.unlink(self.filename)
        except FileNotFoundError:
            pass
|
from copy import copy
from typing import Any, Dict, Optional

from src.taipy.config import Config, Section
from src.taipy.config._config import _Config
from src.taipy.config.common._config_blocker import _ConfigBlocker


class SectionForTest(Section):
    """Minimal concrete `Section` used by the test-suite, with one `attribute` field."""

    name = "section_name"  # registration key in Config.sections
    _MY_ATTRIBUTE_KEY = "attribute"

    def __init__(self, id: str, attribute: Any = None, **properties):
        self._attribute = attribute
        super().__init__(id, **properties)

    def __copy__(self):
        return SectionForTest(self.id, self._attribute, **copy(self._properties))

    @property
    def attribute(self):
        # Resolve any "ENV[...]" template before returning the value.
        return self._replace_templates(self._attribute)

    @attribute.setter  # type: ignore
    @_ConfigBlocker._check()
    def attribute(self, val):
        self._attribute = val

    def _clean(self):
        """Reset the section to an empty state."""
        self._attribute = None
        self._properties.clear()

    def _to_dict(self):
        """Serialize to a plain dict, omitting the attribute when unset."""
        as_dict = {}
        if self._attribute is not None:
            as_dict[self._MY_ATTRIBUTE_KEY] = self._attribute
        as_dict.update(self._properties)
        return as_dict

    @classmethod
    def _from_dict(cls, as_dict: Dict[str, Any], id: str, config: Optional[_Config] = None):
        """Deserialize; keys left in `as_dict` become free-form properties."""
        as_dict.pop(cls._ID_KEY, id)
        attribute = as_dict.pop(cls._MY_ATTRIBUTE_KEY, None)
        return SectionForTest(id=id, attribute=attribute, **as_dict)

    def _update(self, as_dict: Dict[str, Any], default_section=None):
        """Merge `as_dict` into this section, falling back to `default_section` values."""
        self._attribute = as_dict.pop(self._MY_ATTRIBUTE_KEY, self._attribute)
        if self._attribute is None and default_section:
            self._attribute = default_section._attribute
        self._properties.update(as_dict)
        if default_section:
            self._properties = {**default_section.properties, **self._properties}

    @staticmethod
    def _configure(id: str, attribute: str, **properties):
        """Create, register and return a section (mirrors the Config.configure_* helpers)."""
        section = SectionForTest(id, attribute, **properties)
        Config._register(section)
        return Config.sections[SectionForTest.name][id]
|
from copy import copy
from typing import Any, Dict, Optional

from src.taipy.config import Config
from src.taipy.config._config import _Config
from src.taipy.config.common._config_blocker import _ConfigBlocker
from src.taipy.config.unique_section import UniqueSection


class UniqueSectionForTest(UniqueSection):
    """Minimal concrete `UniqueSection` (one instance per Config) for the test-suite."""

    name = "unique_section_name"  # registration key in Config.unique_sections
    _MY_ATTRIBUTE_KEY = "attribute"

    def __init__(self, attribute: Optional[str] = None, **properties):
        self._attribute = attribute
        super().__init__(**properties)

    def __copy__(self):
        return UniqueSectionForTest(self._attribute, **copy(self._properties))

    @property
    def attribute(self):
        # Resolve any "ENV[...]" template before returning the value.
        return self._replace_templates(self._attribute)

    @attribute.setter  # type: ignore
    @_ConfigBlocker._check()
    def attribute(self, val):
        self._attribute = val

    def _clean(self):
        """Reset the section to an empty state."""
        self._attribute = None
        self._properties.clear()

    def _to_dict(self):
        """Serialize to a plain dict, omitting the attribute when unset."""
        as_dict = {}
        if self._attribute is not None:
            as_dict[self._MY_ATTRIBUTE_KEY] = self._attribute
        as_dict.update(self._properties)
        return as_dict

    @classmethod
    def _from_dict(cls, as_dict: Dict[str, Any], id=None, config: Optional[_Config] = None):
        """Deserialize; `id` is ignored because the section is unique."""
        as_dict.pop(cls._ID_KEY, None)
        attribute = as_dict.pop(cls._MY_ATTRIBUTE_KEY, None)
        return UniqueSectionForTest(attribute=attribute, **as_dict)

    def _update(self, as_dict: Dict[str, Any], default_section=None):
        """Merge `as_dict` into this section, falling back to `default_section` values."""
        self._attribute = as_dict.pop(self._MY_ATTRIBUTE_KEY, self._attribute)
        if self._attribute is None and default_section:
            self._attribute = default_section._attribute
        self._properties.update(as_dict)
        if default_section:
            self._properties = {**default_section.properties, **self._properties}

    @staticmethod
    def _configure(attribute: str, **properties):
        """Create, register and return the unique section."""
        section = UniqueSectionForTest(attribute, **properties)
        Config._register(section)
        return Config.unique_sections[UniqueSectionForTest.name]
|
import pytest

from src.taipy.config.common._validate_id import _validate_id
from src.taipy.config.exceptions.exceptions import InvalidConfigurationId


class TestId:
    def test_validate_id(self):
        """_validate_id returns valid ids unchanged and raises for everything else."""
        assert _validate_id("foo") == "foo"

        # Malformed identifiers: leading digit, spaces, special chars, blanks.
        for malformed in ("1foo", "foo bar", "foo/foo$", "", " "):
            with pytest.raises(InvalidConfigurationId):
                _validate_id(malformed)

        # Python keywords are reserved.
        for python_keyword in ("class", "def", "with"):
            with pytest.raises(InvalidConfigurationId):
                _validate_id(python_keyword)

        # Taipy entity names are reserved too.
        for taipy_reserved in ("CYCLE", "SCENARIO", "SEQUENCE", "TASK", "DATANODE"):
            with pytest.raises(InvalidConfigurationId):
                _validate_id(taipy_reserved)
|
import pytest

from src.taipy.config.common.scope import Scope


def test_scope():
    """Scope ordering is GLOBAL > CYCLE > SCENARIO; comparing to non-Scope raises TypeError."""
    # Test __ge__ method
    assert Scope.GLOBAL >= Scope.GLOBAL
    assert Scope.GLOBAL >= Scope.CYCLE
    assert Scope.CYCLE >= Scope.CYCLE
    assert Scope.GLOBAL >= Scope.SCENARIO
    assert Scope.CYCLE >= Scope.SCENARIO
    assert Scope.SCENARIO >= Scope.SCENARIO
    with pytest.raises(TypeError):
        assert Scope.SCENARIO >= "testing string"

    # Test __gt__ method
    assert Scope.GLOBAL > Scope.CYCLE
    assert Scope.GLOBAL > Scope.SCENARIO
    assert Scope.CYCLE > Scope.SCENARIO
    with pytest.raises(TypeError):
        assert Scope.SCENARIO > "testing string"

    # Test __le__ method
    assert Scope.GLOBAL <= Scope.GLOBAL
    assert Scope.CYCLE <= Scope.GLOBAL
    assert Scope.CYCLE <= Scope.CYCLE
    assert Scope.SCENARIO <= Scope.GLOBAL
    assert Scope.SCENARIO <= Scope.CYCLE
    assert Scope.SCENARIO <= Scope.SCENARIO
    with pytest.raises(TypeError):
        assert Scope.SCENARIO <= "testing string"

    # Test __lt__ method
    # BUG FIX: "Scope.SCENARIO < Scope.GLOBAL" was asserted twice; the second
    # occurrence was clearly meant to cover the CYCLE < GLOBAL case (every
    # other operator section checks all three ordered pairs).
    assert Scope.SCENARIO < Scope.GLOBAL
    assert Scope.CYCLE < Scope.GLOBAL
    assert Scope.SCENARIO < Scope.CYCLE
    with pytest.raises(TypeError):
        assert Scope.SCENARIO < "testing string"
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
import argparse
import re
import sys

import pytest

from src.taipy._cli._base_cli import _CLI

# argparse renamed its "optional arguments:" section header to "options:"
# in Python 3.10, so the expected help text depends on the interpreter.
if sys.version_info >= (3, 10):
    argparse_options_str = "options:"
else:
    argparse_options_str = "optional arguments:"


def preprocess_stdout(stdout):
    """Collapse all whitespace so help-text comparisons ignore line wrapping."""
    stdout = stdout.replace("\n", " ").replace("\t", " ")
    return re.sub(" +", " ", stdout)


def remove_subparser(name: str):
    """Remove a subparser from argparse."""
    _CLI._sub_taipyparsers.pop(name, None)

    if _CLI._subparser_action:
        # Drop the parser itself and its entry in the choices listing.
        _CLI._subparser_action._name_parser_map.pop(name, None)

        for action in _CLI._subparser_action._choices_actions:
            if action.dest == name:
                _CLI._subparser_action._choices_actions.remove(action)


@pytest.fixture(autouse=True, scope="function")
def clean_argparser():
    # _CLI holds module-level state: rebuild the parser and clear every
    # registered subcommand/group before each test.
    _CLI._parser = argparse.ArgumentParser(conflict_handler="resolve")
    _CLI._arg_groups = {}
    subcommands = list(_CLI._sub_taipyparsers.keys())
    for subcommand in subcommands:
        remove_subparser(subcommand)

    yield


def test_subparser(capfd):
    # Two independent subcommands must each print their own help.
    subcommand_1 = _CLI._add_subparser("subcommand_1", help="subcommand_1 help")
    subcommand_1.add_argument("--foo", "-f", help="foo help")
    subcommand_1.add_argument("--bar", "-b", help="bar help")

    subcommand_2 = _CLI._add_subparser("subcommand_2", help="subcommand_2 help")
    subcommand_2.add_argument("--doo", "-d", help="doo help")
    subcommand_2.add_argument("--baz", "-z", help="baz help")

    expected_subcommand_1_help_message = f"""subcommand_1 [-h] [--foo FOO] [--bar BAR]

{argparse_options_str}
  -h, --help         show this help message and exit
  --foo FOO, -f FOO  foo help
  --bar BAR, -b BAR  bar help
"""
    subcommand_1.print_help()
    stdout, _ = capfd.readouterr()
    assert preprocess_stdout(expected_subcommand_1_help_message) in preprocess_stdout(stdout)

    expected_subcommand_2_help_message = f"""subcommand_2 [-h] [--doo DOO] [--baz BAZ]

{argparse_options_str}
  -h, --help         show this help message and exit
  --doo DOO, -d DOO  doo help
  --baz BAZ, -z BAZ  baz help
"""
    subcommand_2.print_help()
    stdout, _ = capfd.readouterr()
    assert preprocess_stdout(expected_subcommand_2_help_message) in preprocess_stdout(stdout)


def test_duplicate_subcommand():
    subcommand_1 = _CLI._add_subparser("subcommand_1", help="subcommand_1 help")
    subcommand_1.add_argument("--foo", "-f", help="foo help")

    subcommand_2 = _CLI._add_subparser("subcommand_1", help="subcommand_2 help")
    subcommand_2.add_argument("--bar", "-b", help="bar help")

    # The title of subcommand_2 is duplicated with subcommand_1, and therefore
    # there will be no new subcommand created
    assert len(_CLI._sub_taipyparsers) == 1


def test_groupparser(capfd):
    group_1 = _CLI._add_groupparser("group_1", "group_1 desc")
    group_1.add_argument("--foo", "-f", help="foo help")
    group_1.add_argument("--bar", "-b", help="bar help")

    group_2 = _CLI._add_groupparser("group_2", "group_2 desc")
    group_2.add_argument("--doo", "-d", help="doo help")
    group_2.add_argument("--baz", "-z", help="baz help")

    expected_help_message = """
group_1:
  group_1 desc

  --foo FOO, -f FOO  foo help
  --bar BAR, -b BAR  bar help

group_2:
  group_2 desc

  --doo DOO, -d DOO  doo help
  --baz BAZ, -z BAZ  baz help
""".strip()

    _CLI._parser.print_help()
    stdout, _ = capfd.readouterr()

    assert expected_help_message in stdout


def test_duplicate_group():
    group_1 = _CLI._add_groupparser("group_1", "group_1 desc")
    group_1.add_argument("--foo", "-f", help="foo help")

    group_2 = _CLI._add_groupparser("group_1", "group_2 desc")
    group_2.add_argument("--bar", "-b", help="bar help")

    # The title of group_2 is duplicated with group_1, and therefore
    # there will be no new group created
    assert len(_CLI._arg_groups) == 1
|
import pytest

from src.taipy.config.common._classproperty import _Classproperty


class TestClassProperty:
    def test_class_property(self):
        """A _Classproperty is readable on the class and on instances, but not callable."""

        class TestClass:
            @_Classproperty
            def test_property(cls):
                return "test_property"

        instance = TestClass()
        assert TestClass.test_property == "test_property"
        assert instance.test_property == "test_property"

        # Accessing it already returns the value, so calling that value fails.
        with pytest.raises(TypeError):
            TestClass.test_property()
|
import os
from unittest import mock

import pytest

from src.taipy.config.config import Config
from src.taipy.config.exceptions.exceptions import ConfigurationUpdateBlocked


def test_global_config_with_env_variable_value():
    """ENV[...] templated global properties resolve from environment variables."""
    with mock.patch.dict(os.environ, {"FOO": "bar", "BAZ": "qux"}):
        Config.configure_global_app(foo="ENV[FOO]", bar="ENV[BAZ]")
        assert Config.global_config.bar == "qux"
        assert Config.global_config.foo == "bar"


def test_default_global_app_config():
    """A fresh global config has no notification flag and no extra properties."""
    default_global_config = Config.global_config
    assert default_global_config is not None
    assert not default_global_config.notification
    assert len(default_global_config.properties) == 0


def test_block_update_global_app_config():
    """Once `block_update` was called, any attempt to change the global config raises."""
    Config.block_update()

    with pytest.raises(ConfigurationUpdateBlocked):
        Config.configure_global_app(foo="bar")

    with pytest.raises(ConfigurationUpdateBlocked):
        Config.global_config.properties = {"foo": "bar"}

    # The global config must have been left untouched (its defaults).
    assert len(Config.global_config.properties) == 0
    assert Config.global_config.foo is None
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
import argparse
import re
from unittest.mock import patch

import pytest

from src.taipy._entrypoint import _entrypoint
from taipy._cli._base_cli import _CLI


def preprocess_stdout(stdout):
    """Collapse all whitespace so help-text comparisons ignore line wrapping."""
    stdout = stdout.replace("\n", " ").replace("\t", " ")
    return re.sub(" +", " ", stdout)


def remove_subparser(name: str):
    """Remove a subparser from the _CLI class."""
    _CLI._sub_taipyparsers.pop(name, None)

    if _CLI._subparser_action:
        _CLI._subparser_action._name_parser_map.pop(name, None)

        for action in _CLI._subparser_action._choices_actions:
            if action.dest == name:
                _CLI._subparser_action._choices_actions.remove(action)


@pytest.fixture(autouse=True, scope="function")
def clean_argparser():
    # _CLI holds module-level state: rebuild the parser and drop every
    # registered subcommand/group before each test runs.
    _CLI._parser = argparse.ArgumentParser(conflict_handler="resolve")
    _CLI._subparser_action = None
    _CLI._arg_groups = {}
    subcommands = list(_CLI._sub_taipyparsers.keys())
    for subcommand in subcommands:
        remove_subparser(subcommand)

    yield


# Expected top-level help; compared through preprocess_stdout, so only the
# token sequence matters, not the exact wrapping/indentation.
expected_help = """{run,manage-versions,create,migrate,help} ...

positional arguments:
  {run,manage-versions,create,migrate,help}
    run              Run a Taipy application.
    manage-versions  Taipy version control system.
    create           Create a new Taipy application.
    migrate          Migrate entities created from old taipy versions to be compatible with the current taipy version. The entity migration should be performed only after updating taipy code to the current version.
    help             Show the Taipy help message.
"""


def test_taipy_command_alone_print_help(capsys):
    # No arguments at all: the entry point prints the general help.
    with patch("sys.argv", ["prog"]):
        _entrypoint()
        out, _ = capsys.readouterr()
        assert preprocess_stdout(expected_help) in preprocess_stdout(out)


def test_taipy_help_command(capsys):
    # "help" prints the general help then exits.
    with patch("sys.argv", ["prog", "help"]):
        with pytest.raises(SystemExit):
            _entrypoint()
        out, _ = capsys.readouterr()
        assert preprocess_stdout(expected_help) in preprocess_stdout(out)


def test_help_non_existed_command(caplog):
    # "help <unknown>" logs an error and exits.
    with patch("sys.argv", ["prog", "help", "non_existed_command"]):
        with pytest.raises(SystemExit):
            _entrypoint()
        assert "non_existed_command is not a valid command." in caplog.text


def test_taipy_create_help(capsys):
    # "help create" prints the create subcommand's usage then exits.
    expected_help = "create [-h] [--template"

    with patch("sys.argv", ["prog", "help", "create"]):
        with pytest.raises(SystemExit):
            _entrypoint()
        out, _ = capsys.readouterr()
        assert preprocess_stdout(expected_help) in preprocess_stdout(out)
|
"""Shared pytest fixtures for the taipy-gui test suite."""

import os
import sys
from importlib.util import find_spec
from pathlib import Path

import pandas as pd  # type: ignore
import pytest
from flask import Flask, g


def pytest_configure(config):
    """Alias the in-repo ``src.taipy.gui`` package under its public ``taipy.gui`` name.

    Only runs when the tests execute against the source tree (``src.taipy`` is
    importable) and no installed ``taipy.gui`` package would shadow it. Each
    required submodule is imported once and registered in ``sys.modules`` so
    that ``import taipy.gui...`` statements in the tests resolve to the local
    sources.
    """
    if (find_spec("src") and find_spec("src.taipy")) and (not find_spec("taipy") or not find_spec("taipy.gui")):
        import importlib

        # Dotted suffixes (relative to the gui package) that the tests import
        # under the public name; "" stands for the package itself.
        for suffix in (
            "",
            "._renderers.builder",
            "._warnings",
            ".builder",
            ".data.decimator.lttb",
            ".data.decimator.minmax",
            ".data.decimator.rdp",
            ".data.decimator.scatter_decimator",
            ".data.utils",
            ".extension",
            ".utils._map_dict",
            ".utils._variable_directory",
            ".utils.expr_var_name",
        ):
            sys.modules[f"taipy.gui{suffix}"] = importlib.import_module(f"src.taipy.gui{suffix}")


# Shared CSV fixture, loaded once at collection time; pandas accepts Path objects.
csv = pd.read_csv(Path(__file__).parent.resolve() / "current-covid-patients-hospital.csv", parse_dates=["Day"])
small_dataframe_data = {"name": ["A", "B", "C"], "value": [1, 2, 3]}


@pytest.fixture(scope="function")
def csvdata():
    """Yield the shared covid-hospital DataFrame."""
    yield csv


@pytest.fixture(scope="function")
def small_dataframe():
    """Yield a tiny column-dict suitable for building a small DataFrame."""
    yield small_dataframe_data


@pytest.fixture(scope="function")
def gui(helpers):
    """Yield a fresh Gui instance; stop it and reset shared state afterwards."""
    from taipy.gui import Gui

    gui = Gui()
    yield gui
    # Delete Gui instance and state of some classes after each test
    gui.stop()
    helpers.test_cleanup()


@pytest.fixture
def helpers():
    """Expose the suite's helper utilities class."""
    from .helpers import Helpers

    return Helpers


@pytest.fixture
def test_client():
    """Yield a Flask test client with an app context and a client id set in ``g``."""
    flask_app = Flask("Test App")

    # Create a test client using the Flask application configured for testing
    with flask_app.test_client() as testing_client:
        # Establish an application context
        with flask_app.app_context():
            g.client_id = "test client id"
            yield testing_client  # this is where the testing happens!
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
import inspect from taipy.gui import Gui, Html def test_simple_html(gui: Gui, helpers): # html_string = "<html><head></head><body><h1>test</h1><taipy:field value=\"test\"/></body></html>" html_string = "<html><head></head><body><h1>test</h1></body></html>" gui._set_frame(inspect.currentframe()) gui.add_page("test", Html(html_string)) gui.run(run_server=False) client = gui._server.test_client() jsx = client.get("/taipy-jsx/test").json["jsx"] assert jsx == "<h1>test</h1>"
|
import pytest from taipy.gui import Gui def test_invalid_control_name(gui: Gui, helpers): md_string = "<|invalid|invalid|>" expected_list = ["INVALID SYNTAX - Control is 'invalid'"] helpers.test_control_md(gui, md_string, expected_list) def test_value_to_negated_property(gui: Gui, helpers): md_string = "<|button|not active=true|>" expected_list = ["<Button", "active={false}"] helpers.test_control_md(gui, md_string, expected_list) def test_invalid_property_value(gui: Gui, helpers): md_string = "<|button|let's try that!|>" expected_list = ["<Button", 'label="<Empty>"'] helpers.test_control_md(gui, md_string, expected_list) def test_unclosed_block(gui: Gui, helpers): md_string = "<|" expected_list = ["<Part", "</Part>"] helpers.test_control_md(gui, md_string, expected_list) def test_opening_unknown_block(gui: Gui, helpers): md_string = "<|unknown" expected_list = ["<Part", 'className="unknown"'] helpers.test_control_md(gui, md_string, expected_list) def test_closing_unknown_block(gui: Gui, helpers): md_string = "|>" expected_list = ["<div>", "No matching opened tag", "</div>"] helpers.test_control_md(gui, md_string, expected_list) def test_md_link(gui: Gui, helpers): md_string = "[content](link)" expected_list = ["<a", 'href="link"', "content</a>"] helpers.test_control_md(gui, md_string, expected_list)
|
import pytest from taipy.gui.utils._bindings import _Bindings def test_exception_binding_twice(gui, test_client): bind = _Bindings(gui) bind._new_scopes() bind._bind("x", 10) with pytest.raises(ValueError): bind._bind("x", 10) def test_exception_binding_invalid_name(gui): bind = _Bindings(gui) bind._new_scopes() with pytest.raises(ValueError): bind._bind("invalid identifier", 10)
|
from email import message import pytest from taipy.gui._page import _Page def test_exception_page(gui): page = _Page() page._route = "page1" with pytest.raises(RuntimeError, match="Can't render page page1: no renderer found"): page.render(gui)
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.