text
stringlengths
0
105k
import datetime as dt
import os
import pathlib

from taipy.config import Config, Frequency, Scope

from .complex_application_algos import (
    average,
    create_metrics,
    create_results,
    create_train_test_data,
    divide,
    forecast,
    forecast_baseline,
    mult,
    mult_by_2,
    preprocess_dataset,
    return_a_number,
    roc_from_scratch,
    subtract,
    sum,
    train_model,
    train_model_baseline,
)

# Directory containing this module; every shared test-case file is resolved
# relative to it (hoisted so it is computed once instead of per path).
_HERE = pathlib.Path(__file__).parent.resolve()


def build_skipped_jobs_config():
    """Build a two-task chain (input -> intermediate -> output).

    Both tasks are skippable, so a second submission of an unchanged
    scenario should skip them.

    Returns:
        The scenario config for the chain.
    """
    input_config = Config.configure_data_node(id="input")
    intermediate_config = Config.configure_data_node(id="intermediate")
    output_config = Config.configure_data_node(id="output")
    task_config_1 = Config.configure_task("first", mult_by_2, input_config, intermediate_config, skippable=True)
    task_config_2 = Config.configure_task("second", mult_by_2, intermediate_config, output_config, skippable=True)
    scenario_config = Config.configure_scenario("scenario", [task_config_1, task_config_2])
    return scenario_config


def build_complex_required_file_paths():
    """Return the six data-sample file paths used by build_complex_config().

    Returns:
        tuple: (csv_in, excel_in, csv_sum, excel_sum, excel_out, csv_out).
    """
    csv_path_inp = os.path.join(_HERE, "shared_test_cases/data_sample/example.csv")
    excel_path_inp = os.path.join(_HERE, "shared_test_cases/data_sample/example.xlsx")
    csv_path_sum = os.path.join(_HERE, "shared_test_cases/data_sample/sum.csv")
    excel_path_sum = os.path.join(_HERE, "shared_test_cases/data_sample/sum.xlsx")
    excel_path_out = os.path.join(_HERE, "shared_test_cases/data_sample/res.xlsx")
    csv_path_out = os.path.join(_HERE, "shared_test_cases/data_sample/res.csv")
    return (
        csv_path_inp,
        excel_path_inp,
        csv_path_sum,
        excel_path_sum,
        excel_path_out,
        csv_path_out,
    )


def build_complex_config():
    """Build a scenario mixing CSV/Excel/pickle data nodes and arithmetic tasks.

    The graph sums the CSV and Excel inputs, subtracts the two sums,
    multiplies/divides by a placeholder constant, and averages the results
    into CSV and Excel outputs.

    Returns:
        The scenario config for the graph.
    """
    (
        csv_path_inp,
        excel_path_inp,
        csv_path_sum,
        excel_path_sum,
        excel_path_out,
        csv_path_out,
    ) = build_complex_required_file_paths()

    # Input data nodes (two logical readers per input file).
    inp_csv_dn_1 = Config.configure_csv_data_node("dn_csv_in_1", default_path=csv_path_inp)
    inp_csv_dn_2 = Config.configure_csv_data_node("dn_csv_in_2", default_path=csv_path_inp)
    inp_excel_dn_1 = Config.configure_excel_data_node("dn_excel_in_1", default_path=excel_path_inp, sheet_name="Sheet1")
    inp_excel_dn_2 = Config.configure_excel_data_node("dn_excel_in_2", default_path=excel_path_inp, sheet_name="Sheet1")
    placeholder = Config.configure_data_node(id="dn_placeholder", default_data=10)

    # Intermediate data nodes.
    dn_csv_sum = Config.configure_csv_data_node("dn_sum_csv", default_path=csv_path_sum)
    dn_excel_sum = Config.configure_excel_data_node("dn_sum_excel", default_path=excel_path_sum, sheet_name="Sheet1")
    dn_subtract_csv_excel = Config.configure_pickle_data_node("dn_subtract_csv_excel")
    dn_mult = Config.configure_pickle_data_node("dn_mult")
    dn_div = Config.configure_pickle_data_node("dn_div")

    # Output data nodes.
    output_csv_dn = Config.configure_csv_data_node("csv_out", csv_path_out)
    output_excel_dn = Config.configure_excel_data_node("excel_out", excel_path_out)

    # Tasks wiring the graph together.
    task_print_csv = Config.configure_task("task_print_csv", print, input=inp_csv_dn_1)
    task_print_excel = Config.configure_task("task_print_excel", print, input=inp_excel_dn_1)
    task_sum_csv = Config.configure_task("task_sum_csv", sum, input=[inp_csv_dn_2, inp_csv_dn_1], output=dn_csv_sum)
    task_sum_excel = Config.configure_task(
        "task_sum_excel",
        sum,
        input=[inp_excel_dn_2, inp_excel_dn_1],
        output=dn_excel_sum,
    )
    task_subtract_csv_excel = Config.configure_task(
        "task_subtract_csv_excel",
        subtract,
        input=[dn_csv_sum, dn_excel_sum],
        output=dn_subtract_csv_excel,
    )
    task_insert_placeholder = Config.configure_task("task_insert_placeholder", return_a_number, output=[placeholder])
    task_mult = Config.configure_task(
        "task_mult_by_placeholder",
        mult,
        input=[dn_subtract_csv_excel, placeholder],
        output=dn_mult,
    )
    task_div = Config.configure_task("task_div_by_placeholder", divide, input=[dn_mult, placeholder], output=dn_div)
    task_avg_div = Config.configure_task("task_avg_div", average, input=dn_div, output=output_csv_dn)
    task_avg_mult = Config.configure_task("task_avg_mult", average, input=dn_mult, output=output_excel_dn)

    scenario_config = Config.configure_scenario(
        "scenario",
        [
            task_print_csv,
            task_print_excel,
            task_sum_csv,
            task_sum_excel,
            task_subtract_csv_excel,
            task_insert_placeholder,
            task_mult,
            task_div,
            task_avg_div,
            task_avg_mult,
        ],
    )
    return scenario_config


def build_churn_classification_required_file_paths():
    """Return the path of the churn-classification sample CSV."""
    csv_path_inp = os.path.join(_HERE, "shared_test_cases/data_sample/churn.csv")
    return csv_path_inp


def build_churn_classification_config():
    """Build the churn-classification scenario config.

    Pipeline: preprocess -> train/test split -> (model, baseline model) ->
    forecasts -> ROC / metrics / results for both the model and the baseline.

    Returns:
        The weekly scenario config.
    """
    csv_path_inp = build_churn_classification_required_file_paths()

    # --- Data nodes ------------------------------------------------------
    # "path" is used for the csv storage type (vs "file_path" for pickle).
    initial_dataset = Config.configure_data_node(
        id="initial_dataset", path=csv_path_inp, storage_type="csv", has_header=True
    )
    date_cfg = Config.configure_data_node(id="date", default_data="None")
    preprocessed_dataset = Config.configure_data_node(
        id="preprocessed_dataset", cacheable=True, validity_period=dt.timedelta(days=1)
    )
    train_dataset = Config.configure_data_node(id="train_dataset", cacheable=True, validity_period=dt.timedelta(days=1))
    trained_model = Config.configure_data_node(id="trained_model", cacheable=True, validity_period=dt.timedelta(days=1))
    trained_model_baseline = Config.configure_data_node(
        id="trained_model_baseline",
        cacheable=True,
        validity_period=dt.timedelta(days=1),
    )
    test_dataset = Config.configure_data_node(id="test_dataset", cacheable=True, validity_period=dt.timedelta(days=1))
    forecast_baseline_dataset = Config.configure_data_node(
        id="forecast_baseline_dataset",
        scope=Scope.SCENARIO,
    )
    forecast_test_dataset = Config.configure_data_node(
        id="forecast_test_dataset",
        scope=Scope.SCENARIO,
    )
    # NOTE: roc_data/score_auc/metrics/results are each written by both the
    # model task and the baseline task; the last task to run wins.
    roc_data = Config.configure_data_node(id="roc_data", scope=Scope.SCENARIO)
    score_auc = Config.configure_data_node(id="score_auc", scope=Scope.SCENARIO)
    metrics = Config.configure_data_node(id="metrics", scope=Scope.SCENARIO)
    feature_importance_cfg = Config.configure_data_node(id="feature_importance", scope=Scope.SCENARIO)
    results = Config.configure_data_node(id="results", scope=Scope.SCENARIO)

    # --- Tasks -----------------------------------------------------------
    # initial_dataset --> preprocess dataset --> preprocessed_dataset
    task_preprocess_dataset = Config.configure_task(
        id="preprocess_dataset",
        input=[initial_dataset, date_cfg],
        function=preprocess_dataset,
        output=preprocessed_dataset,
    )
    # preprocessed_dataset --> split --> train_dataset, test_dataset
    task_create_train_test = Config.configure_task(
        id="create_train_and_test_data",
        input=preprocessed_dataset,
        function=create_train_test_data,
        output=[train_dataset, test_dataset],
    )
    # train_dataset --> train --> trained_model (+ feature importance)
    task_train_model = Config.configure_task(
        id="train_model",
        input=train_dataset,
        function=train_model,
        output=[trained_model, feature_importance_cfg],
    )
    # train_dataset --> train baseline --> trained_model_baseline
    task_train_model_baseline = Config.configure_task(
        id="train_model_baseline",
        input=train_dataset,
        function=train_model_baseline,
        output=[trained_model_baseline, feature_importance_cfg],
    )
    # test_dataset --> forecast --> forecast_test_dataset
    task_forecast = Config.configure_task(
        id="predict_the_test_data",
        input=[test_dataset, trained_model],
        function=forecast,
        output=forecast_test_dataset,
    )
    # test_dataset --> forecast baseline --> forecast_baseline_dataset
    task_forecast_baseline = Config.configure_task(
        id="predict_of_baseline",
        input=[test_dataset, trained_model_baseline],
        function=forecast_baseline,
        output=forecast_baseline_dataset,
    )
    task_roc = Config.configure_task(
        id="task_roc",
        input=[forecast_test_dataset, test_dataset],
        function=roc_from_scratch,
        output=[roc_data, score_auc],
    )
    task_roc_baseline = Config.configure_task(
        id="task_roc_baseline",
        input=[forecast_baseline_dataset, test_dataset],
        function=roc_from_scratch,
        output=[roc_data, score_auc],
    )
    task_create_metrics = Config.configure_task(
        id="task_create_metrics",
        input=[forecast_test_dataset, test_dataset],
        function=create_metrics,
        output=metrics,
    )
    task_create_results = Config.configure_task(
        id="task_create_results",
        input=[forecast_test_dataset, test_dataset],
        function=create_results,
        output=results,
    )
    task_create_baseline_metrics = Config.configure_task(
        id="task_create_baseline_metrics",
        input=[forecast_baseline_dataset, test_dataset],
        function=create_metrics,
        output=metrics,
    )
    task_create_baseline_results = Config.configure_task(
        id="task_create_baseline_results",
        input=[forecast_baseline_dataset, test_dataset],
        function=create_results,
        output=results,
    )

    # --- Scenario --------------------------------------------------------
    # FIX: the original task list included task_create_metrics and
    # task_create_results a second time (duplicates); each task config is
    # now listed exactly once.
    scenario_cfg = Config.configure_scenario(
        id="churn_classification",
        task_configs=[
            task_preprocess_dataset,
            task_create_train_test,
            task_train_model_baseline,
            task_train_model,
            task_forecast,
            task_roc,
            task_create_metrics,
            task_create_results,
            task_forecast_baseline,
            task_roc_baseline,
            task_create_baseline_metrics,
            task_create_baseline_results,
        ],
        frequency=Frequency.WEEKLY,
    )
    return scenario_cfg
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
import random import time from datetime import datetime from typing import Dict, List import pandas as pd def failing_job(historical_daily_temperature: pd.DataFrame): time.sleep(2) print("----- Prepared to raise exception -----") raise Exception def predict(model, dates: List[datetime]) -> Dict[str, List[float]]: res = [t + random.uniform(0, 3) for t in model.forecast(len(dates))] return {"result": res}
from datetime import datetime from taipy import Frequency, Scope from taipy.config.config import Config from .algorithms import failing_job, predict historical_data_set = Config.configure_csv_data_node( id="historical_data_set", path="tests//shared_test_cases/arima/daily-min-temperatures.csv", scope=Scope.GLOBAL ) arima_model = Config.configure_data_node(id="arima_model") dates_to_forecast = Config.configure_data_node( id="dates_to_forecast", scope=Scope.SCENARIO, default_data=[datetime(1991, 1, 1).isoformat()] ) forecast_values = Config.configure_excel_data_node( id="forecast_values", has_header=False, path="tests//shared_test_cases/arima/res.xlsx" ) arima_fail_algo = Config.configure_task( id="arima_training", input=historical_data_set, function=failing_job, output=arima_model ) arima_scoring_algo = Config.configure_task( id="arima_scoring", input=[arima_model, dates_to_forecast], function=predict, output=forecast_values ) arima_sequence = Config.configure_sequence(id="arima_sequences", task_configs=[arima_fail_algo, arima_scoring_algo]) arima_scenario_config = Config.configure_scenario( id="Arima_scenario", sequence_configs=[arima_sequence], frequency=Frequency.DAILY )
from .algorithms import * from .config import *
import random import time from datetime import datetime from typing import Dict, List import pandas as pd from statsmodels.tsa.arima.model import ARIMA def train(historical_daily_temperature: pd.DataFrame): print("----- Started training -----") time.sleep(2) for _ in range(2): print("----- Model is in training -----") return ARIMA(endog=historical_daily_temperature["Temp"].to_numpy(), order=(1, 1, 0)).fit() def predict(model, dates: List[datetime]) -> Dict[str, List]: res = [t + random.uniform(0, 3) for t in model.forecast(len(dates))] return {"result": res}
from datetime import datetime from taipy.config import Frequency, Scope from taipy.config.config import Config from .algorithms import predict, train def build_arima_config(): CSV_INPUT_PATH = "tests/shared_test_cases/arima/daily-min-temperatures.csv" XLSX_OUTPUT_PATH = "tests/shared_test_cases/arima/res.xlsx" historical_data_set = Config.configure_csv_data_node(id="historical_data_set", path=CSV_INPUT_PATH, scope=Scope.GLOBAL) arima_model = Config.configure_data_node(id="arima_model") dates_to_forecast = Config.configure_data_node( id="dates_to_forecast", scope=Scope.SCENARIO, default_data=[datetime(1991, 1, 1).isoformat()] ) forecast_values = Config.configure_excel_data_node(id="forecast_values", has_header=False, path=XLSX_OUTPUT_PATH) arima_training_algo = Config.configure_task( id="arima_training", input=historical_data_set, function=train, output=arima_model ) arima_scoring_algo = Config.configure_task( id="arima_scoring", input=[arima_model, dates_to_forecast], function=predict, output=forecast_values ) arima_scenario_config = Config.configure_scenario( id="Arima_scenario", task_configs=[arima_training_algo, arima_scoring_algo], frequency=Frequency.DAILY ) return arima_scenario_config
from .algorithms import * from .config import *
import pandas as pd def algorithm(df: pd.DataFrame) -> pd.DataFrame: return df
import dataclasses

from taipy.config.common.frequency import Frequency
from taipy.config.config import Config

from .algorithms import algorithm

CSV_INPUT_PATH = "tests/shared_test_cases/csv_files/input_1000.csv"
CSV_OUTPUT_PATH = "tests/shared_test_cases/csv_files/output_1000.csv"
ROW_COUNT = 1000


@dataclasses.dataclass
class Row:
    """One CSV record; values are coerced to the annotated types on init."""

    id: int
    age: int
    rating: float

    def __post_init__(self):
        # Coerce each attribute (e.g. "3" -> 3) so CSV strings become numbers.
        for spec in dataclasses.fields(self):
            current = getattr(self, spec.name)
            if not isinstance(current, spec.type):
                setattr(self, spec.name, spec.type(current))


Config.unblock_update()

# --- Pandas exposed type (default) ----------------------------------------
input_dataset_cfg = Config.configure_csv_data_node(id="input_csv_dataset_1", path=CSV_INPUT_PATH, has_header=True)
output_dataset_cfg = Config.configure_csv_data_node(id="output_csv_dataset_1", path=CSV_OUTPUT_PATH, has_header=True)
task_cfg = Config.configure_task(id="t1", input=input_dataset_cfg, function=algorithm, output=output_dataset_cfg)
scenario_cfg = Config.configure_scenario(id="s1", task_configs=[task_cfg], frequency=Frequency.DAILY)

# --- Custom class exposed type --------------------------------------------
input_dataset_cfg_2 = Config.configure_csv_data_node(
    id="input_csv_dataset_2", path=CSV_INPUT_PATH, has_header=True, exposed_type=Row
)
output_dataset_cfg_2 = Config.configure_csv_data_node(
    id="output_csv_dataset_2", path=CSV_OUTPUT_PATH, has_header=True, exposed_type=Row
)
task_cfg_2 = Config.configure_task(id="t2", input=input_dataset_cfg_2, function=algorithm, output=output_dataset_cfg_2)
scenario_cfg_2 = Config.configure_scenario(id="s2", task_configs=[task_cfg_2], frequency=Frequency.DAILY)

# --- Numpy exposed type ---------------------------------------------------
input_dataset_cfg_3 = Config.configure_csv_data_node(
    id="input_csv_dataset_3", path=CSV_INPUT_PATH, has_header=True, exposed_type="numpy"
)
output_dataset_cfg_3 = Config.configure_csv_data_node(
    id="output_csv_dataset_3", path=CSV_OUTPUT_PATH, has_header=True, exposed_type="numpy"
)
task_cfg_3 = Config.configure_task(id="t3", input=input_dataset_cfg_3, function=algorithm, output=output_dataset_cfg_3)
scenario_cfg_3 = Config.configure_scenario(id="s3", task_configs=[task_cfg_3], frequency=Frequency.DAILY)

# --- Modin exposed type ---------------------------------------------------
input_dataset_cfg_4 = Config.configure_csv_data_node(
    id="input_csv_dataset_4", path=CSV_INPUT_PATH, has_header=True, exposed_type="modin"
)
output_dataset_cfg_4 = Config.configure_csv_data_node(
    id="output_csv_dataset_4", path=CSV_OUTPUT_PATH, has_header=True, exposed_type="modin"
)
task_cfg_4 = Config.configure_task(id="t4", input=input_dataset_cfg_4, function=algorithm, output=output_dataset_cfg_4)
scenario_cfg_4 = Config.configure_scenario(id="s4", task_configs=[task_cfg_4], frequency=Frequency.DAILY)
from .algorithms import * from .config import *
import pandas as pd def algorithm(df: pd.DataFrame) -> pd.DataFrame: return df
import dataclasses

from taipy.config.common.frequency import Frequency
from taipy.config.config import Config

from .algorithms import algorithm

EXCEL_INPUT_PATH = "tests/shared_test_cases/multi_excel_sheets/input_1000_multi_sheets.xlsx"
EXCEL_OUTPUT_PATH = "tests/shared_test_cases/multi_excel_sheets/output_1000.xlsx"
ROW_COUNT = 1000
SHEET_NAMES = ["Sheet 0", "Sheet 1", "Sheet 2", "Sheet 3", "Sheet 4", "Sheet 5"]


@dataclasses.dataclass
class Row:
    """One spreadsheet record; values are coerced to the annotated types."""

    id: int
    age: int
    rating: float

    def __post_init__(self):
        # Coerce each attribute to its annotated type if needed.
        for spec in dataclasses.fields(self):
            current = getattr(self, spec.name)
            if not isinstance(current, spec.type):
                setattr(self, spec.name, spec.type(current))


Config.unblock_update()

# --- Pandas exposed type (default) ----------------------------------------
input_dataset_cfg_1 = Config.configure_excel_data_node(
    id="input_excel_multi_sheet_dataset_1", path=EXCEL_INPUT_PATH, has_header=True, sheet_name=SHEET_NAMES
)
output_dataset_cfg_1 = Config.configure_excel_data_node(
    id="output_excel_multi_sheet_dataset_1", path=EXCEL_OUTPUT_PATH, has_header=True, sheet_name=SHEET_NAMES
)
task_cfg = Config.configure_task(id="t1", input=input_dataset_cfg_1, function=algorithm, output=output_dataset_cfg_1)
scenario_cfg = Config.configure_scenario(id="s1", task_configs=[task_cfg], frequency=Frequency.DAILY)

# --- Custom class exposed type --------------------------------------------
input_dataset_cfg_2 = Config.configure_excel_data_node(
    id="input_excel_multi_sheet_dataset_2",
    path=EXCEL_INPUT_PATH,
    has_header=True,
    exposed_type=Row,
    sheet_name=SHEET_NAMES,
)
output_dataset_cfg_2 = Config.configure_excel_data_node(
    id="output_excel_multi_sheet_dataset_2",
    path=EXCEL_OUTPUT_PATH,
    has_header=True,
    exposed_type=Row,
    sheet_name=SHEET_NAMES,
)
task_cfg_2 = Config.configure_task(id="t2", input=input_dataset_cfg_2, function=algorithm, output=output_dataset_cfg_2)
scenario_cfg_2 = Config.configure_scenario(id="s2", task_configs=[task_cfg_2], frequency=Frequency.DAILY)

# --- Numpy exposed type ---------------------------------------------------
input_dataset_cfg_3 = Config.configure_excel_data_node(
    id="input_excel_multi_sheet_dataset_3",
    path=EXCEL_INPUT_PATH,
    has_header=True,
    exposed_type="numpy",
    sheet_name=SHEET_NAMES,
)
output_dataset_cfg_3 = Config.configure_excel_data_node(
    id="output_excel_multi_sheet_dataset_3",
    path=EXCEL_OUTPUT_PATH,
    has_header=True,
    exposed_type="numpy",
    sheet_name=SHEET_NAMES,
)
task_cfg_3 = Config.configure_task(id="t3", input=input_dataset_cfg_3, function=algorithm, output=output_dataset_cfg_3)
scenario_cfg_3 = Config.configure_scenario(id="s3", task_configs=[task_cfg_3], frequency=Frequency.DAILY)

# --- Modin exposed type ---------------------------------------------------
input_dataset_cfg_4 = Config.configure_excel_data_node(
    id="input_excel_multi_sheet_dataset_4",
    path=EXCEL_INPUT_PATH,
    has_header=True,
    exposed_type="modin",
    sheet_name=SHEET_NAMES,
)
output_dataset_cfg_4 = Config.configure_excel_data_node(
    id="output_excel_multi_sheet_dataset_4",
    path=EXCEL_OUTPUT_PATH,
    has_header=True,
    exposed_type="modin",
    sheet_name=SHEET_NAMES,
)
task_cfg_4 = Config.configure_task(id="t4", input=input_dataset_cfg_4, function=algorithm, output=output_dataset_cfg_4)
scenario_cfg_4 = Config.configure_scenario(id="s4", task_configs=[task_cfg_4], frequency=Frequency.DAILY)
from .algorithms import * from .config import *
import pandas as pd def algorithm(df: pd.DataFrame) -> pd.DataFrame: return df
import dataclasses

from taipy.config.common.frequency import Frequency
from taipy.config.config import Config

from .algorithms import algorithm

EXCEL_SINGLE_SHEET_INPUT_PATH = "tests/shared_test_cases/single_excel_sheet/input_1000.xlsx"
EXCEL_SINGLE_SHEET_OUTPUT_PATH = "tests/shared_test_cases/single_excel_sheet/output_1000.xlsx"
ROW_COUNT = 1000
SHEET_NAME = "Sheet1"


@dataclasses.dataclass
class Row:
    """One spreadsheet record; values are coerced to the annotated types."""

    id: int
    age: int
    rating: float

    def __post_init__(self):
        # Coerce each attribute to its annotated type if needed.
        for spec in dataclasses.fields(self):
            current = getattr(self, spec.name)
            if not isinstance(current, spec.type):
                setattr(self, spec.name, spec.type(current))


Config.unblock_update()

# --- Pandas exposed type (default) ----------------------------------------
input_dataset_cfg_1 = Config.configure_excel_data_node(
    id="input_excel_single_sheet_dataset_1", path=EXCEL_SINGLE_SHEET_INPUT_PATH, has_header=True, sheet_name=SHEET_NAME
)
output_dataset_cfg_1 = Config.configure_excel_data_node(
    id="output_excel_single_sheet_dataset_1",
    path=EXCEL_SINGLE_SHEET_OUTPUT_PATH,
    has_header=True,
    sheet_name=SHEET_NAME,
)
task_cfg = Config.configure_task(id="t1", input=input_dataset_cfg_1, function=algorithm, output=output_dataset_cfg_1)
scenario_cfg = Config.configure_scenario(id="s1", task_configs=[task_cfg], frequency=Frequency.DAILY)

# --- Custom class exposed type --------------------------------------------
input_dataset_cfg_2 = Config.configure_excel_data_node(
    id="input_excel_single_sheet_dataset_2",
    path=EXCEL_SINGLE_SHEET_INPUT_PATH,
    has_header=True,
    exposed_type=Row,
    sheet_name=SHEET_NAME,
)
output_dataset_cfg_2 = Config.configure_excel_data_node(
    id="output_excel_single_sheet_dataset_2",
    path=EXCEL_SINGLE_SHEET_OUTPUT_PATH,
    has_header=True,
    exposed_type=Row,
    sheet_name=SHEET_NAME,
)
task_cfg_2 = Config.configure_task(id="t2", input=input_dataset_cfg_2, function=algorithm, output=output_dataset_cfg_2)
scenario_cfg_2 = Config.configure_scenario(id="s2", task_configs=[task_cfg_2], frequency=Frequency.DAILY)

# --- Numpy exposed type ---------------------------------------------------
input_dataset_cfg_3 = Config.configure_excel_data_node(
    id="input_excel_single_sheet_dataset_3",
    path=EXCEL_SINGLE_SHEET_INPUT_PATH,
    has_header=True,
    exposed_type="numpy",
    sheet_name=SHEET_NAME,
)
output_dataset_cfg_3 = Config.configure_excel_data_node(
    id="output_excel_single_sheet_dataset_3",
    path=EXCEL_SINGLE_SHEET_OUTPUT_PATH,
    has_header=True,
    exposed_type="numpy",
    sheet_name=SHEET_NAME,
)
task_cfg_3 = Config.configure_task(id="t3", input=input_dataset_cfg_3, function=algorithm, output=output_dataset_cfg_3)
scenario_cfg_3 = Config.configure_scenario(id="s3", task_configs=[task_cfg_3], frequency=Frequency.DAILY)

# --- Modin exposed type ---------------------------------------------------
input_dataset_cfg_4 = Config.configure_excel_data_node(
    id="input_excel_single_sheet_dataset_4",
    path=EXCEL_SINGLE_SHEET_INPUT_PATH,
    has_header=True,
    exposed_type="modin",
    sheet_name=SHEET_NAME,
)
output_dataset_cfg_4 = Config.configure_excel_data_node(
    id="output_excel_single_sheet_dataset_4",
    path=EXCEL_SINGLE_SHEET_OUTPUT_PATH,
    has_header=True,
    exposed_type="modin",
    sheet_name=SHEET_NAME,
)
task_cfg_4 = Config.configure_task(id="t4", input=input_dataset_cfg_4, function=algorithm, output=output_dataset_cfg_4)
scenario_cfg_4 = Config.configure_scenario(id="s4", task_configs=[task_cfg_4], frequency=Frequency.DAILY)
from .algorithms import * from .config import *
def algorithm(data): return data
import dataclasses from taipy.config.common.frequency import Frequency from taipy.config.config import Config from .algorithms import algorithm PICKLE_DICT_INPUT_PATH = "tests/shared_test_cases/pickle_files/input_dict_1000.p" PICKLE_DICT_OUTPUT_PATH = "tests/shared_test_cases/pickle_files/output_dict_1000.p" PICKLE_LIST_INPUT_PATH = "tests/shared_test_cases/pickle_files/input_object_1000.p" PICKLE_LIST_OUTPUT_PATH = "tests/shared_test_cases/pickle_files/output_object_1000.p" ROW_COUNT = 1000 @dataclasses.dataclass class Row: id: int age: int rating: float Config.unblock_update() input_dataset_cfg_1 = Config.configure_pickle_data_node(id="input_pickle_dataset_1", path=PICKLE_DICT_INPUT_PATH) output_dataset_cfg_1 = Config.configure_pickle_data_node(id="output_pickle_dataset_1", path=PICKLE_DICT_OUTPUT_PATH) task_cfg_1 = Config.configure_task(id="t1", input=input_dataset_cfg_1, function=algorithm, output=output_dataset_cfg_1) scenario_cfg_1 = Config.configure_scenario(id="s1", task_configs=[task_cfg_1], frequency=Frequency.DAILY) input_dataset_cfg_2 = Config.configure_pickle_data_node(id="input_pickle_dataset_2", path=PICKLE_LIST_INPUT_PATH) output_dataset_cfg_2 = Config.configure_pickle_data_node(id="output_pickle_dataset_2", path=PICKLE_LIST_OUTPUT_PATH) task_cfg_2 = Config.configure_task(id="t2", input=input_dataset_cfg_2, function=algorithm, output=output_dataset_cfg_2) scenario_cfg_2 = Config.configure_scenario(id="s2", task_configs=[task_cfg_2], frequency=Frequency.DAILY)
from .algorithms import * from .config import * from .utils import *
import pickle import random from tests.shared_test_cases.pickle_files import Row def gen_list_of_dict_input_pickle(path, n): data = [] for i in range(n): row = {"id": i + 1, "age": random.randint(10, 99), "rating": round(random.uniform(0, 10), 2)} data.append(row) pickle.dump(data, open(path, "wb")) def gen_list_of_objects_input_pickle(path, n): data = [] for i in range(n): row = Row(i + 1, random.randint(10, 99), round(random.uniform(0, 10), 2)) data.append(row) pickle.dump(data, open(path, "wb"))
def algorithm(data): return data
from taipy.config.common.frequency import Frequency from taipy.config.config import Config from .algorithms import algorithm from .utils import RowDecoder, RowEncoder JSON_DICT_INPUT_PATH = "tests/shared_test_cases/json_files/input_dict_1000.json" JSON_DICT_OUTPUT_PATH = "tests/shared_test_cases/json_files/output_dict_1000.json" JSON_OBJECT_INPUT_PATH = "tests/shared_test_cases/json_files/input_object_1000.json" JSON_OBJECT_OUTPUT_PATH = "tests/shared_test_cases/json_files/output_object_1000.json" ROW_COUNT = 1000 Config.unblock_update() input_dataset_cfg_1 = Config.configure_json_data_node(id="input_json_dataset_1", path=JSON_DICT_INPUT_PATH) output_dataset_cfg_1 = Config.configure_json_data_node(id="output_json_dataset_1", path=JSON_DICT_OUTPUT_PATH) task_cfg_1 = Config.configure_task(id="t1", input=input_dataset_cfg_1, function=algorithm, output=output_dataset_cfg_1) scenario_cfg_1 = Config.configure_scenario(id="s1", task_configs=[task_cfg_1], frequency=Frequency.DAILY) input_dataset_cfg_2 = Config.configure_json_data_node( id="input_json_dataset_2", path=JSON_OBJECT_INPUT_PATH, decoder=RowDecoder ) output_dataset_cfg_2 = Config.configure_json_data_node( id="output_json_dataset_2", path=JSON_OBJECT_OUTPUT_PATH, encoder=RowEncoder, decoder=RowDecoder ) task_cfg_2 = Config.configure_task(id="t2", input=input_dataset_cfg_2, function=algorithm, output=output_dataset_cfg_2) scenario_cfg_2 = Config.configure_scenario(id="s2", task_configs=[task_cfg_2], frequency=Frequency.DAILY)
from .algorithms import * from .config import * from .utils import *
import json import random import time from dataclasses import dataclass @dataclass class Row: id: int age: int rating: float class RowEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, Row): return {"id": obj.id, "age": obj.age, "rating": obj.rating, "__type__": "Row"} return json.JSONEncoder.default(self, obj) class RowDecoder(json.JSONDecoder): def __init__(self, *args, **kwargs): json.JSONDecoder.__init__(self, object_hook=self.object_hook, *args, **kwargs) def object_hook(self, d): if "__type__" in d and d["__type__"] == "Row": return Row(d["id"], d["age"], d["rating"]) def timer(f): def wrapper(*args, **kwargs): print(f"⏳ {f.__name__}") start = time.time() result = f(*args, **kwargs) end = time.time() elapsed = round(end - start, 4) print(f"\t✔️ {elapsed} seconds") return result return wrapper def get_list_of_dicts(n): data = [] for i in range(n): row = {"id": i + 1, "age": random.randint(10, 99), "rating": round(random.uniform(0, 10), 2)} data.append(row) return data def get_list_of_objects(n): data = [] for i in range(n): row = Row(i + 1, random.randint(10, 99), round(random.uniform(0, 10), 2)) data.append(row) return data
from unittest.mock import patch

import taipy.core.taipy as tp
from taipy import Config
from taipy.core import Core
from taipy.core.config import JobConfig
from taipy.core.job.status import Status
from tests.utils import assert_true_after_time


def mult_by_2(a):
    # NOTE(review): despite the name, this returns its argument unchanged.
    # The skip-job assertions below only inspect job statuses, so the value
    # does not matter here — but confirm the name/behavior mismatch is intended.
    return a


def build_skipped_jobs_config():
    """Two skippable tasks chained input -> intermediate -> output."""
    input_config = Config.configure_data_node(id="input")
    intermediate_config = Config.configure_data_node(id="intermediate")
    output_config = Config.configure_data_node(id="output")
    task_config_1 = Config.configure_task("first", mult_by_2, input_config, intermediate_config, skippable=True)
    task_config_2 = Config.configure_task("second", mult_by_2, intermediate_config, output_config, skippable=True)
    scenario_config = Config.configure_scenario("scenario", task_configs=[task_config_1, task_config_2])
    return scenario_config


class TestSkipJobs:
    """Resubmitting an unchanged scenario must skip both skippable jobs."""

    @staticmethod
    def __test():
        cfg = build_skipped_jobs_config()
        with patch("sys.argv", ["prog"]):
            Core().run()
        scenario = tp.create_scenario(cfg)
        scenario.input.write(2)

        # First submission: both jobs actually run to completion.
        scenario.submit()
        assert len(tp.get_jobs()) == 2
        for job in tp.get_jobs():
            assert_true_after_time(job.is_completed, msg=f"job {job.id} is not completed. Status: {job.status}")

        # Second submission: inputs unchanged, so the new jobs are skipped.
        scenario.submit()
        assert len(tp.get_jobs()) == 4
        skipped = []
        for job in tp.get_jobs():
            if job.status != Status.COMPLETED:
                assert_true_after_time(job.is_skipped, msg=f"job {job.id} is not skipped. Status: {job.status}")
                skipped.append(job)
        assert len(skipped) == 2

    def test_development_fs_repo(self):
        self.__test()

    def test_development_sql_repo(self, tmp_sqlite):
        Config.configure_global_app(repository_type="sql", repository_properties={"db_location": tmp_sqlite})
        self.__test()

    def test_standalone_fs_repo(self):
        Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
        self.__test()

    def test_standalone_sql_repo(self, tmp_sqlite):
        Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
        Config.configure_global_app(repository_type="sql", repository_properties={"db_location": tmp_sqlite})
        self.__test()
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import os
from unittest.mock import patch

import pandas as pd

import taipy.core.taipy as tp
from taipy import Config
from taipy.core import Core
from taipy.core.config import JobConfig
from tests.test_complex.utils.algos import average
from tests.test_complex.utils.config_builders import build_complex_config, build_complex_required_file_paths
from tests.utils import assert_true_after_time


class TestComplexApp:
    """End-to-end run of the complex scenario, checked against the output files."""

    @staticmethod
    def __test():
        _, _, csv_path_sum, excel_path_sum, excel_path_out, csv_path_out = build_complex_required_file_paths()
        cfg = build_complex_config()
        with patch("sys.argv", ["prog"]):
            Core().run(force_restart=True)
        scenario = tp.create_scenario(cfg)

        # Submit and wait for every job of the graph to finish.
        for job in tp.submit(scenario):
            assert_true_after_time(job.is_completed, msg=f"job {job.id} is not completed. Status: {job.status}.")

        # Validate the intermediate and final files written by the tasks.
        csv_sum_res = pd.read_csv(csv_path_sum)
        excel_sum_res = pd.read_excel(excel_path_sum)
        csv_out = pd.read_csv(csv_path_out)
        excel_out = pd.read_excel(excel_path_out)
        assert csv_sum_res.to_numpy().flatten().tolist() == [i * 20 for i in range(1, 11)]
        assert excel_sum_res.to_numpy().flatten().tolist() == [i * 2 for i in range(1, 11)]
        assert average(csv_sum_res["number"] - excel_sum_res["number"]) == csv_out.to_numpy()[0]
        assert average((csv_sum_res["number"] - excel_sum_res["number"]) * 10) == excel_out.to_numpy()[0]

        # Clean up generated artifacts so reruns start fresh.
        for generated in (csv_path_sum, excel_path_sum, csv_path_out, excel_path_out):
            os.remove(generated)

    def test_development_fs_repo(self):
        self.__test()

    def test_development_sql_repo(self, tmp_sqlite):
        Config.configure_global_app(repository_type="sql", repository_properties={"db_location": tmp_sqlite})
        self.__test()

    def test_standalone_fs_repo(self):
        Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
        self.__test()

    def test_standalone_sql_repo(self, tmp_sqlite):
        Config.configure_global_app(repository_type="sql", repository_properties={"db_location": tmp_sqlite})
        Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
        self.__test()
from unittest.mock import patch

import taipy.core.taipy as tp
from taipy import Config
from taipy.core import Core
from taipy.core.config import JobConfig
from tests.test_complex.utils.config_builders import build_churn_classification_config
from tests.utils import assert_true_after_time


class TestChurnClassification:
    """End-to-end test of the churn-classification scenario across repository types and execution modes."""

    @staticmethod
    def __test():
        """Submit the churn-classification scenario and wait for every job to finish.

        The ML pipeline (training, forecasting, scoring) can be slow, hence the
        extended 30-second timeout per job.
        """
        scenario_cfg = build_churn_classification_config()
        # Patch argv so Core's own CLI parsing does not pick up pytest's arguments.
        with patch("sys.argv", ["prog"]):
            Core().run(force_restart=True)
        scenario = tp.create_scenario(scenario_cfg)
        jobs = tp.submit(scenario)
        for job in jobs:
            assert_true_after_time(
                job.is_completed, msg=f"job {job.id} is not completed. Status: {job.status}.", time=30
            )

    def test_development_fs_repo(self):
        # Default configuration: development mode, filesystem repository.
        self.__test()

    def test_development_sql_repo(self, tmp_sqlite):
        # Development mode with a SQL repository backed by a temporary sqlite file.
        Config.configure_global_app(repository_type="sql", repository_properties={"db_location": tmp_sqlite})
        self.__test()

    def test_standalone_fs_repo(self):
        # Standalone mode: jobs execute in separate worker processes.
        Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
        self.__test()

    def test_standalone_sql_repo(self, tmp_sqlite):
        # Standalone mode combined with the SQL repository.
        Config.configure_global_app(repository_type="sql", repository_properties={"db_location": tmp_sqlite})
        Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
        self.__test()
import datetime as dt
from time import sleep

import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split


def sum(a, b):  # noqa: A001 - shadows the builtin, but the name is part of the task-function API
    """Return the element-wise sum of the "number" columns of two dataframes."""
    return a["number"] + b["number"]


def subtract(a, b):
    """Return the element-wise difference of the "number" columns of two dataframes."""
    return a["number"] - b["number"]


def mult(a, b):
    """Return the element-wise product of two values/series."""
    return a * b


def divide(a, b):
    """Return the element-wise quotient of two values/series."""
    return a / b


def average(a):
    """Return the mean of *a* wrapped in a single-element list (the task output shape)."""
    return [a.sum() / len(a)]


def div_constant_with_sleep(a):
    """Divide the "number" column by 10 after a short sleep (simulates a slow task)."""
    sleep(0.5)
    return a["number"] / 10


def return_a_number():
    """Return the constant 10 (used to feed placeholder data nodes)."""
    return 10


def return_a_number_with_sleep():
    """Return the constant 10 after a short sleep (simulates a slow task)."""
    sleep(0.5)
    return 10


def preprocess_dataset(initial_dataset: pd.DataFrame, date: dt.datetime = None):
    """Preprocess the raw churn dataset for classification.

    Args:
        initial_dataset (pd.DataFrame): the raw format when we first read the data.
        date (dt.datetime): keep only rows whose "Date" is on or before this date.
            ``None`` or the string ``"None"`` disables the filtering (the scenario
            configuration passes the string ``"None"`` by default).

    Returns:
        pd.DataFrame: the preprocessed, fully numeric dataset with "Exited" as
        the last column.
    """
    # Filter on the date only when a real date was provided. The previous check
    # (`date != "None"` alone) crashed on the declared default value None.
    if date is not None and date != "None":
        initial_dataset["Date"] = pd.to_datetime(initial_dataset["Date"])
        processed_dataset = initial_dataset[initial_dataset["Date"] <= date]
    else:
        processed_dataset = initial_dataset
    processed_dataset = processed_dataset[
        [
            "CreditScore",
            "Geography",
            "Gender",
            "Age",
            "Tenure",
            "Balance",
            "NumOfProducts",
            "HasCrCard",
            "IsActiveMember",
            "EstimatedSalary",
            "Exited",
        ]
    ]
    # One-hot encode the categorical columns; "Gender_Female" is redundant with
    # "Gender_Male", so drop it when present.
    processed_dataset = pd.get_dummies(processed_dataset)
    if "Gender_Female" in processed_dataset.columns:
        processed_dataset.drop("Gender_Female", axis=1, inplace=True)
    processed_dataset = processed_dataset.apply(pd.to_numeric)
    # Fix a stable column order; some one-hot columns may be absent on small samples.
    columns_to_select = [
        "CreditScore",
        "Age",
        "Tenure",
        "Balance",
        "NumOfProducts",
        "HasCrCard",
        "IsActiveMember",
        "EstimatedSalary",
        "Geography_France",
        "Geography_Germany",
        "Geography_Spain",
        "Gender_Male",
        "Exited",
    ]
    processed_dataset = processed_dataset[[col for col in columns_to_select if col in processed_dataset.columns]]
    return processed_dataset


def create_train_test_data(preprocessed_dataset: pd.DataFrame):
    """Split the preprocessed dataset into train and test sets (80/20, fixed seed).

    Args:
        preprocessed_dataset (pd.DataFrame): the preprocessed dataset; the last
            column is the target.

    Returns:
        tuple[pd.DataFrame, pd.DataFrame]: (train_data, test_data), each with the
        target re-attached as the last column.
    """
    X_train, X_test, y_train, y_test = train_test_split(
        preprocessed_dataset.iloc[:, :-1], preprocessed_dataset.iloc[:, -1], test_size=0.2, random_state=42
    )
    train_data = pd.concat([X_train, y_train], axis=1)
    test_data = pd.concat([X_test, y_test], axis=1)
    return train_data, test_data


def train_model_baseline(train_dataset: pd.DataFrame):
    """Train the baseline Logistic Regression model.

    Args:
        train_dataset (pd.DataFrame): the training dataset (target in last column).

    Returns:
        tuple[LogisticRegression, pd.DataFrame]: the fitted model and a dataframe
        of feature importances (model coefficients) sorted ascending.
    """
    X, y = train_dataset.iloc[:, :-1], train_dataset.iloc[:, -1]
    model_fitted = LogisticRegression().fit(X, y)
    importance_dict = {"Features": X.columns, "Importance": model_fitted.coef_[0]}
    importance = pd.DataFrame(importance_dict).sort_values(by="Importance", ascending=True)
    return model_fitted, importance


def train_model(train_dataset: pd.DataFrame):
    """Train the Random Forest model.

    Args:
        train_dataset (pd.DataFrame): the training dataset (target in last column).

    Returns:
        tuple[RandomForestClassifier, pd.DataFrame]: the fitted model and a
        dataframe of feature importances sorted ascending.
    """
    X, y = train_dataset.iloc[:, :-1], train_dataset.iloc[:, -1]
    model_fitted = RandomForestClassifier().fit(X, y)
    importance_dict = {"Features": X.columns, "Importance": model_fitted.feature_importances_}
    importance = pd.DataFrame(importance_dict).sort_values(by="Importance", ascending=True)
    return model_fitted, importance


def forecast(test_dataset: pd.DataFrame, trained_model: RandomForestClassifier):
    """Predict class-1 probabilities for the test dataset with the Random Forest model.

    Args:
        test_dataset (pd.DataFrame): the test dataset (target in last column, ignored).
        trained_model (RandomForestClassifier): the fitted model.

    Returns:
        np.ndarray: the predicted probability of the positive class for each row.
    """
    X = test_dataset.iloc[:, :-1]
    predictions = trained_model.predict_proba(X)[:, 1]
    return predictions


def forecast_baseline(test_dataset: pd.DataFrame, trained_model: LogisticRegression):
    """Predict class-1 probabilities for the test dataset with the baseline model.

    Args:
        test_dataset (pd.DataFrame): the test dataset (target in last column, ignored).
        trained_model (LogisticRegression): the fitted baseline model.

    Returns:
        np.ndarray: the predicted probability of the positive class for each row.
    """
    X = test_dataset.iloc[:, :-1]
    predictions = trained_model.predict_proba(X)[:, 1]
    # NOTE(review): the sibling functions have their prints commented out; this one
    # is kept active to preserve the original's observable behavior.
    print(" Forecasting done!")
    return predictions


def roc_from_scratch(probabilities, test_dataset, partitions=100):
    """Compute a ROC curve (by threshold sweeping) and the AUC score.

    Args:
        probabilities: predicted class-1 probabilities.
        test_dataset: the test dataset; the last column is the true label.
        partitions (int): number of threshold steps between 0 and 1.

    Returns:
        tuple[pd.DataFrame, float]: the (fpr, tpr) points and the AUC score.
    """
    y_test = test_dataset.iloc[:, -1]
    # Accumulate the points in a Python list: np.append in a loop reallocates
    # the whole array at every iteration (quadratic behavior).
    points = []
    for i in range(partitions + 1):
        threshold_vector = np.greater_equal(probabilities, i / partitions).astype(int)
        tpr, fpr = true_false_positive(threshold_vector, y_test)
        points.append((fpr, tpr))
    roc_np = np.array(points)
    roc_data = pd.DataFrame({"False positive rate": roc_np[:, 0], "True positive rate": roc_np[:, 1]})
    score_auc = roc_auc_score(y_test, probabilities)
    return roc_data, score_auc


def true_false_positive(threshold_vector: np.array, y_test: np.array):
    """Compute the true-positive and false-positive rates for one thresholding.

    Args:
        threshold_vector (np.array): 0/1 predictions at the current threshold.
        y_test (np.array): the true 0/1 labels.

    Returns:
        tuple[float, float]: (tpr, fpr).
        NOTE(review): produces NaN if a class is entirely absent from y_test.
    """
    true_positive = np.equal(threshold_vector, 1) & np.equal(y_test, 1)
    true_negative = np.equal(threshold_vector, 0) & np.equal(y_test, 0)
    false_positive = np.equal(threshold_vector, 1) & np.equal(y_test, 0)
    false_negative = np.equal(threshold_vector, 0) & np.equal(y_test, 1)
    tpr = true_positive.sum() / (true_positive.sum() + false_negative.sum())
    fpr = false_positive.sum() / (false_positive.sum() + true_negative.sum())
    return tpr, fpr


def create_metrics(predictions: np.array, test_dataset: np.array):
    """Compute classification metrics at a fixed 0.5 threshold.

    Args:
        predictions (np.array): predicted class-1 probabilities.
        test_dataset: the test dataset; the last column is the true label.

    Returns:
        dict: f1_score, accuracy, the confusion-matrix counts and prediction tallies.
    """
    threshold = 0.5
    threshold_vector = np.greater_equal(predictions, threshold).astype(int)
    y_test = test_dataset.iloc[:, -1]
    true_positive = (np.equal(threshold_vector, 1) & np.equal(y_test, 1)).sum()
    true_negative = (np.equal(threshold_vector, 0) & np.equal(y_test, 0)).sum()
    false_positive = (np.equal(threshold_vector, 1) & np.equal(y_test, 0)).sum()
    false_negative = (np.equal(threshold_vector, 0) & np.equal(y_test, 1)).sum()
    f1_score = np.around(2 * true_positive / (2 * true_positive + false_positive + false_negative), decimals=2)
    accuracy = np.around(
        (true_positive + true_negative) / (true_positive + true_negative + false_positive + false_negative), decimals=2
    )
    dict_ftpn = {"tp": true_positive, "tn": true_negative, "fp": false_positive, "fn": false_negative}
    number_of_good_predictions = true_positive + true_negative
    number_of_false_predictions = false_positive + false_negative
    metrics = {
        "f1_score": f1_score,
        "accuracy": accuracy,
        "dict_ftpn": dict_ftpn,
        "number_of_predictions": len(predictions),
        "number_of_good_predictions": number_of_good_predictions,
        "number_of_false_predictions": number_of_false_predictions,
    }
    return metrics


def create_results(forecast_values, test_dataset):
    """Assemble a per-row results table from forecast probabilities and true labels.

    Args:
        forecast_values: predicted class-1 probabilities, aligned with test_dataset.
        test_dataset: the test dataset; the last column is the true label.

    Returns:
        pd.DataFrame: columns Id, Probability (rounded to 2), Forecast (0/1 at
        threshold 0.5) and Historical (true label), indexed like test_dataset.
    """
    forecast_series_proba = pd.Series(
        np.around(forecast_values, decimals=2), index=test_dataset.index, name="Probability"
    )
    forecast_series = pd.Series((forecast_values > 0.5).astype(int), index=test_dataset.index, name="Forecast")
    true_series = pd.Series(test_dataset.iloc[:, -1], name="Historical", index=test_dataset.index)
    index_series = pd.Series(range(len(true_series)), index=test_dataset.index, name="Id")
    results = pd.concat([index_series, forecast_series_proba, forecast_series, true_series], axis=1)
    return results
import datetime as dt
import os
import pathlib

from taipy.config import Config, Frequency, Scope

# Explicit imports instead of the previous `from .algos import *`: the star
# import hid which task functions this module uses and silently provided the
# `dt` alias (datetime) that the churn configuration below relies on.
from .algos import (
    average,
    create_metrics,
    create_results,
    create_train_test_data,
    divide,
    forecast,
    forecast_baseline,
    mult,
    preprocess_dataset,
    return_a_number,
    roc_from_scratch,
    subtract,
    sum,  # noqa: A004 - task function named like the builtin on purpose
    train_model,
    train_model_baseline,
)


def build_complex_config():
    """Build the "complex" scenario configuration used by the end-to-end tests.

    The scenario reads the same sample data from CSV and Excel, sums each,
    subtracts one from the other, multiplies/divides by a placeholder constant
    and writes the averaged results back to CSV and Excel.

    Returns:
        ScenarioConfig: the fully wired scenario configuration.
    """
    (
        csv_path_inp,
        excel_path_inp,
        csv_path_sum,
        excel_path_sum,
        excel_path_out,
        csv_path_out,
    ) = build_complex_required_file_paths()

    # Input data nodes: the same sample file read twice per format.
    inp_csv_dn_1 = Config.configure_csv_data_node("dn_csv_in_1", default_path=csv_path_inp)
    inp_csv_dn_2 = Config.configure_csv_data_node("dn_csv_in_2", default_path=csv_path_inp)
    inp_excel_dn_1 = Config.configure_excel_data_node("dn_excel_in_1", default_path=excel_path_inp, sheet_name="Sheet1")
    inp_excel_dn_2 = Config.configure_excel_data_node("dn_excel_in_2", default_path=excel_path_inp, sheet_name="Sheet1")
    placeholder = Config.configure_data_node(id="dn_placeholder", default_data=10)
    # Intermediate data nodes.
    dn_csv_sum = Config.configure_csv_data_node("dn_sum_csv", default_path=csv_path_sum)
    dn_excel_sum = Config.configure_excel_data_node("dn_sum_excel", default_path=excel_path_sum, sheet_name="Sheet1")
    dn_subtract_csv_excel = Config.configure_pickle_data_node("dn_subtract_csv_excel")
    dn_mult = Config.configure_pickle_data_node("dn_mult")
    dn_div = Config.configure_pickle_data_node("dn_div")
    # Output data nodes.
    output_csv_dn = Config.configure_csv_data_node("csv_out", csv_path_out)
    output_excel_dn = Config.configure_excel_data_node("excel_out", excel_path_out)

    # Tasks wiring the data nodes through the algo functions.
    task_print_csv = Config.configure_task("task_print_csv", print, input=inp_csv_dn_1)
    task_print_excel = Config.configure_task("task_print_excel", print, input=inp_excel_dn_1)
    task_sum_csv = Config.configure_task("task_sum_csv", sum, input=[inp_csv_dn_2, inp_csv_dn_1], output=dn_csv_sum)
    task_sum_excel = Config.configure_task(
        "task_sum_excel",
        sum,
        input=[inp_excel_dn_2, inp_excel_dn_1],
        output=dn_excel_sum,
    )
    task_subtract_csv_excel = Config.configure_task(
        "task_subtract_csv_excel",
        subtract,
        input=[dn_csv_sum, dn_excel_sum],
        output=dn_subtract_csv_excel,
    )
    task_insert_placeholder = Config.configure_task("task_insert_placeholder", return_a_number, output=[placeholder])
    task_mult = Config.configure_task(
        "task_mult_by_placeholder",
        mult,
        input=[dn_subtract_csv_excel, placeholder],
        output=dn_mult,
    )
    task_div = Config.configure_task("task_div_by_placeholder", divide, input=[dn_mult, placeholder], output=dn_div)
    task_avg_div = Config.configure_task("task_avg_div", average, input=dn_div, output=output_csv_dn)
    task_avg_mult = Config.configure_task("task_avg_mult", average, input=dn_mult, output=output_excel_dn)

    scenario_config = Config.configure_scenario(
        "scenario",
        [
            task_print_csv,
            task_print_excel,
            task_sum_csv,
            task_sum_excel,
            task_subtract_csv_excel,
            task_insert_placeholder,
            task_mult,
            task_div,
            task_avg_div,
            task_avg_mult,
        ],
    )
    return scenario_config


def build_complex_required_file_paths():
    """Return the file paths (inputs and generated outputs) used by the complex scenario.

    Returns:
        tuple[str, str, str, str, str, str]: csv input, excel input, csv sum,
        excel sum, excel result and csv result paths (relative to the repo root).
    """
    csv_path_inp = "tests/shared_test_cases/data_sample/example.csv"
    excel_path_inp = "tests/shared_test_cases/data_sample/example.xlsx"
    csv_path_sum = "tests/shared_test_cases/data_sample/sum.csv"
    excel_path_sum = "tests/shared_test_cases/data_sample/sum.xlsx"
    excel_path_out = "tests/shared_test_cases/data_sample/res.xlsx"
    csv_path_out = "tests/shared_test_cases/data_sample/res.csv"
    return (
        csv_path_inp,
        excel_path_inp,
        csv_path_sum,
        excel_path_sum,
        excel_path_out,
        csv_path_out,
    )


def build_churn_classification_required_file_paths():
    """Return the absolute path of the churn sample CSV (resolved relative to this file)."""
    csv_path_inp = os.path.join(
        pathlib.Path(__file__).parent.resolve(),
        "../../shared_test_cases/data_sample/churn.csv",
    )
    return csv_path_inp


def build_churn_classification_config():
    """Build the churn-classification scenario configuration.

    Wires the full ML pipeline: preprocessing, train/test split, baseline
    (logistic regression) and main (random forest) model training, forecasting,
    ROC/AUC scoring, metrics and result tables.

    Returns:
        ScenarioConfig: the churn-classification scenario configuration.
    """
    csv_path_inp = build_churn_classification_required_file_paths()
    # path for csv and file_path for pickle
    initial_dataset = Config.configure_data_node(
        id="initial_dataset", path=csv_path_inp, storage_type="csv", has_header=True
    )
    # Default "None" (the string) means "no date filtering" in preprocess_dataset.
    date_cfg = Config.configure_data_node(id="date", default_data="None")
    preprocessed_dataset = Config.configure_data_node(
        id="preprocessed_dataset", cacheable=True, validity_period=dt.timedelta(days=1)
    )
    # the final datanode that contains the processed data
    train_dataset = Config.configure_data_node(id="train_dataset", cacheable=True, validity_period=dt.timedelta(days=1))
    # the final datanode that contains the processed data
    trained_model = Config.configure_data_node(id="trained_model", cacheable=True, validity_period=dt.timedelta(days=1))
    trained_model_baseline = Config.configure_data_node(
        id="trained_model_baseline",
        cacheable=True,
        validity_period=dt.timedelta(days=1),
    )
    # the final datanode that contains the processed data
    test_dataset = Config.configure_data_node(id="test_dataset", cacheable=True, validity_period=dt.timedelta(days=1))
    forecast_baseline_dataset = Config.configure_data_node(
        id="forecast_baseline_dataset",
        scope=Scope.SCENARIO,
    )
    forecast_test_dataset = Config.configure_data_node(
        id="forecast_test_dataset",
        scope=Scope.SCENARIO,
    )
    roc_data = Config.configure_data_node(
        id="roc_data",
        scope=Scope.SCENARIO,
    )
    score_auc = Config.configure_data_node(
        id="score_auc",
        scope=Scope.SCENARIO,
    )
    metrics = Config.configure_data_node(
        id="metrics",
        scope=Scope.SCENARIO,
    )
    feature_importance_cfg = Config.configure_data_node(id="feature_importance", scope=Scope.SCENARIO)
    results = Config.configure_data_node(
        id="results",
        scope=Scope.SCENARIO,
    )

    ##########################################################################
    # Creation of the tasks
    ##########################################################################
    # Each task links its input data node(s) to its output data node(s)
    # through the function it executes.

    # initial_dataset --> preprocess dataset --> preprocessed_dataset
    task_preprocess_dataset = Config.configure_task(
        id="preprocess_dataset",
        input=[initial_dataset, date_cfg],
        function=preprocess_dataset,
        output=preprocessed_dataset,
    )
    # preprocessed_dataset --> create train data --> train_dataset, test_dataset
    task_create_train_test = Config.configure_task(
        id="create_train_and_test_data",
        input=preprocessed_dataset,
        function=create_train_test_data,
        output=[train_dataset, test_dataset],
    )
    # train_dataset --> train_model --> trained_model
    task_train_model = Config.configure_task(
        id="train_model",
        input=train_dataset,
        function=train_model,
        output=[trained_model, feature_importance_cfg],
    )
    # train_dataset --> train_model_baseline --> trained_model_baseline
    task_train_model_baseline = Config.configure_task(
        id="train_model_baseline",
        input=train_dataset,
        function=train_model_baseline,
        output=[trained_model_baseline, feature_importance_cfg],
    )
    # test_dataset --> forecast --> forecast_test_dataset
    task_forecast = Config.configure_task(
        id="predict_the_test_data",
        input=[test_dataset, trained_model],
        function=forecast,
        output=forecast_test_dataset,
    )
    # test_dataset --> forecast_baseline --> forecast_baseline_dataset
    task_forecast_baseline = Config.configure_task(
        id="predict_of_baseline",
        input=[test_dataset, trained_model_baseline],
        function=forecast_baseline,
        output=forecast_baseline_dataset,
    )
    task_roc = Config.configure_task(
        id="task_roc",
        input=[forecast_test_dataset, test_dataset],
        function=roc_from_scratch,
        output=[roc_data, score_auc],
    )
    task_roc_baseline = Config.configure_task(
        id="task_roc_baseline",
        input=[forecast_baseline_dataset, test_dataset],
        function=roc_from_scratch,
        output=[roc_data, score_auc],
    )
    task_create_metrics = Config.configure_task(
        id="task_create_metrics",
        input=[forecast_test_dataset, test_dataset],
        function=create_metrics,
        output=metrics,
    )
    task_create_results = Config.configure_task(
        id="task_create_results",
        input=[forecast_test_dataset, test_dataset],
        function=create_results,
        output=results,
    )
    task_create_baseline_metrics = Config.configure_task(
        id="task_create_baseline_metrics",
        input=[forecast_baseline_dataset, test_dataset],
        function=create_metrics,
        output=metrics,
    )
    task_create_baseline_results = Config.configure_task(
        id="task_create_baseline_results",
        input=[forecast_baseline_dataset, test_dataset],
        function=create_results,
        output=results,
    )

    ##########################################################################
    # Creation of the scenario
    ##########################################################################
    scenario_cfg = Config.configure_scenario(
        id="churn_classification",
        task_configs=[
            task_preprocess_dataset,
            task_create_train_test,
            task_train_model_baseline,
            task_train_model,
            task_forecast,
            task_roc,
            task_create_metrics,
            task_create_results,
            task_forecast_baseline,
            task_roc_baseline,
            # NOTE(review): task_create_metrics and task_create_results appear a
            # second time here in the original configuration (likely a copy-paste
            # of the baseline entries below). Kept as-is to preserve behavior.
            task_create_metrics,
            task_create_results,
            task_create_baseline_metrics,
            task_create_baseline_results,
        ],
        frequency=Frequency.WEEKLY,
    )
    return scenario_cfg
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
from taipy import Config, Core, Gui

# CLI smoke-check script: starts Core and parses the Gui arguments, then prints
# the effective configuration values so a test harness can assert on stdout.
if __name__ == "__main__":
    core = Core()
    core.run()
    gui = Gui()
    # Parse command-line arguments into the Gui configuration (private API,
    # used here deliberately to exercise argument handling without serving).
    gui._config._handle_argparse()
    # Core configuration values resolved from CLI/defaults.
    print(f"Config.core.version_number: {Config.core.version_number}")
    print(f"Config.core.mode: {Config.core.mode}")
    print(f"Config.core.force: {Config.core.force}")
    # Gui configuration values resolved from CLI/defaults.
    print(f"Config.gui_config.host: {gui._config.config.get('host', None)}")
    print(f"Config.gui_config.port: {gui._config.config.get('port', None)}")
    print(f"Config.gui_config.debug: {gui._config.config.get('debug', None)}")
    print(f"Config.gui_config.use_reloader: {gui._config.config.get('use_reloader', None)}")
    print(f"Config.gui_config.ngrok_token: {gui._config.config.get('ngrok_token', None)}")
    print(f"Config.gui_config.webapp_path: {gui._config.config.get('webapp_path', None)}")
import argparse

from taipy import Config, Core, Gui

# CLI smoke-check script: verifies that a user application's own argparse
# arguments (some overlapping taipy's, some not) coexist with taipy's CLI
# handling. Prints both taipy's resolved values and the user-parsed values.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # --mode, --force, --host and --port intentionally conflict with taipy's own CLI options.
    parser.add_argument("--mode", dest="mode", type=str, default="training")
    parser.add_argument("--force", type=str, default="no")
    parser.add_argument("--host", dest="host", type=str, default="user_default_host.com")
    parser.add_argument("--port", type=int, default=8080)
    parser.add_argument("--non-conflict-arg", type=str, default="")

    core = Core()
    core.run()
    gui = Gui()
    # Let taipy parse its arguments first (private API, exercised deliberately).
    gui._config._handle_argparse()

    # parse_known_args ignores any taipy-only arguments the user parser doesn't know.
    user_args, _ = parser.parse_known_args()

    print(f"Config.core.mode: {Config.core.mode}")
    print(f"User provided mode: {user_args.mode}")
    print(f"Config.core.force: {Config.core.force}")
    print(f"User provided force: {user_args.force}")
    print(f"Config.gui_config.host: {gui._config.config.get('host', None)}")
    print(f"User provided host: {user_args.host}")
    print(f"Config.gui_config.port: {gui._config.config.get('port', None)}")
    print(f"User provided port: {user_args.port}")
    print(f"User provided non-conflict-arg: {user_args.non_conflict_arg}")
import taipy as tp
from taipy.gui import Gui, notify
from taipy.config import Config
import dask_ml.datasets
import dask_ml.cluster
import pandas as pd

# Demo app: K-Means clustering on a large synthetic dataset with Dask,
# orchestrated by taipy Core and visualized with taipy Gui.

# Number of clusters (bound to the slider in the page below).
n_clusters = 3

# Generate an initial synthetic dataset and fit a first model eagerly at
# startup so the chart has data before the user presses the button.
data = dask_ml.datasets.make_blobs(
    n_samples=1000000, chunks=1000000, random_state=0, centers=n_clusters
)
X, _ = data

km = dask_ml.cluster.KMeans(n_clusters=n_clusters)
km.fit(X)

# Downsample 1 point in 1000 for plotting.
visual_data = pd.DataFrame(
    {"x": X[::1000, 0], "y": X[::1000, 1], "color": km.labels_[::1000]}
)

# The scenario (data nodes and tasks) is declared externally in config.toml.
Config.load("config.toml")
scenario_object = Config.scenarios["scenario"]


def on_button(state):
    """Run the K-Means scenario with the slider's cluster count and refresh the chart."""
    notify(state, "info", "Running K-Means...")
    scenario = tp.create_scenario(scenario_object)
    # NOTE(review): n_clusters is written to both the "centers" and "n_clusters"
    # data nodes — presumably the generated blob count and the fitted cluster
    # count are meant to match; confirm against config.toml.
    scenario.centers.write(state.n_clusters)
    scenario.n_clusters.write(state.n_clusters)
    tp.submit(scenario)
    # Read back the scenario outputs and rebuild the downsampled plot data.
    state.X = scenario.dataset.read()
    state.km = scenario.km.read()
    state.visual_data = pd.DataFrame(
        {
            "x": state.X[::1000, 0],
            "y": state.X[::1000, 1],
            "color": state.km.labels_[::1000],
        }
    )
    notify(state, "success", "Done!")


page = """
# Scaling K-Means with **Dask**{: .color-secondary} and **Taipy**{: .color-primary}

Number of clusters:
<|{n_clusters}|slider|min=1|max=10|>

<|Run K-Means|button|on_action=on_button|>

<|{visual_data}|chart|mode=markers|x=x|y=y|color=color|rebuild|>
"""

tp.Core().run()
Gui(page).run()
# Lazy annotations: `dask.array` in the signature below is a submodule, not a
# class, and must not be evaluated eagerly at definition time.
from __future__ import annotations

import dask
# Explicitly import both dask_ml submodules used below. The original only
# imported dask_ml.datasets and relied on dask_ml.cluster being reachable
# through the package, which is not guaranteed.
import dask_ml.cluster
import dask_ml.datasets


def generate_data(centers: int):
    """
    Generates synthetic data for clustering.

    Args:
    - centers (int): number of clusters to generate

    Returns:
    - X (dask.array): array of shape (n_samples, n_features), persisted in memory
    """
    X, _ = dask_ml.datasets.make_blobs(
        n_samples=1000000, chunks=1000000, random_state=0, centers=centers
    )
    # Persist so downstream tasks reuse the materialized blocks instead of recomputing.
    return X.persist()


def fit(X: dask.array, n_clusters: int):
    """
    Fit a k-means clustering model.

    Args:
    - X (dask.array): array of shape (n_samples, n_features)
    - n_clusters (int): number of clusters to fit

    Returns:
    - km (dask_ml.cluster.KMeans): the fitted k-means clustering model
    """
    km = dask_ml.cluster.KMeans(n_clusters=n_clusters)
    km.fit(X)
    return km
#!/usr/bin/env python

"""The setup script."""

import json
import os
from pathlib import Path

from setuptools import find_namespace_packages, find_packages, setup
from setuptools.command.build_py import build_py

readme = Path("README.md").read_text()

# Build the version string "major.minor.patch[.ext]" from the packaged version.json.
with open(f"src{os.sep}taipy{os.sep}gui{os.sep}version.json") as version_file:
    version = json.load(version_file)
    version_string = f'{version.get("major", 0)}.{version.get("minor", 0)}.{version.get("patch", 0)}'
    if vext := version.get("ext"):
        version_string = f"{version_string}.{vext}"

# Runtime dependencies with conservative upper bounds.
requirements = [
    "flask>=3.0.0,<3.1",
    "flask-cors>=4.0.0,<5.0",
    "flask-socketio>=5.3.6,<6.0",
    "markdown>=3.4.4,<4.0",
    "pandas>=2.0.0,<3.0",
    "python-dotenv>=1.0.0,<1.1",
    "pytz>=2021.3,<2022.2",
    "tzlocal>=3.0,<5.0",
    "backports.zoneinfo>=0.2.1,<0.3;python_version<'3.9'",
    "gevent>=23.7.0,<24.0",
    "gevent-websocket>=0.10.1,<0.11",
    "kthread>=0.2.3,<0.3",
    "taipy-config@git+https://git@github.com/Avaiga/taipy-config.git@develop",
    "gitignore-parser>=0.1,<0.2",
    "simple-websocket>=0.10.1,<1.0",
    "twisted>=23.8.0,<24.0",
]

test_requirements = ["pytest>=3.8"]

# Optional feature sets installable as extras, e.g. `pip install taipy-gui[arrow]`.
extras_require = {
    "ngrok": ["pyngrok>=5.1,<6.0"],
    "image": [
        "python-magic>=0.4.24,<0.5;platform_system!='Windows'",
        "python-magic-bin>=0.4.14,<0.5;platform_system=='Windows'",
    ],
    "arrow": ["pyarrow>=10.0.1,<11.0"],
}


def _build_webapp():
    """Build the frontend web app with npm, unless a previous build already exists."""
    already_exists = Path("./src/taipy/gui/webapp/index.html").exists()
    if not already_exists:
        os.system("cd frontend/taipy-gui/dom && npm ci")
        os.system("cd frontend/taipy-gui && npm ci --omit=optional && npm run build")


class NPMInstall(build_py):
    # Custom build step: ensure the frontend bundle exists before packaging Python files.
    def run(self):
        _build_webapp()
        build_py.run(self)


setup(
    author="Avaiga",
    author_email="dev@taipy.io",
    python_requires=">=3.8",
    classifiers=[
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Natural Language :: English",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
    ],
    description="Low-code library to create graphical user interfaces on the Web for your Python applications.",
    long_description=readme,
    long_description_content_type="text/markdown",
    install_requires=requirements,
    license="Apache License 2.0",
    include_package_data=True,
    keywords="taipy-gui",
    name="taipy-gui",
    package_dir={"": "src"},
    packages=find_namespace_packages(where="src") + find_packages(include=["taipy", "taipy.gui", "taipy.gui.*"]),
    test_suite="tests",
    tests_require=test_requirements,
    url="https://github.com/avaiga/taipy-gui",
    version=version_string,
    zip_safe=False,
    extras_require=extras_require,
    cmdclass={"build_py": NPMInstall},
)
# ############################################################
# Generate Python interface definition files
# ############################################################
import json
import os
import typing as t

# ############################################################
# Generate gui pyi file (gui/gui.pyi)
# ############################################################
gui_py_file = "./src/taipy/gui/gui.py"
gui_pyi_file = gui_py_file + "i"

# Generate the initial stub with stubgen, then patch the `run` signature below.
os.system(f"pipenv run stubgen {gui_py_file} --no-import --parse-only --export-less -o ./")

from src.taipy.gui.config import Config  # noqa: E402 - must be imported after stub generation

# Render each Config annotation as a keyword parameter "..., name: type = ...".
gui_config = "".join(
    f", {k}: {v.__name__} = ..."
    if "<class" in str(v)
    else f", {k}: {str(v).replace('typing', 't').replace('src.taipy.gui.config.', '')} = ..."
    for k, v in Config.__annotations__.items()
)

# Replace the generic **kwargs tail of Gui.run with the explicit Config parameters.
replaced_content = ""
with open(gui_pyi_file, "r") as file:
    for line in file:
        if "def run(" in line:
            line = line.replace(
                ", run_server: bool = ..., run_in_thread: bool = ..., async_mode: str = ..., **kwargs", gui_config
            )
        replaced_content = replaced_content + line

with open(gui_pyi_file, "w") as write_file:
    write_file.write(replaced_content)

# ############################################################
# Generate Page Builder pyi file (gui/builder/__init__.pyi)
# ############################################################
builder_py_file = "./src/taipy/gui/builder/__init__.py"
builder_pyi_file = builder_py_file + "i"

with open("./src/taipy/gui/viselements.json", "r") as file:
    viselements = json.load(file)
with open("./tools/builder/block.txt", "r") as file:
    block_template = file.read()
with open("./tools/builder/control.txt", "r") as file:
    control_template = file.read()

os.system(f"pipenv run stubgen {builder_py_file} --no-import --parse-only --export-less -o ./")

with open(builder_pyi_file, "a") as file:
    file.write("from ._element import _Element, _Block\n")


def get_properties(element, viselements) -> t.List[t.Dict[str, t.Any]]:
    """Return the properties of *element*, including inherited ones.

    A copy of the element's own property list is returned. The previous
    implementation did `properties += ...` directly on the list taken from the
    JSON data, so every call permanently appended the inherited properties to
    the shared `viselements` structure, duplicating entries on repeated calls.

    Raises:
        RuntimeError: if an inherited element name cannot be resolved.
    """
    properties = list(element["properties"])
    if "inherits" not in element:
        return properties
    for inherit in element["inherits"]:
        # Look the inherited element up in each category in turn.
        inherit_element = next((e for e in viselements["undocumented"] if e[0] == inherit), None)
        if inherit_element is None:
            inherit_element = next((e for e in viselements["blocks"] if e[0] == inherit), None)
        if inherit_element is None:
            inherit_element = next((e for e in viselements["controls"] if e[0] == inherit), None)
        if inherit_element is None:
            raise RuntimeError(f"Can't find element with name {inherit}")
        properties.extend(get_properties(inherit_element[1], viselements))
    return properties


def build_doc(element: t.Dict[str, t.Any]):
    """Render one property's doc line for the generated stub (empty if undocumented)."""
    if "doc" not in element:
        return ""
    doc = str(element["doc"]).replace("\n", f'\n{16*" "}')
    # NOTE(review): assumes "default_value" is a string when present.
    return f"{element['name']} ({element['type']}): {doc} {'(default: '+element['default_value'] + ')' if 'default_value' in element else ''}"


# Emit one stub entry per control, de-duplicating properties by name and
# skipping indexed/bracketed property names.
for control_element in viselements["controls"]:
    name = control_element[0]
    property_list = []
    property_names = []
    for prop in get_properties(control_element[1], viselements):
        if prop["name"] not in property_names and "[" not in prop["name"]:
            property_list.append(prop)
            property_names.append(prop["name"])
    properties = ", ".join([f"{p} = ..." for p in property_names])
    doc_arguments = f"\n{12*' '}".join([build_doc(p) for p in property_list])
    # append properties to __init__.pyi
    with open(builder_pyi_file, "a") as file:
        file.write(
            control_template.replace("{{name}}", name)
            .replace("{{properties}}", properties)
            .replace("{{doc_arguments}}", doc_arguments)
        )

# Same for blocks, with the block template and its (slightly different) doc join.
for block_element in viselements["blocks"]:
    name = block_element[0]
    property_list = []
    property_names = []
    for prop in get_properties(block_element[1], viselements):
        if prop["name"] not in property_names and "[" not in prop["name"]:
            property_list.append(prop)
            property_names.append(prop["name"])
    properties = ", ".join([f"{p} = ..." for p in property_names])
    doc_arguments = f"{8*' '}".join([build_doc(p) for p in property_list])
    # append properties to __init__.pyi
    with open(builder_pyi_file, "a") as file:
        file.write(
            block_template.replace("{{name}}", name)
            .replace("{{properties}}", properties)
            .replace("{{doc_arguments}}", doc_arguments)
        )

# Normalize the generated stubs with the project's formatters.
os.system(f"pipenv run isort {gui_pyi_file}")
os.system(f"pipenv run black {gui_pyi_file}")
os.system(f"pipenv run isort {builder_pyi_file}")
os.system(f"pipenv run black {builder_pyi_file}")
import pytest

# Command-line options registered for the e2e test suite: (flag, default, help).
_E2E_OPTIONS = (
    ("--e2e-base-url", "/", "base url for e2e testing"),
    ("--e2e-port", "5000", "port for e2e testing"),
)


def pytest_addoption(parser):
    """Register the e2e command-line options on the pytest parser."""
    for flag, default, help_text in _E2E_OPTIONS:
        parser.addoption(flag, action="store", default=default, help=help_text)


@pytest.fixture(scope="session")
def e2e_base_url(request):
    """Base URL under which the e2e application is served (from --e2e-base-url)."""
    base_url = request.config.getoption("--e2e-base-url")
    return base_url


@pytest.fixture(scope="session")
def e2e_port(request):
    """Port on which the e2e application listens (from --e2e-port)."""
    port = request.config.getoption("--e2e-port")
    return port
"""Unit test package for taipy."""
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
import os
import sys
from importlib.util import find_spec
from pathlib import Path

import pandas as pd  # type: ignore
import pytest
from flask import Flask, g


def pytest_configure(config):
    """Alias the in-repo `src.taipy.gui` packages under the `taipy.gui` names.

    Only runs when the repository layout (`src/taipy`) is importable and an
    installed `taipy`/`taipy.gui` is NOT, so the tests exercise the local
    sources instead of a site-packages install.
    """
    if (find_spec("src") and find_spec("src.taipy")) and (not find_spec("taipy") or not find_spec("taipy.gui")):
        import src.taipy.gui
        import src.taipy.gui._renderers.builder
        import src.taipy.gui._warnings
        import src.taipy.gui.builder
        import src.taipy.gui.data.decimator.lttb
        import src.taipy.gui.data.decimator.minmax
        import src.taipy.gui.data.decimator.rdp
        import src.taipy.gui.data.decimator.scatter_decimator
        import src.taipy.gui.data.utils
        import src.taipy.gui.extension
        import src.taipy.gui.utils._map_dict
        import src.taipy.gui.utils._variable_directory
        import src.taipy.gui.utils.expr_var_name

        # Register each loaded src module under its public dotted name so that
        # `import taipy.gui...` statements in the tests resolve to these objects.
        sys.modules["taipy.gui._warnings"] = sys.modules["src.taipy.gui._warnings"]
        sys.modules["taipy.gui._renderers.builder"] = sys.modules["src.taipy.gui._renderers.builder"]
        sys.modules["taipy.gui.utils._variable_directory"] = sys.modules["src.taipy.gui.utils._variable_directory"]
        sys.modules["taipy.gui.utils.expr_var_name"] = sys.modules["src.taipy.gui.utils.expr_var_name"]
        sys.modules["taipy.gui.utils._map_dict"] = sys.modules["src.taipy.gui.utils._map_dict"]
        sys.modules["taipy.gui.extension"] = sys.modules["src.taipy.gui.extension"]
        sys.modules["taipy.gui.data.utils"] = sys.modules["src.taipy.gui.data.utils"]
        sys.modules["taipy.gui.data.decimator.lttb"] = sys.modules["src.taipy.gui.data.decimator.lttb"]
        sys.modules["taipy.gui.data.decimator.rdp"] = sys.modules["src.taipy.gui.data.decimator.rdp"]
        sys.modules["taipy.gui.data.decimator.minmax"] = sys.modules["src.taipy.gui.data.decimator.minmax"]
        sys.modules["taipy.gui.data.decimator.scatter_decimator"] = sys.modules[
            "src.taipy.gui.data.decimator.scatter_decimator"
        ]
        sys.modules["taipy.gui"] = sys.modules["src.taipy.gui"]
        sys.modules["taipy.gui.builder"] = sys.modules["src.taipy.gui.builder"]


# Shared CSV fixture data, loaded once at collection time from a file that
# lives next to this conftest.
csv = pd.read_csv(
    f"{Path(Path(__file__).parent.resolve())}{os.path.sep}current-covid-patients-hospital.csv", parse_dates=["Day"]
)
# Minimal tabular data for tests that do not need the full CSV.
small_dataframe_data = {"name": ["A", "B", "C"], "value": [1, 2, 3]}


@pytest.fixture(scope="function")
def csvdata():
    """Yield the pre-loaded covid CSV as a pandas DataFrame."""
    yield csv


@pytest.fixture(scope="function")
def small_dataframe():
    """Yield a small dict-of-columns dataset."""
    yield small_dataframe_data


@pytest.fixture(scope="function")
def gui(helpers):
    """Yield a fresh Gui instance; stop it and reset global state afterwards."""
    from taipy.gui import Gui

    gui = Gui()
    yield gui
    # Delete Gui instance and state of some classes after each test
    gui.stop()
    helpers.test_cleanup()


@pytest.fixture
def helpers():
    """Expose the Helpers utility class (imported lazily to honor the aliasing above)."""
    from .helpers import Helpers

    return Helpers


@pytest.fixture
def test_client():
    """Yield a Flask test client with an app context and a fixed client id in `g`."""
    flask_app = Flask("Test App")

    # Create a test client using the Flask application configured for testing
    with flask_app.test_client() as testing_client:
        # Establish an application context
        with flask_app.app_context():
            g.client_id = "test client id"
            yield testing_client  # this is where the testing happens!
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
import inspect
import json
import logging
import socket
import time
import typing as t
import warnings
from types import FrameType

from taipy.gui import Gui, Html, Markdown
from taipy.gui._renderers.builder import _Builder
from taipy.gui._warnings import TaipyGuiWarning
from taipy.gui.utils._variable_directory import _reset_name_map
from taipy.gui.utils.expr_var_name import _reset_expr_var_name

# NOTE(review): `inspect` and `FrameType` appear unused in this file — possibly
# leftovers or used by code outside this view; confirm before removing.


class Helpers:
    """Static utilities shared by the GUI test suite.

    Covers: rendering a page and checking its JSX output, asserting the shape
    of outgoing websocket messages, and starting/stopping the server for
    end-to-end runs.
    """

    @staticmethod
    def test_cleanup():
        # Reset global counters/registries so state does not leak between tests.
        _Builder._reset_key()
        _reset_name_map()
        _reset_expr_var_name()

    @staticmethod
    def test_control_md(gui: Gui, md_string: str, expected_values: t.Union[str, t.List]):
        """Render a Markdown snippet on a page named "test" and check its JSX."""
        gui.add_page("test", Markdown(md_string, frame=None))
        Helpers._test_control(gui, expected_values)

    @staticmethod
    def test_control_html(gui: Gui, html_string: str, expected_values: t.Union[str, t.List]):
        """Render an HTML snippet on a page named "test" and check its JSX."""
        gui.add_page("test", Html(html_string, frame=None))
        Helpers._test_control(gui, expected_values)

    @staticmethod
    def test_control_builder(gui: Gui, builder_page, expected_values: t.Union[str, t.List]):
        """Render a builder-API page on a page named "test" and check its JSX."""
        gui.add_page("test", builder_page)
        Helpers._test_control(gui, expected_values)

    @staticmethod
    def _test_control(gui: Gui, expected_values: t.Union[str, t.List]):
        """Fetch /taipy-jsx/test and assert the returned JSX.

        A string in `expected_values` must match the JSX exactly; a list means
        every fragment must appear somewhere in the JSX.
        """
        gui.run(run_server=False, single_client=True, stylekit=False)
        client = gui._server.test_client()
        response = client.get("/taipy-jsx/test")
        assert response.status_code == 200, f"response.status_code {response.status_code} != 200"
        response_data = json.loads(response.get_data().decode("utf-8", "ignore"))
        assert isinstance(response_data, t.Dict), "response_data is not Dict"
        assert "jsx" in response_data, "jsx not in response_data"
        jsx = response_data["jsx"]
        logging.getLogger().debug(jsx)
        if isinstance(expected_values, str):
            assert jsx == expected_values, f"{jsx} != {expected_values}"
        elif isinstance(expected_values, list):
            for expected_value in expected_values:
                assert expected_value in jsx, f"{expected_value} not in {jsx}"

    @staticmethod
    def assert_outward_ws_message(received_message, type, varname, value):
        """Assert a WS message whose payload is a list; checks the first entry's name/value."""
        assert isinstance(received_message, dict)
        assert "name" in received_message and received_message["name"] == "message"
        assert "args" in received_message
        args = received_message["args"]
        assert "type" in args and args["type"] == type
        assert "payload" in args
        payload = args["payload"][0]
        assert "name" in payload and varname in payload["name"]
        assert "payload" in payload and "value" in payload["payload"] and payload["payload"]["value"] == value
        logging.getLogger().debug(payload["payload"]["value"])

    @staticmethod
    def assert_outward_simple_ws_message(received_message, type, varname, value):
        """Assert a WS message with a flat (non-list) payload carrying name/value."""
        assert isinstance(received_message, dict)
        assert "name" in received_message and received_message["name"] == "message"
        assert "args" in received_message
        args = received_message["args"]
        assert "type" in args and args["type"] == type
        assert "name" in args and args["name"] == varname
        assert "payload" in args
        payload = args["payload"]
        assert "value" in payload and payload["value"] == value
        logging.getLogger().debug(payload["value"])

    @staticmethod
    def assert_outward_ws_simple_message(received_message, aType, values):
        """Assert a WS message of type `aType` whose args contain every key/value in `values`."""
        assert isinstance(received_message, dict)
        assert "name" in received_message and received_message["name"] == "message"
        assert "args" in received_message
        args = received_message["args"]
        assert "type" in args and args["type"] == aType
        for k, v in values.items():
            assert k in args and args[k] == v
            logging.getLogger().debug(f"{k}: {args[k]}")

    @staticmethod
    def assert_outward_ws_multiple_message(received_message, type, array_len: int):
        """Assert a WS message whose payload is a list of exactly `array_len` entries."""
        assert isinstance(received_message, dict)
        assert "name" in received_message and received_message["name"] == "message"
        assert "args" in received_message
        args = received_message["args"]
        assert "type" in args and args["type"] == type
        assert "payload" in args
        payload = args["payload"]
        assert isinstance(payload, list)
        assert len(payload) == array_len
        logging.getLogger().debug(payload)

    @staticmethod
    def create_scope_and_get_sid(gui: Gui) -> str:
        """Create (or reuse) the "test" scope on the Gui and return its session id."""
        sid = "test"
        gui._bindings()._get_or_create_scope(sid)
        return sid

    @staticmethod
    def port_check():
        # True when something is accepting connections on 127.0.0.1:5000.
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(1)
        if s.connect_ex(("127.0.0.1", 5000)) == 0:
            s.close()
            return True
        else:
            s.close()
            return False

    @staticmethod
    def run_e2e(gui, **kwargs):
        """Start the server in a background thread and block until the port answers."""
        kwargs["run_in_thread"] = True
        kwargs["single_client"] = True
        kwargs["run_browser"] = False
        kwargs["stylekit"] = kwargs.get("stylekit", False)
        with warnings.catch_warnings(record=True):
            gui.run(**kwargs)
        while not Helpers.port_check():
            time.sleep(0.1)

    @staticmethod
    def run_e2e_multi_client(gui: Gui):
        """Start the server threaded with multi-client support; block until reachable."""
        with warnings.catch_warnings(record=True):
            gui.run(run_server=False, run_browser=False, single_client=False, stylekit=False)
            gui._server.run(
                host=gui._get_config("host", "127.0.0.1"),
                port=gui._get_config("port", 5000),
                debug=False,
                use_reloader=False,
                flask_log=False,
                run_in_thread=True,
                allow_unsafe_werkzeug=False,
                notebook_proxy=False,
            )
        while not Helpers.port_check():
            time.sleep(0.1)

    @staticmethod
    def get_taipy_warnings(warns: t.List[warnings.WarningMessage]) -> t.List[warnings.WarningMessage]:
        """Filter a recorded warnings list down to TaipyGuiWarning entries."""
        return [w for w in warns if w.category is TaipyGuiWarning]
import inspect

from taipy.gui import Gui, Html


def test_simple_html(gui: Gui, helpers):
    """A minimal HTML page is rendered down to its body content as JSX."""
    page_markup = "<html><head></head><body><h1>test</h1></body></html>"
    gui._set_frame(inspect.currentframe())
    gui.add_page("test", Html(page_markup))
    gui.run(run_server=False)
    flask_client = gui._server.test_client()
    rendered_jsx = flask_client.get("/taipy-jsx/test").json["jsx"]
    # Only the <body> content survives the conversion.
    assert rendered_jsx == "<h1>test</h1>"
import pytest

from taipy.gui import Gui


def _expect_md(gui, helpers, markdown, fragments):
    # Common driver: render a Markdown snippet and verify the produced JSX fragments.
    helpers.test_control_md(gui, markdown, fragments)


def test_invalid_control_name(gui: Gui, helpers):
    """An unknown control name is reported as a syntax error."""
    _expect_md(gui, helpers, "<|invalid|invalid|>", ["INVALID SYNTAX - Control is 'invalid'"])


def test_value_to_negated_property(gui: Gui, helpers):
    """`not <prop>=true` negates the property value."""
    _expect_md(gui, helpers, "<|button|not active=true|>", ["<Button", "active={false}"])


def test_invalid_property_value(gui: Gui, helpers):
    """A free-form fragment falls back to an empty button label."""
    _expect_md(gui, helpers, "<|button|let's try that!|>", ["<Button", 'label="&lt;Empty&gt;"'])


def test_unclosed_block(gui: Gui, helpers):
    """An unclosed block tag still produces a matched Part element."""
    _expect_md(gui, helpers, "<|", ["<Part", "</Part>"])


def test_opening_unknown_block(gui: Gui, helpers):
    """An unknown opening block becomes a Part carrying that class name."""
    _expect_md(gui, helpers, "<|unknown", ["<Part", 'className="unknown"'])


def test_closing_unknown_block(gui: Gui, helpers):
    """A stray closing tag is reported inside a plain div."""
    _expect_md(gui, helpers, "|>", ["<div>", "No matching opened tag", "</div>"])


def test_md_link(gui: Gui, helpers):
    """Standard Markdown links render as anchor elements."""
    _expect_md(gui, helpers, "[content](link)", ["<a", 'href="link"', "content</a>"])
import pytest

from taipy.gui.utils._bindings import _Bindings


def test_exception_binding_twice(gui, test_client):
    """Binding the same variable name a second time raises ValueError."""
    bindings = _Bindings(gui)
    bindings._new_scopes()
    bindings._bind("x", 10)
    with pytest.raises(ValueError):
        bindings._bind("x", 10)


def test_exception_binding_invalid_name(gui):
    """Binding a name that is not a valid Python identifier raises ValueError."""
    bindings = _Bindings(gui)
    bindings._new_scopes()
    with pytest.raises(ValueError):
        bindings._bind("invalid identifier", 10)
# Removed the accidental, unused `from email import message` import
# (flake8 F401 — likely an IDE auto-import artifact).
import pytest

from taipy.gui._page import _Page


def test_exception_page(gui):
    """Rendering a page that has no renderer attached raises RuntimeError."""
    page = _Page()
    page._route = "page1"
    with pytest.raises(RuntimeError, match="Can't render page page1: no renderer found"):
        page.render(gui)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.

import inspect
import typing as t
from pathlib import Path

import pytest

from taipy.gui import Gui
from taipy.gui.extension import Element, ElementLibrary, ElementProperty, PropertyType


def render_xhtml_4_my_library(properties: t.Dict[str, t.Any]) -> str:
    """Well-formed XHTML renderer used by the "title" element."""
    return f"<h1>{properties.get('value', '')}</h1>"


def render_xhtml_4_my_library_fail(properties: t.Dict[str, t.Any]) -> str:
    # Deliberately malformed (unclosed tag) to exercise the XHTML validation path.
    return f"<h1>{properties.get('value', '')}</h1"


class MyLibrary(ElementLibrary):
    """A test element library exposing input, title, failing-title and inner elements."""

    elts = {
        "testinput": Element(
            "value",
            {
                "value": ElementProperty(PropertyType.dynamic_string, "Fred"),
                "multiline": ElementProperty(PropertyType.boolean, False),
                "broadcast": ElementProperty(PropertyType.broadcast, "broadcast"),
            },
            "Input",
        ),
        "title": Element(
            "value",
            {
                "value": ElementProperty(PropertyType.string, ""),
            },
            "h1",
            render_xhtml=render_xhtml_4_my_library,
        ),
        "title_fail": Element(
            "value",
            {
                "value": ElementProperty(PropertyType.string, ""),
            },
            "h1",
            render_xhtml=render_xhtml_4_my_library_fail,
        ),
        "inner": Element(
            "value",
            {"value": ElementProperty(PropertyType.string, "")},
            inner_properties={
                "with_property": ElementProperty(
                    PropertyType.react,
                    "{<tp:prop:value>}",
                ),
            },
        ),
    }

    def get_name(self) -> str:
        return "test_lib"

    def get_elements(self) -> t.Dict[str, Element]:
        return MyLibrary.elts

    def get_resource(self, name: str) -> Path:
        return Path(name)


class MyBadLibrary(ElementLibrary):
    # "bad name" contains a space: registration is expected to fail.
    def get_name(self) -> str:
        return "bad name"

    def get_elements(self) -> t.Dict[str, Element]:
        return {}


class MyGoodLibrary(ElementLibrary):
    # Valid name, no elements: registration is expected to succeed.
    def get_name(self) -> str:
        return "test_lib"

    def get_elements(self) -> t.Dict[str, Element]:
        return {}


# Register the library once at import time so every test below can use it.
Gui.add_library(MyLibrary())


def test_lib_input_md(gui: Gui, test_client, helpers):
    val = ""  # noqa: F841
    gui._set_frame(inspect.currentframe())
    md_string = "<|{val}|test_lib.testinput|multiline|>"
    expected_list = [
        "<TestLib_Input",
        'libClassName="test_lib-testinput"',
        "multiline={true}",
        'defaultValue=""',
        "broadcast={_bc_broadcast}",
        "value={tpec_TpExPr_val_TPMDL_0}",
    ]
    helpers.test_control_md(gui, md_string, expected_list)


def test_lib_xhtml_md(gui: Gui, test_client, helpers):
    val = "title"  # noqa: F841
    gui._set_frame(inspect.currentframe())
    md_string = "<|{val}|test_lib.title|>"
    expected = [f"<h1>{val}</h1>"]
    helpers.test_control_md(gui, md_string, expected)


def test_lib_xhtml_fail_md(gui: Gui, test_client, helpers):
    # The broken renderer must surface a parse error instead of JSX.
    val = "title"  # noqa: F841
    gui._set_frame(inspect.currentframe())
    md_string = "<|{val}|test_lib.title_fail|>"
    expected = ["title_fail.render_xhtml() did not return a valid XHTML string. unclosed token: line 1, column 9"]
    helpers.test_control_md(gui, md_string, expected)


def test_lib_input_html_1(gui: Gui, test_client, helpers):
    val = ""  # noqa: F841
    gui._set_frame(inspect.currentframe())
    html_string = '<test_lib:testinput value="{val}" multiline="true" />'
    expected_list = [
        "<TestLib_Input",
        "multiline={true}",
        'defaultValue=""',
        "broadcast={_bc_broadcast}",
        "value={tpec_TpExPr_val_TPMDL_0}",
        "</TestLib_Input>",
    ]
    helpers.test_control_html(gui, html_string, expected_list)


def test_lib_input_html_2(gui: Gui, test_client, helpers):
    # Same control, but with the value passed as element text instead of an attribute.
    val = ""  # noqa: F841
    gui._set_frame(inspect.currentframe())
    html_string = '<test_lib:testinput multiline="true">{val}</test_lib:testinput>'
    expected_list = [
        "<TestLib_Input",
        "multiline={true}",
        'defaultValue=""',
        "broadcast={_bc_broadcast}",
        "value={tpec_TpExPr_val_TPMDL_0}",
        "</TestLib_Input>",
    ]
    helpers.test_control_html(gui, html_string, expected_list)


def test_lib_inner_md(gui: Gui, test_client, helpers):
    val = "title"  # noqa: F841
    gui._set_frame(inspect.currentframe())
    md_string = "<|{val}|test_lib.inner|>"
    expected = [
        "<TestLib_Inner",
        "value={tpec_TpExPr_val_TPMDL_0}",
        "withProperty={tpec_TpExPr_tpec_TpExPr_val_TPMDL_0_TPMDL_0}",
    ]
    helpers.test_control_md(gui, md_string, expected)


def test_lib_inner_no_value_md(gui: Gui, test_client, helpers):
    gui._set_frame(inspect.currentframe())
    md_string = "<|test_lib.inner|>"
    expected = ["<TestLib_Inner", "withProperty={tpec_TpExPr_None_TPMDL_0}"]
    helpers.test_control_md(gui, md_string, expected)


def test_lib_bad_name():
    # A library name containing a space is rejected.
    with pytest.raises(NameError):
        Gui.add_library(MyBadLibrary())


def test_lib_good_name():
    Gui.add_library(MyGoodLibrary())


def test_add_lib():
    # Libraries can also be passed at Gui construction time.
    Gui(libraries=[MyGoodLibrary()])
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
import os
import tempfile
from unittest.mock import patch

import pytest

from taipy.config import Config
from taipy.config._config import _Config
from taipy.config._serializer._toml_serializer import _TomlSerializer
from taipy.config.checker._checker import _Checker
from taipy.config.checker.issue_collector import IssueCollector
from taipy.gui import Gui


class NamedTemporaryFile:
    """A named temp file that survives closing and is deleted with the object."""

    def __init__(self, content=None):
        with tempfile.NamedTemporaryFile("w", delete=False) as fd:
            if content:
                fd.write(content)
            self.filename = fd.name

    def read(self):
        with open(self.filename, "r") as fp:
            return fp.read()

    def __del__(self):
        # Best-effort cleanup of the on-disk file when the wrapper is collected.
        os.unlink(self.filename)


def init_config():
    """Reset the global Config singleton to a pristine state and re-inject the [gui] section."""
    Config.unblock_update()
    Config._default_config = _Config()._default_config()
    Config._python_config = _Config()
    Config._file_config = None
    Config._env_file_config = None
    Config._applied_config = _Config._default_config()
    Config._collector = IssueCollector()
    Config._serializer = _TomlSerializer()
    _Checker._checkers = []

    # Imported lazily so the reset above happens before the section is injected.
    from taipy.config import _inject_section
    from taipy.gui._default_config import default_config
    from taipy.gui._gui_section import _GuiSection

    _inject_section(
        _GuiSection,
        "gui_config",
        _GuiSection(property_list=list(default_config)),
        [("configure_gui", _GuiSection._configure)],
        add_to_unconflicted_sections=True,
    )


@pytest.fixture(scope="function", autouse=True)
def cleanup_test(helpers):
    # Reset Config and GUI global state before AND after every test in this module.
    init_config()
    helpers.test_cleanup()
    yield
    init_config()
    helpers.test_cleanup()


def test_gui_service_arguments_hierarchy():
    """Configuration precedence: defaults < Gui.run() args < configure_gui() < TOML file < CLI."""
    # Test default configuration
    gui = Gui()
    gui.run(run_server=False)
    service_config = gui._config.config
    assert not service_config["allow_unsafe_werkzeug"]
    assert service_config["async_mode"] == "gevent"
    assert service_config["change_delay"] is None
    assert service_config["chart_dark_template"] is None
    assert service_config["dark_mode"]
    assert service_config["dark_theme"] is None
    assert not service_config["debug"]
    assert not service_config["extended_status"]
    assert service_config["favicon"] is None
    assert not service_config["flask_log"]
    assert service_config["host"] == "127.0.0.1"
    assert service_config["light_theme"] is None
    assert service_config["margin"] is None
    assert service_config["ngrok_token"] == ""
    assert service_config["notification_duration"] == 3000
    assert service_config["propagate"]
    assert service_config["run_browser"]
    assert not service_config["run_in_thread"]
    assert not service_config["run_server"]
    assert not service_config["single_client"]
    assert not service_config["system_notification"]
    assert service_config["theme"] is None
    assert service_config["time_zone"] is None
    assert service_config["title"] is None
    assert service_config["upload_folder"] is None
    assert not service_config["use_arrow"]
    assert not service_config["use_reloader"]
    assert service_config["watermark"] == "Taipy inside"
    assert service_config["webapp_path"] is None
    assert service_config["port"] == 5000
    gui.stop()

    # Override default configuration by explicit defined arguments in Gui.run()
    gui = Gui()
    gui.run(run_server=False, watermark="", host="my_host", port=5001)
    service_config = gui._config.config
    assert service_config["watermark"] == ""
    assert service_config["host"] == "my_host"
    assert service_config["port"] == 5001
    gui.stop()

    # Override Gui.run() arguments by explicit defined arguments in Config.configure_gui()
    Config.configure_gui(dark_mode=False, host="my_2nd_host", port=5002)
    gui = Gui()
    gui.run(run_server=False, watermark="", host="my_host", port=5001)
    service_config = gui._config.config
    assert not service_config["dark_mode"]
    assert service_config["host"] == "my_2nd_host"
    assert service_config["watermark"] == ""
    assert service_config["port"] == 5002
    gui.stop()

    # Override Config.configure_gui() arguments by loading a TOML file with [gui] section
    toml_config = NamedTemporaryFile(
        content="""
[TAIPY]

[gui]
host = "my_3rd_host"
port = 5003
use_reloader = "true:bool"
"""
    )
    Config.load(toml_config.filename)
    gui = Gui()
    gui.run(run_server=False, host="my_host", port=5001)
    service_config = gui._config.config
    assert service_config["host"] == "my_3rd_host"
    assert service_config["port"] == 5003
    assert service_config["use_reloader"]
    gui.stop()

    # Override TOML configuration file with CLI arguments
    with patch("sys.argv", ["prog", "--host", "my_4th_host", "--port", "5004", "--no-reloader", "--debug"]):
        gui = Gui()
        gui.run(run_server=False, host="my_host", port=5001)
        service_config = gui._config.config
        assert service_config["host"] == "my_4th_host"
        assert service_config["port"] == 5004
        assert not service_config["use_reloader"]
        assert service_config["debug"]
        gui.stop()


def test_clean_config():
    """_clean() empties the section in place without replacing the instance."""
    gui_config = Config.configure_gui(dark_mode=False)
    assert Config.gui_config is gui_config
    gui_config._clean()
    # Check if the instance before and after _clean() is the same
    assert Config.gui_config is gui_config
    assert gui_config.dark_mode is None
    assert gui_config.properties == {}
import inspect
import warnings

import pytest

from taipy.gui import Gui


def test_no_ignore_file(gui: Gui):
    """Without a .taipyignore file, static resources are served normally."""
    with warnings.catch_warnings(record=True):
        gui._set_frame(inspect.currentframe())
        gui.run(run_server=False)
        response = gui._server.test_client().get("/resource.txt")
        expected_status = 200
        assert (
            response.status_code == expected_status
        ), f"file resource.txt request status should be 200 but is {response.status_code}"
import inspect
import warnings

import pytest

from taipy.gui import Gui


def test_ignore_file_found(gui: Gui):
    """A resource matched by .taipyignore must not be served."""
    with warnings.catch_warnings(record=True):
        gui._set_frame(inspect.currentframe())
        gui.run(run_server=False)
        response = gui._server.test_client().get("/resource.txt")
        assert (
            response.status_code == 404
        ), f"file resource.txt request status should be 404 but is {response.status_code}"


def test_ignore_file_not_found(gui: Gui):
    """A resource not matched by .taipyignore is served normally."""
    with warnings.catch_warnings(record=True):
        gui._set_frame(inspect.currentframe())
        gui.run(run_server=False)
        response = gui._server.test_client().get("/resource2.txt")
        assert (
            response.status_code == 200
        ), f"file resource2.txt request status should be 200 but is {response.status_code}"
import inspect
import time
from urllib.request import urlopen

from taipy.gui import Gui


# this hangs in github
def test_run_thread(gui: Gui, helpers):
    """The server can be started in a thread, stopped, then started again."""

    def wait_until(predicate):
        # Poll at 100ms until the condition holds.
        while not predicate():
            time.sleep(0.1)

    def fetch_page1():
        return urlopen("http://127.0.0.1:5000/taipy-jsx/page1").read().decode("utf-8")

    gui._set_frame(inspect.currentframe())
    gui.add_page("page1", "# first page")

    gui.run(run_in_thread=True, run_browser=False)
    wait_until(helpers.port_check)
    assert ">first page</h1>" in fetch_page1()

    gui.stop()
    wait_until(lambda: not helpers.port_check())

    # Restarting the same Gui instance must serve the same page again.
    gui.run(run_in_thread=True, run_browser=False)
    wait_until(helpers.port_check)
    assert ">first page</h1>" in fetch_page1()
import pytest

from taipy.gui import Gui


def test_add_shared_variables(gui: Gui):
    """Both shared-variable APIs register names without creating duplicates."""
    Gui.add_shared_variable("var1", "var2")
    shared = gui._Gui__shared_variables
    assert isinstance(shared, list)
    assert len(shared) == 2
    # Registering the same names again (plural API) keeps the list deduplicated.
    Gui.add_shared_variables("var1", "var2")
    assert len(gui._Gui__shared_variables) == 2
import json

from taipy.gui.gui import Gui


def test_multiple_instance():
    """Two Gui instances keep independent pages and servers."""
    first_gui = Gui("<|gui1|>")
    second_gui = Gui("<|gui2|>")
    first_gui.run(run_server=False)
    second_gui.run(run_server=False)
    assert_multiple_instance(first_gui._server.test_client(), 'value="gui1"')
    assert_multiple_instance(second_gui._server.test_client(), 'value="gui2"')


def assert_multiple_instance(client, expected_value):
    # Each instance's root page must contain its own control value.
    response = client.get("/taipy-jsx/TaiPy_root_page")
    decoded = json.loads(response.get_data().decode("utf-8", "ignore"))
    assert response.status_code == 200
    assert isinstance(decoded, dict)
    assert "jsx" in decoded
    assert expected_value in decoded["jsx"]
from taipy.gui.utils._variable_directory import _MODULE_NAME_MAP, _variable_decode, _variable_encode


def test_variable_encode_decode():
    """Round-trip variable names through encode/decode.

    Statement order matters: _MODULE_NAME_MAP assigns indices in first-use
    order, so "module" gets index 0 and "module1" gets index 1.
    """
    assert _variable_encode("x", "module") == "x_TPMDL_0"
    assert _MODULE_NAME_MAP[0] == "module"
    assert _variable_decode("x_TPMDL_0") == ("x", "module")
    # Without a module, the name passes through unchanged.
    assert _variable_encode("x", None) == "x"
    assert _variable_decode("x") == ("x", None)
    # The TpExPr_ prefix is preserved on encode but stripped on decode.
    assert _variable_encode("TpExPr_x", "module1") == "TpExPr_x_TPMDL_1"
    assert _MODULE_NAME_MAP[1] == "module1"
    assert _variable_decode("TpExPr_x_TPMDL_1") == ("x", "module1")
import inspect
import warnings

from taipy.gui import Gui, Markdown, State, navigate


def test_navigate(gui: Gui, helpers):
    # Local callback: picked up from this frame by Gui via _set_frame below.
    def navigate_to(state: State):
        navigate(state, "test")

    with warnings.catch_warnings(record=True):
        gui._set_frame(inspect.currentframe())
        gui.add_page("test", Markdown("#This is a page"))
        gui.run(run_server=False)
        client = gui._server.test_client()
        # WS client and emit
        ws_client = gui._server._ws.test_client(gui._server.get_flask())
        # Get the jsx once so that the page will be evaluated -> variable will be registered
        sid = helpers.create_scope_and_get_sid(gui)
        client.get(f"/taipy-jsx/test/?client_id={sid}")
        ws_client.emit("message", {"client_id": sid, "type": "A", "name": "my_button", "payload": "navigate_to"})
        # assert for received message (message that would be sent to the front-end client)
        assert ws_client.get_received()


def test_navigate_to_no_route(gui: Gui, helpers):
    # Navigating to an unknown route must produce no outgoing WS message.
    def navigate_to(state: State):
        navigate(state, "toto")

    with warnings.catch_warnings(record=True):
        gui._set_frame(inspect.currentframe())
        gui.add_page("test", Markdown("#This is a page"))
        gui.run(run_server=False)
        client = gui._server.test_client()
        # WS client and emit
        ws_client = gui._server._ws.test_client(gui._server.get_flask())
        # Get the jsx once so that the page will be evaluated -> variable will be registered
        sid = helpers.create_scope_and_get_sid(gui)
        client.get(f"/taipy-jsx/test/?client_id={sid}")
        ws_client.emit("message", {"client_id": sid, "type": "A", "name": "my_button", "payload": "navigate_to"})
        # assert for received message (message that would be sent to the front-end client)
        assert not ws_client.get_received()


def test_on_navigate_to_inexistant(gui: Gui, helpers):
    # on_navigate redirects to "test2", which is never registered: a Taipy
    # warning about the unknown page is expected.
    def on_navigate(state: State, page: str):
        return "test2" if page == "test" else page

    with warnings.catch_warnings(record=True) as records:
        gui._set_frame(inspect.currentframe())
        gui.add_page("test", Markdown("#This is a page"))
        gui.run(run_server=False)
        client = gui._server.test_client()
        # Get the jsx once so that the page will be evaluated -> variable will be registered
        sid = helpers.create_scope_and_get_sid(gui)
        client.get(f"/taipy-jsx/test?client_id={sid}")
        warns = helpers.get_taipy_warnings(records)
        assert len(warns) == 1
        text = warns[0].message.args[0] if isinstance(warns[0].message, Warning) else warns[0].message
        assert text == 'Cannot navigate to "test2": unknown page.'


def test_on_navigate_to_existant(gui: Gui, helpers):
    # on_navigate redirects "test1" to the registered "test2": an HTTP
    # redirect (302) is expected.
    def on_navigate(state: State, page: str):
        return "test2" if page == "test1" else page

    with warnings.catch_warnings(record=True):
        gui._set_frame(inspect.currentframe())
        gui.add_page("test1", Markdown("#This is a page test1"))
        gui.add_page("test2", Markdown("#This is a page test2"))
        gui.run(run_server=False)
        client = gui._server.test_client()
        # Get the jsx once so that the page will be evaluated -> variable will be registered
        sid = helpers.create_scope_and_get_sid(gui)
        content = client.get(f"/taipy-jsx/test1?client_id={sid}")
        assert content.status_code == 302
import inspect

import pandas as pd  # type: ignore

from taipy.gui import Gui


def test_expression_text_control_str(gui: Gui, test_client, helpers):
    # A plain variable interpolation keeps the value's string type.
    gui._bind_var_val("x", "Hello World!")
    md_string = "<|{x}|>"
    expected_list = ["<Field", 'dataType="str"', 'defaultValue="Hello World!"', "value={tpec_TpExPr_x_TPMDL_0}"]
    helpers.test_control_md(gui, md_string, expected_list)


def test_expression_text_control_int(gui: Gui, test_client, helpers):
    gui._bind_var_val("x", 10)
    md_string = "<|{x}|>"
    expected_list = ["<Field", 'dataType="int"', 'defaultValue="10"', "value={tpec_TpExPr_x_TPMDL_0}"]
    helpers.test_control_md(gui, md_string, expected_list)


def test_expression_text_control_1(gui: Gui, test_client, helpers):
    # An arithmetic expression is evaluated and bound to a generated variable.
    gui._set_frame(inspect.currentframe())
    gui._bind_var_val("x", 10)
    gui._bind_var_val("y", 20)
    md_string = "<|{x + y}|>"
    expected_list = [
        "<Field",
        'dataType="int"',
        'defaultValue="30"',
        "value={tp_TpExPr_x_y_TPMDL_0_0}",
    ]
    helpers.test_control_md(gui, md_string, expected_list)


def test_expression_text_control_2(gui: Gui, test_client, helpers):
    # Mixing literal text and an expression yields a string field.
    gui._set_frame(inspect.currentframe())
    gui._bind_var_val("x", 10)
    gui._bind_var_val("y", 20)
    md_string = "<|x + y = {x + y}|>"
    expected_list = [
        "<Field",
        'dataType="str"',
        'defaultValue="x + y = 30"',
        "value={tp_TpExPr_x_y_x_y_TPMDL_0_0}",
    ]
    helpers.test_control_md(gui, md_string, expected_list)


def test_expression_text_control_3(gui: Gui, test_client, helpers):
    gui._set_frame(inspect.currentframe())
    gui._bind_var_val("x", "Mickey Mouse")
    gui._bind_var_val("y", "Donald Duck")
    md_string = "<|Hello {x} and {y}|>"
    expected_list = [
        "<Field",
        'dataType="str"',
        'defaultValue="Hello Mickey Mouse and Donald Duck"',
        "value={tp_TpExPr_Hello_x_and_y_TPMDL_0_0}",
    ]
    helpers.test_control_md(gui, md_string, expected_list)


def test_expression_text_gt_operator(gui: Gui, test_client, helpers):
    # A comparison expression produces a boolean field.
    gui._set_frame(inspect.currentframe())
    gui._bind_var_val("x", 0)
    md_string = "<|{x > 0}|>"
    expected_list = [
        "<Field",
        'dataType="bool"',
        'defaultValue="false"',
        "value={tp_TpExPr_x_0_TPMDL_0_0}",
    ]
    helpers.test_control_md(gui, md_string, expected_list)


def test_expression_button_control(gui: Gui, test_client, helpers):
    gui._bind_var_val("label", "A button label")
    md_string = "<|button|label={label}|>"
    expected_list = ["<Button", 'defaultLabel="A button label"', "label={tpec_TpExPr_label_TPMDL_0}"]
    helpers.test_control_md(gui, md_string, expected_list)


def test_expression_table_control(gui: Gui, test_client, helpers):
    # A pandas expression in the markdown is evaluated into a DataFrame that
    # is stored in the data scope under the generated variable name.
    gui._set_frame(inspect.currentframe())
    gui._bind_var_val("pd", pd)
    gui._bind_var_val("series_1", pd.Series(["a", "b", "c"], name="Letters"))
    gui._bind_var_val("series_2", pd.Series([1, 2, 3], name="Numbers"))
    md_string = "<|{pd.concat([series_1, series_2], axis=1)}|table|columns=Letters;Numbers|>"
    expected_list = [
        "<Table",
        'defaultColumns="{&quot;Letters&quot;: &#x7B;&quot;index&quot;: 0, &quot;type&quot;: &quot;object&quot;, &quot;dfid&quot;: &quot;Letters&quot;&#x7D;, &quot;Numbers&quot;: &#x7B;&quot;index&quot;: 1, &quot;type&quot;: &quot;int&quot;, &quot;dfid&quot;: &quot;Numbers&quot;&#x7D;}"',
        'updateVarName="_TpD_tp_TpExPr_pd_concat_series_1_series_2_axis_1_TPMDL_0_0"',
        "data={_TpD_tp_TpExPr_pd_concat_series_1_series_2_axis_1_TPMDL_0_0}",
    ]
    helpers.test_control_md(gui, md_string, expected_list)
    assert isinstance(gui._get_data_scope().tp_TpExPr_pd_concat_series_1_series_2_axis_1_TPMDL_0_0, pd.DataFrame)


def test_lambda_expression_selector(gui: Gui, test_client, helpers):
    # A lambda adapter maps each lov entry to an (id, label) pair.
    gui._bind_var_val(
        "lov",
        [
            {"id": "1", "name": "scenario 1"},
            {"id": "3", "name": "scenario 3"},
            {"id": "2", "name": "scenario 2"},
        ],
    )
    gui._bind_var_val("sel", {"id": "1", "name": "scenario 1"})
    md_string = "<|{sel}|selector|lov={lov}|type=test|adapter={lambda elt: (elt['id'], elt['name'])}|>"
    expected_list = [
        "<Selector",
        'defaultLov="[[&quot;1&quot;, &quot;scenario 1&quot;], [&quot;3&quot;, &quot;scenario 3&quot;], [&quot;2&quot;, &quot;scenario 2&quot;]]"',
        'defaultValue="[&quot;1&quot;]"',
        'updateVars="lov=_TpL_tpec_TpExPr_lov_TPMDL_0"',
        "lov={_TpL_tpec_TpExPr_lov_TPMDL_0}",
        'updateVarName="_TpLv_tpec_TpExPr_sel_TPMDL_0"',
        "value={_TpLv_tpec_TpExPr_sel_TPMDL_0}",
    ]
    helpers.test_control_md(gui, md_string, expected_list)
import numpy as np
import pandas as pd

from taipy.gui.data.decimator.lttb import LTTB
from taipy.gui.data.decimator.minmax import MinMaxDecimator
from taipy.gui.data.decimator.rdp import RDP
from taipy.gui.data.decimator.scatter_decimator import ScatterDecimator
from taipy.gui.data.utils import _df_data_filter


def test_data_filter_1(csvdata):
    """MinMax decimation reduces 1500 rows to exactly the 100 requested points."""
    decimated, _ = _df_data_filter(
        csvdata[:1500], None, "Daily hospital occupancy", "", MinMaxDecimator(100), {}, False
    )
    assert decimated.shape[0] == 100


def test_data_filter_2(csvdata):
    """LTTB decimation reduces 1500 rows to exactly the 100 requested points."""
    decimated, _ = _df_data_filter(csvdata[:1500], None, "Daily hospital occupancy", "", LTTB(100), {}, False)
    assert decimated.shape[0] == 100


def test_data_filter_3(csvdata):
    """RDP with a point budget (n_out) keeps exactly that many points."""
    decimated, _ = _df_data_filter(csvdata[:1500], None, "Daily hospital occupancy", "", RDP(n_out=100), {}, False)
    assert decimated.shape[0] == 100


def test_data_filter_4(csvdata):
    """RDP with an epsilon tolerance yields a data-dependent point count (18 here)."""
    decimated, _ = _df_data_filter(csvdata[:1500], None, "Daily hospital occupancy", "", RDP(epsilon=100), {}, False)
    assert decimated.shape[0] == 18


def test_data_filter_5(csvdata):
    """Scatter decimation depends on the target canvas size passed as properties."""
    decimated, _ = _df_data_filter(
        csvdata[:1500], None, "Daily hospital occupancy", "", ScatterDecimator(), {"width": 200, "height": 100}, False
    )
    assert decimated.shape[0] == 1150
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
import inspect

import pytest

from taipy.gui import Gui, Markdown

from .state_asset.page1 import get_a, md_page1, set_a


def test_state(gui: Gui):
    # `a` is bound from this (main) module; the page1 module defines its own `a` (20).
    a = 10  # noqa: F841
    gui._set_frame(inspect.currentframe())
    gui.add_page("page1", md_page1)
    gui.run(run_server=False, single_client=True)
    state = gui._Gui__state
    with gui.get_flask_app().app_context():
        # Unqualified access resolves against this module's binding...
        assert state.a == 10
        # ...while context-qualified access resolves against the page module.
        assert state["page1"].a == 20
        assert state["tests.taipy.gui.gui_specific.state_asset.page1"].a == 20
        assert state._gui == gui
        # Unknown variables are rejected for both read and write, with distinct messages.
        with pytest.raises(Exception) as e:
            state.b
        assert e.value.args[0] == "Variable 'b' is not defined."
        with pytest.raises(Exception) as e:
            state.b = 10
        assert e.value.args[0] == "Variable 'b' is not accessible."
        # Protected (underscore-prefixed) names cannot be read or written directly...
        with pytest.raises(Exception) as e:
            state._taipy_p1
        assert e.value.args[0] == "Variable '_taipy_p1' is protected and is not accessible."
        with pytest.raises(Exception) as e:
            state._taipy_p1 = 10
        assert e.value.args[0] == "Variable '_taipy_p1' is not accessible."
        # ...but can be managed through the placeholder API.
        assert state._get_placeholder("_taipy_p1") is None
        state._set_placeholder("_taipy_p1", 10)
        assert state._get_placeholder("_taipy_p1") == 10
        assert state._get_placeholder_attrs() == (
            "_taipy_p1",
            "_current_context",
        )
        # Helper accessors defined in the page module read/write the page's own `a`.
        assert get_a(state) == 20
        set_a(state, 30)
        assert get_a(state) == 30
import pytest

from taipy.gui import Gui
from taipy.gui.utils._locals_context import _LocalsContext


def test_locals_context(gui: Gui):
    """Exercise default and per-context locals resolution of _LocalsContext."""
    ctx = _LocalsContext()
    gui.run(run_server=False)
    with gui.get_flask_app().app_context():
        # Nothing registered yet: asking for the default must fail.
        with pytest.raises(KeyError):
            ctx.get_default()
        default_locals = locals()
        ctx.set_default(default_locals)
        assert ctx.get_default() == default_locals
        overlay = {"__main__": "test"}
        ctx.add("test", overlay)
        # Outside of any named context the default mapping is active.
        assert ctx.get_context() is None
        assert ctx.get_locals() == default_locals
        # Inside the named context the overlay mapping is active.
        with ctx.set_locals_context("test"):
            assert ctx.get_context() == "test"
            assert ctx.get_locals() == overlay
        # Leaving the context restores the default.
        assert ctx.get_context() is None
        assert ctx.get_locals() == default_locals
        assert ctx.is_default() is True
        assert "__main__" in ctx.get_all_keys()
import inspect

from taipy.gui.utils.get_module_name import _get_module_name_from_frame, _get_module_name_from_imported_var

# Module-level variable probed by the imported-variable resolution test below.
x = 10


def test_get_module_name():
    """The frame-based helper resolves this test module's dotted name."""
    resolved = _get_module_name_from_frame(inspect.currentframe())
    assert resolved == "tests.taipy.gui.gui_specific.test_get_module_name"


def test_get_module_name_imported_var():
    """A value match (x == 10) resolves to this module; a mismatch falls back to the given name."""
    matched = _get_module_name_from_imported_var("x", 10, "test_get_module_name")
    assert matched == "tests.taipy.gui.gui_specific.test_get_module_name"
    unmatched = _get_module_name_from_imported_var("x", 11, "test_get_module_name")
    assert unmatched == "test_get_module_name"
import inspect
import os
from pathlib import Path

from taipy.gui import Gui


def test_folder_pages_binding(gui: Gui):
    """Adding a folder registers one route/page per contained file plus the default one."""
    assets_dir = f"{Path(Path(__file__).parent.resolve())}{os.path.sep}sample_assets"
    gui._set_frame(inspect.currentframe())
    gui.add_pages(assets_dir)
    gui.run(run_server=False)
    expected = 3  # 2 files -> 2 routes/pages + 1 default
    assert len(gui._config.routes) == expected
    assert len(gui._config.pages) == expected
import inspect
import json
import warnings

from taipy.gui import Gui


def test_render_route(gui: Gui):
    """/taipy-init lists one client-side location per registered page plus the root."""
    gui._set_frame(inspect.currentframe())
    gui.add_page("page1", "# first page")
    gui.add_page("page2", "# second page")
    gui.run(run_server=False)
    with warnings.catch_warnings(record=True):
        client = gui._server.test_client()
        response = client.get("/taipy-init")
        payload = json.loads(response.get_data().decode("utf-8", "ignore"))
        assert response.status_code == 200
        assert isinstance(payload, dict)
        locations = payload["locations"]
        assert isinstance(locations, dict)
        for route in ("/page1", "/page2", "/"):
            assert route in locations
        assert locations == {"/": "/TaiPy_root_page", "/page1": "/page1", "/page2": "/page2"}
import json

import pandas as pd
import pytest

from taipy.gui import Gui
from taipy.gui.utils import _TaipyContent


def test__get_real_var_name(gui: Gui):
    # An empty expression maps to an empty (name, module) pair.
    res = gui._get_real_var_name("")
    assert isinstance(res, tuple)
    assert res[0] == ""
    assert res[1] == ""
    gui.run(run_server=False)
    with gui.get_flask_app().app_context():
        # A hashed holder name with no backing variable raises NameError.
        with pytest.raises(NameError):
            res = gui._get_real_var_name(f"{_TaipyContent.get_hash()}_var")


def test__get_user_instance(gui: Gui):
    gui.run(run_server=False)
    with gui.get_flask_app().app_context():
        # Looking up an unknown user instance warns instead of raising.
        with pytest.warns(UserWarning):
            gui._get_user_instance("", type(None))


def test__call_broadcast_callback(gui: Gui):
    gui.run(run_server=False)
    with gui.get_flask_app().app_context():
        # A well-behaved callback's return value is propagated.
        res = gui._call_broadcast_callback(lambda s, t: t, ["Hello World"], "mine")
        assert res == "Hello World"
    with gui.get_flask_app().app_context():
        # `print` does not match the expected callback shape: a warning is
        # emitted and None comes back.
        with pytest.warns(UserWarning):
            res = gui._call_broadcast_callback(print, ["Hello World"], "mine")
            assert res is None


def test__refresh_expr(gui: Gui):
    gui.run(run_server=False)
    with gui.get_flask_app().app_context():
        # Refreshing an expression that was never evaluated returns None.
        res = gui._refresh_expr("var", None)
        assert res is None


def test__tbl_cols(gui: Gui):
    data = pd.DataFrame({"col1": [0, 1, 2], "col2": [True, True, False]})
    gui.run(run_server=False)
    with gui.get_flask_app().app_context():
        # Rebuild requested: column metadata is returned as a JSON string.
        res = gui._tbl_cols(True, None, json.dumps({}), json.dumps({"data": "data"}), data=data)
        d = json.loads(res)
        assert isinstance(d, dict)
        assert d["col1"]["type"] == "int"
        # No rebuild: the "do not update" sentinel is returned.
        res = gui._tbl_cols(False, None, "", "")
        assert repr(res) == "Taipy: Do not update"


def test__chart_conf(gui: Gui):
    data = pd.DataFrame({"col1": [0, 1, 2], "col2": [True, True, False]})
    gui.run(run_server=False)
    with gui.get_flask_app().app_context():
        # Rebuild requested: chart config (incl. column metadata) is returned as JSON.
        res = gui._chart_conf(True, None, json.dumps({}), json.dumps({"data": "data"}), data=data)
        d = json.loads(res)
        assert isinstance(d, dict)
        assert d["columns"]["col1"]["type"] == "int"
        # No rebuild: the "do not update" sentinel is returned.
        res = gui._chart_conf(False, None, "", "")
        assert repr(res) == "Taipy: Do not update"
        with pytest.warns(UserWarning):
            # Rebuild requested without a usable payload: warns, still no update.
            res = gui._chart_conf(True, None, "", "")
            assert repr(res) == "Taipy: Do not update"


def test__get_valid_adapter_result(gui: Gui):
    gui.run(run_server=False)
    with gui.get_flask_app().app_context():
        # A 2-tuple is already a valid (id, label) adapter result and is kept as-is.
        res = gui._get_valid_adapter_result(("id", "label"))
        assert isinstance(res, tuple)
        assert res[0] == "id"
import json
import warnings
from types import SimpleNamespace

from taipy.gui import Gui, Markdown


def test_partial(gui: Gui):
    """A registered partial's markdown is rendered at its generated route."""
    with warnings.catch_warnings(record=True):
        gui.add_partial(Markdown("#This is a partial"))
        gui.run(run_server=False)
        client = gui._server.test_client()
        response = client.get(f"/taipy-jsx/{gui._config.partial_routes[0]}")
        assert response.status_code == 200
        payload = json.loads(response.get_data().decode("utf-8", "ignore"))
        assert "jsx" in payload and "This is a partial" in payload["jsx"]


def test_partial_update(gui: Gui):
    """update_content() re-renders the partial with the new markdown."""
    with warnings.catch_warnings(record=True):
        partial = gui.add_partial(Markdown("#This is a partial"))
        gui.run(run_server=False, single_client=True)
        client = gui._server.test_client()
        route = gui._config.partial_routes[0]
        response = client.get(f"/taipy-jsx/{route}")
        assert response.status_code == 200
        payload = json.loads(response.get_data().decode("utf-8", "ignore"))
        assert "jsx" in payload and "This is a partial" in payload["jsx"]
        # Update the partial through a minimal fake State that only carries the gui.
        fake_state = SimpleNamespace()
        fake_state._gui = gui
        partial.update_content(fake_state, "#partial updated")  # type: ignore
        response = client.get(f"/taipy-jsx/{route}")
        assert response.status_code == 200
        payload = json.loads(response.get_data().decode("utf-8", "ignore"))
        assert "jsx" in payload and "partial updated" in payload["jsx"]
from taipy.gui import Gui, Markdown


def test_variable_binding(helpers):
    """
    Tests the binding of a few variables and a function
    """

    def another_function(gui):
        pass

    x = 10
    y = 20
    z = "button label"
    gui = Gui()
    gui.add_page("test", Markdown("<|{x}|> | <|{y}|> | <|{z}|button|on_action=another_function|>"))
    gui.run(run_server=False, single_client=True)
    client = gui._server.test_client()
    jsx = client.get("/taipy-jsx/test").json["jsx"]
    # The bound string shows up both as a server-side default and a client binding.
    for expected in ["<Button", 'defaultLabel="button label"', "label={tpec_TpExPr_z_TPMDL_0}"]:
        assert expected in jsx
    assert gui._bindings().x == x
    assert gui._bindings().y == y
    assert gui._bindings().z == z
    with gui.get_flask_app().app_context():
        # The locally-defined function is resolvable as a user function.
        assert callable(gui._get_user_function("another_function"))
    helpers.test_cleanup()


def test_properties_binding(helpers):
    # A `properties=` dict is evaluated, including interpolation of other
    # bound variables ("A {modifier}button" -> "A nice button").
    gui = Gui()
    modifier = "nice "  # noqa: F841
    button_properties = {"label": "A {modifier}button"}  # noqa: F841
    gui.add_page("test", Markdown("<|button|properties=button_properties|>"))
    gui.run(run_server=False)
    client = gui._server.test_client()
    jsx = client.get("/taipy-jsx/test").json["jsx"]
    for expected in ["<Button", 'defaultLabel="A nice button"']:
        assert expected in jsx
    helpers.test_cleanup()


def test_dict_binding(helpers):
    """
    Tests the binding of a dictionary property
    """
    d = {"k": "test"}  # noqa: F841
    gui = Gui("<|{d.k}|>")
    gui.run(run_server=False)
    client = gui._server.test_client()
    jsx = client.get("/taipy-jsx/TaiPy_root_page").json["jsx"]
    # Dict entries are reachable with attribute syntax inside page expressions.
    for expected in ["<Field", 'defaultValue="test"']:
        assert expected in jsx
    helpers.test_cleanup()
from taipy.gui import Markdown

# Page-module variable: deliberately different from the value bound in the main test module.
a = 20


def get_a(state):
    """Read the page-scoped variable ``a`` from *state*."""
    return getattr(state, "a")


def set_a(state, val):
    """Assign *val* to the page-scoped variable ``a`` on *state*."""
    setattr(state, "a", val)


# Minimal page rendering the bound variable.
md_page1 = Markdown(
    """
<|{a}|>
"""
)
import inspect

import pytest

from taipy.gui import Gui
from taipy.gui.extension import Element, ElementLibrary


class MyLibrary(ElementLibrary):
    """Minimal extension library: a name and no contributed elements."""

    def get_name(self) -> str:
        return "taipy_extension_example"

    def get_elements(self):
        # This library registers no custom elements.
        return {}


def test_extension_no_config(gui: Gui, helpers):
    """Requesting a resource of an unregistered library warns and returns 404."""
    gui.run(run_server=False, single_client=True)
    client = gui._server.test_client()
    with pytest.warns(UserWarning):
        response = client.get("/taipy-extension/toto/titi")
    assert response.status_code == 404


def test_extension_config_wrong_path(gui: Gui, helpers):
    """A registered library still 404s for a resource path it does not serve."""
    Gui.add_library(MyLibrary())
    gui.run(run_server=False, single_client=True)
    client = gui._server.test_client()
    with pytest.warns(UserWarning):
        response = client.get("/taipy-extension/taipy_extension_example/titi")
    assert response.status_code == 404
import inspect

import pytest
from flask import g

from taipy.gui import Gui


def test_get_status(gui: Gui):
    """The status endpoint returns minimal JSON with an empty user_status."""
    gui.run(run_server=False)
    flask_client = gui._server.test_client()
    ret = flask_client.get("/taipy.status.json")
    assert ret.status_code == 200, f"status_code => {ret.status_code} != 200"
    assert ret.mimetype == "application/json", f"mimetype => {ret.mimetype} != application/json"
    assert ret.json, "json is not defined"
    assert "gui" in ret.json, "json has no key gui"
    # Renamed from `gui`: the original rebound the name and shadowed the Gui fixture.
    gui_status = ret.json.get("gui")
    assert isinstance(gui_status, dict), "json.gui is not a dict"
    assert "user_status" in gui_status, "json.gui has no key user_status"
    assert gui_status.get("user_status") == "", "json.gui.user_status is not empty"


def test_get_extended_status(gui: Gui):
    """With extended_status=True the status payload also carries version/host info."""
    gui.run(run_server=False, extended_status=True)
    flask_client = gui._server.test_client()
    ret = flask_client.get("/taipy.status.json")
    assert ret.status_code == 200, f"status_code => {ret.status_code} != 200"
    assert ret.mimetype == "application/json", f"mimetype => {ret.mimetype} != application/json"
    assert ret.json, "json is not defined"
    # Renamed from `gui`: avoid shadowing the Gui fixture with the JSON payload.
    gui_status = ret.json.get("gui")
    assert "backend_version" in gui_status, "json.gui has no key backend_version"
    assert "flask_version" in gui_status, "json.gui has no key flask_version"
    assert "frontend_version" in gui_status, "json.gui has no key frontend_version"
    assert "host" in gui_status, "json.gui has no key host"
    assert "python_version" in gui_status, "json.gui has no key python_version"
    assert "user_status" in gui_status, "json.gui has no key user_status"
    assert gui_status.get("user_status") == "", "json.gui.user_status is not empty"


def test_get_status_with_user_status(gui: Gui):
    """An on_status callback's return value is surfaced as user_status."""
    user_status = "user_status"

    def on_status(state):
        return user_status

    # Register this frame so the local on_status callback is picked up.
    gui._set_frame(inspect.currentframe())
    gui.run(run_server=False)
    flask_client = gui._server.test_client()
    ret = flask_client.get("/taipy.status.json")
    assert ret.status_code == 200, f"status_code => {ret.status_code} != 200"
    assert ret.json, "json is not defined"
    # Renamed from `gui`: avoid shadowing the Gui fixture with the JSON payload.
    gui_status = ret.json.get("gui")
    assert "user_status" in gui_status, "json.gui has no key user_status"
    assert (
        gui_status.get("user_status") == user_status
    ), f'json.gui.user_status => {gui_status.get("user_status")} != {user_status}'
import inspect
import io
import pathlib
import tempfile

import pytest

from taipy.gui import Gui
from taipy.gui.data.data_scope import _DataScopes
from taipy.gui.utils import _get_non_existent_file_path


def test_file_upload_no_varname(gui: Gui, helpers):
    # Missing var_name in the form data -> warning + 400.
    gui.run(run_server=False)
    flask_client = gui._server.test_client()
    # Get the jsx once so that the page will be evaluated -> variable will be registered
    sid = helpers.create_scope_and_get_sid(gui)
    with pytest.warns(UserWarning):
        ret = flask_client.post(f"/taipy-uploads?client_id={sid}")
    assert ret.status_code == 400


def test_file_upload_no_blob(gui: Gui, helpers):
    # var_name present but no file payload -> warning + 400.
    gui.run(run_server=False)
    flask_client = gui._server.test_client()
    # Get the jsx once so that the page will be evaluated -> variable will be registered
    sid = helpers.create_scope_and_get_sid(gui)
    with pytest.warns(UserWarning):
        ret = flask_client.post(f"/taipy-uploads?client_id={sid}", data={"var_name": "varname"})
    assert ret.status_code == 400


def test_file_upload_no_filename(gui: Gui, helpers):
    # A file part with an empty filename -> warning + 400.
    gui.run(run_server=False)
    flask_client = gui._server.test_client()
    file = (io.BytesIO(b"abcdef"), "")
    # Get the jsx once so that the page will be evaluated -> variable will be registered
    sid = helpers.create_scope_and_get_sid(gui)
    with pytest.warns(UserWarning):
        ret = flask_client.post(f"/taipy-uploads?client_id={sid}", data={"var_name": "varname", "blob": file})
    assert ret.status_code == 400


def test_file_upload_simple(gui: Gui, helpers):
    # A single-part upload lands in the upload folder under a non-clashing name.
    gui.run(run_server=False)
    flask_client = gui._server.test_client()
    # Get the jsx once so that the page will be evaluated -> variable will be registered
    sid = helpers.create_scope_and_get_sid(gui)
    file_name = "test.jpg"
    file = (io.BytesIO(b"abcdef"), file_name)
    upload_path = pathlib.Path(gui._get_config("upload_folder", tempfile.gettempdir()))
    # Predict the de-duplicated name the server will pick for this file.
    file_name = _get_non_existent_file_path(upload_path, file_name).name
    ret = flask_client.post(
        f"/taipy-uploads?client_id={sid}",
        data={"var_name": "varname", "blob": file},
        content_type="multipart/form-data",
    )
    assert ret.status_code == 200
    created_file = upload_path / file_name
    assert created_file.exists()


def test_file_upload_multi_part(gui: Gui, helpers):
    # Chunked upload: each part is stored as <name>.part.<i>; once all parts
    # arrive, the final merged file exists too.
    gui.run(run_server=False)
    flask_client = gui._server.test_client()
    # Get the jsx once so that the page will be evaluated -> variable will be registered
    sid = helpers.create_scope_and_get_sid(gui)
    file_name = "test2.jpg"
    file0 = (io.BytesIO(b"abcdef"), file_name)
    file1 = (io.BytesIO(b"abcdef"), file_name)
    upload_path = pathlib.Path(gui._get_config("upload_folder", tempfile.gettempdir()))
    file_name = _get_non_existent_file_path(upload_path, file_name).name
    ret = flask_client.post(
        f"/taipy-uploads?client_id={sid}",
        data={"var_name": "varname", "blob": file0, "total": "2", "part": "0"},
        content_type="multipart/form-data",
    )
    assert ret.status_code == 200
    file0_path = upload_path / f"{file_name}.part.0"
    assert file0_path.exists()
    ret = flask_client.post(
        f"/taipy-uploads?client_id={sid}",
        data={"var_name": "varname", "blob": file1, "total": "2", "part": "1"},
        content_type="multipart/form-data",
    )
    assert ret.status_code == 200
    file1_path = upload_path / f"{file_name}.part.1"
    assert file1_path.exists()
    file_path = upload_path / file_name
    assert file_path.exists()


def test_file_upload_multiple(gui: Gui, helpers):
    # With multiple=True the bound variable accumulates the uploaded files.
    var_name = "varname"
    gui._set_frame(inspect.currentframe())
    gui.run(run_server=False, single_client=True)
    flask_client = gui._server.test_client()
    with gui.get_flask_app().app_context():
        gui._bind_var_val(var_name, None)
    # Get the jsx once so that the page will be evaluated -> variable will be registered
    sid = _DataScopes._GLOBAL_ID
    file = (io.BytesIO(b"abcdef"), "test.jpg")
    ret = flask_client.post(
        f"/taipy-uploads?client_id={sid}", data={"var_name": var_name, "blob": file}, content_type="multipart/form-data"
    )
    assert ret.status_code == 200
    created_file = pathlib.Path(gui._get_config("upload_folder", tempfile.gettempdir())) / "test.jpg"
    assert created_file.exists()
    file2 = (io.BytesIO(b"abcdef"), "test2.jpg")
    ret = flask_client.post(
        f"/taipy-uploads?client_id={sid}",
        data={"var_name": var_name, "blob": file2, "multiple": "True"},
        content_type="multipart/form-data",
    )
    assert ret.status_code == 200
    created_file = pathlib.Path(gui._get_config("upload_folder", tempfile.gettempdir())) / "test2.jpg"
    assert created_file.exists()
    # Both uploads are recorded on the bound variable.
    value = getattr(gui._bindings()._get_all_scopes()[sid], var_name)
    assert len(value) == 2
import pathlib

import pytest

from taipy.gui import Gui


def test_image_path_not_found(gui: Gui, helpers):
    """An unknown image path under /taipy-images yields a 404."""
    gui.run(run_server=False)
    client = gui._server.test_client()
    # Evaluate the page once so the client scope exists.
    sid = helpers.create_scope_and_get_sid(gui)
    response = client.get(f"/taipy-images/images/img.png?client_id={sid}")
    assert response.status_code == 404


def test_image_path_found(gui: Gui, helpers):
    """A path registered through _get_content is served with a 200."""
    image_file = pathlib.Path(__file__).parent.parent.parent / "resources" / "fred.png"
    url = gui._get_content("img", str(image_file.resolve()), True)
    gui.run(run_server=False)
    client = gui._server.test_client()
    sid = helpers.create_scope_and_get_sid(gui)
    response = client.get(f"{url}?client_id={sid}")
    assert response.status_code == 200


def test_image_data_too_big(gui: Gui, helpers):
    """Large binary content must not be inlined as a data: URL."""
    resource = pathlib.Path(__file__).parent.parent.parent / "resources" / "taipan.jpg"
    with open(resource, "rb") as big_file:
        url = gui._get_content("img", big_file.read(), True)
        assert not url.startswith("data:")
import inspect

import pytest

from taipy.gui import Gui


def test_user_content_without_callback(gui: Gui, helpers):
    """With no on_user_content handler bound, the user-content URL 404s."""
    gui.run(run_server=False, single_client=True)
    client = gui._server.test_client()
    with pytest.warns(UserWarning):
        response = client.get(gui._get_user_content_url("path"))
    assert response.status_code == 404


def test_user_content_with_wrong_callback(gui: Gui, helpers):
    """A handler returning None is treated as no content: warning + 404."""

    # Defined under the exact name taipy looks up in the registered frame.
    def on_user_content(state, path, args):
        return None

    gui._set_frame(inspect.currentframe())
    gui.run(run_server=False, single_client=True)
    client = gui._server.test_client()
    with pytest.warns(UserWarning):
        response = client.get(gui._get_user_content_url("path", {"a": "b"}))
    assert response.status_code == 404


def test_user_content_with_callback(gui: Gui, helpers):
    """A handler returning a string (even empty) serves a 200."""

    def on_user_content(state, path, args):
        return ""

    gui._set_frame(inspect.currentframe())
    gui.run(run_server=False, single_client=True)
    client = gui._server.test_client()
    response = client.get(gui._get_user_content_url("path"))
    assert response.status_code == 200
import inspect

from taipy.gui import Gui, Markdown
from taipy.gui.data.data_scope import _DataScopes


def test_sending_messages_in_group(gui: Gui, helpers):
    # Two variables are updated inside a single `with gui` block; the updates
    # must be grouped into one multi-message ("MS") websocket payload.
    # Bind test variables
    name = "World!"  # noqa: F841
    btn_id = "button1"  # noqa: F841
    # set gui frame
    gui._set_frame(inspect.currentframe())
    gui.add_page("test", Markdown("<|Hello {name}|button|id={btn_id}|>"))
    gui.run(run_server=False, single_client=True)
    flask_client = gui._server.test_client()
    # WS client and emit
    ws_client = gui._server._ws.test_client(gui._server.get_flask())
    cid = _DataScopes._GLOBAL_ID
    # Get the jsx once so that the page will be evaluated -> variable will be registered
    flask_client.get(f"/taipy-jsx/test?client_id={cid}")
    assert gui._bindings()._get_all_scopes()[cid].name == "World!"  # type: ignore
    assert gui._bindings()._get_all_scopes()[cid].btn_id == "button1"  # type: ignore
    with gui.get_flask_app().test_request_context(f"/taipy-jsx/test/?client_id={cid}", data={"client_id": cid}):
        # Both assignments happen inside one `with gui` block so they are batched.
        with gui as aGui:
            aGui._Gui__state.name = "Monde!"
            aGui._Gui__state.btn_id = "button2"
    assert gui._bindings()._get_all_scopes()[cid].name == "Monde!"
    assert gui._bindings()._get_all_scopes()[cid].btn_id == "button2"  # type: ignore
    received_messages = ws_client.get_received()
    # A single grouped "MS" message carrying the 2 variable updates.
    helpers.assert_outward_ws_multiple_message(received_messages[0], "MS", 2)
import inspect
import logging
import pathlib

import pytest

from taipy.gui import Gui, download


def test_download_file(gui: Gui, helpers):
    # The action callback calls download(); the server must answer with a
    # "DF" message pointing at the served content URL.
    def do_something(state, id):
        download(state, (pathlib.Path(__file__).parent.parent.parent / "resources" / "taipan.jpg"))

    # NOTE: no page/button is actually needed — the action is fired directly
    # over the websocket below. Kept for reference:
    # gui.add_page(
    #     "test", Markdown("<|Do something!|button|on_action=do_something|id=my_button|>")
    # )
    # set gui frame
    gui._set_frame(inspect.currentframe())
    gui.run(run_server=False)
    # WS client and emit
    ws_client = gui._server._ws.test_client(gui._server.get_flask())
    # Get the jsx once so that the page will be evaluated -> variable will be registered
    sid = helpers.create_scope_and_get_sid(gui)
    ws_client.emit("message", {"client_id": sid, "type": "A", "name": "my_button", "payload": "do_something"})
    # assert for received message (message that would be sent to the front-end client)
    received_messages = ws_client.get_received()
    assert len(received_messages) == 1
    assert isinstance(received_messages[0], dict)
    assert "name" in received_messages[0] and received_messages[0]["name"] == "message"
    assert "args" in received_messages[0]
    args = received_messages[0]["args"]
    # "DF" message whose content is the /taipy-content URL of the file.
    assert "type" in args and args["type"] == "DF"
    assert "content" in args and args["content"] == "/taipy-content/taipyStatic0/taipan.jpg"
    logging.getLogger().debug(args["content"])
import inspect

from taipy.gui import Gui, Markdown


def ws_u_assert_template(gui: Gui, helpers, value_before_update, value_after_update, payload):
    """Drive a 'U' (variable update) websocket message and assert the full round trip."""
    var = value_before_update  # noqa: F841
    gui._set_frame(inspect.currentframe())
    # Register a page so `var` is evaluated as an expression.
    gui.add_page("test", Markdown("<|{var}|>"))
    gui.run(run_server=False)
    flask_client = gui._server.test_client()
    ws_client = gui._server._ws.test_client(gui._server.get_flask())
    sid = helpers.create_scope_and_get_sid(gui)
    # Evaluate the page once so the variable gets registered in the scope.
    flask_client.get(f"/taipy-jsx/test?client_id={sid}")
    assert gui._bindings()._get_all_scopes()[sid].var == value_before_update
    # Simulate the front-end pushing a new value.
    ws_client.emit("message", {"client_id": sid, "type": "U", "name": "tpec_TpExPr_var_TPMDL_0", "payload": payload})
    assert gui._bindings()._get_all_scopes()[sid].var == value_after_update
    # The new value must be broadcast back to the front-end.
    received = ws_client.get_received()
    assert len(received)
    helpers.assert_outward_ws_message(received[0], "MU", "tpec_TpExPr_var_TPMDL_0", value_after_update)


def test_ws_u_string(gui: Gui, helpers):
    """String variables are replaced verbatim by the payload value."""
    before = "a random string"
    after = "a random string is added"
    gui._set_frame(inspect.currentframe())
    ws_u_assert_template(gui, helpers, before, after, {"value": after})


def test_ws_u_number(gui: Gui, helpers):
    """The payload value arrives as-is (here the string "11" replacing the int 10)."""
    gui._set_frame(inspect.currentframe())
    ws_u_assert_template(gui, helpers, 10, "11", {"value": "11"})
import inspect

from taipy.gui import Gui, Markdown


def test_du_table_data_fetched(gui: Gui, helpers, csvdata):
    # A "DU" (data update) request must answer with the requested page of rows,
    # with dates rendered using the configured timezone.
    # Bind test variables
    csvdata = csvdata
    # set gui frame
    gui._set_frame(inspect.currentframe())
    Gui._set_timezone("UTC")
    # Bind a page so that the variable will be evaluated as expression
    gui.add_page(
        "test",
        Markdown(
            "<|{csvdata}|table|page_size=10|page_size_options=10;30;100|columns=Day;Entity;Code;Daily hospital occupancy|date_format=eee dd MMM yyyy|>"
        ),
    )
    gui.run(run_server=False)
    flask_client = gui._server.test_client()
    # WS client and emit
    ws_client = gui._server._ws.test_client(gui._server.get_flask())
    sid = helpers.create_scope_and_get_sid(gui)
    # Get the jsx once so that the page will be evaluated -> variable will be registered
    flask_client.get(f"/taipy-jsx/test?client_id={sid}")
    # Request the first page (rows 0..9) of the table data.
    ws_client.emit(
        "message",
        {
            "client_id": sid,
            "type": "DU",
            "name": "_TpD_tpec_TpExPr_csvdata_TPMDL_0",
            "payload": {
                "columns": ["Day", "Entity", "Code", "Daily hospital occupancy"],
                "pagekey": "0-100--asc",
                "start": 0,
                "end": 9,
                "orderby": "",
                "sort": "asc",
            },
        },
    )
    # assert for received message (message that would be sent to the front-end client)
    received_messages = ws_client.get_received()
    assert received_messages
    # The answer carries the 10 requested rows, the total row count and format.
    helpers.assert_outward_ws_message(
        received_messages[0],
        "MU",
        "_TpD_tpec_TpExPr_csvdata_TPMDL_0",
        {
            "data": [
                {
                    "Code": "AUT",
                    "Day_str": "2020-04-01T00:00:00.000000Z",
                    "Daily hospital occupancy": 856,
                    "Entity": "Austria",
                    "_tp_index": 0,
                },
                {
                    "Code": "AUT",
                    "Day_str": "2020-04-02T00:00:00.000000Z",
                    "Daily hospital occupancy": 823,
                    "Entity": "Austria",
                    "_tp_index": 1,
                },
                {
                    "Code": "AUT",
                    "Day_str": "2020-04-03T00:00:00.000000Z",
                    "Daily hospital occupancy": 829,
                    "Entity": "Austria",
                    "_tp_index": 2,
                },
                {
                    "Code": "AUT",
                    "Day_str": "2020-04-04T00:00:00.000000Z",
                    "Daily hospital occupancy": 826,
                    "Entity": "Austria",
                    "_tp_index": 3,
                },
                {
                    "Code": "AUT",
                    "Day_str": "2020-04-05T00:00:00.000000Z",
                    "Daily hospital occupancy": 712,
                    "Entity": "Austria",
                    "_tp_index": 4,
                },
                {
                    "Code": "AUT",
                    "Day_str": "2020-04-06T00:00:00.000000Z",
                    "Daily hospital occupancy": 824,
                    "Entity": "Austria",
                    "_tp_index": 5,
                },
                {
                    "Code": "AUT",
                    "Day_str": "2020-04-07T00:00:00.000000Z",
                    "Daily hospital occupancy": 857,
                    "Entity": "Austria",
                    "_tp_index": 6,
                },
                {
                    "Code": "AUT",
                    "Day_str": "2020-04-08T00:00:00.000000Z",
                    "Daily hospital occupancy": 829,
                    "Entity": "Austria",
                    "_tp_index": 7,
                },
                {
                    "Code": "AUT",
                    "Day_str": "2020-04-09T00:00:00.000000Z",
                    "Daily hospital occupancy": 820,
                    "Entity": "Austria",
                    "_tp_index": 8,
                },
                {
                    "Code": "AUT",
                    "Day_str": "2020-04-10T00:00:00.000000Z",
                    "Daily hospital occupancy": 771,
                    "Entity": "Austria",
                    "_tp_index": 9,
                },
            ],
            "rowcount": 14477,
            "start": 0,
            "format": "JSON",
        },
    )
import inspect

import pytest

from taipy.gui import Gui, Markdown


def test_default_on_change(gui: Gui, helpers):
    """Without a control-specific handler, the global on_change is invoked."""
    called = {"d": False}

    def on_change(state, var, value):
        called["d"] = True

    x = 10  # noqa: F841
    gui._set_frame(inspect.currentframe())
    gui.add_page("test", Markdown("<|{x}|input|>"))
    gui.run(run_server=False)
    flask_client = gui._server.test_client()
    ws_client = gui._server._ws.test_client(gui._server.get_flask())
    # Evaluate the page once so `x` gets registered in the client scope.
    sid = helpers.create_scope_and_get_sid(gui)
    flask_client.get(f"/taipy-jsx/test?client_id={sid}")
    # Simulate the front-end pushing a new value for x.
    ws_client.emit("message", {"client_id": sid, "type": "U", "name": "x", "payload": {"value": "20"}})
    assert ws_client.get_received()
    assert called["d"] is True


def test_specific_on_change(gui: Gui, helpers):
    """A control-level on_change takes precedence over the global handler."""
    called = {"d": False, "s": False}

    def on_change(state, var, value):
        called["d"] = True

    def on_input_change(state, var, value):
        called["s"] = True

    x = 10  # noqa: F841
    gui._set_frame(inspect.currentframe())
    gui.add_page("test", Markdown("<|{x}|input|on_change=on_input_change|>"))
    gui.run(run_server=False)
    flask_client = gui._server.test_client()
    ws_client = gui._server._ws.test_client(gui._server.get_flask())
    sid = helpers.create_scope_and_get_sid(gui)
    flask_client.get(f"/taipy-jsx/test?client_id={sid}")
    ws_client.emit(
        "message",
        {"client_id": sid, "type": "U", "name": "x", "payload": {"value": "20", "on_change": "on_input_change"}},
    )
    assert ws_client.get_received()
    # Only the control-specific handler must have fired.
    assert called["s"] is True
    assert called["d"] is False
import inspect

import pytest

from taipy.gui import Gui, Markdown


def test_ru_selector(gui: Gui, helpers, csvdata):
    """An 'RU' (refresh) message re-sends the current values of the named variables."""
    selected_val = ["value1", "value2"]  # noqa: F841
    gui._set_frame(inspect.currentframe())
    # Register a page so the variable is evaluated as an expression.
    gui.add_page("test", Markdown("<|{selected_val}|selector|multiple|>"))
    gui.run(run_server=False)
    flask_client = gui._server.test_client()
    ws_client = gui._server._ws.test_client(gui._server.get_flask())
    sid = helpers.create_scope_and_get_sid(gui)
    # Evaluate the page once so the variable gets registered in the scope.
    flask_client.get(f"/taipy-jsx/test?client_id={sid}")
    ws_client.emit("message", {"client_id": sid, "type": "RU", "name": "", "payload": {"names": ["selected_val"]}})
    # The refreshed value is pushed back to the front-end.
    messages = ws_client.get_received()
    assert len(messages)
    helpers.assert_outward_ws_message(messages[0], "MU", "selected_val", ["value1", "value2"])
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
import inspect

import pytest

from taipy.gui import Gui, Markdown


def test_broadcast(gui: Gui, helpers):
    """_broadcast pushes a 'U' message for the _bc_-prefixed variable to clients."""
    selected_val = ["value1", "value2"]  # noqa: F841
    gui._set_frame(inspect.currentframe())
    # Register a page so the variable is evaluated as an expression.
    gui.add_page("test", Markdown("<|{selected_val}|selector|multiple|>"))
    gui.run(run_server=False)
    flask_client = gui._server.test_client()
    ws_client = gui._server._ws.test_client(gui._server.get_flask())
    sid = helpers.create_scope_and_get_sid(gui)
    # Evaluate the page once so the client scope exists.
    flask_client.get(f"/taipy-jsx/test?client_id={sid}")
    gui._broadcast("broadcast_name", "broadcast_value")
    messages = ws_client.get_received()
    assert len(messages)
    helpers.assert_outward_simple_ws_message(messages[0], "U", "_bc_broadcast_name", "broadcast_value")
import inspect
import time

from taipy.gui import Gui, Markdown


def test_a_button_pressed(gui: Gui, helpers):
    """An 'A' (action) message runs the bound callback and broadcasts the state changes."""

    def do_something(state, id):
        state.x = state.x + 10
        state.text = "a random text"

    x = 10  # noqa: F841
    text = "hi"  # noqa: F841
    gui._set_frame(inspect.currentframe())
    # Register a page binding the button action and both variables.
    gui.add_page(
        "test", Markdown("<|Do something!|button|on_action=do_something|id=my_button|> | <|{x}|> | <|{text}|>")
    )
    gui.run(run_server=False)
    flask_client = gui._server.test_client()
    ws_client = gui._server._ws.test_client(gui._server.get_flask())
    # Evaluate the page once so x and text get registered in the scope.
    sid = helpers.create_scope_and_get_sid(gui)
    flask_client.get(f"/taipy-jsx/test?client_id={sid}")
    assert gui._bindings()._get_all_scopes()[sid].x == 10  # type: ignore
    assert gui._bindings()._get_all_scopes()[sid].text == "hi"  # type: ignore
    # Fire the button action over the websocket.
    ws_client.emit("message", {"client_id": sid, "type": "A", "name": "my_button", "payload": "do_something"})
    assert gui._bindings()._get_all_scopes()[sid].text == "a random text"
    assert gui._bindings()._get_all_scopes()[sid].x == 20  # type: ignore
    # Both updated variables must be pushed back to the front-end.
    received = ws_client.get_received()
    helpers.assert_outward_ws_message(received[0], "MU", "x", 20)
    helpers.assert_outward_ws_message(received[1], "MU", "text", "a random text")
import inspect
import warnings

from flask import g

from taipy.gui import Gui
from taipy.gui.utils.types import _TaipyNumber


def test_unbind_variable_in_expression(gui: Gui, helpers):
    """Evaluating "{x}" when x was never bound must emit exactly three taipy
    warnings (unavailable, undefined, evaluation failure)."""
    gui.run(run_server=False, single_client=True)
    with warnings.catch_warnings(record=True) as records:
        with gui.get_flask_app().app_context():
            gui._evaluate_expr("{x}")
            warns = helpers.get_taipy_warnings(records)
            assert len(warns) == 3
            assert "Variable 'x' is not available in" in str(warns[0].message)
            assert "Variable 'x' is not defined" in str(warns[1].message)
            assert "Cannot evaluate expression 'x'" in str(warns[2].message)
            assert "name 'x' is not defined" in str(warns[2].message)


def test_evaluate_same_expression_multiple_times(gui: Gui):
    """The same expression evaluated twice must produce the same hashed string."""
    x = 10  # noqa: F841
    gui._set_frame(inspect.currentframe())
    gui.run(run_server=False, single_client=True)
    with gui.get_flask_app().app_context():
        s1 = gui._evaluate_expr("x + 10 = {x + 10}")
        s2 = gui._evaluate_expr("x + 10 = {x + 10}")
        assert s1 == s2


def test_evaluate_expressions_same_variable(gui: Gui):
    """Two different expressions over the same variable share its hashed name."""
    x = 10  # noqa: F841
    gui._set_frame(inspect.currentframe())
    gui.run(run_server=False, single_client=True)
    with gui.get_flask_app().app_context():
        s1 = gui._evaluate_expr("x + 10 = {x + 10}")
        s2 = gui._evaluate_expr("x = {x}")
        # Both hashed strings must reference the same hashed variable name.
        assert "tp_TpExPr_x" in s1 and "tp_TpExPr_x" in s2


def test_evaluate_holder(gui: Gui):
    """Holder lookup/re-evaluation for a typed expression (_TaipyNumber)."""
    x = 10  # noqa: F841
    gui._set_frame(inspect.currentframe())
    gui.run(run_server=False, single_client=True)
    with warnings.catch_warnings(record=True):
        with gui.get_flask_app().app_context():
            gui._evaluate_expr("{x + 10}")
            hash = gui._evaluate_bind_holder(_TaipyNumber, "TpExPr_x + 10_TPMDL_0")
            assert "_TpN_tp_TpExPr_x_10_TPMDL_0_0" in hash
            lst = gui._evaluate_holders("TpExPr_x + 10_TPMDL_0")
            assert len(lst) == 1
            assert "_TpN_tp_TpExPr_x_10_TPMDL_0_0" in lst[0]
            # test re-evaluate holders
            gui._bindings().x = 20
            gui._re_evaluate_expr(lst[0])


def test_evaluate_not_expression_type(gui: Gui):
    """A string without "{...}" placeholders is returned unchanged."""
    gui.run(run_server=False)
    with gui.get_flask_app().app_context():
        assert "x + 10" == gui._evaluate_expr("x + 10")


def test_evaluate_expression_2_clients(gui: Gui):
    """Expressions are evaluated per client scope (g.client_id selects it)."""
    x = 10  # noqa: F841
    y = 20  # noqa: F841
    gui._set_frame(inspect.currentframe())
    gui.run(run_server=False)
    with gui.get_flask_app().app_context():
        gui._bindings()._get_or_create_scope("A")
        gui._bindings()._get_or_create_scope("B")
        g.client_id = "A"
        gui._evaluate_expr("x + y = {x + y}")
        g.client_id = "B"
        gui._evaluate_expr("x")
        gui._re_evaluate_expr("x")
import inspect

import pytest

from taipy.gui.gui import Gui
from taipy.gui.utils import _MapDict


def test_map_dict():
    """_MapDict must mirror the mapping protocol of the dict it wraps and
    propagate mutations back to the underlying dict."""
    d = {"a": 1, "b": 2, "c": 3}
    md = _MapDict(d)
    md_copy = _MapDict(d).copy()
    assert len(md) == 3
    assert md.__getitem__("a") == d["a"]
    # Writes through the wrapper must be visible in the wrapped dict.
    md.__setitem__("a", 4)
    assert md.__getitem__("a") == 4
    assert d["a"] == 4
    v1 = d["b"]
    v2 = md.pop("b")
    assert v1 == v2
    assert "b" not in d.keys()
    assert "c" in md
    assert len(md) == 2
    v1 = d["c"]
    v2 = md.popitem()
    assert v2 == ("c", v1)
    assert len(md) == 1
    md.clear()
    assert len(md) == 0
    assert len(d) == 0
    # The copy was taken before the mutations and must be unaffected.
    assert len(md_copy) == 3
    v1 = ""
    for k in md_copy:
        v1 += k
    assert v1 == "abc"
    v1 = ""
    for k in md_copy.keys():
        v1 += k
    assert v1 == "abc"
    v1 = ""
    for k in md_copy.__reversed__():
        v1 += k
    assert v1 == "cba"
    v1 = 0
    for k in md_copy.values():
        v1 += k
    assert v1 == 6  # 1+2+3
    v1 = md_copy.setdefault("a", 5)
    assert v1 == 1
    v1 = md_copy.setdefault("d", 5)
    assert v1 == 5
    # FIX: replaced try/assert False/except Exception/assert True with the
    # idiomatic pytest.raises (same breadth: any exception is accepted).
    with pytest.raises(Exception):
        _MapDict("not_a_dict")


def test_map_dict_update():
    """Setting an item must invoke the update callback with key and value."""
    update_values = {}

    def update(k, v):
        update_values[0] = k
        update_values[1] = v

    d = {"a": 1, "b": "2"}
    md = _MapDict(d, update)
    md.__setitem__("a", 3)
    assert update_values[0] == "a"
    assert update_values[1] == 3


def test_map_dict_update_full_dictionary_1():
    """update() with a plain dict must overwrite values (no callback)."""
    values = {"a": 1, "b": 2}
    update_values = {"a": 3, "b": 5}
    md = _MapDict(values)
    assert md["a"] == 1
    assert md["b"] == 2
    md.update(update_values)
    assert md["a"] == 3
    assert md["b"] == 5


def test_map_dict_update_full_dictionary_2():
    """update() must route every key/value pair through the update callback."""
    temp_values = {}

    def update(k, v):
        temp_values[k] = v

    values = {"a": 1, "b": 2}
    update_values = {"a": 3, "b": 5}
    md = _MapDict(values, update)
    assert md["a"] == 1
    assert md["b"] == 2
    md.update(update_values)
    assert temp_values["a"] == 3
    assert temp_values["b"] == 5


def test_map_dict_set(gui: Gui, test_client):
    """Assigning a plain dict to a state variable must re-wrap it as _MapDict."""
    d = {"a": 1}  # noqa: F841
    # set gui frame
    gui._set_frame(inspect.currentframe())
    gui.run(run_server=False, single_client=True)
    with gui.get_flask_app().app_context():
        assert isinstance(gui._Gui__state.d, _MapDict)
        gui._Gui__state.d = {"b": 2}
        assert isinstance(gui._Gui__state.d, _MapDict)
        assert len(gui._Gui__state.d) == 1
        assert gui._Gui__state.d.get("a", None) is None
        assert gui._Gui__state.d.get("b", None) == 2


def test_map_dict_items():
    """Nested dict values are wrapped as _MapDict; deletion and attribute
    assignment behave like the underlying dict."""

    def update(k, v):
        pass

    values = {"a": 1, "b": {"c": "list c"}}
    md = _MapDict(values)
    mdu = _MapDict(values, update)
    assert md["a"] == 1
    assert isinstance(md["b"], _MapDict)
    assert isinstance(mdu["b"], _MapDict)
    assert md["b"]["c"] == "list c"
    assert mdu["b"]["c"] == "list c"
    del md["a"]
    with pytest.raises(KeyError):
        md["e"]
    setattr(md, "a", 1)
    assert md["a"] == 1
import pathlib
import tempfile

from taipy.gui import Gui
from taipy.gui.utils import _get_non_existent_file_path


def test_empty_file_name(gui: Gui, helpers):
    """An empty file name still yields a non-empty generated name."""
    assert _get_non_existent_file_path(pathlib.Path(tempfile.gettempdir()), "").name


def test_non_existent_file(gui: Gui, helpers):
    """The returned path must not exist yet on disk."""
    assert not _get_non_existent_file_path(pathlib.Path(tempfile.gettempdir()), "").exists()


def test_existent_file(gui: Gui, helpers):
    """Each call after the previous path was created must append/increment a
    numeric suffix (stem.N.ext) so the returned path never collides."""
    file_path = _get_non_existent_file_path(pathlib.Path(tempfile.gettempdir()), "")
    with open(file_path, "w") as file_handler:
        file_handler.write("hello")
    assert file_path.exists()
    # Base stem without any numeric suffix part.
    file_stem = file_path.stem.split(".", 1)[0]
    file_suffix = file_path.suffixes[-1]
    # Current counter: first suffix is ".N" when present, else start below 0.
    index = int(file_path.suffixes[0][1:]) if len(file_path.suffixes) > 1 else -1
    file_path = _get_non_existent_file_path(pathlib.Path(tempfile.gettempdir()), "")
    assert file_path.name == f"{file_stem}.{index + 1}{file_suffix}"
    with open(file_path, "w") as file_handler:
        file_handler.write("hello 2")
    assert file_path.exists()
    file_path = _get_non_existent_file_path(pathlib.Path(tempfile.gettempdir()), "")
    assert file_path.name == f"{file_stem}.{index + 2}{file_suffix}"
import warnings

import pytest

from taipy.gui.utils.date import _string_to_date
from taipy.gui.utils.types import _TaipyBase, _TaipyBool, _TaipyDate, _TaipyNumber


def test_taipy_base():
    """_TaipyBase holds a value and a hashed name; set() replaces the value."""
    tb = _TaipyBase("value", "hash")
    assert tb.get() == "value"
    assert tb.get_name() == "hash"
    tb.set("a value")
    assert tb.get() == "a value"
    # NOTE(review): this asserts get_hash() RETURNS the NotImplementedError
    # class (rather than raising it) on the base type — confirm against
    # taipy.gui.utils.types._TaipyBase.
    assert tb.get_hash() == NotImplementedError


def test_taipy_bool():
    """_TaipyBool coerces any value through Python truthiness."""
    assert _TaipyBool(0, "v").get() is False
    assert _TaipyBool(1, "v").get() is True
    assert _TaipyBool(False, "v").get() is False
    assert _TaipyBool(True, "v").get() is True
    assert _TaipyBool("", "v").get() is False
    assert _TaipyBool("hey", "v").get() is True
    assert _TaipyBool([], "v").get() is False
    assert _TaipyBool(["an item"], "v").get() is True


def test_taipy_number():
    """get() on a non-numeric raises TypeError; cast_value() only warns."""
    with pytest.raises(TypeError):
        _TaipyNumber("a string", "x").get()
    with warnings.catch_warnings(record=True):
        _TaipyNumber("a string", "x").cast_value("a string")
    _TaipyNumber(0, "x").cast_value(0)


def test_taipy_date():
    """Datetime values are serialized to ISO-8601; strings/None pass through."""
    assert _TaipyDate(_string_to_date("2022-03-03 00:00:00 UTC"), "x").get() == "2022-03-03T00:00:00+00:00"
    assert _TaipyDate("2022-03-03 00:00:00 UTC", "x").get() == "2022-03-03 00:00:00 UTC"
    assert _TaipyDate(None, "x").get() is None
    _TaipyDate("", "x").cast_value("2022-03-03 00:00:00 UTC")
    _TaipyDate("", "x").cast_value(_string_to_date("2022-03-03 00:00:00 UTC"))
import inspect
from time import sleep

import pytest

from taipy.gui import Gui, State, invoke_long_callback


def test_long_callback(gui: Gui):
    """invoke_long_callback() must accept all its invocation forms (no args,
    status callback, function args, status period) without raising."""
    status = None  # noqa: F841

    def heavy_function(delay=1):
        # Simulated long-running work.
        sleep(delay)

    def heavy_function_with_exception(delay=1):
        sleep(delay)
        raise Exception("Heavy function Exception")

    def heavy_function_status(state: State, status: int):
        # Status callback: mirrors progress into the state.
        state.status = status

    def on_exception(state: State, function_name: str, e: Exception):
        state.status = -1

    gui._set_frame(inspect.currentframe())
    gui.run(run_server=False, single_client=True)
    state = gui._Gui__state
    with gui.get_flask_app().app_context():
        assert state.status is None
        invoke_long_callback(state, heavy_function)
        invoke_long_callback(state, heavy_function_with_exception)
        invoke_long_callback(state, heavy_function, (), heavy_function_status)
        # FIX: "(2)" is just the int 2, not a tuple — the user-function args
        # must be an iterable, so pass a one-element tuple "(2,)".
        invoke_long_callback(state, heavy_function, (2,), heavy_function_status, (), 1000)
        invoke_long_callback(state, heavy_function_with_exception, (), heavy_function_status)
import inspect

from flask import g

from taipy.gui import Gui, Markdown, get_state_id


def test_get_state_id(gui: Gui, helpers):
    """get_state_id() must return the client id bound to the current state."""
    name = "World!"  # noqa: F841
    btn_id = "button1"  # noqa: F841
    # Attach this frame so the page expressions ({name}, {btn_id}) resolve.
    gui._set_frame(inspect.currentframe())
    gui.add_page("test", Markdown("<|Hello {name}|button|id={btn_id}|>"))
    gui.run(run_server=False)
    http_client = gui._server.test_client()
    client_id = helpers.create_scope_and_get_sid(gui)
    # Render the page once so that its variables get registered in the scope.
    http_client.get(f"/taipy-jsx/test?client_id={client_id}")
    with gui.get_flask_app().app_context():
        g.client_id = client_id
        assert get_state_id(gui._Gui__state) == client_id
import inspect

from flask import g

from taipy.gui import Gui, Markdown, State, download


def test_download(gui: Gui, helpers):
    """download() must push a "DF" (download file) ws message carrying the
    file name and the action callback name."""
    name = "World!"  # noqa: F841
    btn_id = "button1"  # noqa: F841

    def on_download_action(state: State):
        pass

    # Attach this frame so the page expressions ({name}, {btn_id}) resolve.
    gui._set_frame(inspect.currentframe())
    gui.add_page("test", Markdown("<|Hello {name}|button|id={btn_id}|>"))
    gui.run(run_server=False)
    http_client = gui._server.test_client()
    # Websocket test client captures messages sent to the front-end.
    ws = gui._server._ws.test_client(gui._server.get_flask())
    client_id = helpers.create_scope_and_get_sid(gui)
    # Render the page once so that its variables get registered in the scope.
    http_client.get(f"/taipy-jsx/test?client_id={client_id}")
    with gui.get_flask_app().test_request_context(
        f"/taipy-jsx/test/?client_id={client_id}", data={"client_id": client_id}
    ):
        g.client_id = client_id
        download(gui._Gui__state, "some text", "filename.txt", "on_download_action")
        received = ws.get_received()
        helpers.assert_outward_ws_simple_message(
            received[0], "DF", {"name": "filename.txt", "onAction": "on_download_action"}
        )
import inspect

from flask import g

from taipy.gui import Gui, Markdown, navigate


def test_navigate(gui: Gui, helpers):
    """navigate() must push a "NA" (navigate) ws message with the target page."""
    name = "World!"  # noqa: F841
    btn_id = "button1"  # noqa: F841
    # Attach this frame so the page expressions ({name}, {btn_id}) resolve.
    gui._set_frame(inspect.currentframe())
    gui.add_page("test", Markdown("<|Hello {name}|button|id={btn_id}|>"))
    gui.run(run_server=False)
    http_client = gui._server.test_client()
    # Websocket test client captures messages sent to the front-end.
    ws = gui._server._ws.test_client(gui._server.get_flask())
    client_id = helpers.create_scope_and_get_sid(gui)
    # Render the page once so that its variables get registered in the scope.
    http_client.get(f"/taipy-jsx/test?client_id={client_id}")
    with gui.get_flask_app().test_request_context(
        f"/taipy-jsx/test/?client_id={client_id}", data={"client_id": client_id}
    ):
        g.client_id = client_id
        navigate(gui._Gui__state, "test")
        received = ws.get_received()
        helpers.assert_outward_ws_simple_message(received[0], "NA", {"to": "test"})
import inspect

from flask import g

from taipy.gui import Gui, Markdown, State, invoke_callback


def test_invoke_callback(gui: Gui, helpers):
    """invoke_callback() must run a user callback against a client's state."""
    name = "World!"  # noqa: F841
    btn_id = "button1"  # noqa: F841
    val = 1  # noqa: F841

    def user_callback(state: State):
        state.val = 10

    # Attach this frame so the page expressions resolve.
    gui._set_frame(inspect.currentframe())
    gui.add_page("test", Markdown("<|Hello {name}|button|id={btn_id}|>\n<|{val}|>"))
    gui.run(run_server=False, single_client=True)
    http_client = gui._server.test_client()
    # client id
    client_id = helpers.create_scope_and_get_sid(gui)
    # Render the page once so that its variables get registered in the scope.
    http_client.get(f"/taipy-jsx/test?client_id={client_id}")
    with gui.get_flask_app().app_context():
        g.client_id = client_id
        invoke_callback(gui, client_id, user_callback, [])
        assert gui._Gui__state.val == 10
import inspect

from flask import g

from taipy.gui import Gui, Markdown, hold_control


def test_hold_control(gui: Gui, helpers):
    """hold_control() must push a "BL" (block UI) ws message with the default
    cancel action and message."""
    name = "World!"  # noqa: F841
    btn_id = "button1"  # noqa: F841
    # Attach this frame so the page expressions ({name}, {btn_id}) resolve.
    gui._set_frame(inspect.currentframe())
    gui.add_page("test", Markdown("<|Hello {name}|button|id={btn_id}|>"))
    gui.run(run_server=False)
    http_client = gui._server.test_client()
    # Websocket test client captures messages sent to the front-end.
    ws = gui._server._ws.test_client(gui._server.get_flask())
    client_id = helpers.create_scope_and_get_sid(gui)
    # Render the page once so that its variables get registered in the scope.
    http_client.get(f"/taipy-jsx/test?client_id={client_id}")
    with gui.get_flask_app().test_request_context(
        f"/taipy-jsx/test/?client_id={client_id}", data={"client_id": client_id}
    ):
        g.client_id = client_id
        hold_control(gui._Gui__state)
        received = ws.get_received()
        helpers.assert_outward_ws_simple_message(
            received[0], "BL", {"action": "_taipy_on_cancel_block_ui", "message": "Work in Progress..."}
        )
import inspect

from flask import g

from taipy.gui import Gui, Markdown, resume_control


def test_resume_control(gui: Gui, helpers):
    """resume_control() must push a "BL" ws message with a None message,
    unblocking the UI."""
    name = "World!"  # noqa: F841
    btn_id = "button1"  # noqa: F841
    # Attach this frame so the page expressions ({name}, {btn_id}) resolve.
    gui._set_frame(inspect.currentframe())
    gui.add_page("test", Markdown("<|Hello {name}|button|id={btn_id}|>"))
    gui.run(run_server=False)
    http_client = gui._server.test_client()
    # Websocket test client captures messages sent to the front-end.
    ws = gui._server._ws.test_client(gui._server.get_flask())
    client_id = helpers.create_scope_and_get_sid(gui)
    # Render the page once so that its variables get registered in the scope.
    http_client.get(f"/taipy-jsx/test?client_id={client_id}")
    with gui.get_flask_app().test_request_context(
        f"/taipy-jsx/test/?client_id={client_id}", data={"client_id": client_id}
    ):
        g.client_id = client_id
        resume_control(gui._Gui__state)
        received = ws.get_received()
        helpers.assert_outward_ws_simple_message(received[0], "BL", {"message": None})
import inspect

from flask import g

from taipy.gui import Gui, Markdown, notify


def test_notify(gui: Gui, helpers):
    """notify() must push an "AL" (alert) ws message with type and text."""
    name = "World!"  # noqa: F841
    btn_id = "button1"  # noqa: F841
    # Attach this frame so the page expressions ({name}, {btn_id}) resolve.
    gui._set_frame(inspect.currentframe())
    gui.add_page("test", Markdown("<|Hello {name}|button|id={btn_id}|>"))
    gui.run(run_server=False)
    http_client = gui._server.test_client()
    # Websocket test client captures messages sent to the front-end.
    ws = gui._server._ws.test_client(gui._server.get_flask())
    client_id = helpers.create_scope_and_get_sid(gui)
    # Render the page once so that its variables get registered in the scope.
    http_client.get(f"/taipy-jsx/test?client_id={client_id}")
    with gui.get_flask_app().test_request_context(
        f"/taipy-jsx/test/?client_id={client_id}", data={"client_id": client_id}
    ):
        g.client_id = client_id
        notify(gui._Gui__state, "Info", "Message")
        received = ws.get_received()
        helpers.assert_outward_ws_simple_message(received[0], "AL", {"atype": "Info", "message": "Message"})
import contextlib
import time
from urllib.request import urlopen

import pytest
from testbook import testbook


@pytest.mark.filterwarnings("ignore::RuntimeWarning")
@testbook("tests/taipy/gui/notebook/simple_gui.ipynb")
def test_notebook_simple_gui(tb, helpers):
    """Run the Gui from a notebook, mutate its state from notebook cells, then
    stop/re-run/reload it, checking the served jsx after each step."""
    tb.execute_cell("import")
    tb.execute_cell("page_declaration")
    tb.execute_cell("gui_init")
    tb.execute_cell("gui_run")
    # Poll until the server is accepting connections.
    while not helpers.port_check():
        time.sleep(0.1)
    assert ">Hello</h1>" in urlopen("http://127.0.0.1:5000/taipy-jsx/page1").read().decode("utf-8")
    assert 'defaultValue=\\"10\\"' in urlopen("http://127.0.0.1:5000/taipy-jsx/page1").read().decode("utf-8")
    # Test state manipulation within notebook
    tb.execute_cell("get_variable")
    assert "10" in tb.cell_output_text("get_variable")
    assert 'defaultValue=\\"10\\"' in urlopen("http://127.0.0.1:5000/taipy-jsx/page1").read().decode("utf-8")
    tb.execute_cell("set_variable")
    # The new value must be reflected in the served page.
    assert 'defaultValue=\\"20\\"' in urlopen("http://127.0.0.1:5000/taipy-jsx/page1").read().decode("utf-8")
    tb.execute_cell("re_get_variable")
    assert "20" in tb.cell_output_text("re_get_variable")
    # Test page reload
    tb.execute_cell("gui_stop")
    # Once stopped, the page request must fail with a gateway error.
    with pytest.raises(Exception) as exc_info:
        urlopen("http://127.0.0.1:5000/taipy-jsx/page1")
    assert "501: Gateway error" in str(exc_info.value)
    tb.execute_cell("gui_re_run")
    # Busy-wait until the re-run server answers (break only on success).
    while True:
        with contextlib.suppress(Exception):
            urlopen("http://127.0.0.1:5000/taipy-jsx/page1")
            break
    assert ">Hello</h1>" in urlopen("http://127.0.0.1:5000/taipy-jsx/page1").read().decode("utf-8")
    tb.execute_cell("gui_reload")
    # Busy-wait until the reloaded server answers (break only on success).
    while True:
        with contextlib.suppress(Exception):
            urlopen("http://127.0.0.1:5000/taipy-jsx/page1")
            break
    assert ">Hello</h1>" in urlopen("http://127.0.0.1:5000/taipy-jsx/page1").read().decode("utf-8")
    tb.execute_cell("gui_re_stop")
    with pytest.raises(Exception) as exc_info:
        urlopen("http://127.0.0.1:5000/taipy-jsx/page1")
    assert "501: Gateway error" in str(exc_info.value)
from taipy.gui import Gui, Markdown
import inspect
from importlib import util

import pytest

# Playwright is optional; only import the Page type when it is installed.
if util.find_spec("playwright"):
    from playwright._impl._page import Page

from taipy.gui import Gui


@pytest.mark.teste2e
def test_redirect(page: "Page", gui: Gui, helpers):
    """End-to-end: loading "/" must render the registered page's content."""
    page_md = """
<|Redirect Successfully|id=text1|>
"""
    gui._set_frame(inspect.currentframe())
    gui.add_page(name="test", page=page_md)
    helpers.run_e2e(gui)
    page.goto("./")
    page.expect_websocket()
    # Wait for the control to be rendered before reading it.
    page.wait_for_selector("#text1")
    text1 = page.query_selector("#text1")
    assert text1.inner_text() == "Redirect Successfully"
import pytest


@pytest.fixture(scope="session")
def browser_context_args(browser_context_args, e2e_port, e2e_base_url):
    """Session-scoped browser context pointed at the e2e test server (Paris tz)."""
    args = dict(browser_context_args)
    args["base_url"] = f"http://127.0.0.1:{e2e_port}{e2e_base_url}"
    args["timezone_id"] = "Europe/Paris"
    return args


@pytest.fixture(scope="function")
def gui(helpers, e2e_base_url):
    """Fresh Gui instance per test, stopped and cleaned up after the test."""
    from taipy.gui import Gui

    instance = Gui()
    if e2e_base_url != "/":
        host = "0.0.0.0"
    else:
        host = "127.0.0.1"
    instance.load_config({"base_url": e2e_base_url, "host": host})
    yield instance
    # Delete Gui instance and state of some classes after each test
    instance.stop()
    helpers.test_cleanup()