text
stringlengths
0
5.92k
from taipy.core.config import Config, Frequency, Status
import taipy as tp
import datetime as dt
import time


def double(nb):
    """Return twice the input value."""
    return nb * 2


def add(nb):
    """Add 10 to the input after a 10-second pause (simulates a long task)."""
    print("Wait 10 seconds in add function")
    time.sleep(10)
    return nb + 10


# Data node configurations
input_cfg = Config.configure_data_node("input", default_data=21)
intermediate_cfg = Config.configure_data_node("intermediate")
output_cfg = Config.configure_data_node("output")

# Task configurations
first_task_cfg = Config.configure_task("double", double, input_cfg, intermediate_cfg)
second_task_cfg = Config.configure_task("add", add, intermediate_cfg, output_cfg)


def callback_scenario_state(scenario, job):
    """Called whenever a job of a subscribed scenario changes status.

    Prints the scenario name and, once a job has completed, the value of
    every output data node of the job's task.

    Args:
        scenario (Scenario): the scenario owning the job whose status changed
        job: the job whose status changed
    """
    print(scenario.name)
    if job.status == tp.core.Status.COMPLETED:
        for data_node in job.task.output.values():
            print(data_node.read())


# Scenario configuration
scenario_cfg = Config.configure_scenario_from_tasks(
    id="my_scenario",
    task_configs=[first_task_cfg, second_task_cfg],
    name="my_scenario",
)

Config.export("config_09.toml")

if __name__ == "__main__":
    tp.Core().run()
    scenario_1 = tp.create_scenario(scenario_cfg)
    scenario_1.subscribe(callback_scenario_state)
    scenario_1.submit(wait=True)
    tp.Rest().run()
from taipy.core.config import Config, Scope, Frequency
import taipy as tp
import datetime as dt
import pandas as pd


def filter_by_month(df, month):
    """Return only the rows of df whose 'Date' falls in the given month."""
    df['Date'] = pd.to_datetime(df['Date'])
    df = df[df['Date'].dt.month == month]
    return df


def count_values(df):
    """Return the number of rows in df."""
    return len(df)


Config.load('config_06.toml')

# my_scenario is the id of the scenario configured
scenario_cfg = Config.scenarios['my_scenario']

if __name__ == '__main__':
    tp.Core().run()

    scenario_1 = tp.create_scenario(scenario_cfg, creation_date=dt.datetime(2022, 10, 7), name="Scenario 2022/10/7")
    scenario_2 = tp.create_scenario(scenario_cfg, creation_date=dt.datetime(2022, 10, 5), name="Scenario 2022/10/5")
    # FIX: the name said 2022 although the creation date is 2021/9/1.
    scenario_3 = tp.create_scenario(scenario_cfg, creation_date=dt.datetime(2021, 9, 1), name="Scenario 2021/9/1")

    # scenarios 1 and 2 belong to the same cycle
    scenario_1.month.write(10)
    scenario_1.submit()

    # first task has already been executed by scenario 1
    # because scenario 2 shares the same data node for this task
    scenario_2.submit()
    # every task has already been executed so everything will be skipped
    scenario_2.submit()

    # scenario 3 has no connection to the other scenarios so everything will be executed
    scenario_3.month.write(9)
    scenario_3.submit()

    # changing an input data node will make the task be reexecuted
    print("Scenario 3: change in historical data")
    scenario_3.historical_data.write(pd.read_csv('time_series_2.csv'))
    scenario_3.submit()
from taipy.core.config import Config, Frequency
import taipy as tp


def double(nb):
    """Return twice the input value."""
    return nb * 2


def add(nb):
    """Return the input value plus 10."""
    return nb + 10


# Data node configurations
input_cfg = Config.configure_data_node("input", default_data=21)
intermediate_cfg = Config.configure_data_node("intermediate")
output_cfg = Config.configure_data_node("output")

# Task configurations
first_task_cfg = Config.configure_task("double", double, input_cfg, intermediate_cfg)
second_task_cfg = Config.configure_task("add", add, intermediate_cfg, output_cfg)


def compare_function(*data_node_results):
    """Example comparator: pairwise differences between scenario results.

    Returns a dict-of-dicts where entry [i][j] holds result_j - result_i.
    """
    compare_result = {}
    for current_res_i, current_res in enumerate(data_node_results):
        compare_result[current_res_i] = {}
        for next_res_i, next_res in enumerate(data_node_results):
            print(f"comparing result {current_res_i} with result {next_res_i}")
            compare_result[current_res_i][next_res_i] = next_res - current_res
    return compare_result


# The comparator is attached to the 'output' data node of the scenario.
scenario_cfg = Config.configure_scenario_from_tasks(
    id="multiply_scenario",
    name="my_scenario",
    task_configs=[first_task_cfg, second_task_cfg],
    comparators={output_cfg.id: compare_function},
)

Config.export("config_08.toml")

if __name__ == "__main__":
    tp.Core().run()

    scenario_1 = tp.create_scenario(scenario_cfg)
    scenario_2 = tp.create_scenario(scenario_cfg)
    scenario_1.input.write(10)
    scenario_2.input.write(8)
    scenario_1.submit()
    scenario_2.submit()

    print(tp.compare_scenarios(scenario_1, scenario_2))
    tp.Rest().run()
from taipy.core.config import Config
import taipy as tp
import datetime as dt
import pandas as pd
import time


def double(nb):
    """Return twice the input value (referenced by the loaded TOML config)."""
    return nb * 2


def add(nb):
    """Add 10 to the input after a 10-second pause (simulates a long task)."""
    print("Wait 10 seconds in add function")
    time.sleep(10)
    return nb + 10


# Reuse the scenario structure exported by a previous step, but run the jobs
# in standalone mode with two parallel workers.
Config.load('config_07.toml')
Config.configure_job_executions(mode="standalone", max_nb_of_workers=2)

if __name__ == "__main__":
    scenario_cfg = Config.scenarios['my_scenario']
    tp.Core().run()

    # Two scenarios submitted back to back: their jobs run concurrently.
    scenario_1 = tp.create_scenario(scenario_cfg)
    scenario_2 = tp.create_scenario(scenario_cfg)
    scenario_1.submit()
    scenario_2.submit()

    # Blocking submissions: wait for completion, then wait at most 5 seconds.
    scenario_1 = tp.create_scenario(scenario_cfg)
    scenario_1.submit(wait=True)
    scenario_1.submit(wait=True, timeout=5)
from taipy.core.config import Config
import taipy as tp
import datetime as dt
import pandas as pd


def filter_current(df):
    """Keep only the rows of df whose 'Date' is in the current month."""
    current_month = dt.datetime.now().month
    df['Date'] = pd.to_datetime(df['Date'])
    return df[df['Date'].dt.month == current_month]


def count_values(df):
    """Return the number of rows in df."""
    return len(df)


# Data nodes: CSV source -> filtered month -> row count
historical_data_cfg = Config.configure_csv_data_node(id="historical_data",
                                                     default_path="src/time_series.csv")
month_values_cfg = Config.configure_data_node(id="month_data")
nb_of_values_cfg = Config.configure_data_node(id="nb_of_values")

# Tasks wiring the data nodes together
task_filter_cfg = Config.configure_task(id="filter_current",
                                        function=filter_current,
                                        input=historical_data_cfg,
                                        output=month_values_cfg)
task_count_values_cfg = Config.configure_task(id="count_values",
                                              function=count_values,
                                              input=month_values_cfg,
                                              output=nb_of_values_cfg)

scenario_cfg = Config.configure_scenario_from_tasks(id="my_scenario",
                                                    task_configs=[task_filter_cfg, task_count_values_cfg])

Config.export('config_03.toml')

if __name__ == '__main__':
    tp.Core().run()

    scenario = tp.create_scenario(scenario_cfg,
                                  creation_date=dt.datetime(2022, 10, 7),
                                  name="Scenario 2022/10/7")
    scenario.submit()
    print("Nb of values of scenario:", scenario.nb_of_values.read())

    data_node = None
    tp.Gui("""<|{scenario}|scenario_selector|>
<|{scenario}|scenario|>
<|{scenario}|scenario_dag|>
<|{data_node}|data_node_selector|>""").run()
from taipy.core.config import Config
import taipy as tp
import datetime as dt
import pandas as pd
import time


def double(nb):
    """Return twice the input value."""
    return nb * 2


def add(nb):
    """Add 10 to the input after a 10-second pause (simulates a long task)."""
    print("Wait 10 seconds in add function")
    time.sleep(10)
    return nb + 10


# Run jobs in standalone mode with two parallel workers.
Config.configure_job_executions(mode="standalone", max_nb_of_workers=2)

# Data node configurations
input_cfg = Config.configure_data_node("input", default_data=21)
intermediate_cfg = Config.configure_data_node("intermediate", default_data=21)
output_cfg = Config.configure_data_node("output")

# Task configurations
first_task_cfg = Config.configure_task("double", double, input_cfg, intermediate_cfg)
second_task_cfg = Config.configure_task("add", add, intermediate_cfg, output_cfg)

# Scenario configuration
scenario_cfg = Config.configure_scenario_from_tasks(id="my_scenario",
                                                    task_configs=[first_task_cfg, second_task_cfg])

Config.export("config_07.toml")

if __name__ == "__main__":
    tp.Core().run()

    scenario_1 = tp.create_scenario(scenario_cfg)
    scenario_2 = tp.create_scenario(scenario_cfg)
    scenario_1.submit()
    scenario_2.submit()

    scenario_1 = tp.create_scenario(scenario_cfg)
    scenario_1.submit(wait=True)
    scenario_1.submit(wait=True, timeout=5)
from taipy.core.config import Config, Frequency, Scope
import taipy as tp
import datetime as dt
import pandas as pd


def filter_by_month(df, month):
    """Return only the rows of df whose 'Date' falls in the given month."""
    df['Date'] = pd.to_datetime(df['Date'])
    return df[df['Date'].dt.month == month]


def count_values(df):
    """Return the number of rows in df."""
    return len(df)


Config.load('config_05.toml')

if __name__ == '__main__':
    # my_scenario is the id of the scenario configured
    scenario_cfg = Config.scenarios['my_scenario']
    tp.Core().run()

    scenario_1 = tp.create_scenario(scenario_cfg, creation_date=dt.datetime(2022, 10, 7), name="Scenario 2022/10/7")
    scenario_2 = tp.create_scenario(scenario_cfg, creation_date=dt.datetime(2022, 10, 5), name="Scenario 2022/10/5")
    scenario_3 = tp.create_scenario(scenario_cfg, creation_date=dt.datetime(2021, 9, 1), name="Scenario 2021/9/1")

    # scenarios 1 and 2 share the same monthly cycle, hence the same 'month' node
    scenario_1.month.write(10)
    print("Scenario 1: month", scenario_1.month.read())
    print("Scenario 2: month", scenario_2.month.read())

    print("\nScenario 1: submit")
    scenario_1.submit()
    print("Value", scenario_1.nb_of_values.read())

    print("\nScenario 2: first submit")
    scenario_2.submit()
    print("Value", scenario_2.nb_of_values.read())

    print("Scenario 2: second submit")
    scenario_2.submit()
    print("Value", scenario_2.nb_of_values.read())

    print("\nScenario 3: submit")
    scenario_3.month.write(9)
    scenario_3.submit()
    print("Value", scenario_3.nb_of_values.read())
from taipy.core.config import Config, Scope, Frequency
import taipy as tp
import datetime as dt
import pandas as pd


def filter_by_month(df, month):
    """Return only the rows of df whose 'Date' falls in the given month."""
    df['Date'] = pd.to_datetime(df['Date'])
    df = df[df['Date'].dt.month == month]
    return df


def count_values(df):
    """Return the number of rows in df."""
    return len(df)


# Data nodes: the historical CSV is shared globally; the month and the
# filtered data are shared by every scenario of the same cycle.
historical_data_cfg = Config.configure_csv_data_node(id="historical_data",
                                                     default_path="time_series.csv",
                                                     scope=Scope.GLOBAL)
month_cfg = Config.configure_data_node(id="month", scope=Scope.CYCLE)
month_values_cfg = Config.configure_data_node(id="month_data", scope=Scope.CYCLE)
nb_of_values_cfg = Config.configure_data_node(id="nb_of_values")

# skippable=True lets Taipy skip a task whose inputs have not changed.
task_filter_cfg = Config.configure_task(id="filter_by_month",
                                        function=filter_by_month,
                                        input=[historical_data_cfg, month_cfg],
                                        output=month_values_cfg,
                                        skippable=True)
task_count_values_cfg = Config.configure_task(id="count_values",
                                              function=count_values,
                                              input=month_values_cfg,
                                              output=nb_of_values_cfg,
                                              skippable=True)

scenario_cfg = Config.configure_scenario_from_tasks(id="my_scenario",
                                                    task_configs=[task_filter_cfg, task_count_values_cfg],
                                                    frequency=Frequency.MONTHLY)

Config.export('config_06.toml')

if __name__ == '__main__':
    tp.Core().run()

    scenario_1 = tp.create_scenario(scenario_cfg, creation_date=dt.datetime(2022, 10, 7), name="Scenario 2022/10/7")
    scenario_2 = tp.create_scenario(scenario_cfg, creation_date=dt.datetime(2022, 10, 5), name="Scenario 2022/10/5")
    # FIX: the name said 2022 although the creation date is 2021/9/1.
    scenario_3 = tp.create_scenario(scenario_cfg, creation_date=dt.datetime(2021, 9, 1), name="Scenario 2021/9/1")

    # scenarios 1 and 2 belong to the same cycle
    scenario_1.month.write(10)
    scenario_1.submit()

    # first task has already been executed by scenario 1
    # because scenario 2 shares the same data node for this task
    scenario_2.submit()
    # every task has already been executed so everything will be skipped
    scenario_2.submit()

    # scenario 3 has no connection to the other scenarios so everything will be executed
    scenario_3.month.write(9)
    scenario_3.submit()

    # changing an input data node will make the task be reexecuted
    print("Scenario 3: change in historical data")
    scenario_3.historical_data.write(pd.read_csv('time_series_2.csv'))
    scenario_3.submit()
from taipy.core.config import Config, Frequency
import taipy as tp
import datetime as dt
import pandas as pd


def filter_by_month(df, month):
    """Return only the rows of df whose 'Date' falls in the given month."""
    df['Date'] = pd.to_datetime(df['Date'])
    df = df[df['Date'].dt.month == month]
    return df


def count_values(df):
    """Return the number of rows in df."""
    return len(df)


Config.load('config_04.toml')

if __name__ == '__main__':
    # my_scenario is the id of the scenario configured
    scenario_cfg = Config.scenarios['my_scenario']
    tp.Core().run()

    scenario_1 = tp.create_scenario(scenario_cfg, creation_date=dt.datetime(2022, 10, 7), name="Scenario 2022/10/7")
    scenario_2 = tp.create_scenario(scenario_cfg, creation_date=dt.datetime(2022, 10, 5), name="Scenario 2022/10/5")

    scenario_1.month.write(10)
    scenario_2.month.write(10)
    print("Month Data Node of Scenario 1", scenario_1.month.read())
    print("Month Data Node of Scenario 2", scenario_2.month.read())

    scenario_1.submit()
    scenario_2.submit()

    # The first scenario of a cycle is primary by default; promote scenario 2.
    print("Scenario 1 before", scenario_1.is_primary)
    print("Scenario 2 before", scenario_2.is_primary)
    tp.set_primary(scenario_2)
    print("Scenario 1 after", scenario_1.is_primary)
    print("Scenario 2 after", scenario_2.is_primary)

    # FIX: the name said 2022 although the creation date is 2021/9/1.
    scenario_3 = tp.create_scenario(scenario_cfg, creation_date=dt.datetime(2021, 9, 1), name="Scenario 2021/9/1")
    scenario_3.month.write(9)
    scenario_3.submit()
    # The only scenario of its cycle, so it is primary automatically.
    print("Is scenario 3 primary?", scenario_3.is_primary)

    data_node = None
    scenario = None
    tp.Gui("""<|{scenario}|scenario_selector|>
<|{scenario}|scenario|>
<|{scenario}|scenario_dag|>
<|{data_node}|data_node_selector|>""").run()
from taipy import Config
import taipy as tp


def double(nb):
    """Return twice the input value."""
    return nb * 2


# One task: input (defaults to 21) -> double -> output
input_data_node_cfg = Config.configure_data_node("input", default_data=21)
output_data_node_cfg = Config.configure_data_node("output")
task_cfg = Config.configure_task("double", double, input_data_node_cfg, output_data_node_cfg)
scenario_cfg = Config.configure_scenario_from_tasks(id="my_scenario", task_configs=[task_cfg])

Config.export('config_02.toml')

if __name__ == '__main__':
    tp.Core().run()

    scenario = tp.create_scenario(scenario_cfg, name="Scenario")
    tp.submit(scenario)
    print("Output of First submit:", scenario.output.read())

    # Overwrite the input data node and run again.
    print("Before write", scenario.input.read())
    scenario.input.write(54)
    print("After write", scenario.input.read())
    tp.submit(scenario)
    print("Second submit", scenario.output.read())

    # Basic functions of Taipy Core
    print([s.name for s in tp.get_scenarios()])
    scenario = tp.get(scenario.id)
    tp.delete(scenario.id)

    scenario = None
    data_node = None
    tp.Gui("""<|{scenario}|scenario_selector|>
<|{scenario}|scenario|>
<|{scenario}|scenario_dag|>
<|{data_node}|data_node_selector|>""").run()
from taipy.core.config import Config, Frequency, Scope
import taipy as tp
import datetime as dt
import pandas as pd


def filter_by_month(df, month):
    """Return only the rows of df whose 'Date' falls in the given month."""
    df['Date'] = pd.to_datetime(df['Date'])
    return df[df['Date'].dt.month == month]


def count_values(df):
    """Return the number of rows in df."""
    return len(df)


# Data nodes: the CSV source is global; month and filtered data are per-cycle.
historical_data_cfg = Config.configure_csv_data_node(id="historical_data",
                                                     default_path="time_series.csv",
                                                     scope=Scope.GLOBAL)
month_cfg = Config.configure_data_node(id="month", scope=Scope.CYCLE)
month_values_cfg = Config.configure_data_node(id="month_data", scope=Scope.CYCLE)
nb_of_values_cfg = Config.configure_data_node(id="nb_of_values")

task_filter_cfg = Config.configure_task(id="filter_by_month",
                                        function=filter_by_month,
                                        input=[historical_data_cfg, month_cfg],
                                        output=month_values_cfg)
task_count_values_cfg = Config.configure_task(id="count_values",
                                              function=count_values,
                                              input=month_values_cfg,
                                              output=nb_of_values_cfg)

scenario_cfg = Config.configure_scenario_from_tasks(id="my_scenario",
                                                    task_configs=[task_filter_cfg, task_count_values_cfg],
                                                    frequency=Frequency.MONTHLY)

Config.export('config_05.toml')

if __name__ == '__main__':
    tp.Core().run()

    scenario_1 = tp.create_scenario(scenario_cfg, creation_date=dt.datetime(2022, 10, 7), name="Scenario 2022/10/7")
    scenario_2 = tp.create_scenario(scenario_cfg, creation_date=dt.datetime(2022, 10, 5), name="Scenario 2022/10/5")
    scenario_3 = tp.create_scenario(scenario_cfg, creation_date=dt.datetime(2021, 9, 1), name="Scenario 2021/9/1")

    # scenarios 1 and 2 share the same monthly cycle, hence the same 'month' node
    scenario_1.month.write(10)
    print("Scenario 1: month", scenario_1.month.read())
    print("Scenario 2: month", scenario_2.month.read())

    print("\nScenario 1: submit")
    scenario_1.submit()
    print("Value", scenario_1.nb_of_values.read())

    print("\nScenario 2: first submit")
    scenario_2.submit()
    print("Value", scenario_2.nb_of_values.read())

    print("Scenario 2: second submit")
    scenario_2.submit()
    print("Value", scenario_2.nb_of_values.read())

    print("\nScenario 3: submit")
    scenario_3.month.write(9)
    scenario_3.submit()
    print("Value", scenario_3.nb_of_values.read())
from taipy import Config
import taipy as tp
import pandas as pd
import datetime as dt

data = pd.read_csv("https://raw.githubusercontent.com/Avaiga/taipy-getting-started-core/src/daily-min-temperatures.csv")


# Normal function used by Taipy
def predict(historical_temperature: pd.DataFrame, date_to_forecast: dt.datetime) -> float:
    """Baseline forecast: mean historical temperature on the same day/month.

    FIX: date_to_forecast was annotated as str, but .day/.month are read from
    it, so it must be a datetime-like value.
    """
    print("Running baseline...")  # FIX: was an f-string with no placeholder
    historical_temperature['Date'] = pd.to_datetime(historical_temperature['Date'])
    historical_same_day = historical_temperature.loc[
        (historical_temperature['Date'].dt.day == date_to_forecast.day)
        & (historical_temperature['Date'].dt.month == date_to_forecast.month)
    ]
    return historical_same_day['Temp'].mean()


# Configuration of Data Nodes
historical_temperature_cfg = Config.configure_data_node("historical_temperature")
date_to_forecast_cfg = Config.configure_data_node("date_to_forecast")
predictions_cfg = Config.configure_data_node("predictions")

# Configuration of tasks (the task config deliberately reuses the name)
predictions_cfg = Config.configure_task("predict", predict,
                                        [historical_temperature_cfg, date_to_forecast_cfg],
                                        predictions_cfg)

# Configuration of scenario
scenario_cfg = Config.configure_scenario_from_tasks(id="my_scenario", task_configs=[predictions_cfg])

Config.export('config_01.toml')

if __name__ == '__main__':
    # Run of the Core
    tp.Core().run()

    # Creation of the scenario and execution
    scenario = tp.create_scenario(scenario_cfg)
    scenario.historical_temperature.write(data)
    scenario.date_to_forecast.write(dt.datetime.now())
    tp.submit(scenario)

    print("Value at the end of task", scenario.predictions.read())

    def save(state):
        """GUI callback: push the picked date into the scenario's data nodes."""
        state.scenario.historical_temperature.write(data)
        state.scenario.date_to_forecast.write(state.date)
        tp.gui.notify(state, "s", "Saved! Ready to submit")

    date = None
    scenario_md = """
<|{scenario}|scenario_selector|>
<|{date}|date|on_change=save|active={scenario}|>
<|{scenario}|scenario|>
<|{scenario}|scenario_dag|>
<|Refresh|button|on_action={lambda s: s.assign("scenario", scenario)}|>
<|{scenario.predictions.read() if scenario else ''}|>
"""
    tp.Gui(scenario_md).run()
from taipy import Config
import taipy as tp
import pandas as pd
import datetime as dt

data = pd.read_csv("https://raw.githubusercontent.com/Avaiga/taipy-getting-started-core/src/daily-min-temperatures.csv")


# Normal function used by Taipy
def predict(historical_temperature: pd.DataFrame, date_to_forecast: dt.datetime) -> float:
    """Baseline forecast: mean historical temperature on the same day/month.

    FIX: date_to_forecast was annotated as str, but .day/.month are read from
    it, so it must be a datetime-like value.
    """
    print("Running baseline...")  # FIX: was an f-string with no placeholder
    historical_temperature['Date'] = pd.to_datetime(historical_temperature['Date'])
    historical_same_day = historical_temperature.loc[
        (historical_temperature['Date'].dt.day == date_to_forecast.day)
        & (historical_temperature['Date'].dt.month == date_to_forecast.month)
    ]
    return historical_same_day['Temp'].mean()


Config.load('config_01.toml')

if __name__ == '__main__':
    # my_scenario is the id of the scenario configured
    scenario_cfg = Config.scenarios['my_scenario']

    # Run of the Core
    tp.Core().run()

    # Creation of the scenario and execution
    scenario = tp.create_scenario(scenario_cfg)
    scenario.historical_temperature.write(data)
    scenario.date_to_forecast.write(dt.datetime.now())
    tp.submit(scenario)

    print("Value at the end of task", scenario.predictions.read())

    def save(state):
        """GUI callback: push the picked date into the scenario's data nodes."""
        state.scenario.historical_temperature.write(data)
        state.scenario.date_to_forecast.write(state.date)
        tp.gui.notify(state, "s", "Saved! Ready to submit")

    date = None
    scenario_md = """
<|{scenario}|scenario_selector|>
<|{date}|date|on_change=save|active={scenario}|>
<|{scenario}|scenario|>
<|{scenario}|scenario_dag|>
<|Refresh|button|on_action={lambda s: s.assign("scenario", scenario)}|>
<|{scenario.predictions.read() if scenario else ''}|>
"""
    tp.Gui(scenario_md).run()

    # FIX: removed an accidentally duplicated trailing section that reloaded
    # config_01, re-ran the Core and read `scenario.output` — a data node that
    # does not exist in this configuration (only 'predictions' does), so it
    # would have raised AttributeError after the GUI was closed.
from taipy.core.config import Config, Frequency
import taipy as tp
import datetime as dt
import pandas as pd


def filter_by_month(df, month):
    """Return only the rows of df whose 'Date' falls in the given month."""
    df['Date'] = pd.to_datetime(df['Date'])
    df = df[df['Date'].dt.month == month]
    return df


def count_values(df):
    """Return the number of rows in df."""
    return len(df)


# Data nodes: CSV source + month -> filtered data -> row count
historical_data_cfg = Config.configure_csv_data_node(id="historical_data",
                                                     default_path="time_series.csv")
month_cfg = Config.configure_data_node(id="month")
month_values_cfg = Config.configure_data_node(id="month_data")
nb_of_values_cfg = Config.configure_data_node(id="nb_of_values")

task_filter_cfg = Config.configure_task(id="filter_by_month",
                                        function=filter_by_month,
                                        input=[historical_data_cfg, month_cfg],
                                        output=month_values_cfg)
task_count_values_cfg = Config.configure_task(id="count_values",
                                              function=count_values,
                                              input=month_values_cfg,
                                              output=nb_of_values_cfg)

scenario_cfg = Config.configure_scenario_from_tasks(id="my_scenario",
                                                    task_configs=[task_filter_cfg, task_count_values_cfg],
                                                    frequency=Frequency.MONTHLY)

Config.export('config_04.toml')

if __name__ == '__main__':
    tp.Core().run()

    scenario_1 = tp.create_scenario(scenario_cfg, creation_date=dt.datetime(2022, 10, 7), name="Scenario 2022/10/7")
    scenario_2 = tp.create_scenario(scenario_cfg, creation_date=dt.datetime(2022, 10, 5), name="Scenario 2022/10/5")

    scenario_1.month.write(10)
    scenario_2.month.write(10)
    print("Month Data Node of Scenario 1", scenario_1.month.read())
    print("Month Data Node of Scenario 2", scenario_2.month.read())

    scenario_1.submit()
    scenario_2.submit()

    # The first scenario of a cycle is primary by default; promote scenario 2.
    print("Scenario 1 before", scenario_1.is_primary)
    print("Scenario 2 before", scenario_2.is_primary)
    tp.set_primary(scenario_2)
    print("Scenario 1 after", scenario_1.is_primary)
    print("Scenario 2 after", scenario_2.is_primary)

    # FIX: the name said 2022 although the creation date is 2021/9/1.
    scenario_3 = tp.create_scenario(scenario_cfg, creation_date=dt.datetime(2021, 9, 1), name="Scenario 2021/9/1")
    scenario_3.month.write(9)
    scenario_3.submit()
    # The only scenario of its cycle, so it is primary automatically.
    print("Is scenario 3 primary?", scenario_3.is_primary)

    scenario = None
    data_node = None
    tp.Gui("""<|{scenario}|scenario_selector|>
<|{scenario}|scenario|>
<|{scenario}|scenario_dag|>
<|{data_node}|data_node_selector|>""").run()
from taipy.core.config import Config, Scope, Frequency
import taipy as tp
import datetime as dt
import pandas as pd
import time

Config.load('config_09.toml')
Config.configure_job_executions(mode="standalone", max_nb_of_workers=2)


def filter_by_month(df, month):
    """Return only the rows of df whose 'Date' falls in the given month."""
    df['Date'] = pd.to_datetime(df['Date'])
    df = df[df['Date'].dt.month == month]
    return df


def count_values(df):
    """Return the number of rows in df after a 10-second pause."""
    print("Wait 10 seconds")
    time.sleep(10)
    return len(df)


def callback_scenario_state(scenario, job):
    """Called whenever a job of a subscribed scenario changes status.

    Prints the scenario name and, once a job has completed, the value of
    every output data node of the job's task.

    Args:
        scenario (Scenario): the scenario owning the job whose status changed
        job: the job whose status changed
    """
    print(scenario.name)
    # FIX: compare against the Status enum instead of the magic number 7,
    # matching the identical callback in the config_09 creator script.
    if job.status == tp.core.Status.COMPLETED:
        for data_node in job.task.output.values():
            print(data_node.read())


if __name__ == "__main__":
    # my_scenario is the id of the scenario configured
    scenario_cfg = Config.scenarios['my_scenario']
    tp.Core().run()

    scenario_1 = tp.create_scenario(scenario_cfg, creation_date=dt.datetime(2022, 10, 7), name="Scenario 2022/10/7")
    scenario_1.subscribe(callback_scenario_state)
    scenario_1.submit(wait=True)
    scenario_1.submit(wait=True, timeout=5)
from taipy import Config
import taipy as tp


def double(nb):
    """Return twice the input value (referenced by the loaded TOML config)."""
    return nb * 2


Config.load('config_02.toml')

if __name__ == '__main__':
    # my_scenario is the id of the scenario configured
    scenario_cfg = Config.scenarios['my_scenario']

    # FIX: the Core service must be started before entities are created;
    # every sibling script does this but it was missing here.
    tp.Core().run()

    scenario = tp.create_scenario(scenario_cfg, name="Scenario")
    tp.submit(scenario)
    print("Output of First submit:", scenario.output.read())

    # Overwrite the input data node and run again.
    print("Before write", scenario.input.read())
    scenario.input.write(54)
    print("After write", scenario.input.read())
    tp.submit(scenario)
    print("Second submit", scenario.output.read())

    # Basic functions of Taipy Core
    print([s.name for s in tp.get_scenarios()])
    scenario = tp.get(scenario.id)
    tp.delete(scenario.id)

    scenario = None
    data_node = None
    tp.Gui("""<|{scenario}|scenario_selector|>
<|{scenario}|scenario|>
<|{scenario}|scenario_dag|>
<|{data_node}|data_node_selector|>""").run()
from taipy.core.config import Config
import taipy as tp
import datetime as dt
import pandas as pd


def filter_current(df):
    """Keep only the rows of df whose 'Date' is in the current month."""
    current_month = dt.datetime.now().month
    df['Date'] = pd.to_datetime(df['Date'])
    return df[df['Date'].dt.month == current_month]


def count_values(df):
    """Return the number of rows in df."""
    return len(df)


Config.load('config_03.toml')

if __name__ == '__main__':
    # my_scenario is the id of the scenario configured
    scenario_cfg = Config.scenarios['my_scenario']
    tp.Core().run()

    scenario = tp.create_scenario(scenario_cfg,
                                  creation_date=dt.datetime(2022, 10, 7),
                                  name="Scenario 2022/10/7")
    scenario.submit()
    print("Nb of values of scenario:", scenario.nb_of_values.read())

    data_node = None
    tp.Gui("""<|{scenario}|scenario_selector|>
<|{scenario}|scenario|>
<|{scenario}|scenario_dag|>
<|{data_node}|data_node_selector|>""").run()
from importlib import util, import_module
from pathlib import Path
import sys
import inspect
import os
import json


def package_exists(package):
    """Return True if every level of the dotted *package* path is importable."""
    parts = package.split(".")
    for idx in range(len(parts)):
        if not util.find_spec(".".join(parts[: idx + 1])):
            return False
    return True


def find_file_in_package(package, file_name):
    """Search *package*'s directory tree for *file_name*.

    Returns the resolved path of the LAST match in os.walk order (preserving
    the original script's behavior), or None when the file is not found.
    May raise if the package has no file (e.g. a namespace/built-in module).
    """
    module = import_module(package)
    module_file = inspect.getfile(module)
    found_path = None
    for root, _dirs, files in os.walk(Path(module_file).parent.resolve()):
        if file_name in files:
            found_path = str((Path(root) / file_name).resolve())
    return found_path


def main(argv):
    """CLI entry point: argv[1] is the file name, argv[2:] are packages.

    Prints a JSON mapping {package: path} on stdout when at least one file is
    found; otherwise exits with a non-zero code reflecting access errors.
    """
    if len(argv) < 3:
        print("Packages should be passed as arguments after the name of the searched file.", file=sys.stderr)
        exit(1)
    errors = 0
    file_name = argv[1]
    result = {}
    exit_code = 1
    for package in argv[2:]:
        if not package_exists(package):
            print(f"Package {package} not found.", file=sys.stderr)
            errors += 1
            continue
        path = None
        try:
            path = find_file_in_package(package, file_name)
        except Exception as e:
            print(f"Error accessing {package}: {e}.", file=sys.stderr)
            exit_code += 1
        if path is None:
            print(f"File {file_name} not found in Package {package}.", file=sys.stderr)
            errors += 1
        else:
            result[package] = path
    if result:
        json.dump(result, sys.stdout)
    elif errors:
        exit(exit_code)


# FIX: the original ran everything at import time; guarding the entry point
# makes the module importable (and testable) without side effects.
if __name__ == "__main__":
    main(sys.argv)
from taipy import Gui
import cv2

# Haar cascade shipped with OpenCV for frontal face detection.
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")

# GUI state variables
number_of_faces_detected = 0
selected_file = None
image = None


def process_image(state):
    """Detect faces on the uploaded file, box them, and refresh the preview."""
    img = cv2.imread(state.selected_file, cv2.IMREAD_UNCHANGED)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray)
    state.number_of_faces_detected = len(faces)

    # Draw a rectangle around faces
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 0), 2)

    # Re-encode as JPEG bytes so the image control can display the result.
    state.image = cv2.imencode(".jpg", img)[1].tobytes()


content = """
<|{selected_file}|file_selector|label=Upload File|on_action=process_image|extensions=.jpg,.gif,.png|drop_message=Drop Message|>

<|{image}|image|width=300px|height=300px|>

<|{number_of_faces_detected} face(s) detected|>
"""

if __name__ == "__main__":
    Gui(page=content).run(dark_mode=False, port=8080)
from setuptools import find_packages, setup

setup(
    author="Your Name",  # FIX: was the typo "You Name"
    author_email="your@email.domain",
    python_requires=">=3.8",
    classifiers=[
        "Intended Audience :: Developers",
        # "License :: OSI Approved :: Apache Software License",
        "Natural Language :: English",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
    ],
    # license="Apache License 2.0",
    install_requires=["taipy-gui>=2.0"],
    include_package_data=True,
    name="guiext-library",
    description="My taipy-gui extension demo",
    long_description="This package contains a demonstration of using the Taipy GUI Extension API.",
    keywords="taipy",
    packages=find_packages(include=["demo_lib", "demo_lib.*"]),
    version="1.0.0",
    zip_safe=False,
)
from taipy.gui import Gui
from library import Library

# Page exercising the custom element exposed by the extension library.
page = """
# Extension library

<|library.element|>
"""

gui = Gui(page=page)
gui.add_library(Library())

if __name__ == "__main__":
    # Run main app
    gui.run()
from pathlib import Path

from taipy.gui.extension import ElementLibrary, Element, ElementProperty, PropertyType


class Library(ElementLibrary):
    """Skeleton of a Taipy GUI extension library (no elements declared yet)."""

    # Declare the elements of the library here, as key/value pairs of
    # a dictionary.
    # - The key is used as the element name.
    # - The value must be an instance of taipy.gui.extension.Element
    #
    # Ex:
    #   "element_name": Element(
    #       "default_property_name",
    #       {
    #           "property_name": ElementProperty(...)
    #       },
    #       react_component="ElementComponent"
    #   ),
    elts = {}

    def get_name(self) -> str:
        """Return the namespace under which the elements are exposed."""
        return "library"

    def get_elements(self) -> dict:
        """Return the mapping of element names to Element declarations."""
        return Library.elts

    def get_scripts(self) -> list[str]:
        """Only one JavaScript bundle for this library."""
        return ["library/frontend/dist/library.js"]

    def get_resource(self, name: str) -> Path:
        """Delegate resource resolution to the base class."""
        return super().get_resource(name)
# Export the library class for easier access by developers using it from .library import Library
# Import modules
import taipy as tp
from taipy import Config, Scope, Gui
import pandas as pd
import numpy as np


# ---------- Back-end ----------
def filtering_college(initial_dataset: pd.DataFrame, selected_stat):
    """Return the 10 colleges with the largest value of *selected_stat*.

    FIX: the original indexed a single column (producing a Series) and then
    called Series.nlargest with a column argument, which raises a TypeError.
    The 'Name' column is kept so the chart can label each bar.
    """
    completed_graph_data = initial_dataset[["Name", selected_stat]]
    return completed_graph_data.nlargest(10, selected_stat, keep="all")


# Data node creation
# FIX: 'slope' was a typo for the 'scope' keyword argument.
initial_dataset_cfg = Config.configure_data_node(id="initial_dataset",
                                                 storage_type="csv",
                                                 path="College_Data.csv",
                                                 scope=Scope.GLOBAL)
selected_stat_cfg = Config.configure_data_node(id="selected_stat",
                                               default_data="Name",
                                               scope=Scope.GLOBAL)
completed_graph_data_cfg = Config.configure_data_node(id="completed_graph_data",
                                                      scope=Scope.GLOBAL)

# Task creation
filtered_college_cfg = Config.configure_task(id="filtered_college",
                                             function=filtering_college,
                                             input=[initial_dataset_cfg, selected_stat_cfg],
                                             output=[completed_graph_data_cfg])

# Scenario creation
# FIX: the original built a scenario config from another scenario config via a
# non-existent 'pipeline_configs' argument; one scenario over the task suffices.
scenario_cfg = Config.configure_scenario(id="scenario", task_configs=[filtered_college_cfg])

scenario = None  # FIX: created in the main guard once the Core service runs


# ---------- Front-end ----------
def modify_df(state):
    """Selector callback: rerun the scenario for the chosen statistic."""
    # FIX: the original wrote to a non-existent 'selected_node' attribute and
    # read the config object ('completed_graph_data_cfg') instead of the
    # scenario's data nodes.
    scenario.selected_stat.write(state.selected_stat)
    tp.submit(scenario)
    state.df = scenario.completed_graph_data.read()


list_stats = ["Name", "Private", "Apps", "Accept", "Enroll", "Top10perc", "Top25perc",
              "F.Undergrad", "P.Undergrad", "Outstate", "Room.Board", "Books", "Personal",
              "PhD", "Terminal", "S.F.Ratio", "perc.alumni", "Expend", "Grad.Rate"]
selected_stat = "Top10perc"
df = pd.DataFrame(columns=["Name", selected_stat], copy=True)

# App creation
# FIX: y=selected_stat referenced a literal (missing) column name; bind the
# variable so the chart follows the selection.
college_stat_app = """<|{selected_stat}|selector|lov={list_stats}|on_change=modify_df|dropdown|>

<|{df}|chart|x=Name|y={selected_stat}|type=bar|title=College Stats|>"""

if __name__ == "__main__":
    tp.Core().run()
    scenario = tp.create_scenario(scenario_cfg)
    print(selected_stat)
    Gui(page=college_stat_app).run()
import json


def add_line(source, line, step):
    """Append one Markdown line to *source*, rewritten for the Notebook build.

    Rewrites relative links to absolute docs/GitHub URLs, converts Markdown
    images to centered HTML, and drops Notebook-only callouts inside steps.
    """
    line = line.replace('Getting Started with Taipy GUI', 'Getting Started with Taipy GUI on Notebooks')
    line = line.replace('(../src/', '(https://docs.taipy.io/en/latest/getting_started/src/')
    if line.startswith('!['):
        # Images: point at the GitHub raw tree (per-step folder, except index).
        if step != 'index':
            line = line.replace('(', '(https://github.com/Avaiga/taipy-getting-started-gui/blob/latest/' + step + '/')
        else:
            line = line.replace('(', '(https://github.com/Avaiga/taipy-getting-started-gui/blob/latest/')
        # conversion of Markdown image to HTML
        img_src = line.split('](')[1].split(')')[0]
        width = line.split('](')[1].split(')')[1].split(' ')[1]
        source.append('<div align="center">\n')
        source.append(f'    <img src={img_src} {width}>\n')
        source.append('</div>\n')
    elif step == 'step_00' and line.startswith('from taipy'):
        # The notebook needs Markdown in addition to Gui in the first step.
        source.append("from taipy.gui import Gui, Markdown\n")
    elif 'Notebook' in line and 'step' in step:
        # Skip "run this in a Notebook" remarks: we ARE building the notebook.
        pass
    else:
        source.append(line + '\n')
    return source


def detect_new_cell(notebook, source, cell, line, execution_count, force_creation=False):
    """Flush *source* into a notebook cell when a code fence boundary is hit.

    A ``` fence toggles between markdown and code cells; *force_creation*
    flushes whatever is pending (used between steps).
    Returns the updated (cell, source, notebook, execution_count).
    """
    if line.startswith('```python') or line.startswith('```') and cell == 'code' or force_creation:
        # The fence line itself was already appended — drop it from the cell.
        source = source[:-1]
        if cell == 'code':
            notebook['cells'].append({
                "cell_type": "code",
                "metadata": {},
                "outputs": [],
                "execution_count": execution_count,
                "source": source
            })
            cell = 'markdown'
            execution_count += 1
        else:
            notebook['cells'].append({
                "cell_type": "markdown",
                "metadata": {},
                "source": source
            })
            cell = 'code'
        source = []
    return cell, source, notebook, execution_count


def create_introduction(notebook, execution_count):
    """Build the leading cells from index.md (up to the first console block)."""
    with open('index.md', 'r') as f:
        text = f.read()
    split_text = text.split('\n')

    source = []
    for line in split_text:
        if not line.startswith('``` console'):
            add_line(source, line, 'index')
        else:
            # Everything after the install instructions is notebook-irrelevant.
            break

    notebook['cells'].append({
        "cell_type": "markdown",
        "metadata": {},
        "source": source
    })
    # Commented-out pip install cell the reader can enable.
    notebook['cells'].append({
        "cell_type": "code",
        "metadata": {},
        "outputs": [],
        "execution_count": execution_count,
        "source": ['# !pip install taipy\n']
    })
    notebook['cells'].append({
        "cell_type": "markdown",
        "metadata": {},
        "source": ['## Using Notebooks\n',]
    })
    execution_count += 1
    return notebook, execution_count


def create_steps(notebook, execution_count):
    """Convert each step_01..step_07 ReadMe.md into notebook cells.

    `for_studio` tracks the tabbed "Taipy Studio" (1, skipped) vs
    "Python configuration" (2, kept and de-indented) sections.
    """
    steps = ['step_0' + str(i) for i in range(1, 8)]
    source = []
    for step in steps:
        if source != []:
            # Flush the previous step's trailing cell before starting anew.
            cell, source, notebook, execution_count = detect_new_cell(notebook, source, cell, line,
                                                                      execution_count, force_creation=True)
        with open(step + '/ReadMe.md', 'r') as f:
            text = f.read()
        split_text = text.split('\n')

        cell = "markdown"
        for_studio = 0
        for line in split_text:
            if cell == "markdown":
                line = line.replace("    ", "")
            elif cell == "code" and (line[:4] == "    " or len(line) <= 1) and for_studio == 2:
                # De-indent code that lived inside the tabbed section.
                line = line[4:]
            else:
                for_studio = 0

            if '=== "Taipy Studio' in line:
                for_studio = 1
            if '=== "Python configuration"' in line:
                for_studio = 2

            if for_studio != 1:
                add_line(source, line, step)
            cell, source, notebook, execution_count = detect_new_cell(notebook, source, cell, line, execution_count)
    return notebook, execution_count


if __name__ == '__main__':
    # Minimal nbformat-4 skeleton; cells are appended by the builders above.
    notebook = {
        "cells": [],
        "metadata": {
            "language_info": {
                "codemirror_mode": {
                    "name": "ipython",
                    "version": 3
                },
                "file_extension": ".py",
                "mimetype": "text/x-python",
                "name": "python",
                "nbconvert_exporter": "python",
                "pygments_lexer": "ipython3"
            },
            "orig_nbformat": 4
        },
        "nbformat": 4,
        "nbformat_minor": 2
    }
    execution_count = 0
    notebook, execution_count = create_introduction(notebook, execution_count)
    notebook, execution_count = create_steps(notebook, execution_count)
    with open('getting_started.ipynb', 'w', encoding='utf-8') as f:
        json.dump(notebook, f, indent=2)
from taipy.gui import Gui, notify

text = "Original text"

# Definition of the page: a bound label, an input, and a button wired to a callback.
page = """
# Getting started with Taipy GUI

My text: <|{text}|>

<|{text}|input|>

<|Run local|button|on_action=on_button_action|>
"""


def on_button_action(state):
    """Button callback: pop a notification with the current text, then overwrite it."""
    notify(state, 'info', f'The text is: {state.text}')
    state.text = "Button Pressed"


def on_change(state, var_name, var_value):
    """Global change hook: typing the keyword "Reset" clears the field."""
    if var_name != "text" or var_value != "Reset":
        return
    state.text = ""


Gui(page).run()
from transformers import AutoTokenizer
from transformers import AutoModelForSequenceClassification
from scipy.special import softmax
import numpy as np
import pandas as pd
from taipy.gui import Gui, notify

# Text typed by the user on the interactive ("line") page.
text = "Original text"

# Interactive page: input + button, scores table, mean indicators, bar chart.
page = """
# Getting started with Taipy GUI

<|layout|columns=1 1|
<|
My text: <|{text}|>

Enter a word:
<|{text}|input|>
<|Analyze|button|on_action=local_callback|>
|>

<|Table|expandable|
<|{dataframe}|table|width=100%|number_format=%.2f|>
|>
|>

<|layout|columns=1 1 1|
## Positive <|{np.mean(dataframe['Score Pos'])}|text|format=%.2f|raw|>

## Neutral <|{np.mean(dataframe['Score Neu'])}|text|format=%.2f|raw|>

## Negative <|{np.mean(dataframe['Score Neg'])}|text|format=%.2f|raw|>
|>

<|{dataframe}|chart|type=bar|x=Text|y[1]=Score Pos|y[2]=Score Neu|y[3]=Score Neg|y[4]=Overall|color[1]=green|color[2]=grey|color[3]=red|type[4]=line|>
"""

# Pretrained 3-class (negative/neutral/positive) Twitter sentiment model.
MODEL = f"cardiffnlp/twitter-roberta-base-sentiment"
tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModelForSequenceClassification.from_pretrained(MODEL)

# Results table for interactive input; seeded with one neutral placeholder row.
dataframe = pd.DataFrame({"Text": [''],
                          "Score Pos": [0.33],
                          "Score Neu": [0.33],
                          "Score Neg": [0.33],
                          "Overall": [0]})
# Separate results table for the file-analysis ("text") page.
dataframe2 = dataframe.copy()


def analyze_text(text):
    """Score *text* with the sentiment model.

    Returns a row dict with softmax scores per class and
    Overall = pos - neg in [-1, 1]; text is truncated to 50 chars for display.
    """
    # Run for Roberta Model
    encoded_text = tokenizer(text, return_tensors='pt')
    output = model(**encoded_text)
    scores = output[0][0].detach().numpy()
    scores = softmax(scores)
    # Logit order for this model: 0=negative, 1=neutral, 2=positive.
    return {"Text": text[:50],
            "Score Pos": scores[2],
            "Score Neu": scores[1],
            "Score Neg": scores[0],
            "Overall": scores[2] - scores[0]}


def local_callback(state):
    """Analyze the typed text, append its row to the table, clear the input."""
    notify(state, 'Info', f'The text is: {state.text}', True)
    temp = state.dataframe.copy()
    scores = analyze_text(state.text)
    temp.loc[len(temp)] = scores
    state.dataframe = temp
    state.text = ""


# State for the file-analysis page: selected file path and progress (percent).
path = ""
treatment = 0

page_file = """
<|{path}|file_selector|extensions=.txt|label=Upload .txt file|on_action=analyze_file|>
<|{f'Downloading {treatment}%...'}|>

<br/>

<|Table|expandable|
<|{dataframe2}|table|width=100%|number_format=%.2f|>
|>

<br/>

<|{dataframe2}|chart|type=bar|x=Text|y[1]=Score Pos|y[2]=Score Neu|y[3]=Score Neg|y[4]=Overall|color[1]=green|color[2]=grey|color[3]=red|type[4]=line|height=600px|>
"""


def analyze_file(state):
    """Score every sentence of the uploaded .txt file, reporting progress."""
    state.dataframe2 = dataframe2
    state.treatment = 0
    with open(state.path, "r", encoding='utf-8') as f:
        data = f.read()
        # split lines and eliminates duplicates
        file_list = list(dict.fromkeys(data.replace('\n', ' ').split(".")[:-1]))

    for i in range(len(file_list)):
        text = file_list[i]
        state.treatment = int((i + 1) * 100 / len(file_list))
        temp = state.dataframe2.copy()
        scores = analyze_text(text)
        temp.loc[len(temp)] = scores
        state.dataframe2 = temp

    # Reset the selector so the same file can be re-uploaded.
    state.path = None


# Multi-page app: a navbar root plus the interactive and file pages.
pages = {"/": "<|toggle|theme|>\n<center>\n<|navbar|>\n</center>",
         "line": page,
         "text": page_file}

Gui(pages=pages).run()
from transformers import AutoTokenizer
from transformers import AutoModelForSequenceClassification
from scipy.special import softmax
import numpy as np
import pandas as pd
from taipy.gui import Gui, notify

# Text typed by the user; bound to the input control below.
text = "Original text"

# Two-column layout: input side, results table side; mean indicators; chart.
page = """
# Getting started with Taipy GUI

<|layout|columns=1 1|
<|
My text: <|{text}|>

Enter a word:
<|{text}|input|>
<|Analyze|button|on_action=local_callback|>
|>

<|Table|expandable|
<|{dataframe}|table|width=100%|number_format=%.2f|>
|>
|>

<|layout|columns=1 1 1|
## Positive <|{float(np.mean(dataframe['Score Pos']))}|text|format=%.2f|raw|>%

## Neutral <|{float(np.mean(dataframe['Score Neu']))}|text|format=%.2f|raw|>%

## Negative <|{float(np.mean(dataframe['Score Neg']))}|text|format=%.2f|raw|>%
|>

<br/>

<|{dataframe}|chart|type=bar|x=Text|y[1]=Score Pos|y[2]=Score Neu|y[3]=Score Neg|y[4]=Overall|color[1]=green|color[2]=grey|color[3]=red|type[4]=line|>
"""

# Pretrained 3-class (negative/neutral/positive) Twitter sentiment model.
MODEL = f"cardiffnlp/twitter-roberta-base-sentiment"
tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModelForSequenceClassification.from_pretrained(MODEL)

# One row per analyzed text; seeded with a neutral placeholder row.
dataframe = pd.DataFrame({"Text": [''],
                          "Score Pos": [0.33],
                          "Score Neu": [0.33],
                          "Score Neg": [0.33],
                          "Overall": [0]})


def analyze_text(text):
    """Score *text*; returns softmax scores per class, Overall = pos - neg."""
    # Run for Roberta Model
    encoded_text = tokenizer(text, return_tensors='pt')
    output = model(**encoded_text)
    scores = output[0][0].detach().numpy()
    scores = softmax(scores)
    # Logit order for this model: 0=negative, 1=neutral, 2=positive.
    return {"Text": text,
            "Score Pos": scores[2],
            "Score Neu": scores[1],
            "Score Neg": scores[0],
            "Overall": scores[2] - scores[0]}


def local_callback(state):
    """Analyze the typed text, append its row to the table, clear the input."""
    notify(state, 'Info', f'The text is: {state.text}', True)
    temp = state.dataframe.copy()
    scores = analyze_text(state.text)
    temp.loc[len(temp)] = scores
    state.dataframe = temp
    state.text = ""


Gui(page).run()
from taipy.gui import Gui

text = "Original text"

# Minimal two-control page: a bound label plus an input that edits `text`.
page = """
# Getting started with Taipy GUI

My text: <|{text}|>

<|{text}|input|>
"""

Gui(page=page).run()
from transformers import AutoTokenizer
from transformers import AutoModelForSequenceClassification
from scipy.special import softmax
import numpy as np
import pandas as pd
from taipy.gui import Gui, notify

# Text typed by the user; bound to the input control below.
text = "Original text"

# Pretrained 3-class (negative/neutral/positive) Twitter sentiment model.
MODEL = f"cardiffnlp/twitter-roberta-base-sentiment"
tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModelForSequenceClassification.from_pretrained(MODEL)

# One row per analyzed text; seeded with a neutral placeholder row.
dataframe = pd.DataFrame({"Text": [''],
                          "Score Pos": [0.33],
                          "Score Neu": [0.33],
                          "Score Neg": [0.33],
                          "Overall": [0]})

# Torch is, for now, only available for the Python version between 3.8 and 3.10.
# If you cannot install these packages, just return a dictionary of random numbers for the `analyze_text(text).`


def analyze_text(text):
    """Score *text*; returns softmax scores per class, Overall = pos - neg."""
    # Run for Roberta Model
    encoded_text = tokenizer(text, return_tensors='pt')
    output = model(**encoded_text)
    scores = output[0][0].detach().numpy()
    scores = softmax(scores)
    # Logit order for this model: 0=negative, 1=neutral, 2=positive.
    return {"Text": text,
            "Score Pos": scores[2],
            "Score Neu": scores[1],
            "Score Neg": scores[0],
            "Overall": scores[2] - scores[0]}


def local_callback(state):
    """Analyze the typed text, append its scores row, clear the input."""
    notify(state, 'Info', f'The text is: {state.text}', True)
    temp = state.dataframe.copy()
    scores = analyze_text(state.text)
    temp.loc[len(temp)] = scores
    state.dataframe = temp
    state.text = ""


page = """
<|toggle|theme|>

# Getting started with Taipy GUI

My text: <|{text}|>

Enter a word:
<|{text}|input|>
<|Analyze|button|on_action=local_callback|>

## Positive <|{float(np.mean(dataframe['Score Pos']))}|text|format=%.2f|>%

## Neutral <|{float(np.mean(dataframe['Score Neu']))}|text|format=%.2f|>%

## Negative <|{float(np.mean(dataframe['Score Neg']))}|text|format=%.2f|>%

<|{dataframe}|table|number_format=%.2f|>

<|{dataframe}|chart|type=bar|x=Text|y[1]=Score Pos|y[2]=Score Neu|y[3]=Score Neg|y[4]=Overall|color[1]=green|color[2]=grey|color[3]=red|type[4]=line|>
"""

Gui(page).run()
from taipy import Gui

# Smallest possible Taipy app: one static Markdown page.
root_page = "# Getting started with *Taipy*"
Gui(page=root_page).run()
import pandas as pd
from taipy.gui import Gui, notify

text = "Original text"

# Page: theme toggle, bound text + input, button, results table and bar chart.
page = """
<|toggle|theme|>

# Getting started with Taipy GUI

My text: <|{text}|>

<|{text}|input|>

<|Analyze|button|on_action=local_callback|>

<|{dataframe}|table|number_format=%.2f|>

<|{dataframe}|chart|type=bar|x=Text|y[1]=Score Pos|y[2]=Score Neu|y[3]=Score Neg|y[4]=Overall|color[1]=green|color[2]=grey|color[3]=red|type[4]=line|>
"""

# Placeholder scores until a real model is plugged in.
dataframe = pd.DataFrame({"Text": ['Test', 'Other', 'Love'],
                          "Score Pos": [1, 1, 4],
                          "Score Neu": [2, 3, 1],
                          "Score Neg": [1, 2, 0],
                          "Overall": [0, -1, 4]})


def local_callback(state):
    """Append the typed text as a zero-scored row, then clear the input."""
    notify(state, 'info', f'The text is: {state.text}')
    updated = state.dataframe.copy()
    updated.loc[len(updated)] = {"Text": state.text,
                                 "Score Pos": 0,
                                 "Score Neu": 0,
                                 "Score Neg": 0,
                                 "Overall": 0}
    state.dataframe = updated
    state.text = ""


Gui(page).run()
from taipy.gui import Gui, Markdown

# Product id typed by the admin; bound to the input control below.
input_pid = None
# BUG FIX: the list mixed a (path, label) tuple with two bare strings;
# every entry must be a (path, label) pair.  Also fixed the "Prodcut" typo.
navigation = [("/add_product", "Add Product"), ("/", "Home")]


def submit_button(state):
    """Button callback for the Add Product form.

    BUG FIX: the original had an incomplete `def submit_button(state): state.`
    pasted INSIDE the page string, where it rendered as text and did nothing.
    TODO: persist the entered product id; for now just log it.
    """
    print(state.input_pid)


# BUG FIX: the button control was missing its closing `|>`.
page = """
# Admin Panel

## Add Product

<|{input_pid}|input|>

<|submit|button|on_action=submit_button|>
"""

page1 = """
ii
"""

Gui(page=page + page1).run(title="Go To Mall | Admin Panel", port=4000)
from taipy import Config
from taipy import Core, Gui
from taipy.gui import Markdown
import taipy as tp

from pages.home import home_md
from pages.temp import temp_page


def build_message(name: str):
    """Build the greeting displayed on the page."""
    return f"Hello! {name}"


# Data nodes: the typed-in name and the generated greeting.
input_name_data_node_cfg = Config.configure_data_node(id="input_name")
message_data_node_cfg = Config.configure_data_node(id="message")
# NOTE(review): "buil_msg" looks like a typo for "build_msg"; kept as-is
# because renaming a config id can invalidate previously persisted entities.
build_msg_task_cfg = Config.configure_task(
    "buil_msg", build_message, input_name_data_node_cfg, message_data_node_cfg)
scenario_cfg = Config.configure_scenario(
    "scenariod", task_configs=[build_msg_task_cfg])

# making GUI
input_name = "M ahi"
message = None


def submit_scenario(state):
    """Run the scenario with the name typed by this client and show the result."""
    state.scenario.input_name.write(state.input_name)
    state.scenario.submit()
    # BUG FIX: read the result through this client's state (state.scenario),
    # not the module-level `scenario`, so concurrent clients don't cross wires.
    state.message = state.scenario.message.read()


love = "sazia"
page = """
Name: <|{input_name}|input|>
<|submit|button|on_action=submit_scenario|>
Message: <|{message}|text|>
Kima : <|All world are need to safe|text|>
"""

pages = {
    "/": home_md,
    "temp": temp_page,
}

if __name__ == "__main__":
    Core().run()
    # Manage scenarios and data: one scenario instance backing the GUI.
    scenario = tp.create_scenario(scenario_cfg)
    # Run the GUI server.
    Gui(pages=pages).run(title="Mahi Template",
                         port=5000,
                         favicon="https://www.youtube.com/s/gaming/emoji/7ff574f2/emoji_u1f602.png",
                         )
from taipy.gui import Gui, Markdown

name = "maho"
# BUG FIX: the page binds {dialog_is_visible}, but the variable was never
# defined, so Taipy could not resolve the binding. Start with a hidden dialog.
dialog_is_visible = False
...
page = """
...
<|{dialog_is_visible}|dialog|
Enter a name: <|{name}|input|>
|>
...
"""
...
pages = {
    "/": page,
    'page1': Markdown("# My first page"),
    'page2': Markdown("# My second page")
}

Gui(pages=pages).run(title="Mahi App", port=5001)
from taipy.gui import Markdown, Gui

# Texts bound into the home page below.
text = "Welcome to home page"
mahi_text = "So how are you"

# Home page layout: heading plus the two bound texts.
home_md = Markdown(
    """
# **Home**

<|{text}|>

<br/>

<|{mahi_text}|>
"""
)
from taipy.gui import Gui, Markdown


def fahren_to_celcius(fahr):
    """Convert a Fahrenheit temperature to Celsius."""
    return 5 * (fahr - 32) / 9


# Demo values shown on the page below.
fahr = 100
celcious = fahren_to_celcius(fahr)

# Conversion page: shows the Fahrenheit input and its Celsius equivalent.
temp_page = Markdown("""
# **Home**

Fahrenheit : <|{fahr}|>

Converted Celcius : <|{celcious}|>
""")
from taipy.gui import Gui, Markdown, notify

value = 0

# Single page: docs link, a slider bound to `value`, and a push button.
single_page = Markdown("""
# Taipy Application

Check the documentation [here](https://docs.taipy.io/en/latest/manuals/about/).

<|{value}|slider|on_change=on_slider|>

<|Push|button|on_action=on_push|>
""")


def on_push(state):
    """Button callback — intentionally a no-op placeholder."""
    ...


def on_slider(state):
    """Celebrate when the slider reaches its maximum."""
    if state.value != 100:
        return
    notify(state, "success", "Taipy is running!")


def on_change(state, var_name: str, var_value):
    """Global change hook — placeholder."""
    ...


if __name__ == "__main__":
    app = Gui(single_page)
    app.run()
from taipy.gui import Gui
from taipy.config import Config

from pages.root.root import *
from pages.page_1.page_1 import page_1_md
from pages.page_2.page_2 import page_2_md

# Scenario/task configuration is externalized in TOML.
Config.load("config/config.toml")


def on_change(state, var_name: str, var_value):
    """Global change hook — placeholder."""
    ...


# Route table: root layout plus the two content pages.
pages = {
    "/": root_md,
    "page_1": page_1_md,
    "page_2": page_2_md,
}

if __name__ == "__main__":
    Gui(pages=pages).run()
from taipy.config import Config
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression


def clean_data(data):
    """Drop rows with missing values, then drop duplicate rows."""
    deduped = data.dropna()
    return deduped.drop_duplicates()


def predict(data):
    """Fit y ~ x by ordinary least squares and append a y_pred column."""
    features = data[["x"]]
    targets = data[["y"]]
    regressor = LinearRegression()
    regressor.fit(features, targets)
    data["y_pred"] = regressor.predict(features)
    return data


def evaluate(data):
    """Placeholder metric — returns a random score in [0, 1)."""
    return np.random.rand()
from taipy.gui import Markdown
import pandas as pd

# Bound to the currently selected scenario and its displayed predictions.
scenario = None
results = None


def show_results(state):
    """Load the selected scenario's predictions into the results table."""
    predictions_node = state.scenario.predictions
    state.results = predictions_node.read()


page_1_md = Markdown("pages/page_1/page_1.md")
from taipy.gui import Markdown

# Root layout shared by every page; content lives in pages/root/root.md.
root_md = Markdown("pages/root/root.md")
from taipy.gui import Markdown
import pandas as pd

# Bound to the file selector and the data table on page 2.
path = None
data = None


def drop_csv(state):
    """File-selector callback: parse the chosen CSV into the data table."""
    chosen = state.path
    state.data = pd.read_csv(chosen)


page_2_md = Markdown("pages/page_2/page_2.md")
from taipy.gui import Gui, Markdown, notify

from pages.root.root import *
from pages.page_1.page_1 import page_1_md
from pages.page_2.page_2 import page_2_md


def on_change(state, var_name: str, var_value):
    """Global change hook — placeholder."""
    ...


# Route table: root layout plus the two content pages.
pages = {
    "/": root_md,
    "page_1": page_1_md,
    "page_2": page_2_md,
}

if __name__ == "__main__":
    Gui(pages=pages).run()
from taipy.gui import Markdown

# Page 1; content lives in pages/page_1/page_1.md.
page_1_md = Markdown("pages/page_1/page_1.md")
from taipy.gui import Markdown

# Root layout shared by every page; content lives in pages/root/root.md.
root_md = Markdown("pages/root/root.md")
from taipy.gui import Markdown

# Page 2; content lives in pages/page_2/page_2.md.
page_2_md = Markdown("pages/page_2/page_2.md")
from taipy.gui import Gui
from pages import home
import os

# Honor a platform-provided PORT (e.g. on a PaaS); default to 8000 locally.
port = os.environ.get("PORT", "8000")
gui = Gui(page=home.page).run(title="Demo Logistic Regression", port=port)
from config.nodes import (
    node_initial_dataset,
    node_prediction,
    node_prediction_model,
    node_X,
    node_Y,
)
from models.data import make_X, make_Y
from models.predict import train, predict
from taipy import Config

# Feature extraction: initial dataset -> X
task_make_X = Config.configure_task(
    id="make_X",
    input=[node_initial_dataset],
    output=node_X,
    function=make_X,
)

# Label extraction: initial dataset -> Y
task_make_Y = Config.configure_task(
    id="make_Y",
    input=[node_initial_dataset],
    output=node_Y,
    function=make_Y,
)

# Model fitting: (X, Y) -> fitted model
task_train = Config.configure_task(
    id="train", input=[node_X, node_Y], output=node_prediction_model, function=train
)

# Inference: predict(x, regression) takes the features and the FITTED MODEL.
# BUG FIX: the task was wired as [node_X, node_Y], which passed the labels
# where the model belongs and crashed at run time (Y has no .predict()).
task_predict = Config.configure_task(
    id="predict",
    input=[node_X, node_prediction_model],
    output=node_prediction,
    function=predict,
)
from taipy import Config

# Data node configurations for the logistic-regression workflow:
# raw input, feature matrix, labels, fitted model, and model output.
node_initial_dataset = Config.configure_data_node(id="initial_dataset")
node_X = Config.configure_data_node(id="X")
node_Y = Config.configure_data_node(id="Y")
node_prediction_model = Config.configure_data_node(id="prediction_model")
node_prediction = Config.configure_data_node(id="prediction")
from taipy import Config

from config.tasks import task_make_X, task_make_Y, task_train, task_predict

# Training pipeline: build features/labels, then fit the model.
pipeline_train = Config.configure_pipeline(
    id="train", task_configs=[task_make_X, task_make_Y, task_train]
)
# Inference pipeline: run predictions with the fitted model.
pipeline_predict = Config.configure_pipeline(id="predict", task_configs=[task_predict])
from sklearn.linear_model import LogisticRegression


def train(X, Y):
    """Fit a default logistic regression on the first 50 samples.

    Prints the learned parameters and train/test accuracy, then returns
    the fitted estimator.
    """
    split = 50
    X_train, X_test = X[:split], X[split:]
    Y_train, Y_test = Y[:split], Y[split:]

    # Using scikit-learn default
    regression = LogisticRegression(random_state=0).fit(X_train, Y_train)

    print(f"intercept: {regression.intercept_} coefficients: {regression.coef_}")
    print(f"train accuracy: {regression.score(X_train, Y_train)}")
    print(f"test accuracy: {regression.score(X_test, Y_test)}")
    return regression


def predict(x, regression: LogisticRegression):
    """Return class predictions for *x* using the fitted model."""
    return regression.predict(x)
import numpy as np

# Set seed for random number generator so the synthetic data is reproducible.
rg = np.random.default_rng(seed=0)

# Create an array with 500 rows and 3 columns.
# This will serve as initial data node
initial_dataset = rg.normal(size=(500, 3))


def make_X(dataset):
    """Build the feature matrix from *dataset*.

    Drops the first column (treated as noise) and appends two noisy copies
    of the remaining columns, yielding twice as many feature columns.
    """
    # Remove the first column which can be considered as noise
    X1 = np.delete(dataset, 0, axis=1)
    # Now create two more columns correlated with X1.
    # BUG FIX: use the seeded generator `rg` (np.random.normal broke the
    # reproducibility the seed above promises) and derive the noise shape
    # from the input instead of hard-coding (500, 2).
    X2 = X1 + 0.1 * rg.normal(size=X1.shape)
    X = np.concatenate((X1, X2), axis=1)
    return X


def make_Y(dataset):
    """Label each row: sigmoid(sum of the 3 features) > 0.5, i.e. positive sum."""
    P = 1 / (1 + np.e ** (-np.matmul(dataset, [1, 1, 1])))
    Y = P > 0.5
    return Y
from taipy.gui import Markdown
import taipy as tp
from taipy.core.job.job import Job

from config.pipelines import pipeline_train
from models.data import initial_dataset


def job_status_changed(pipeline, job: Job):
    """Subscription callback: trace every job status transition."""
    print(job.status)


def training_button_clicked(state, id, action):
    """GUI callback: create the training pipeline, seed its input, submit it."""
    pipeline = tp.create_pipeline(pipeline_train)
    # Set initial dataset:
    pipeline.initial_dataset.write(initial_dataset)
    tp.subscribe_pipeline(pipeline=pipeline, callback=job_status_changed)
    tp.submit(pipeline)


page = Markdown("src/pages/home.md")
from taipy import Gui

# Single static hello-world page.
page = """
# Hello World 🌍 with *Taipy*
This is my first Taipy test app. And it is running fine!
"""

Gui(page).run(use_reloader=True)  # use_reloader=True if you are in development
from taipy import Gui

from page.dashboard_fossil_fuels_consumption import *

if __name__ == "__main__":
    # use_reloader=True if you are in development
    Gui(page).run(use_reloader=True, title="Test", dark_mode=False)
import pandas as pd
import taipy as tp

from data.data import dataset_fossil_fuels_gdp

# Default selection and the selector's list of values.
country = "Spain"
region = "Europe"
lov_region = list(dataset_fossil_fuels_gdp.Entity.unique())


def load_dataset(_country):
    """Load dataset for a specific country.

    Args:
        _country (str): The name of the country.

    Returns:
        pandas.DataFrame: A DataFrame containing the fossil fuels GDP data
        for the specified country.
    """
    dataset_fossil_fuels_gdp_cp = dataset_fossil_fuels_gdp.reset_index()
    # BUG FIX: build the row mask on the reset-index copy itself; masking it
    # with the ORIGINAL frame only worked through accidental index alignment.
    dataset_fossil_fuels_gdp_cp = dataset_fossil_fuels_gdp_cp[
        dataset_fossil_fuels_gdp_cp["Entity"] == _country
    ]
    return dataset_fossil_fuels_gdp_cp


dataset_fossil_fuels_gdp_cp = load_dataset(country)


def on_change_country(state):
    """Update the dataset based on the selected country.

    Args:
        state (object): The "state" of the variables ran by the program
            (value changes through selectors)

    Returns:
        None
    """
    print("country is:", state.country)
    state.dataset_fossil_fuels_gdp_cp = load_dataset(state.country)


# Fixed axis ranges so the chart is comparable across countries.
layout = {"yaxis": {"range": [0, 100000]}, "xaxis": {"range": [1965, 2021]}}

page = """
# Fossil Fuel consumption by per capita by country*

Data comes from <a href="https://ourworldindata.org/grapher/per-capita-fossil-energy-vs-gdp" target="_blank">Our World in Data</a>

<|{country}|selector|lov={lov_region}|on_change=on_change_country|dropdown|label=Country/Region|>

<|{dataset_fossil_fuels_gdp_cp}|chart|type=plot|x=Year|y=Fossil fuels per capita (kWh)|height=200%|layout={layout}|>

## Fossil fuel per capita for <|{country}|>:

<|{dataset_fossil_fuels_gdp_cp}|table|height=400px|width=95%|>
"""
import pandas as pd

# Per-capita fossil-fuel consumption vs GDP (Our World in Data export).
dataset_fossil_fuels_gdp = pd.read_csv("data/per-capita-fossil-energy-vs-gdp.csv")
country_codes = pd.read_csv("./data/country_codes.csv")

# Attach each country's region via its ISO alpha-3 code.
dataset_fossil_fuels_gdp = dataset_fossil_fuels_gdp.merge(
    country_codes[["alpha-3", "region"]], how="left", left_on="Code", right_on="alpha-3"
)

# Keep only rows that actually have a consumption figure.
# BUG FIX: reset_index(drop=True) — the plain reset_index() added a stray
# "index" column that then showed up in the GUI table.
dataset_fossil_fuels_gdp = dataset_fossil_fuels_gdp[
    ~dataset_fossil_fuels_gdp["Fossil fuels per capita (kWh)"].isnull()
].reset_index(drop=True)

# Scale the consumption figures by 1000 — presumably the source is in MWh
# despite the kWh column name (assumption — TODO confirm against the source).
dataset_fossil_fuels_gdp["Fossil fuels per capita (kWh)"] = (
    dataset_fossil_fuels_gdp["Fossil fuels per capita (kWh)"] * 1000
)
# This is a sample Python script.

# Press Maj+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.


def print_hi(name):
    """Greet *name* on stdout."""
    # Use a breakpoint in the code line below to debug your script.
    greeting = f'Hi, {name}'
    print(greeting)  # Press Ctrl+F8 to toggle the breakpoint.


# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    print_hi('PyCharm')

# See PyCharm help at https://www.jetbrains.com/help/pycharm/
from taipy.gui import Gui, notify
from taipy import Config
import pandas as pd

# Task table backing the GUI. "Completed" is coerced to bool so the table
# renders it as a checkbox column.
tasks = pd.DataFrame({
    "Type": [],
    "Name": [],
    "Completed": []
})
tasks["Completed"] = tasks["Completed"].astype("bool")

# Bound to the two entry controls below.
task_name = ""
task_type = ""

page = """
# TODO Schedular

Enter Task: <|{task_name}|input|>
Type: <|{task_type}|selector|lov=Personal;Home;Work|dropdown|>
<|Add Task|button|on_action=on_task_add|>

<|{tasks}|table|filter|editable|editable[Type]=False|on_edit=on_task_edit|on_delete=on_task_delete|style=style_completed|>
"""


def style_completed(_1, _2, values):
    # Row style hook: strike out tasks that are marked completed.
    if(values["Completed"]):
        return "strikeout"


def on_task_edit(state, var_name, payload):
    """Apply an inline table edit (payload: index/col/user_value) to tasks."""
    if(var_name == "tasks"):
        index = payload["index"]
        col = payload["col"]
        value = payload["user_value"]
        # Copy-and-reassign so Taipy notices the state change.
        new_tasks = state.tasks.copy()
        new_tasks.loc[index, col] = value
        state.tasks = new_tasks
        notify(state, "I", "Task Updated.")


def on_task_delete(state, var_name, payload):
    """Remove the row selected for deletion in the table."""
    if(var_name == "tasks"):
        index = payload["index"]
        state.tasks = state.tasks.drop(index=index)
        notify(state, "E", "Task Deleted.")


def on_task_add(state, var_name, payload):
    """Prepend a new (not-completed) task built from the two entry controls."""
    if(state.task_name == "" or state.task_type == ""):
        notify(state, "E", "Task Name or Task Type Not Set.")
        return False
    _task_type = state.task_type
    _task_name = state.task_name
    _isCompleted = False
    new_data = pd.DataFrame([[_task_type, _task_name, _isCompleted]], columns=state.tasks.columns)
    # New tasks go on top of the list.
    state.tasks = pd.concat([new_data, state.tasks], axis=0, ignore_index=True)
    notify(state, "S", "New Task Added Successfully.")


Gui(page, css_file="todo.css").run(use_reloader=True)
#!/usr/bin/env python """The setup script.""" import json import os from setuptools import find_namespace_packages, find_packages, setup with open("README.md") as readme_file: readme = readme_file.read() with open(f"src{os.sep}taipy{os.sep}config{os.sep}version.json") as version_file: version = json.load(version_file) version_string = f'{version.get("major", 0)}.{version.get("minor", 0)}.{version.get("patch", 0)}' if vext := version.get("ext"): version_string = f"{version_string}.{vext}" requirements = ["toml>=0.10,<0.11", "deepdiff>=6.2,<6.3"] test_requirements = ["pytest>=3.8"] setup( author="Avaiga", author_email="dev@taipy.io", python_requires=">=3.8", classifiers=[ "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Natural Language :: English", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", ], description="A Taipy package dedicated to easily configure a Taipy application.", install_requires=requirements, long_description=readme, long_description_content_type="text/markdown", include_package_data=True, license="Apache License 2.0", keywords="taipy-config", name="taipy-config", package_dir={"": "src"}, packages=find_namespace_packages(where="src") + find_packages(include=["taipy", "taipy.config", "taipy.config.*", "taipy.logger", "taipy.logger.*"]), test_suite="tests", tests_require=test_requirements, url="https://github.com/avaiga/taipy-config", version=version_string, zip_safe=False, )
import ast
import re
from pathlib import Path
from typing import List


def _get_function_delimiters(initial_line, lines):
    """Return (begin, end) slice bounds of the definition around *initial_line*.

    Walks backwards to the preceding blank line (to include decorators),
    forwards through a multi-line signature to the closing ":", and then
    through the docstring when the next line opens one.
    """
    begin = end = initial_line
    while True:
        if lines[begin - 1] == "\n":
            break
        begin -= 1
    # Multi-line signature: advance to the line carrying the closing ":".
    if lines[end].endswith("(\n"):
        while ":\n" not in lines[end]:
            end += 1
    # Include the docstring, if the following line opens one.
    if '"""' in lines[end + 1]:
        while True:
            if '"""\n' in lines[end]:
                break
            end += 1
    return begin, end + 1


def _get_file_lines(filename: str) -> List[str]:
    # Get file lines for later
    with open(filename) as f:
        return f.readlines()


def _get_file_ast(filename: str):
    # Get raw text and build ast
    _config = Path(filename)
    _tree = _config.read_text()
    return ast.parse(_tree)


def _build_base_config_pyi(filename, base_pyi):
    """Append the Config class header and its public method signatures
    (with docstrings, stubbed out when absent) to *base_pyi*."""
    lines = _get_file_lines(filename)
    tree = _get_file_ast(filename)

    class_lineno = [f.lineno for f in ast.walk(tree) if isinstance(f, ast.ClassDef) and f.name == "Config"]
    begin_class, end_class = _get_function_delimiters(class_lineno[0] - 1, lines)
    base_pyi += "".join(lines[begin_class:end_class])

    functions = [f.lineno for f in ast.walk(tree) if isinstance(f, ast.FunctionDef) and not f.name.startswith("__")]
    for ln in functions:
        begin_line, end_line = _get_function_delimiters(ln - 1, lines)
        base_pyi += "".join(lines[begin_line:end_line])
        base_pyi = __add_docstring(base_pyi, lines, end_line)
        base_pyi += "\n"

    return base_pyi


def __add_docstring(base_pyi, lines, end_line):
    # Stub an empty docstring when the extracted signature has none.
    if '"""' not in lines[end_line - 1]:
        base_pyi += '\t\t""""""\n'.replace("\t", "    ")
    return base_pyi


def _build_entity_config_pyi(base_pyi, filename, entity_map):
    """Append configure_*/set_default_*/add_* method stubs from an entity
    config module, renaming private names via *entity_map*."""
    lines = _get_file_lines(filename)
    tree = _get_file_ast(filename)

    functions = {}
    for f in ast.walk(tree):
        if isinstance(f, ast.FunctionDef):
            if "_configure" in f.name and not f.name.startswith("__"):
                functions[f.name] = f.lineno
            elif "_set_default" in f.name and not f.name.startswith("__"):
                functions[f.name] = f.lineno
            elif "_add" in f.name and not f.name.startswith("__"):
                functions[f.name] = f.lineno

    for k, v in functions.items():
        begin_line, end_line = _get_function_delimiters(v - 1, lines)
        try:
            func = "".join(lines[begin_line:end_line])
            # Private implementation names are exposed under their public alias.
            func = func if not k.startswith("_") else func.replace(k, entity_map.get(k))
            func = __add_docstring(func, lines, end_line) + "\n"
            base_pyi += func
        except Exception:
            print(f"key={k}")
            raise
    return base_pyi


def _generate_entity_and_property_maps(filename):
    """Scan _inject_section(...) calls to learn, per entity class, the public
    method aliases and the Config property name exposing the section."""
    entities_map = {}
    property_map = {}
    entity_tree = _get_file_ast(filename)
    functions = [
        f for f in ast.walk(entity_tree) if isinstance(f, ast.Call) and getattr(f.func, "id", "") == "_inject_section"
    ]

    for f in functions:
        entity = ast.unparse(f.args[0])
        entities_map[entity] = {}
        property_map[eval(ast.unparse(f.args[1]))] = entity
        # Remove class name from function map
        text = ast.unparse(f.args[-1]).replace(f"{entity}.", "")
        matches = re.findall(r"\((.*?)\)", text)
        for m in matches:
            v, k = m.replace("'", "").split(",")
            entities_map[entity][k.strip()] = v
    return entities_map, property_map


def _generate_acessors(base_pyi, property_map):
    """Emit a @_Classproperty accessor stub for every injected section."""
    for property, cls in property_map.items():
        # job_config exposes a single section; the others map name -> section.
        return_template = f"Dict[str, {cls}]" if property != "job_config" else f"{cls}"
        template = ("\t@_Classproperty\n" + f'\tdef {property}(cls) -> {return_template}:\n\t\t""""""\n').replace(
            "\t", "    "
        )
        base_pyi += template + "\n"
    return base_pyi


def _build_header(filename):
    # Static header (imports, licence) prepended to the generated stub.
    _file = Path(filename)
    return _file.read_text() + "\n\n"


if __name__ == "__main__":
    header_file = "stubs/pyi_header.py"
    config_init = Path("taipy-core/src/taipy/core/config/__init__.py")
    base_config = "src/taipy/config/config.py"
    dn_filename = "taipy-core/src/taipy/core/config/data_node_config.py"
    job_filename = "taipy-core/src/taipy/core/config/job_config.py"
    scenario_filename = "taipy-core/src/taipy/core/config/scenario_config.py"
    task_filename = "taipy-core/src/taipy/core/config/task_config.py"
    migration_filename = "taipy-core/src/taipy/core/config/migration_config.py"
    core_filename = "taipy-core/src/taipy/core/config/core_section.py"

    entities_map, property_map = _generate_entity_and_property_maps(config_init)

    pyi = _build_header(header_file)
    pyi = _build_base_config_pyi(base_config, pyi)
    pyi = _generate_acessors(pyi, property_map)
    pyi = _build_entity_config_pyi(pyi, scenario_filename, entities_map["ScenarioConfig"])
    pyi = _build_entity_config_pyi(pyi, dn_filename, entities_map["DataNodeConfig"])
    pyi = _build_entity_config_pyi(pyi, task_filename, entities_map["TaskConfig"])
    pyi = _build_entity_config_pyi(pyi, job_filename, entities_map["JobConfig"])
    pyi = _build_entity_config_pyi(pyi, migration_filename, entities_map["MigrationConfig"])
    pyi = _build_entity_config_pyi(pyi, core_filename, entities_map["CoreSection"])

    with open("src/taipy/config/config.pyi", "w") as f:
        f.writelines(pyi)
import json from typing import Any, Callable, Dict, List, Optional, Union from datetime import timedelta from taipy.core.config import DataNodeConfig, JobConfig, ScenarioConfig, TaskConfig, MigrationConfig, CoreSection from .checker.issue_collector import IssueCollector from .common._classproperty import _Classproperty from .common._config_blocker import _ConfigBlocker from .common.frequency import Frequency from .common.scope import Scope from .global_app.global_app_config import GlobalAppConfig from .section import Section from .unique_section import UniqueSection
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
import os
import pathlib
from unittest import TestCase, mock

from src.taipy.logger._taipy_logger import _TaipyLogger


class TestTaipyLogger(TestCase):
    """Smoke tests for the internal Taipy logger factory."""

    def test_taipy_logger(self):
        # Default configuration: both info and debug calls must not raise.
        _TaipyLogger._get_logger().info("baz")
        _TaipyLogger._get_logger().debug("qux")

    def test_taipy_logger_configured_by_file(self):
        # Point the env var at a logging config file next to this test,
        # then exercise the logger under that configuration.
        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "logger.conf")
        with mock.patch.dict(os.environ, {"TAIPY_LOGGER_CONFIG_PATH": path}):
            _TaipyLogger._get_logger().info("baz")
            _TaipyLogger._get_logger().debug("qux")
import os

import pytest

from src.taipy.config.config import Config
from src.taipy.config.exceptions.exceptions import ConfigurationUpdateBlocked
from tests.config.utils.named_temporary_file import NamedTemporaryFile

# Base configuration loaded from a file.
config_from_filename = NamedTemporaryFile(
    """
[TAIPY]
custom_property_not_overwritten = true
custom_property_overwritten = 10
"""
)

# Overriding configuration injected through the environment variable.
config_from_environment = NamedTemporaryFile(
    """
[TAIPY]
custom_property_overwritten = 11
"""
)


def test_load_from_environment_overwrite_load_from_filename():
    """Values from the environment-provided config override the file-loaded ones."""
    os.environ[Config._ENVIRONMENT_VARIABLE_NAME_WITH_CONFIG_PATH] = config_from_environment.filename
    try:
        Config.load(config_from_filename.filename)
        assert Config.global_config.custom_property_not_overwritten is True
        assert Config.global_config.custom_property_overwritten == 11
    finally:
        # Always restore the environment so a failed assertion cannot leak
        # the override into other tests.
        os.environ.pop(Config._ENVIRONMENT_VARIABLE_NAME_WITH_CONFIG_PATH)


def test_block_load_from_environment_overwrite_load_from_filename():
    """After block_update(), even environment-driven reloads are rejected."""
    Config.load(config_from_filename.filename)
    assert Config.global_config.custom_property_not_overwritten is True
    assert Config.global_config.custom_property_overwritten == 10

    Config.block_update()

    with pytest.raises(ConfigurationUpdateBlocked):
        os.environ[Config._ENVIRONMENT_VARIABLE_NAME_WITH_CONFIG_PATH] = config_from_environment.filename
        try:
            Config.load(config_from_filename.filename)
        finally:
            # Original code popped the variable after the raising Config.load,
            # so the cleanup never ran; finally guarantees it.
            os.environ.pop(Config._ENVIRONMENT_VARIABLE_NAME_WITH_CONFIG_PATH)

    # The blocked Config.load must not have overridden anything.
    assert Config.global_config.custom_property_not_overwritten is True
    assert Config.global_config.custom_property_overwritten == 10
import pytest

from src.taipy.config._config import _Config
from src.taipy.config._config_comparator._config_comparator import _ConfigComparator
from src.taipy.config._serializer._toml_serializer import _TomlSerializer
from src.taipy.config.checker.issue_collector import IssueCollector
from src.taipy.config.config import Config
from src.taipy.config.section import Section
from tests.config.utils.section_for_tests import SectionForTest
from tests.config.utils.unique_section_for_tests import UniqueSectionForTest


@pytest.fixture(scope="function", autouse=True)
def reset():
    """Reset the Config singleton and re-register the test sections before every test."""
    reset_configuration_singleton()
    register_test_sections()


def reset_configuration_singleton():
    """Restore every piece of Config's class-level state to a pristine instance."""
    Config.unblock_update()

    # Fresh, empty configuration layers.
    Config._default_config = _Config()._default_config()
    for layer in ("_python_config", "_file_config", "_env_file_config", "_applied_config"):
        setattr(Config, layer, _Config())

    # Fresh collaborators.
    Config._collector = IssueCollector()
    Config._serializer = _TomlSerializer()
    Config._comparator = _ConfigComparator()


def register_test_sections():
    """Register default test sections and expose their configure helpers on Config."""
    Config._register_default(UniqueSectionForTest("default_attribute"))
    Config.configure_unique_section_for_tests = UniqueSectionForTest._configure
    Config.unique_section_name = Config.unique_sections[UniqueSectionForTest.name]

    Config._register_default(SectionForTest(Section._DEFAULT_KEY, "default_attribute", prop="default_prop", prop_int=0))
    Config.configure_section_for_tests = SectionForTest._configure
    Config.section_name = Config.sections[SectionForTest.name]
import os
from unittest import mock

import pytest

from src.taipy.config.exceptions.exceptions import InvalidConfigurationId
from tests.config.utils.section_for_tests import SectionForTest
from tests.config.utils.unique_section_for_tests import UniqueSectionForTest


class WrongUniqueSection(UniqueSectionForTest):
    # Invalid class-level name: identifiers must not start with a digit.
    name = "1wrong_id"


class WrongSection(SectionForTest):
    name = "correct_name"


def test_section_uses_valid_id():
    # The unique section's own (class-level) name is invalid.
    with pytest.raises(InvalidConfigurationId):
        WrongUniqueSection(attribute="foo")

    # A section class with a valid name still rejects invalid instance ids.
    for invalid_id in ("wrong id", "1wrong_id", "wrong_@id"):
        with pytest.raises(InvalidConfigurationId):
            WrongSection(invalid_id, attribute="foo")


def test_templated_properties_are_replaced():
    # ENV[...] templates resolve against the environment, honoring ":int" casts.
    with mock.patch.dict(os.environ, {"foo": "bar", "baz": "1"}):
        unique_section = UniqueSectionForTest(attribute="attribute", tpl_property="ENV[foo]")
        assert unique_section.tpl_property == "bar"

        section = SectionForTest(id="my_id", attribute="attribute", tpl_property="ENV[baz]:int")
        assert section.tpl_property == 1
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
from src.taipy.config.config import Config
from src.taipy.config.global_app.global_app_config import GlobalAppConfig
from src.taipy.config.section import Section
from tests.config.utils.section_for_tests import SectionForTest
from tests.config.utils.unique_section_for_tests import UniqueSectionForTest


def _test_default_global_app_config(global_config: GlobalAppConfig):
    """Assert that a global app config carries only its default values."""
    assert global_config is not None
    assert not global_config.notification
    assert len(global_config.properties) == 0


def test_default_configuration():
    default_config = Config._default_config

    # Exactly one unique section (the test one) is registered by default.
    assert default_config._unique_sections is not None
    assert len(default_config._unique_sections) == 1
    unique_section = default_config._unique_sections[UniqueSectionForTest.name]
    assert unique_section is not None
    assert unique_section.attribute == "default_attribute"

    # Exactly one non-unique section type is registered by default.
    assert default_config._sections is not None
    assert len(default_config._sections) == 1

    # The default global config is reachable through all three entry points.
    _test_default_global_app_config(default_config._global_config)
    _test_default_global_app_config(Config.global_config)
    _test_default_global_app_config(GlobalAppConfig().default_config())


def test_register_default_configuration():
    Config._register_default(SectionForTest(Section._DEFAULT_KEY, "default_attribute", prop1="prop1"))
    # Registering a second default for the same section replaces the first one.
    Config._register_default(SectionForTest(Section._DEFAULT_KEY, "default_attribute", prop2="prop2"))

    default_section = Config.sections[SectionForTest.name][Section._DEFAULT_KEY]
    assert len(default_section.properties) == 1
    assert default_section.prop2 == "prop2"
    assert default_section.prop1 is None
import pytest

from src.taipy.config.config import Config
from src.taipy.config.exceptions.exceptions import LoadingError
from tests.config.utils.named_temporary_file import NamedTemporaryFile


def test_node_can_not_appear_twice():
    # TOML forbids declaring the same table twice in one document.
    duplicated_section = NamedTemporaryFile(
        """
[unique_section_name]
attribute = "my_attribute"

[unique_section_name]
attribute = "other_attribute"
"""
    )

    with pytest.raises(LoadingError, match="Can not load configuration"):
        Config.load(duplicated_section.filename)


def test_skip_configuration_outside_nodes():
    # Top-level keys that belong to no section are silently ignored.
    orphan_key = NamedTemporaryFile(
        """
foo = "bar"
"""
    )

    Config.load(orphan_key.filename)

    assert Config.global_config.foo is None
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
from src.taipy.config._config import _Config
from src.taipy.config.checker._checker import _Checker


class TestDefaultConfigChecker:
    """The default configuration must pass the checker without any issue."""

    def test_check_default_config(self):
        collector = _Checker._check(_Config._default_config())
        assert len(collector._errors) == 0
        assert len(collector._warnings) == 0
        assert len(collector._infos) == 0
from src.taipy.config.checker.issue import Issue
from src.taipy.config.checker.issue_collector import IssueCollector


class TestIssueCollector:
    """Behavior of IssueCollector's per-level lists and the merged `all` view."""

    @staticmethod
    def _counts(collector):
        # (errors, warnings, infos, all) lengths, for terse assertions.
        return (
            len(collector.errors),
            len(collector.warnings),
            len(collector.infos),
            len(collector.all),
        )

    def test_add_error(self):
        collector = IssueCollector()
        assert self._counts(collector) == (0, 0, 0, 0)

        expected = Issue(IssueCollector._ERROR_LEVEL, "field", "value", "message", "checker")

        collector._add_error("field", "value", "message", "checker")
        assert self._counts(collector) == (1, 0, 0, 1)
        assert collector.all[0] == expected

        collector._add_error("field", "value", "message", "checker")
        assert self._counts(collector) == (2, 0, 0, 2)
        assert collector.all[0] == expected
        assert collector.all[1] == expected

    def test_add_warning(self):
        collector = IssueCollector()
        assert self._counts(collector) == (0, 0, 0, 0)

        expected = Issue(IssueCollector._WARNING_LEVEL, "field", "value", "message", "checker")

        collector._add_warning("field", "value", "message", "checker")
        assert self._counts(collector) == (0, 1, 0, 1)
        assert collector.all[0] == expected

        collector._add_warning("field", "value", "message", "checker")
        assert self._counts(collector) == (0, 2, 0, 2)
        assert collector.all[0] == expected
        assert collector.all[1] == expected

    def test_add_info(self):
        collector = IssueCollector()
        assert self._counts(collector) == (0, 0, 0, 0)

        expected = Issue(IssueCollector._INFO_LEVEL, "field", "value", "message", "checker")

        collector._add_info("field", "value", "message", "checker")
        assert self._counts(collector) == (0, 0, 1, 1)
        assert collector.all[0] == expected

        collector._add_info("field", "value", "message", "checker")
        assert self._counts(collector) == (0, 0, 2, 2)
        assert collector.all[0] == expected
        assert collector.all[1] == expected

    def test_all(self):
        # `all` is ordered by severity (errors, then warnings, then infos)
        # and insertion-ordered within each severity.
        collector = IssueCollector()

        info_foo = Issue(IssueCollector._INFO_LEVEL, "foo", "bar", "baz", "qux")
        warning_foo2 = Issue(IssueCollector._WARNING_LEVEL, "foo2", "bar2", "baz2", "qux2")
        warning_foo3 = Issue(IssueCollector._WARNING_LEVEL, "foo3", "bar3", "baz3", "qux3")
        info_field = Issue(IssueCollector._INFO_LEVEL, "field", "value", "message", "checker")
        error_field = Issue(IssueCollector._ERROR_LEVEL, "field", "value", "message", "checker")

        collector._add_info("foo", "bar", "baz", "qux")
        assert collector.all[0] == info_foo

        collector._add_warning("foo2", "bar2", "baz2", "qux2")
        assert collector.all[0] == warning_foo2
        assert collector.all[1] == info_foo

        collector._add_warning("foo3", "bar3", "baz3", "qux3")
        assert collector.all[0] == warning_foo2
        assert collector.all[1] == warning_foo3
        assert collector.all[2] == info_foo

        collector._add_info("field", "value", "message", "checker")
        collector._add_error("field", "value", "message", "checker")
        assert collector.all[0] == error_field
        assert collector.all[1] == warning_foo2
        assert collector.all[2] == warning_foo3
        assert collector.all[3] == info_foo
        assert collector.all[4] == info_field
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
import os
from unittest import mock
from unittest.mock import MagicMock

from src.taipy.config import Config
from src.taipy.config.checker._checker import _Checker
from src.taipy.config.checker.issue_collector import IssueCollector

from tests.config.utils.checker_for_tests import CheckerForTest


def test_register_checker():
    """A checker registered through add_checker runs exactly once per Config.check()."""
    CheckerForTest._check = MagicMock()
    _Checker.add_checker(CheckerForTest)

    Config.check()

    CheckerForTest._check.assert_called_once()
import logging
from unittest import mock

from src.taipy.config._config import _Config
from src.taipy.config.checker._checkers._config_checker import _ConfigChecker
from src.taipy.config.checker.issue import Issue
from src.taipy.config.checker.issue_collector import IssueCollector


class MyCustomChecker(_ConfigChecker):
    """Minimal subclass used to verify the checker class name recorded on issues."""

    def _check(self) -> IssueCollector:
        pass


def _counts(collector):
    # (errors, warnings, infos, all) lengths, for terse assertions.
    return len(collector.errors), len(collector.warnings), len(collector.infos), len(collector.all)


def test__error():
    # Silence the logger's error output while exercising _error.
    with mock.patch.object(logging.Logger, "error"):
        collector = IssueCollector()
        assert len(collector.all) == 0

        _ConfigChecker(_Config(), collector)._error("field", 17, "my message")
        assert _counts(collector) == (1, 0, 0, 1)
        assert collector.errors[0] == Issue(IssueCollector._ERROR_LEVEL, "field", 17, "my message", "_ConfigChecker")

        MyCustomChecker(_Config(), collector)._error("foo", "bar", "baz")
        assert _counts(collector) == (2, 0, 0, 2)
        assert collector.errors[0] == Issue(IssueCollector._ERROR_LEVEL, "field", 17, "my message", "_ConfigChecker")
        assert collector.errors[1] == Issue(IssueCollector._ERROR_LEVEL, "foo", "bar", "baz", "MyCustomChecker")


def test__warning():
    collector = IssueCollector()
    assert len(collector.all) == 0

    _ConfigChecker(_Config(), collector)._warning("field", 17, "my message")
    assert _counts(collector) == (0, 1, 0, 1)
    assert collector.warnings[0] == Issue(IssueCollector._WARNING_LEVEL, "field", 17, "my message", "_ConfigChecker")

    MyCustomChecker(_Config(), collector)._warning("foo", "bar", "baz")
    assert _counts(collector) == (0, 2, 0, 2)
    assert collector.warnings[0] == Issue(IssueCollector._WARNING_LEVEL, "field", 17, "my message", "_ConfigChecker")
    assert collector.warnings[1] == Issue(IssueCollector._WARNING_LEVEL, "foo", "bar", "baz", "MyCustomChecker")


def test__info():
    collector = IssueCollector()
    assert len(collector.all) == 0

    _ConfigChecker(_Config(), collector)._info("field", 17, "my message")
    assert _counts(collector) == (0, 0, 1, 1)
    assert collector.infos[0] == Issue(IssueCollector._INFO_LEVEL, "field", 17, "my message", "_ConfigChecker")

    MyCustomChecker(_Config(), collector)._info("foo", "bar", "baz")
    assert _counts(collector) == (0, 0, 2, 2)
    assert collector.infos[0] == Issue(IssueCollector._INFO_LEVEL, "field", 17, "my message", "_ConfigChecker")
    assert collector.infos[1] == Issue(IssueCollector._INFO_LEVEL, "foo", "bar", "baz", "MyCustomChecker")
from src.taipy.config import IssueCollector
from src.taipy.config.checker._checkers._config_checker import _ConfigChecker


class CheckerForTest(_ConfigChecker):
    """No-op checker used as a stand-in when tests register custom checkers."""

    def _check(self) -> IssueCollector:
        # Report whatever has been collected so far, adding no issues of its own.
        return self._collector
from copy import copy
from typing import Any, Dict, List, Optional

from src.taipy.config import Config, Section
from src.taipy.config._config import _Config
from src.taipy.config.common._config_blocker import _ConfigBlocker

from .section_for_tests import SectionForTest


class SectionOfSectionsListForTest(Section):
    """Test section whose ``sections_list`` attribute references other sections.

    Used by the config test suite to exercise resolution of inter-section
    references when a configuration is serialized to / loaded from TOML.
    """

    name = "list_section_name"
    # Keys used for (de)serialization of the two custom attributes.
    _MY_ATTRIBUTE_KEY = "attribute"
    _SECTIONS_LIST_KEY = "sections_list"

    def __init__(self, id: str, attribute: Any = None, sections_list: Optional[List] = None, **properties):
        self._attribute = attribute
        # Falsy sections_list (None or empty) normalizes to a fresh list.
        self._sections_list = sections_list if sections_list else []
        super().__init__(id, **properties)

    def __copy__(self):
        # Shallow-copy the list and properties so the copy can diverge.
        return SectionOfSectionsListForTest(
            self.id, self._attribute, copy(self._sections_list), **copy(self._properties)
        )

    @property
    def attribute(self):
        # Resolve ENV[...] templates at read time.
        return self._replace_templates(self._attribute)

    @attribute.setter  # type: ignore
    @_ConfigBlocker._check()
    def attribute(self, val):
        self._attribute = val

    @property
    def sections_list(self):
        # Return a defensive copy; callers must use the setter to mutate.
        return list(self._sections_list)

    @sections_list.setter  # type: ignore
    @_ConfigBlocker._check()
    def sections_list(self, val):
        self._sections_list = val

    def _clean(self):
        """Reset the section to an empty state."""
        self._attribute = None
        self._sections_list = []
        self._properties.clear()

    def _to_dict(self):
        """Serialize to a plain dict; unset attributes are omitted."""
        as_dict = {}
        if self._attribute is not None:
            as_dict[self._MY_ATTRIBUTE_KEY] = self._attribute
        if self._sections_list:
            as_dict[self._SECTIONS_LIST_KEY] = self._sections_list
        as_dict.update(self._properties)
        return as_dict

    @classmethod
    def _from_dict(cls, as_dict: Dict[str, Any], id: str, config: Optional[_Config] = None):
        """Build a section from its serialized dict.

        Entries of ``sections_list`` that match a registered SectionForTest id
        are replaced by the section object itself; unknown ids are kept as-is.
        Leftover keys become free-form properties.
        """
        as_dict.pop(cls._ID_KEY, id)
        attribute = as_dict.pop(cls._MY_ATTRIBUTE_KEY, None)
        section_configs = config._sections.get(SectionForTest.name, None) or []  # type: ignore
        sections_list = []
        if inputs_as_str := as_dict.pop(cls._SECTIONS_LIST_KEY, None):
            for section_id in inputs_as_str:
                if section_id in section_configs:
                    sections_list.append(section_configs[section_id])
                else:
                    sections_list.append(section_id)
        return SectionOfSectionsListForTest(id=id, attribute=attribute, sections_list=sections_list, **as_dict)

    def _update(self, as_dict: Dict[str, Any], default_section=None):
        """Merge values from as_dict, falling back to the default section for gaps."""
        self._attribute = as_dict.pop(self._MY_ATTRIBUTE_KEY, self._attribute)
        if self._attribute is None and default_section:
            self._attribute = default_section._attribute
        self._sections_list = as_dict.pop(self._SECTIONS_LIST_KEY, self._sections_list)
        if self._sections_list is None and default_section:
            self._sections_list = default_section._sections_list
        self._properties.update(as_dict)
        if default_section:
            # Explicitly-set properties win over the defaults.
            self._properties = {**default_section.properties, **self._properties}

    @staticmethod
    def _configure(id: str, attribute: str, sections_list: Optional[List] = None, **properties):
        """Register a new instance on the global Config and return it."""
        section = SectionOfSectionsListForTest(id, attribute, sections_list, **properties)
        Config._register(section)
        return Config.sections[SectionOfSectionsListForTest.name][id]
import os
import tempfile


class NamedTemporaryFile:
    """A self-deleting temporary file whose content is set at construction.

    The file is closed immediately after writing so it can be re-opened by
    name on any platform, and it is removed when the wrapper is garbage
    collected.
    """

    def __init__(self, content=None):
        # delete=False so the file survives the `with` and can be re-opened.
        with tempfile.NamedTemporaryFile("w", delete=False) as fd:
            if content:
                fd.write(content)
            self.filename = fd.name

    def read(self):
        """Return the file's current content."""
        with open(self.filename, "r") as fp:
            return fp.read()

    def __del__(self):
        # Best-effort cleanup: the file may already have been removed (or
        # __init__ may have failed before filename was set), and __del__
        # must never raise.
        try:
            os.unlink(self.filename)
        except (OSError, AttributeError):
            pass
from copy import copy
from typing import Any, Dict, Optional

from src.taipy.config import Config, Section
from src.taipy.config._config import _Config
from src.taipy.config.common._config_blocker import _ConfigBlocker


class SectionForTest(Section):
    """Minimal non-unique Section implementation used by the config test suite.

    It carries one typed attribute plus free-form properties, and implements
    the (de)serialization hooks the Config machinery requires.
    """

    name = "section_name"
    # Key used for (de)serialization of the custom attribute.
    _MY_ATTRIBUTE_KEY = "attribute"

    def __init__(self, id: str, attribute: Any = None, **properties):
        self._attribute = attribute
        super().__init__(id, **properties)

    def __copy__(self):
        return SectionForTest(self.id, self._attribute, **copy(self._properties))

    @property
    def attribute(self):
        # Resolve ENV[...] templates at read time.
        return self._replace_templates(self._attribute)

    @attribute.setter  # type: ignore
    @_ConfigBlocker._check()
    def attribute(self, val):
        self._attribute = val

    def _clean(self):
        """Reset the section to an empty state."""
        self._attribute = None
        self._properties.clear()

    def _to_dict(self):
        """Serialize to a plain dict; the attribute key is omitted when unset."""
        as_dict = {}
        if self._attribute is not None:
            as_dict[self._MY_ATTRIBUTE_KEY] = self._attribute
        as_dict.update(self._properties)
        return as_dict

    @classmethod
    def _from_dict(cls, as_dict: Dict[str, Any], id: str, config: Optional[_Config] = None):
        """Build a section from its serialized dict; leftover keys become properties."""
        as_dict.pop(cls._ID_KEY, id)
        attribute = as_dict.pop(cls._MY_ATTRIBUTE_KEY, None)
        return SectionForTest(id=id, attribute=attribute, **as_dict)

    def _update(self, as_dict: Dict[str, Any], default_section=None):
        """Merge values from as_dict, falling back to the default section for gaps."""
        self._attribute = as_dict.pop(self._MY_ATTRIBUTE_KEY, self._attribute)
        if self._attribute is None and default_section:
            self._attribute = default_section._attribute
        self._properties.update(as_dict)
        if default_section:
            # Explicitly-set properties win over the defaults.
            self._properties = {**default_section.properties, **self._properties}

    @staticmethod
    def _configure(id: str, attribute: str, **properties):
        """Register a new instance on the global Config and return it."""
        section = SectionForTest(id, attribute, **properties)
        Config._register(section)
        return Config.sections[SectionForTest.name][id]
from copy import copy
from typing import Any, Dict, Optional

from src.taipy.config import Config
from src.taipy.config._config import _Config
from src.taipy.config.common._config_blocker import _ConfigBlocker
from src.taipy.config.unique_section import UniqueSection


class UniqueSectionForTest(UniqueSection):
    """Minimal UniqueSection implementation used by the config test suite.

    A unique section has at most one instance per configuration (no id),
    carrying one typed attribute plus free-form properties.
    """

    name = "unique_section_name"
    # Key used for (de)serialization of the custom attribute.
    _MY_ATTRIBUTE_KEY = "attribute"

    def __init__(self, attribute: Optional[str] = None, **properties):
        self._attribute = attribute
        super().__init__(**properties)

    def __copy__(self):
        return UniqueSectionForTest(self._attribute, **copy(self._properties))

    @property
    def attribute(self):
        # Resolve ENV[...] templates at read time.
        return self._replace_templates(self._attribute)

    @attribute.setter  # type: ignore
    @_ConfigBlocker._check()
    def attribute(self, val):
        self._attribute = val

    def _clean(self):
        """Reset the section to an empty state."""
        self._attribute = None
        self._properties.clear()

    def _to_dict(self):
        """Serialize to a plain dict; the attribute key is omitted when unset."""
        as_dict = {}
        if self._attribute is not None:
            as_dict[self._MY_ATTRIBUTE_KEY] = self._attribute
        as_dict.update(self._properties)
        return as_dict

    @classmethod
    def _from_dict(cls, as_dict: Dict[str, Any], id=None, config: Optional[_Config] = None):
        """Build the section from its serialized dict; leftover keys become properties."""
        as_dict.pop(cls._ID_KEY, None)
        attribute = as_dict.pop(cls._MY_ATTRIBUTE_KEY, None)
        return UniqueSectionForTest(attribute=attribute, **as_dict)

    def _update(self, as_dict: Dict[str, Any], default_section=None):
        """Merge values from as_dict, falling back to the default section for gaps."""
        self._attribute = as_dict.pop(self._MY_ATTRIBUTE_KEY, self._attribute)
        if self._attribute is None and default_section:
            self._attribute = default_section._attribute
        self._properties.update(as_dict)
        if default_section:
            # Explicitly-set properties win over the defaults.
            self._properties = {**default_section.properties, **self._properties}

    @staticmethod
    def _configure(attribute: str, **properties):
        """Register the unique instance on the global Config and return it."""
        section = UniqueSectionForTest(attribute, **properties)
        Config._register(section)
        return Config.unique_sections[UniqueSectionForTest.name]
import pytest

from src.taipy.config.common._validate_id import _validate_id
from src.taipy.config.exceptions.exceptions import InvalidConfigurationId


class TestId:
    def test_validate_id(self):
        # A plain identifier passes through unchanged.
        assert _validate_id("foo") == "foo"

        invalid_ids = (
            "1foo",       # starts with a digit
            "foo bar",    # contains a space
            "foo/foo$",   # contains forbidden characters
            "",           # empty
            " ",          # whitespace only
            "class",      # Python keywords...
            "def",
            "with",
            "CYCLE",      # ...and reserved Taipy words
            "SCENARIO",
            "SEQUENCE",
            "TASK",
            "DATANODE",
        )
        for invalid_id in invalid_ids:
            with pytest.raises(InvalidConfigurationId):
                _validate_id(invalid_id)
import pytest

from src.taipy.config.common.scope import Scope


def test_scope():
    """Scope ordering is SCENARIO < CYCLE < GLOBAL; non-Scope comparisons raise."""
    levels = [Scope.SCENARIO, Scope.CYCLE, Scope.GLOBAL]

    # __ge__ / __le__ hold for every ordered pair, including equal scopes.
    for index, lower in enumerate(levels):
        for higher in levels[index:]:
            assert higher >= lower
            assert lower <= higher

    # __gt__ / __lt__ hold only for strictly ordered pairs.
    for index, lower in enumerate(levels):
        for higher in levels[index + 1:]:
            assert higher > lower
            assert lower < higher

    # Comparing against a non-Scope value raises TypeError for all four operators.
    with pytest.raises(TypeError):
        assert Scope.SCENARIO >= "testing string"
    with pytest.raises(TypeError):
        assert Scope.SCENARIO > "testing string"
    with pytest.raises(TypeError):
        assert Scope.SCENARIO <= "testing string"
    with pytest.raises(TypeError):
        assert Scope.SCENARIO < "testing string"
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
import argparse
import re
import sys

import pytest

from src.taipy._cli._base_cli import _CLI

# argparse renamed the optional-arguments help header in Python 3.10.
if sys.version_info >= (3, 10):
    argparse_options_str = "options:"
else:
    argparse_options_str = "optional arguments:"


def preprocess_stdout(stdout):
    # Collapse newlines/tabs and runs of spaces so help-text comparisons are
    # insensitive to argparse's line wrapping.
    stdout = stdout.replace("\n", " ").replace("\t", " ")
    return re.sub(" +", " ", stdout)


def remove_subparser(name: str):
    """Remove a subparser from argparse."""
    # NOTE: reaches into argparse internals (_name_parser_map,
    # _choices_actions) because argparse has no public removal API.
    _CLI._sub_taipyparsers.pop(name, None)

    if _CLI._subparser_action:
        _CLI._subparser_action._name_parser_map.pop(name, None)

        for action in _CLI._subparser_action._choices_actions:
            if action.dest == name:
                _CLI._subparser_action._choices_actions.remove(action)


@pytest.fixture(autouse=True, scope="function")
def clean_argparser():
    # Start each test with a fresh parser, no argument groups and no subcommands.
    _CLI._parser = argparse.ArgumentParser(conflict_handler="resolve")
    _CLI._arg_groups = {}
    subcommands = list(_CLI._sub_taipyparsers.keys())
    for subcommand in subcommands:
        remove_subparser(subcommand)

    yield


def test_subparser(capfd):
    # Two independent subcommands must each render their own help text.
    subcommand_1 = _CLI._add_subparser("subcommand_1", help="subcommand_1 help")
    subcommand_1.add_argument("--foo", "-f", help="foo help")
    subcommand_1.add_argument("--bar", "-b", help="bar help")

    subcommand_2 = _CLI._add_subparser("subcommand_2", help="subcommand_2 help")
    subcommand_2.add_argument("--doo", "-d", help="doo help")
    subcommand_2.add_argument("--baz", "-z", help="baz help")

    expected_subcommand_1_help_message = f"""subcommand_1 [-h] [--foo FOO] [--bar BAR]

{argparse_options_str}
  -h, --help         show this help message and exit
  --foo FOO, -f FOO  foo help
  --bar BAR, -b BAR  bar help
"""
    subcommand_1.print_help()
    stdout, _ = capfd.readouterr()
    assert preprocess_stdout(expected_subcommand_1_help_message) in preprocess_stdout(stdout)

    expected_subcommand_2_help_message = f"""subcommand_2 [-h] [--doo DOO] [--baz BAZ]

{argparse_options_str}
  -h, --help         show this help message and exit
  --doo DOO, -d DOO  doo help
  --baz BAZ, -z BAZ  baz help
"""
    subcommand_2.print_help()
    stdout, _ = capfd.readouterr()
    assert preprocess_stdout(expected_subcommand_2_help_message) in preprocess_stdout(stdout)


def test_duplicate_subcommand():
    subcommand_1 = _CLI._add_subparser("subcommand_1", help="subcommand_1 help")
    subcommand_1.add_argument("--foo", "-f", help="foo help")

    subcommand_2 = _CLI._add_subparser("subcommand_1", help="subcommand_2 help")
    subcommand_2.add_argument("--bar", "-b", help="bar help")

    # The title of subcommand_2 is duplicated with subcommand_1, and therefore
    # there will be no new subcommand created
    assert len(_CLI._sub_taipyparsers) == 1


def test_groupparser(capfd):
    # Two argument groups must both appear in the top-level help.
    group_1 = _CLI._add_groupparser("group_1", "group_1 desc")
    group_1.add_argument("--foo", "-f", help="foo help")
    group_1.add_argument("--bar", "-b", help="bar help")

    group_2 = _CLI._add_groupparser("group_2", "group_2 desc")
    group_2.add_argument("--doo", "-d", help="doo help")
    group_2.add_argument("--baz", "-z", help="baz help")

    expected_help_message = """
group_1:
  group_1 desc

  --foo FOO, -f FOO  foo help
  --bar BAR, -b BAR  bar help

group_2:
  group_2 desc

  --doo DOO, -d DOO  doo help
  --baz BAZ, -z BAZ  baz help
""".strip()

    _CLI._parser.print_help()
    stdout, _ = capfd.readouterr()

    assert expected_help_message in stdout


def test_duplicate_group():
    group_1 = _CLI._add_groupparser("group_1", "group_1 desc")
    group_1.add_argument("--foo", "-f", help="foo help")

    group_2 = _CLI._add_groupparser("group_1", "group_2 desc")
    group_2.add_argument("--bar", "-b", help="bar help")

    # The title of group_2 is duplicated with group_1, and therefore
    # there will be no new group created
    assert len(_CLI._arg_groups) == 1
import pytest

from src.taipy.config.common._classproperty import _Classproperty


class TestClassProperty:
    def test_class_property(self):
        class Holder:
            @_Classproperty
            def test_property(cls):
                return "test_property"

        # Accessible both on the class and on an instance, without calling.
        assert Holder.test_property == "test_property"
        assert Holder().test_property == "test_property"

        # The property returns a string, so calling it raises TypeError.
        with pytest.raises(TypeError):
            Holder.test_property()
import os
from unittest import mock

import pytest

from src.taipy.config.config import Config
from src.taipy.config.exceptions.exceptions import ConfigurationUpdateBlocked


def test_global_config_with_env_variable_value():
    """ENV[...] templates in global properties resolve against os.environ."""
    with mock.patch.dict(os.environ, {"FOO": "bar", "BAZ": "qux"}):
        Config.configure_global_app(foo="ENV[FOO]", bar="ENV[BAZ]")
        assert Config.global_config.foo == "bar"
        assert Config.global_config.bar == "qux"


def test_default_global_app_config():
    global_config = Config.global_config
    assert global_config is not None
    assert not global_config.notification
    assert not global_config.properties


def test_block_update_global_app_config():
    Config.block_update()

    with pytest.raises(ConfigurationUpdateBlocked):
        Config.configure_global_app(foo="bar")
    with pytest.raises(ConfigurationUpdateBlocked):
        Config.global_config.properties = {"foo": "bar"}

    # The global config must have been left at its defaults.
    assert Config.global_config.foo is None
    assert not Config.global_config.properties
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
from importlib.util import find_spec

# Re-export the public API of every installed Taipy subpackage.
# Each subpackage is optional: it is only imported when its distribution is
# present, so a partial install (e.g. config without gui) still works.
if find_spec("taipy"):
    if find_spec("taipy.config"):
        from taipy.config._init import *  # type: ignore
    if find_spec("taipy.gui"):
        from taipy.gui._init import *  # type: ignore
    if find_spec("taipy.core"):
        from taipy.core._init import *  # type: ignore
    if find_spec("taipy.rest"):
        from taipy.rest._init import *  # type: ignore
    if find_spec("taipy.gui_core"):
        from taipy.gui_core._init import *  # type: ignore
    if find_spec("taipy.enterprise"):
        from taipy.enterprise._init import *  # type: ignore
    if find_spec("taipy._run"):
        # Expose the private runner under the public name `taipy.run`.
        from taipy._run import _run as run  # type: ignore
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
import logging.config
import os
import sys


class _TaipyLogger:
    """Factory for the single, lazily-configured "Taipy" application logger."""

    # Environment variable that may point to a logging.config file.
    _ENVIRONMENT_VARIABLE_NAME_WITH_LOGGER_CONFIG_PATH = "TAIPY_LOGGER_CONFIG_PATH"

    # Cached logger instance (created on first _get_logger call).
    __logger = None

    @classmethod
    def _get_logger(cls):
        """Return the cached "Taipy" logger, creating and configuring it on first use.

        If the TAIPY_LOGGER_CONFIG_PATH environment variable points to a
        logging configuration file, that file drives the configuration;
        otherwise an INFO-level stdout handler with a timestamped format is
        installed.
        """
        # Fix: the original re-assigned the class constant
        # _ENVIRONMENT_VARIABLE_NAME_WITH_LOGGER_CONFIG_PATH on every call,
        # which was redundant; the constant is now set once at class level.
        if cls.__logger:
            return cls.__logger
        if config_filename := os.environ.get(cls._ENVIRONMENT_VARIABLE_NAME_WITH_LOGGER_CONFIG_PATH):
            # Apply the file configuration before fetching the logger so the
            # configured settings take effect on it.
            logging.config.fileConfig(config_filename)
            cls.__logger = logging.getLogger("Taipy")
        else:
            cls.__logger = logging.getLogger("Taipy")
            cls.__logger.setLevel(logging.INFO)
            ch = logging.StreamHandler(sys.stdout)
            ch.setLevel(logging.INFO)
            formatter = logging.Formatter("[%(asctime)s][%(name)s][%(levelname)s] %(message)s", "%Y-%m-%d %H:%M:%S")
            ch.setFormatter(formatter)
            cls.__logger.addHandler(ch)
        return cls.__logger
import json
import os


def _get_version():
    """Read version.json (next to this module) and format it as a version string.

    Returns "major.minor.patch", with ".ext" appended when the optional
    "ext" field is present; missing numeric fields default to 0.
    """
    version_path = f"{os.path.dirname(os.path.abspath(__file__))}{os.sep}version.json"
    with open(version_path) as version_file:
        version = json.load(version_file)

    major = version.get("major", 0)
    minor = version.get("minor", 0)
    patch = version.get("patch", 0)
    version_string = f"{major}.{minor}.{patch}"
    if vext := version.get("ext"):
        version_string = f"{version_string}.{vext}"
    return version_string
from .config import Config from .common.frequency import Frequency from .common.scope import Scope