text
stringlengths 0
5.92k
|
|---|
# Taipy Core example: a skippable task reading/writing pickle-backed GLOBAL data nodes.
import taipy as tp
from taipy import Config, Scope


def example_algorithm(entry: str):
    # does nothing!
    return entry


# GLOBAL scope: these data nodes are shared by every scenario.
input_cfg = Config.configure_data_node(
    "input", path="input.pkl", scope=Scope.GLOBAL, default_data="A string"
)
output_cfg = Config.configure_data_node("output", path="output.pkl", scope=Scope.GLOBAL)

# skippable=True lets Taipy skip re-running the task when its inputs are unchanged.
task_cfg = Config.configure_task(
    "example_algorithm", example_algorithm, input_cfg, output_cfg, skippable=True
)
scenario_cfg = Config.configure_scenario("my_scenario", [task_cfg])

if __name__ == "__main__":
    tp.Core().run()
    tp.create_scenario(scenario_cfg)
    print(f"Number of scenarios: {len(tp.get_scenarios())}")
|
# Taipy Core example: attaching a custom property ("description") to a data node config.
import taipy as tp
from taipy import Config


def example_algorithm(entry: str):
    # does nothing!
    return entry


input_cfg = Config.configure_data_node("input", default_data="a_string")
# Extra keyword arguments become custom properties on the data node.
output_cfg = Config.configure_data_node("output", description="What a description")

task_cfg = Config.configure_task("example_algorithm", example_algorithm, input_cfg, output_cfg)
scenario_cfg = Config.configure_scenario("my_scenario", [task_cfg])

if __name__ == "__main__":
    tp.Core().run()
    tp.create_scenario(scenario_cfg)
    print(f"Number of scenarios: {len(tp.get_scenarios())}")
|
# Minimal Taipy Core example: one task, one scenario, count the scenarios created.
import taipy as tp
from taipy import Config


def example_algorithm(entry: str):
    # does nothing!
    return entry


input_cfg = Config.configure_data_node("input", default_data="a_string")
output_cfg = Config.configure_data_node("output")

task_cfg = Config.configure_task("example_algorithm", example_algorithm, input_cfg, output_cfg)
scenario_cfg = Config.configure_scenario("my_scenario", [task_cfg])

if __name__ == "__main__":
    tp.Core().run()
    tp.create_scenario(scenario_cfg)
    print(f"Number of scenarios: {len(tp.get_scenarios())}")
|
# Production-planning configuration: six data nodes, three tasks and one MONTHLY
# scenario with a comparator on the sales predictions and two sequences.
from datetime import datetime

import pandas as pd
from taipy import Config, Frequency, Scope


def write_orders_plan(data: pd.DataFrame):
    # Build the SQL statements the "orders" SQL data node executes to persist the plan.
    insert_data = data[["date", "product_id", "number_of_products"]].to_dict("records")
    return [
        "DELETE FROM orders",
        ("INSERT INTO orders VALUES (:date, :product_id, :number_of_products)", insert_data),
    ]


def train(sales_history: pd.DataFrame):
    print("Running training")
    return "TRAINED_MODEL"


def predict(model, current_month):
    print("Running predicting")
    return "SALES_PREDICTIONS"


def plan(sales_predictions, capacity):
    print("Running planning")
    return "PRODUCTION_ORDERS"


def compare(previous_month_prediction, current_month_prediction):
    print("Comparing previous month and current month sale predictions")
    return "COMPARISON_RESULT"


# Configure all six data nodes
sales_history_cfg = Config.configure_csv_data_node(
    id="sales_history", scope=Scope.GLOBAL, default_path="path/sales.csv"
)
trained_model_cfg = Config.configure_data_node(id="trained_model", scope=Scope.CYCLE)
current_month_cfg = Config.configure_data_node(
    id="current_month", scope=Scope.CYCLE, default_data=datetime(2020, 1, 1)
)
sales_predictions_cfg = Config.configure_data_node(id="sales_predictions", scope=Scope.CYCLE)
capacity_cfg = Config.configure_data_node(id="capacity")
orders_cfg = Config.configure_sql_data_node(
    id="orders",
    db_username="admin",
    db_password="ENV[PWD]",  # resolved from the environment, not hard-coded
    db_name="production_planning",
    db_engine="mssql",
    read_query="SELECT orders.ID, orders.date, products.price, orders.number_of_products FROM orders INNER JOIN products ON orders.product_id=products.ID",
    write_query_builder=write_orders_plan,
    db_driver="ODBC Driver 17 for SQL Server",
)

# Configure the three tasks
training_cfg = Config.configure_task("training", train, sales_history_cfg, [trained_model_cfg])
predicting_cfg = Config.configure_task(
    id="predicting",
    function=predict,
    input=[trained_model_cfg, current_month_cfg],
    output=sales_predictions_cfg,
)
planning_cfg = Config.configure_task(
    id="planning",
    function=plan,
    input=[sales_predictions_cfg, capacity_cfg],
    output=[orders_cfg],
)

# Configure the scenario
monthly_scenario_cfg = Config.configure_scenario(
    id="scenario_configuration",
    task_configs=[training_cfg, predicting_cfg, planning_cfg],
    frequency=Frequency.MONTHLY,
    comparators={sales_predictions_cfg.id: compare},
    sequences={"sales": [training_cfg, predicting_cfg], "production": [planning_cfg]},
)
|
# Run the Taipy GUI and REST services together in a single web application.
import taipy as tp

if __name__ == "__main__":
    gui = tp.Gui(page="# Getting started with *Taipy*")
    rest = tp.Rest()
    tp.run(gui, rest, title="Taipy application")
|
# Load a scenario configuration from config.toml, execute it once,
# then expose the scenario through a small GUI.
from taipy import Config
import taipy as tp
import pandas as pd
import datetime as dt

data = pd.read_csv(
    "https://raw.githubusercontent.com/Avaiga/taipy-getting-started-core/develop/src/daily-min-temperatures.csv"
)


# Normal function used by Taipy
def predict(historical_temperature: pd.DataFrame, date_to_forecast: str) -> float:
    print(f"Running baseline...")
    historical_temperature['Date'] = pd.to_datetime(historical_temperature['Date'])
    # Mean temperature observed on the same day/month across all past years.
    historical_same_day = historical_temperature.loc[
        (historical_temperature['Date'].dt.day == date_to_forecast.day) &
        (historical_temperature['Date'].dt.month == date_to_forecast.month)
    ]
    return historical_same_day['Temp'].mean()


Config.load('config.toml')

if __name__ == '__main__':
    scenario_cfg = Config.scenarios['my_scenario']

    # Run of the Core
    tp.Core().run()

    # Creation of the scenario and execution
    scenario = tp.create_scenario(scenario_cfg)
    scenario.historical_temperature.write(data)
    scenario.date_to_forecast.write(dt.datetime.now())
    tp.submit(scenario)

    print("Value at the end of task", scenario.predictions.read())

    def save(state):
        # Persist the GUI inputs into the selected scenario's data nodes.
        state.scenario.historical_temperature.write(data)
        state.scenario.date_to_forecast.write(state.date)
        tp.gui.notify(state, "s", "Saved! Ready to submit")

    date = None
    scenario_md = """
<|{scenario}|scenario_selector|>
<|{date}|date|on_change=save|active={scenario}|>
<|{scenario}|scenario|>
<|{scenario}|scenario_dag|>
<|Refresh|button|on_action={lambda s: s.assign("scenario", scenario)}|>
<|{scenario.predictions.read() if scenario else ''}|>
"""
    tp.Gui(scenario_md).run()
|
# Define the scenario configuration in code, export it to config.toml,
# execute it once, then expose the scenario through a small GUI.
from taipy import Config
import taipy as tp
import pandas as pd
import datetime as dt

data = pd.read_csv(
    "https://raw.githubusercontent.com/Avaiga/taipy-getting-started-core/develop/src/daily-min-temperatures.csv"
)


# Normal function used by Taipy
def predict(historical_temperature: pd.DataFrame, date_to_forecast: str) -> float:
    print(f"Running baseline...")
    historical_temperature['Date'] = pd.to_datetime(historical_temperature['Date'])
    historical_same_day = historical_temperature.loc[
        (historical_temperature['Date'].dt.day == date_to_forecast.day) &
        (historical_temperature['Date'].dt.month == date_to_forecast.month)
    ]
    return historical_same_day['Temp'].mean()


# Configuration of Data Nodes
historical_temperature_cfg = Config.configure_data_node("historical_temperature")
date_to_forecast_cfg = Config.configure_data_node("date_to_forecast")
predictions_cfg = Config.configure_data_node("predictions")

# Configuration of tasks
# Bug fix: the task config previously rebound `predictions_cfg`, shadowing the
# "predictions" data node config above; it now has its own name.
predict_task_cfg = Config.configure_task(
    "predict", predict, [historical_temperature_cfg, date_to_forecast_cfg], predictions_cfg
)

# Configuration of scenario
scenario_cfg = Config.configure_scenario(id="my_scenario", task_configs=[predict_task_cfg])

Config.export('config.toml')

if __name__ == '__main__':
    # Run of the Core
    tp.Core().run()

    # Creation of the scenario and execution
    scenario = tp.create_scenario(scenario_cfg)
    scenario.historical_temperature.write(data)
    scenario.date_to_forecast.write(dt.datetime.now())
    tp.submit(scenario)

    print("Value at the end of task", scenario.predictions.read())

    def save(state):
        # Persist the GUI inputs into the selected scenario's data nodes.
        state.scenario.historical_temperature.write(data)
        state.scenario.date_to_forecast.write(state.date)
        tp.gui.notify(state, "s", "Saved! Ready to submit")

    date = None
    scenario_md = """
<|{scenario}|scenario_selector|>

Put a Date
<|{date}|date|on_change=save|active={scenario}|>

Run the scenario
<|{scenario}|scenario|>
<|{scenario}|scenario_dag|>

View all the information on your prediction here
<|{scenario.predictions if scenario else None}|data_node|>
"""
    tp.Gui(scenario_md).run()
|
# Cycles/primary-scenario example: load the configuration from config.toml,
# create two scenarios in the same cycle, swap the primary one, show in a GUI.
from taipy.config import Config, Frequency, Scope
import taipy as tp
import datetime as dt
import pandas as pd


def filter_by_month(df, month):
    df['Date'] = pd.to_datetime(df['Date'])
    df = df[df['Date'].dt.month == month]
    return df


Config.load('config.toml')
scenario_cfg = Config.scenarios["my_scenario"]

if __name__ == '__main__':
    tp.Core().run()

    # Both scenarios fall in the same monthly cycle (October 2022).
    scenario_1 = tp.create_scenario(scenario_cfg, creation_date=dt.datetime(2022, 10, 7), name="Scenario 2022/10/7")
    scenario_2 = tp.create_scenario(scenario_cfg, creation_date=dt.datetime(2022, 10, 5), name="Scenario 2022/10/5")

    scenario_1.month.write(10)

    print("Month Data Node of Scenario 1:", scenario_1.month.read())
    print("Month Data Node of Scenario 2:", scenario_2.month.read())

    scenario_1.submit()

    before_set_1 = scenario_1.is_primary
    before_set_2 = scenario_2.is_primary
    tp.set_primary(scenario_2)

    print('Scenario 1: Primary?', before_set_1, scenario_1.is_primary)
    print('Scenario 2: Primary?', before_set_2, scenario_2.is_primary)

    scenario = None
    data_node = None
    tp.Gui("""<|{scenario}|scenario_selector|>
<|{scenario}|scenario|>
<|{scenario}|scenario_dag|>
<|{data_node}|data_node_selector|>""").run()
|
# Cycles/primary-scenario example: configuration defined in code and exported.
from taipy.config import Config, Frequency, Scope
import taipy as tp
import datetime as dt
import pandas as pd


def filter_by_month(df, month):
    df['Date'] = pd.to_datetime(df['Date'])
    df = df[df['Date'].dt.month == month]
    return df


historical_data_cfg = Config.configure_csv_data_node(
    id="historical_data", default_path="time_series.csv", scope=Scope.GLOBAL
)
month_cfg = Config.configure_data_node(id="month", scope=Scope.CYCLE)
month_values_cfg = Config.configure_data_node(id="month_data", scope=Scope.CYCLE)

task_filter_cfg = Config.configure_task(
    id="filter_by_month",
    function=filter_by_month,
    input=[historical_data_cfg, month_cfg],
    output=month_values_cfg,
)

scenario_cfg = Config.configure_scenario(
    id="my_scenario", task_configs=[task_filter_cfg], frequency=Frequency.MONTHLY
)

# Bug fix: the export path was missing the leading "c" ('step_04/onfig.toml').
Config.export('step_04/config.toml')

if __name__ == '__main__':
    tp.Core().run()

    # Both scenarios fall in the same monthly cycle (October 2022).
    scenario_1 = tp.create_scenario(scenario_cfg, creation_date=dt.datetime(2022, 10, 7), name="Scenario 2022/10/7")
    scenario_2 = tp.create_scenario(scenario_cfg, creation_date=dt.datetime(2022, 10, 5), name="Scenario 2022/10/5")

    scenario_1.month.write(10)

    print("Month Data Node of Scenario 1:", scenario_1.month.read())
    print("Month Data Node of Scenario 2:", scenario_2.month.read())

    scenario_1.submit()

    before_set_1 = scenario_1.is_primary
    before_set_2 = scenario_2.is_primary
    tp.set_primary(scenario_2)

    print('Scenario 1: Primary?', before_set_1, scenario_1.is_primary)
    print('Scenario 2: Primary?', before_set_2, scenario_2.is_primary)

    scenario = None
    data_node = None
    tp.Gui("""<|{scenario}|scenario_selector|>
<|{scenario}|scenario|>
<|{scenario}|scenario_dag|>
<|{data_node}|data_node_selector|>""").run()
|
# Standalone job execution: two workers run submitted jobs in parallel.
# The scenario configuration itself is loaded from config.toml.
from taipy.core.config import Config
import taipy as tp
import datetime as dt
import pandas as pd
import time


# Normal function used by Taipy
def double(nb):
    return nb * 2


def add(nb):
    print("Wait 10 seconds in add function")
    time.sleep(10)
    return nb + 10


Config.load('config.toml')
Config.configure_job_executions(mode="standalone", max_nb_of_workers=2)

if __name__ == "__main__":
    scenario_cfg = Config.scenarios['my_scenario']
    tp.Core().run()

    scenario_1 = tp.create_scenario(scenario_cfg)
    scenario_2 = tp.create_scenario(scenario_cfg)

    # Both submissions run concurrently on the two workers.
    scenario_1.submit()
    scenario_2.submit()

    scenario_1 = tp.create_scenario(scenario_cfg)
    scenario_1.submit(wait=True)
    scenario_1.submit(wait=True, timeout=5)
|
# Standalone job execution: two workers run submitted jobs in parallel.
# The scenario configuration is defined in code and exported to config.toml.
from taipy.core.config import Config
import taipy as tp
import datetime as dt
import pandas as pd
import time


# Normal function used by Taipy
def double(nb):
    return nb * 2


def add(nb):
    print("Wait 10 seconds in add function")
    time.sleep(10)
    return nb + 10


Config.configure_job_executions(mode="standalone", max_nb_of_workers=2)

# Configuration of Data Nodes
input_cfg = Config.configure_data_node("input", default_data=21)
intermediate_cfg = Config.configure_data_node("intermediate", default_data=21)
output_cfg = Config.configure_data_node("output")

# Configuration of tasks
first_task_cfg = Config.configure_task("double", double, input_cfg, intermediate_cfg)
second_task_cfg = Config.configure_task("add", add, intermediate_cfg, output_cfg)

# Configuration of the scenario
scenario_cfg = Config.configure_scenario(id="my_scenario", task_configs=[first_task_cfg, second_task_cfg])

Config.export("config.toml")

if __name__ == "__main__":
    tp.Core().run()

    scenario_1 = tp.create_scenario(scenario_cfg)
    scenario_2 = tp.create_scenario(scenario_cfg)

    # Both submissions run concurrently on the two workers.
    scenario_1.submit()
    scenario_2.submit()

    scenario_1 = tp.create_scenario(scenario_cfg)
    scenario_1.submit(wait=True)
    scenario_1.submit(wait=True, timeout=5)
|
# Scenario comparison example: compare the "output" data node across scenarios.
from taipy.core.config import Config
import taipy as tp


# Normal function used by Taipy
def double(nb):
    return nb * 2


def add(nb):
    return nb + 10


# Configuration of Data Nodes
input_cfg = Config.configure_data_node("input", default_data=21)
intermediate_cfg = Config.configure_data_node("intermediate")
output_cfg = Config.configure_data_node("output")

# Configuration of tasks
first_task_cfg = Config.configure_task("double", double, input_cfg, intermediate_cfg)
second_task_cfg = Config.configure_task("add", add, intermediate_cfg, output_cfg)


def compare_function(*data_node_results):
    # example of function: pairwise differences between every pair of results
    compare_result = {}
    for current_res_i, current_res in enumerate(data_node_results):
        compare_result[current_res_i] = {}
        for next_res_i, next_res in enumerate(data_node_results):
            print(f"comparing result {current_res_i} with result {next_res_i}")
            compare_result[current_res_i][next_res_i] = next_res - current_res
    return compare_result


scenario_cfg = Config.configure_scenario(
    id="multiply_scenario",
    name="my_scenario",
    task_configs=[first_task_cfg, second_task_cfg],
    comparators={output_cfg.id: compare_function},
)

Config.export("config_08.toml")

if __name__ == "__main__":
    tp.Core().run()

    scenario_1 = tp.create_scenario(scenario_cfg)
    scenario_2 = tp.create_scenario(scenario_cfg)

    scenario_1.input.write(10)
    scenario_2.input.write(8)

    scenario_1.submit()
    scenario_2.submit()

    print(tp.compare_scenarios(scenario_1, scenario_2))
|
# Taipy GUI example: a button callback plus the global on_change hook.
from taipy.gui import Gui, notify

text = "Original text"

# Definition of the page
page = """
# Getting started with Taipy GUI

My text: <|{text}|>

<|{text}|input|>

<|Run local|button|on_action=on_button_action|>
"""


def on_button_action(state):
    notify(state, 'info', f'The text is: {state.text}')
    state.text = "Button Pressed"


def on_change(state, var_name, var_value):
    # Reset the input when the user literally types "Reset".
    if var_name == "text" and var_value == "Reset":
        state.text = ""
    return


Gui(page).run(debug=True)
|
# Sentiment-analysis GUI: one page analyzes typed text, a second page analyzes
# an uploaded .txt file sentence by sentence, using a RoBERTa sentiment model.
from transformers import AutoTokenizer
from transformers import AutoModelForSequenceClassification
from scipy.special import softmax
import numpy as np
import pandas as pd
from taipy.gui import Gui, notify

text = "Original text"

page = """
# Getting started with Taipy GUI

<|layout|columns=1 1|
<|
My text: <|{text}|>

Enter a word:
<|{text}|input|>
<|Analyze|button|on_action=local_callback|>
|>
<|Table|expandable|
<|{dataframe}|table|width=100%|number_format=%.2f|>
|>
|>

<|layout|columns=1 1 1|
## Positive <|{np.mean(dataframe['Score Pos'])}|text|format=%.2f|raw|>

## Neutral <|{np.mean(dataframe['Score Neu'])}|text|format=%.2f|raw|>

## Negative <|{np.mean(dataframe['Score Neg'])}|text|format=%.2f|raw|>
|>

<|{dataframe}|chart|type=bar|x=Text|y[1]=Score Pos|y[2]=Score Neu|y[3]=Score Neg|y[4]=Overall|color[1]=green|color[2]=grey|color[3]=red|type[4]=line|>
"""

MODEL = f"cardiffnlp/twitter-roberta-base-sentiment"
tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModelForSequenceClassification.from_pretrained(MODEL)

dataframe = pd.DataFrame({
    "Text": [''],
    "Score Pos": [0.33],
    "Score Neu": [0.33],
    "Score Neg": [0.33],
    "Overall": [0],
})
dataframe2 = dataframe.copy()


def analyze_text(text):
    # Run for Roberta Model
    encoded_text = tokenizer(text, return_tensors='pt')
    output = model(**encoded_text)
    scores = output[0][0].detach().numpy()
    scores = softmax(scores)

    # Text is truncated to 50 characters for display in the table/chart.
    return {
        "Text": text[:50],
        "Score Pos": scores[2],
        "Score Neu": scores[1],
        "Score Neg": scores[0],
        "Overall": scores[2] - scores[0],
    }


def local_callback(state):
    notify(state, 'Info', f'The text is: {state.text}', True)

    temp = state.dataframe.copy()
    scores = analyze_text(state.text)
    temp.loc[len(temp)] = scores
    state.dataframe = temp
    state.text = ""


path = ""
treatment = 0

page_file = """
<|{path}|file_selector|extensions=.txt|label=Upload .txt file|on_action=analyze_file|>

<|{f'Downloading {treatment}%...'}|>

<br/>

<|Table|expandable|
<|{dataframe2}|table|width=100%|number_format=%.2f|>
|>

<br/>

<|{dataframe2}|chart|type=bar|x=Text|y[1]=Score Pos|y[2]=Score Neu|y[3]=Score Neg|y[4]=Overall|color[1]=green|color[2]=grey|color[3]=red|type[4]=line|height=600px|>
"""


def analyze_file(state):
    state.dataframe2 = dataframe2
    state.treatment = 0

    with open(state.path, "r", encoding='utf-8') as f:
        data = f.read()
        # split lines and eliminates duplicates
        file_list = list(dict.fromkeys(data.replace('\n', ' ').split(".")[:-1]))

    for i in range(len(file_list)):
        text = file_list[i]
        # `treatment` drives the "Downloading x%" progress indicator.
        state.treatment = int((i + 1) * 100 / len(file_list))
        temp = state.dataframe2.copy()
        scores = analyze_text(text)
        temp.loc[len(temp)] = scores
        state.dataframe2 = temp

    state.path = None


pages = {
    "/": "<|toggle|theme|>\n<center>\n<|navbar|>\n</center>",
    "line": page,
    "text": page_file,
}

Gui(pages=pages).run(debug=True)
|
# Sentiment-analysis GUI (single page) with a two-column layout and summary stats.
from transformers import AutoTokenizer
from transformers import AutoModelForSequenceClassification
from scipy.special import softmax
import numpy as np
import pandas as pd
from taipy.gui import Gui, notify

text = "Original text"

page = """
# Getting started with Taipy GUI

<|layout|columns=1 1|
<|
My text: <|{text}|>

Enter a word:
<|{text}|input|>
<|Analyze|button|on_action=local_callback|>
|>
<|Table|expandable|
<|{dataframe}|table|width=100%|number_format=%.2f|>
|>
|>

<|layout|columns=1 1 1|
## Positive <|{float(np.mean(dataframe['Score Pos']))}|text|format=%.2f|raw|>%

## Neutral <|{float(np.mean(dataframe['Score Neu']))}|text|format=%.2f|raw|>%

## Negative <|{float(np.mean(dataframe['Score Neg']))}|text|format=%.2f|raw|>%
|>

<br/>

<|{dataframe}|chart|type=bar|x=Text|y[1]=Score Pos|y[2]=Score Neu|y[3]=Score Neg|y[4]=Overall|color[1]=green|color[2]=grey|color[3]=red|type[4]=line|>
"""

MODEL = f"cardiffnlp/twitter-roberta-base-sentiment"
tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModelForSequenceClassification.from_pretrained(MODEL)

dataframe = pd.DataFrame({
    "Text": [''],
    "Score Pos": [0.33],
    "Score Neu": [0.33],
    "Score Neg": [0.33],
    "Overall": [0],
})


def analyze_text(text):
    # Run for Roberta Model
    encoded_text = tokenizer(text, return_tensors='pt')
    output = model(**encoded_text)
    scores = output[0][0].detach().numpy()
    scores = softmax(scores)

    return {
        "Text": text,
        "Score Pos": scores[2],
        "Score Neu": scores[1],
        "Score Neg": scores[0],
        "Overall": scores[2] - scores[0],
    }


def local_callback(state):
    notify(state, 'Info', f'The text is: {state.text}', True)

    temp = state.dataframe.copy()
    scores = analyze_text(state.text)
    temp.loc[len(temp)] = scores
    state.dataframe = temp
    state.text = ""


Gui(page).run(debug=True)
|
# Minimal Taipy GUI example: a bound variable displayed and edited via an input.
from taipy.gui import Gui

text = "Original text"

page = """
# Getting started with Taipy GUI

My text: <|{text}|>

<|{text}|input|>
"""

Gui(page).run(debug=True)
|
# Sentiment-analysis GUI (simple single page) using a RoBERTa sentiment model.
from transformers import AutoTokenizer
from transformers import AutoModelForSequenceClassification
from scipy.special import softmax
import numpy as np
import pandas as pd
from taipy.gui import Gui, notify

text = "Original text"

MODEL = f"cardiffnlp/twitter-roberta-base-sentiment"
tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModelForSequenceClassification.from_pretrained(MODEL)

dataframe = pd.DataFrame({
    "Text": [''],
    "Score Pos": [0.33],
    "Score Neu": [0.33],
    "Score Neg": [0.33],
    "Overall": [0],
})


# Torch is, for now, only available for the Python version between 3.8 and 3.10.
# If you cannot install these packages, just return a dictionary of random numbers
# for the `analyze_text(text).`
def analyze_text(text):
    # Run for Roberta Model
    encoded_text = tokenizer(text, return_tensors='pt')
    output = model(**encoded_text)
    scores = output[0][0].detach().numpy()
    scores = softmax(scores)

    return {
        "Text": text,
        "Score Pos": scores[2],
        "Score Neu": scores[1],
        "Score Neg": scores[0],
        "Overall": scores[2] - scores[0],
    }


def local_callback(state):
    notify(state, 'Info', f'The text is: {state.text}', True)

    temp = state.dataframe.copy()
    scores = analyze_text(state.text)
    temp.loc[len(temp)] = scores
    state.dataframe = temp
    state.text = ""


page = """
<|toggle|theme|>

# Getting started with Taipy GUI

My text: <|{text}|>

Enter a word:
<|{text}|input|>
<|Analyze|button|on_action=local_callback|>

## Positive <|{float(np.mean(dataframe['Score Pos']))}|text|format=%.2f|>%
## Neutral <|{float(np.mean(dataframe['Score Neu']))}|text|format=%.2f|>%
## Negative <|{float(np.mean(dataframe['Score Neg']))}|text|format=%.2f|>%

<|{dataframe}|table|number_format=%.2f|>

<|{dataframe}|chart|type=bar|x=Text|y[1]=Score Pos|y[2]=Score Neu|y[3]=Score Neg|y[4]=Overall|color[1]=green|color[2]=grey|color[3]=red|type[4]=line|>
"""

Gui(page).run(debug=True)
|
# Smallest possible Taipy GUI application: a single Markdown page.
from taipy import Gui

Gui(page="# Getting started with *Taipy*").run(debug=True)
|
# Taipy GUI example: a table and chart bound to a DataFrame that a button callback grows.
import pandas as pd
from taipy.gui import Gui, notify

text = "Original text"

page = """
<|toggle|theme|>

# Getting started with Taipy GUI

My text: <|{text}|>

<|{text}|input|>
<|Analyze|button|on_action=local_callback|>

<|{dataframe}|table|number_format=%.2f|>

<|{dataframe}|chart|type=bar|x=Text|y[1]=Score Pos|y[2]=Score Neu|y[3]=Score Neg|y[4]=Overall|color[1]=green|color[2]=grey|color[3]=red|type[4]=line|>
"""

dataframe = pd.DataFrame({
    "Text": ['Test', 'Other', 'Love'],
    "Score Pos": [1, 1, 4],
    "Score Neu": [2, 3, 1],
    "Score Neg": [1, 2, 0],
    "Overall": [0, -1, 4],
})


def local_callback(state):
    notify(state, 'info', f'The text is: {state.text}')

    # Append a placeholder row for the typed text, then rebind to refresh the GUI.
    temp = state.dataframe.copy()
    temp.loc[len(temp)] = {"Text": state.text, "Score Pos": 0, "Score Neu": 0, "Score Neg": 0, "Overall": 0}
    state.dataframe = temp
    state.text = ""


Gui(page).run(debug=True)
|
# Job/submission status example: subscribe a callback to a scenario's jobs and
# react to submission status changes from the GUI.
from taipy.config import Config
from taipy.core import Status
import taipy as tp
import time


# Normal function used by Taipy
def double(nb):
    return nb * 2


def add(nb):
    return nb + 10


# Configuration of Data Nodes
input_cfg = Config.configure_data_node("input", default_data=21)
intermediate_cfg = Config.configure_data_node("intermediate")
output_cfg = Config.configure_data_node("output")

# Configuration of tasks
first_task_cfg = Config.configure_task("double", double, input_cfg, intermediate_cfg)
second_task_cfg = Config.configure_task("add", add, intermediate_cfg, output_cfg)


def callback_scenario_state(scenario, job):
    """All the scenarios are subscribed to the callback_scenario_state function.

    It means whenever a job is done, it is called. Depending on the job and the
    status, it will update the message stored in a json that is then displayed
    on the GUI.

    Args:
        scenario (Scenario): the scenario of the job changed
        job (_type_): the job that has its status changed
    """
    print(f'{job.id} to {job.status}')

    if job.status == Status.COMPLETED:
        for data_node in job.task.output.values():
            print("Data node value:", data_node.read())


# Configuration of scenario
scenario_cfg = Config.configure_scenario(
    id="my_scenario", task_configs=[first_task_cfg, second_task_cfg], name="my_scenario"
)

if __name__ == "__main__":
    tp.Core().run()
    scenario_1 = tp.create_scenario(scenario_cfg)
    scenario_1.subscribe(callback_scenario_state)
    scenario_1.submit(wait=True)


from taipy.gui import Gui, notify


def on_submission_status_change(state=None, submittable=None, details=None):
    submission_status = details.get('submission_status')

    if submission_status == 'COMPLETED':
        print(f"{submittable.name} has completed.")
        notify(state, 'success', 'Completed!')
        # Add additional actions here, like updating the GUI or logging the completion.
    elif submission_status == 'FAILED':
        print(f"{submittable.name} has failed.")
        # Bug fix: the failure branch previously notified 'Completed!'.
        notify(state, 'error', 'Failed!')
        # Handle failure, like sending notifications or logging the error.
    # Add more conditions for other statuses as needed.


if __name__ == "__main__":
    scenario_md = """
<|{scenario_1}|scenario|on_submission_change=on_submission_status_change|>
"""
    Gui(scenario_md).run()
|
""" A multi-page Taipy application, which includes 3 pages: - A rootpage which is shared by other pages. - Two pages named page_1 and page_2. Please refer to ../../manuals/gui/pages for more details. """ from pages import data_viz, scenario_page, performance from pages.root import * from configuration.config import * from taipy.gui import Gui import taipy as tp def on_change(state, var_name: str, var_value): state['scenario'].on_change(state, var_name, var_value) pages = { "/": root_page, "data_viz": data_viz, "scenario": scenario_page, "performance": performance } if __name__ == "__main__": tp.Core().run() gui = Gui(pages=pages) gui.run(title="Taipy Application")
|
# Scenario configuration for the forecasting demo: data nodes, tasks and a
# WEEKLY scenario comparing a baseline predictor against an ML predictor.
import datetime as dt

import pandas as pd
from taipy import Config, Scope, Frequency
from algorithms.algorithms import *

path_to_csv = "data/dataset.csv"

# Datanodes (3.1)
## Input Data Nodes
initial_dataset_cfg = Config.configure_data_node(id="initial_dataset",
                                                 storage_type="csv",
                                                 path=path_to_csv,
                                                 scope=Scope.GLOBAL)

# We assume the current day is the 26th of July 2021.
# This day can be changed to simulate multiple executions of scenarios on different days
day_cfg = Config.configure_data_node(id="day", default_data=dt.datetime(2021, 7, 26))

n_predictions_cfg = Config.configure_data_node(id="n_predictions", default_data=40)

max_capacity_cfg = Config.configure_data_node(id="max_capacity", default_data=200)

## Remaining Data Nodes
cleaned_dataset_cfg = Config.configure_data_node(id="cleaned_dataset", scope=Scope.GLOBAL)

predictions_baseline_cfg = Config.configure_data_node(id="predictions_baseline")
predictions_ml_cfg = Config.configure_data_node(id="predictions_ml")
full_predictions_cfg = Config.configure_data_node(id="full_predictions")

metrics_baseline_cfg = Config.configure_data_node(id="metrics_baseline")
metrics_ml_cfg = Config.configure_data_node(id="metrics_ml")

# Functions (3.2) are imported from algorithms.algorithms

# Tasks (3.3)
clean_data_task_cfg = Config.configure_task(id="task_clean_data",
                                            function=clean_data,
                                            input=initial_dataset_cfg,
                                            output=cleaned_dataset_cfg,
                                            skippable=True)

predict_baseline_task_cfg = Config.configure_task(id="predict_baseline",
                                                  function=predict_baseline,
                                                  input=[cleaned_dataset_cfg, n_predictions_cfg,
                                                         day_cfg, max_capacity_cfg],
                                                  output=predictions_baseline_cfg)

predict_ml_task_cfg = Config.configure_task(id="task_predict_ml",
                                            function=predict_ml,
                                            input=[cleaned_dataset_cfg, n_predictions_cfg,
                                                   day_cfg, max_capacity_cfg],
                                            output=predictions_ml_cfg)

metrics_baseline_task_cfg = Config.configure_task(id="task_metrics_baseline",
                                                  function=compute_metrics,
                                                  input=[cleaned_dataset_cfg, predictions_baseline_cfg],
                                                  output=metrics_baseline_cfg)

metrics_ml_task_cfg = Config.configure_task(id="task_metrics_ml",
                                            function=compute_metrics,
                                            input=[cleaned_dataset_cfg, predictions_ml_cfg],
                                            output=metrics_ml_cfg)

full_predictions_task_cfg = Config.configure_task(id="task_full_predictions",
                                                  function=create_predictions_dataset,
                                                  input=[predictions_baseline_cfg, predictions_ml_cfg,
                                                         day_cfg, n_predictions_cfg, cleaned_dataset_cfg],
                                                  output=full_predictions_cfg)

# Configure our scenario which is our business problem.
scenario_cfg = Config.configure_scenario(id="scenario",
                                         task_configs=[clean_data_task_cfg,
                                                       predict_baseline_task_cfg,
                                                       predict_ml_task_cfg,
                                                       metrics_baseline_task_cfg,
                                                       metrics_ml_task_cfg,
                                                       full_predictions_task_cfg],
                                         frequency=Frequency.WEEKLY)
|
# To make things clear, we've opted for an AutoRegressive model instead of a pure
# ML model like: Random Forest, Linear Regression, LSTM, etc
from statsmodels.tsa.ar_model import AutoReg
import pandas as pd
from sklearn.metrics import mean_squared_error, mean_absolute_error
import numpy as np
import datetime as dt


def clean_data(initial_dataset: pd.DataFrame):
    """Return a copy of the dataset with the 'Date' column parsed as datetime."""
    print(" Cleaning data")
    # Convert the date column to datetime
    initial_dataset['Date'] = pd.to_datetime(initial_dataset['Date'])
    cleaned_dataset = initial_dataset.copy()
    return cleaned_dataset


def predict_baseline(cleaned_dataset: pd.DataFrame, n_predictions: int, day: dt.datetime, max_capacity: int):
    """Naive forecast: repeat the last n observed values, capped at max_capacity."""
    print(" Predicting baseline")
    # Select the train data
    train_dataset = cleaned_dataset[cleaned_dataset['Date'] < day]

    predictions = train_dataset['Value'][-n_predictions:].reset_index(drop=True)
    predictions = predictions.apply(lambda x: min(x, max_capacity))
    return predictions


# This is the function that will be used by the task
def predict_ml(cleaned_dataset: pd.DataFrame, n_predictions: int, day: dt.datetime, max_capacity: int):
    """AutoRegressive forecast of the next n values, capped at max_capacity."""
    print(" Predicting with ML")
    # Select the train data
    train_dataset = cleaned_dataset[cleaned_dataset["Date"] < day]

    # Fit the AutoRegressive model
    model = AutoReg(train_dataset["Value"], lags=7).fit()

    # Get the n_predictions forecasts
    predictions = model.forecast(n_predictions).reset_index(drop=True)
    predictions = predictions.apply(lambda x: min(x, max_capacity))
    return predictions


def compute_metrics(historical_data, predicted_data):
    """Return (rmse, mae) between the tail of the historical series and the predictions."""
    historical_to_compare = historical_data[-len(predicted_data):]['Value']
    # Bug fix: mean_squared_error returns the MSE; take the square root so the
    # value named `rmse` really is the root-mean-squared error.
    rmse = mean_squared_error(historical_to_compare, predicted_data) ** 0.5
    mae = mean_absolute_error(historical_to_compare, predicted_data)
    return rmse, mae


def create_predictions_dataset(predictions_baseline, predictions_ml, day, n_predictions, cleaned_data):
    """Assemble a display DataFrame with historical values and both forecasts aligned on dates."""
    print("Creating predictions dataset...")

    # Create the historical dataset that will be displayed
    new_length = len(cleaned_data[cleaned_data["Date"] < day]) + n_predictions
    historical_data = cleaned_data[:new_length].reset_index(drop=True)

    # Forecast columns are NaN everywhere except over the last n_predictions rows.
    create_series = lambda data, name: pd.Series([np.NaN] * (len(historical_data)), name=name).fillna(
        {i: val for i, val in enumerate(data, len(historical_data) - n_predictions)}
    )

    predictions_dataset = pd.concat([
        historical_data["Date"],
        historical_data["Value"].rename("Historical values"),
        create_series(predictions_ml, "Predicted values ML"),
        create_series(predictions_baseline, "Predicted values Baseline"),
    ], axis=1)

    return predictions_dataset
|
# Re-export the page objects so the app can do `from pages import ...`.
from .data_viz import data_viz
from .scenario import scenario_page
from .performance import performance
from .root import root_page
|
""" The rootpage of the application. Page content is imported from the root.md file. Please refer to ../../manuals/gui/pages for more details. """ from taipy.gui import Markdown root_page = Markdown("pages/root.md")
|
# Re-export the scenario page for `from pages import scenario_page`.
from .scenario import scenario_page
|
""" The second page of the application. Page content is imported from the page_2.md file. Please refer to ../../manuals/gui/pages for more details. """ from taipy.gui import Markdown, notify import datetime as dt import pandas as pd scenario = None data_node = None day = dt.datetime(2021, 7, 26) n_predictions = 40 max_capacity = 200 predictions_dataset = {"Date":[dt.datetime(2021, 7, 26)], "Predicted values ML":[0], "Predicted values Baseline":[0], "Historical values":[0]} def submission_change(state, submittable, details: dict): if details['submission_status'] == 'COMPLETED': notify(state, "success", 'Scenario completed!') state['scenario'].on_change(state, 'scenario', state.scenario) else: notify(state, "error", 'Something went wrong!') def save(state): print("Saving scenario...") # Get the currently selected scenario # Conversion to the right format state_day = dt.datetime(state.day.year, state.day.month, state.day.day) # Change the default parameters by writing in the Data Nodes state.scenario.day.write(state_day) state.scenario.n_predictions.write(int(state.n_predictions)) state.scenario.max_capacity.write(int(state.max_capacity)) notify(state, "success", "Saved!") def on_change(state, var_name, var_value): if var_name == "scenario" and var_value: state.day = state.scenario.day.read() state.n_predictions = state.scenario.n_predictions.read() state.max_capacity = state.scenario.max_capacity.read() if state.scenario.full_predictions.is_ready_for_reading: state.predictions_dataset = state.scenario.full_predictions.read()[-200:] else: state.predictions_dataset = predictions_dataset scenario_page = Markdown("pages/scenario/scenario.md")
|
from .data_viz import data_viz
|
""" The first page of the application. Page content is imported from the page_1.md file. Please refer to ../../manuals/gui/pages for more details. """ from taipy.gui import Markdown import pandas as pd def get_data(path_to_csv: str): # pandas.read_csv() returns a pd.DataFrame dataset = pd.read_csv(path_to_csv) dataset["Date"] = pd.to_datetime(dataset["Date"]) return dataset # Read the dataframe path_to_csv = "data/dataset.csv" dataset = get_data(path_to_csv) # Initial value n_week = 10 # Select the week based on the slider value dataset_week = dataset[dataset["Date"].dt.isocalendar().week == n_week] def on_slider(state): state.dataset_week = dataset[dataset["Date"].dt.isocalendar().week == state.n_week] data_viz = Markdown("pages/data_viz/data_viz.md")
|
from .performance import performance
|
from taipy.gui import Markdown
import pandas as pd
import taipy as tp

# Initial (empty) dataset for the scenario comparison table.
comparison_scenario = pd.DataFrame(columns=["Scenario Name", "RMSE baseline", "MAE baseline", "RMSE ML", "MAE ML"])

# Selector for metrics shown in the GUI.
metric_selector = ["RMSE", "MAE"]
selected_metric = metric_selector[0]


def compare(state):
    """Collect baseline/ML metrics from every primary scenario into a table.

    Results are written to ``state.comparison_scenario`` for display.
    """
    print("Comparing...")
    # Initialize lists for comparison
    scenario_data = []
    # Go through all the primary scenarios, oldest first (sorted by creation date).
    all_scenarios = sorted(tp.get_primary_scenarios(), key=lambda x: x.creation_date.timestamp())
    for scenario in all_scenarios:
        # Each metrics data node holds an (RMSE, MAE) pair.
        rmse_baseline, mae_baseline = scenario.metrics_baseline.read()
        rmse_ml, mae_ml = scenario.metrics_ml.read()
        # Store scenario data in a dictionary
        scenario_data.append({
            "Scenario Name": scenario.name,
            "RMSE baseline": rmse_baseline,
            "MAE baseline": mae_baseline,
            "RMSE ML": rmse_ml,
            "MAE ML": mae_ml
        })
    # Create a DataFrame from the scenario_data list
    state.comparison_scenario = pd.DataFrame(scenario_data)


performance = Markdown("pages/performance/performance.md")
|
import taipy as tp
from taipy import Config, Core, Gui

################################################################
#            Configure application                             #
################################################################
def build_message(name):
    """Build the greeting message displayed by the application."""
    return f"Hello {name}!"


# A first data node configuration to model an input name.
input_name_data_node_cfg = Config.configure_data_node(id="input_name")
# A second data node configuration to model the message to display.
message_data_node_cfg = Config.configure_data_node(id="message")
# A task configuration to model the build_message function.
build_msg_task_cfg = Config.configure_task("build_msg", build_message, input_name_data_node_cfg, message_data_node_cfg)
# The scenario configuration represents the whole execution graph.
scenario_cfg = Config.configure_scenario("scenario", task_configs=[build_msg_task_cfg])

################################################################
#            Design graphical interface                        #
################################################################
input_name = "Taipy"
message = None


def submit_scenario(state):
    """Button callback: run the scenario with the typed name and show the result.

    All accesses go through the client's state-bound scenario. The original
    last line read the module-level ``scenario`` global, inconsistent with
    the two ``state.scenario`` writes above it.
    """
    state.scenario.input_name.write(state.input_name)
    state.scenario.submit()
    state.message = state.scenario.message.read()


page = """
Name: <|{input_name}|input|>
<|submit|button|on_action=submit_scenario|>
Message: <|{message}|text|>
"""

if __name__ == "__main__":
    ################################################################
    #            Instantiate and run Core service                  #
    ################################################################
    Core().run()

    ################################################################
    #            Manage scenarios and data nodes                   #
    ################################################################
    scenario = tp.create_scenario(scenario_cfg)

    ################################################################
    #            Instantiate and run Gui service                   #
    ################################################################
    Gui(page).run()
|
import taipy as tp
from taipy import Config, Core

################################################################
#            Configure application                             #
################################################################
def build_message(name):
    """Build the greeting message for the given name."""
    return f"Hello {name}!"


# A first data node configuration to model an input name.
input_name_data_node_cfg = Config.configure_data_node(id="input_name")
# A second data node configuration to model the message to display.
message_data_node_cfg = Config.configure_data_node(id="message")
# A task configuration to model the build_message function.
build_msg_task_cfg = Config.configure_task("build_msg", build_message, input_name_data_node_cfg, message_data_node_cfg)
# The scenario configuration represents the whole execution graph.
scenario_cfg = Config.configure_scenario("scenario", task_configs=[build_msg_task_cfg])

if __name__ == "__main__":
    ################################################################
    #            Instantiate and run Core service                  #
    ################################################################
    Core().run()

    ################################################################
    #            Manage scenarios and data nodes                   #
    ################################################################
    # Create a scenario, feed its input, execute it, and print the output.
    hello_scenario = tp.create_scenario(scenario_cfg)
    hello_scenario.input_name.write("Taipy")
    hello_scenario.submit()
    print(hello_scenario.message.read())
|
import taipy as tp
from taipy import Gui
from taipy import Config
from taipy import Core

# Configure application
def build_message(name: str) -> str:
    """Build the message echoed back to the user."""
    return f"Received message : {name}"


# A first data node configuration to model an input name.
input_name_data_node_cfg = Config.configure_data_node(id="input_name")
# A second data node configuration to model the message to display
message_data_node_cfg = Config.configure_data_node(id="message")
# Configure task between input and output data nodes
build_msg_task_cfg = Config.configure_task("build_msg", build_message, input_name_data_node_cfg, message_data_node_cfg)
# Scenario Configuration to represent execution graph
scenario_cfg = Config.configure_scenario("scenario", task_configs=[build_msg_task_cfg])

# Function to handle state from GUI
input_name = "Taipy"
message = None


def submit_scenario(state):
    """Button callback: run hello_scenario with the typed name and display the result.

    Bug fix: the original body referenced ``state.scenario`` (never bound —
    the module variable is named ``hello_scenario``) and a bare ``scenario``
    global that does not exist, so clicking the button raised an error.
    The callback now uses the ``hello_scenario`` created in ``__main__``.
    """
    hello_scenario.input_name.write(state.input_name)
    hello_scenario.submit()
    state.message = hello_scenario.message.read()


# Markdown representation of the user interface
page = """
Name: <|{input_name}|input|>
<|submit|button|on_action=submit_scenario|>
Message: <|{message}|text|>
"""

if __name__ == "__main__":
    # Instantiate and run Core service
    Core().run()
    # Instantiate the new scenario name hello_scenario from the scenario configuration built before
    hello_scenario = tp.create_scenario(scenario_cfg)
    # Run GUI
    Gui(page).run()
|
import taipy as tp
from taipy import Config
from taipy import Core


def build_message(name: str) -> str:
    """Build the message echoed back for the given name."""
    return f"Received message : {name}"


# A first data node configuration to model an input name.
input_name_data_node_cfg = Config.configure_data_node(id="input_name")
# A second data node configuration to model the message to display
message_data_node_cfg = Config.configure_data_node(id="message")
# Configure task between input and output data nodes
build_msg_task_cfg = Config.configure_task("build_msg", build_message, input_name_data_node_cfg, message_data_node_cfg)
# Scenario Configuration to represent execution graph
scenario_cfg = Config.configure_scenario("scenario", task_configs=[build_msg_task_cfg])

if __name__ == "__main__":
    # Instantiate and run Core service
    Core().run()
    # Instantiate the new scenario name hello_scenario from the scenario configuration built before
    hello_scenario = tp.create_scenario(scenario_cfg)
    # sets the input data node input_name of hello_scenario with the string value
    hello_scenario.input_name.write("In God We Trust !")
    # submits the hello_scenario for execution, which triggers the creation and execution of a job
    hello_scenario.submit()
    # reads and prints the output data node message written by the execution of the scenario hello_scenario
    print(hello_scenario.message.read())
|
print("----------------------------------------------") print("---------------------TEST---------------------") print("----------------------------------------------")
|
import os
import shutil

import pytest


@pytest.fixture(scope="function")
def tmp_sqlite(tmpdir_factory):
    """Return a path to a fresh, per-test SQLite database file."""
    fn = tmpdir_factory.mktemp("db")
    return os.path.join(fn.strpath, "test.db")


@pytest.fixture(autouse=True)
def cleanup_data():
    """Reset on-disk artifacts and taipy's global state before every test.

    Closes any leftover SQLAlchemy sessions, removes the ``.data`` directory
    and ``test.db`` file, then rebuilds managers, config, and orchestrator so
    each test starts from a clean slate.
    """
    from time import sleep
    from sqlalchemy.orm import close_all_sessions

    close_all_sessions()
    sleep(0.1)  # give the DB a moment to release file handles before deletion
    if os.path.exists(".data"):
        shutil.rmtree(".data", ignore_errors=True)
    if os.path.exists("test.db"):
        os.remove("test.db")
    # NOTE(review): managers/config are re-initialized twice around the
    # orchestrator reset — presumably deliberate ordering; confirm before
    # simplifying.
    init_managers()
    init_config()
    init_orchestrator()
    init_managers()
    init_config()


def init_config():
    """Reset taipy's Config singleton and re-register all config sections/checkers."""
    from taipy import Config
    from taipy.config import IssueCollector
    from taipy.config._config import _Config
    from taipy.config._serializer._toml_serializer import _TomlSerializer
    from taipy.config.checker._checker import _Checker
    from taipy.core.config import (
        CoreSection,
        DataNodeConfig,
        JobConfig,
        ScenarioConfig,
        TaskConfig,
        _DataNodeConfigChecker,
        _inject_section,
        _JobConfigChecker,
        _ScenarioConfigChecker,
        _TaskConfigChecker,
    )

    # Wipe every layer of configuration (python/file/env) back to defaults.
    Config.unblock_update()
    Config._default_config = _Config()._default_config()
    Config._python_config = _Config()
    Config._file_config = None
    Config._env_file_config = None
    Config._applied_config = _Config._default_config()
    Config._collector = IssueCollector()
    Config._serializer = _TomlSerializer()
    _Checker._checkers = []

    # Re-inject each config section together with its public configure_* API.
    _inject_section(
        JobConfig, "job_config", JobConfig("development"), [("configure_job_executions", JobConfig._configure)], True
    )
    _inject_section(
        DataNodeConfig,
        "data_nodes",
        DataNodeConfig.default_config(),
        [
            ("configure_data_node", DataNodeConfig._configure),
            ("configure_default_data_node", DataNodeConfig._set_default_configuration),
            ("configure_csv_data_node", DataNodeConfig._configure_csv),
            ("configure_json_data_node", DataNodeConfig._configure_json),
            ("configure_sql_table_data_node", DataNodeConfig._configure_sql_table),
            ("configure_sql_data_node", DataNodeConfig._configure_sql),
            ("configure_mongo_collection_data_node", DataNodeConfig._configure_mongo_collection),
            ("configure_in_memory_data_node", DataNodeConfig._configure_in_memory),
            ("configure_pickle_data_node", DataNodeConfig._configure_pickle),
            ("configure_excel_data_node", DataNodeConfig._configure_excel),
            ("configure_generic_data_node", DataNodeConfig._configure_generic),
        ],
    )
    _inject_section(
        TaskConfig,
        "tasks",
        TaskConfig.default_config(),
        [("configure_task", TaskConfig._configure), ("configure_default_task", TaskConfig._set_default_configuration)],
    )
    _inject_section(
        ScenarioConfig,
        "scenarios",
        ScenarioConfig.default_config(),
        [
            ("configure_scenario", ScenarioConfig._configure),
            ("configure_default_scenario", ScenarioConfig._set_default_configuration),
        ],
    )
    _inject_section(
        CoreSection,
        "core",
        CoreSection.default_config(),
        [("configure_core", CoreSection._configure)],
        add_to_unconflicted_sections=True,
    )
    _Checker.add_checker(_JobConfigChecker)
    _Checker.add_checker(_DataNodeConfigChecker)
    _Checker.add_checker(_TaskConfigChecker)
    _Checker.add_checker(_ScenarioConfigChecker)


def init_managers():
    """Delete every entity held by each of taipy core's entity managers."""
    from taipy.core._version._version_manager_factory import _VersionManagerFactory
    from taipy.core.cycle._cycle_manager_factory import _CycleManagerFactory
    from taipy.core.data._data_manager_factory import _DataManagerFactory
    from taipy.core.job._job_manager_factory import _JobManagerFactory
    from taipy.core.scenario._scenario_manager_factory import _ScenarioManagerFactory
    from taipy.core.sequence._sequence_manager_factory import _SequenceManagerFactory
    from taipy.core.task._task_manager_factory import _TaskManagerFactory

    _CycleManagerFactory._build_manager()._delete_all()
    _ScenarioManagerFactory._build_manager()._delete_all()
    _SequenceManagerFactory._build_manager()._delete_all()
    _JobManagerFactory._build_manager()._delete_all()
    _TaskManagerFactory._build_manager()._delete_all()
    _DataManagerFactory._build_manager()._delete_all()
    _VersionManagerFactory._build_manager()._delete_all()


def init_orchestrator():
    """Rebuild the orchestrator/dispatcher and clear its job queues."""
    from queue import Queue
    from taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory

    if _OrchestratorFactory._orchestrator is None:
        _OrchestratorFactory._build_orchestrator()
    _OrchestratorFactory._build_dispatcher(force_restart=True)
    _OrchestratorFactory._orchestrator.jobs_to_run = Queue()
    _OrchestratorFactory._orchestrator.blocked_jobs = []
|
import os
import pickle

import taipy.core as tp
from taipy.config import Config


def test_pickle_files():
    """End-to-end check of pickle data nodes for dict rows and object rows.

    For each scenario config: read the generated input pickle, round-trip the
    data through the output data node (including a None write), then submit
    the scenario and verify the task reproduced the input in the output node.
    """
    from tests.shared_test_cases.pickle_files import (
        PICKLE_DICT_INPUT_PATH,
        PICKLE_DICT_OUTPUT_PATH,
        PICKLE_LIST_INPUT_PATH,
        PICKLE_LIST_OUTPUT_PATH,
        ROW_COUNT,
        gen_list_of_dict_input_pickle,
        gen_list_of_objects_input_pickle,
        scenario_cfg_1,
        scenario_cfg_2,
    )

    tp.clean_all_entities_by_version(None)

    # generate 2 pickles files
    gen_list_of_dict_input_pickle(PICKLE_DICT_INPUT_PATH, ROW_COUNT)
    gen_list_of_objects_input_pickle(PICKLE_LIST_INPUT_PATH, ROW_COUNT)

    # Load reference data directly with pickle for later comparison.
    with open(PICKLE_DICT_INPUT_PATH, "rb") as f:
        dict_data = pickle.load(f)
    with open(PICKLE_LIST_INPUT_PATH, "rb") as f:
        list_data = pickle.load(f)

    # 📝 List of dicts
    scenario_1 = tp.create_scenario(scenario_cfg_1)
    input_data_node_1 = scenario_1.input_pickle_dataset_1
    output_data_node_1 = scenario_1.output_pickle_dataset_1
    read_data_1 = input_data_node_1.read()
    assert len(read_data_1) == ROW_COUNT
    assert read_data_1 == dict_data
    # Output starts empty; writing then clearing must round-trip exactly.
    assert output_data_node_1.read() is None
    output_data_node_1.write(read_data_1)
    assert dict_data == output_data_node_1.read()
    output_data_node_1.write(None)
    assert output_data_node_1.read() is None
    # Submitting runs the task, which must populate the output node.
    scenario_1.submit()
    assert dict_data == output_data_node_1.read()
    os.remove(PICKLE_DICT_INPUT_PATH)
    os.remove(PICKLE_DICT_OUTPUT_PATH)

    # 📝 List of objects
    scenario_2 = tp.create_scenario(scenario_cfg_2)
    input_data_node_2 = scenario_2.input_pickle_dataset_2
    output_data_node_2 = scenario_2.output_pickle_dataset_2
    read_data_2 = input_data_node_2.read()
    assert len(read_data_2) == ROW_COUNT
    assert read_data_2 == list_data
    assert output_data_node_2.read() is None
    output_data_node_2.write(read_data_2)
    assert list_data == output_data_node_2.read()
    output_data_node_2.write(None)
    assert output_data_node_2.read() is None
    scenario_2.submit()
    assert list_data == output_data_node_2.read()
    os.remove(PICKLE_LIST_INPUT_PATH)
    os.remove(PICKLE_LIST_OUTPUT_PATH)
|
import os
import shutil
from io import StringIO
from unittest.mock import patch

import pytest

from taipy._cli._scaffold_cli import _ScaffoldCLI
from taipy._entrypoint import _entrypoint
from tests.utils import clean_subparser


@pytest.fixture(autouse=True, scope="function")
def clean_templates():
    """Reset CLI subparser state before each test; delete generated apps after."""
    clean_subparser()
    yield
    if os.path.exists("foo_app"):
        shutil.rmtree("foo_app", ignore_errors=True)
    if os.path.exists("bar_app"):
        shutil.rmtree("bar_app", ignore_errors=True)


def test_default_template():
    """`taipy create` (with and without --template default) scaffolds an app dir."""
    assert os.path.exists(_ScaffoldCLI._TEMPLATE_MAP["default"])
    # Scripted answers to the template prompts, fed through a patched stdin.
    inputs = "\n".join(["foo_app", "main.py", "bar", "", "", ""])
    with pytest.raises(SystemExit) as error:
        with patch("sys.stdin", StringIO(f"{inputs}\n")):
            with patch("sys.argv", ["prog", "create"]):
                _entrypoint()
    assert "foo_app" in os.listdir(os.getcwd())
    assert error.value.code == 0

    # Re-run with an explicit --template argument; subparsers must be reset first.
    clean_subparser()
    inputs = "\n".join(["bar_app", "main.py", "bar", "", "", ""])
    with pytest.raises(SystemExit) as error:
        with patch("sys.stdin", StringIO(f"{inputs}\n")):
            with patch("sys.argv", ["prog", "create", "--template", "default"]):
                _entrypoint()
    assert "bar_app" in os.listdir(os.getcwd())
    assert error.value.code == 0


def test_scenario_management_template():
    """The scenario-management template scaffolds an app directory too."""
    assert os.path.exists(_ScaffoldCLI._TEMPLATE_MAP["scenario-management"])
    inputs = "\n".join(["foo_app", "main.py", "bar", ""])
    with pytest.raises(SystemExit) as error:
        with patch("sys.stdin", StringIO(f"{inputs}\n")):
            with patch("sys.argv", ["prog", "create", "--template", "scenario-management"]):
                _entrypoint()
    assert "foo_app" in os.listdir(os.getcwd())
    assert error.value.code == 0


def test_non_existing_template(capsys):
    """An unknown --template value must exit with argparse's usage error (code 2)."""
    with pytest.raises(SystemExit) as error:
        with patch("sys.argv", ["prog", "create", "--template", "non-existing-template"]):
            _entrypoint()
    assert error.value.code == 2
    _, err_msg = capsys.readouterr()
    assert "argument --template: invalid choice: 'non-existing-template' (choose from" in err_msg
|
import os
from unittest.mock import patch

import pandas as pd

import taipy.core.taipy as tp
from taipy.config import Config
from taipy.core import Core
from taipy.core.config.job_config import JobConfig
from taipy.core.job.status import Status

from .complex_application_configs import (
    average,
    build_churn_classification_config,
    build_complex_config,
    build_complex_required_file_paths,
    build_skipped_jobs_config,
)


def assert_true_after_time(assertion, msg=None, time=120):
    """Poll `assertion` once per second for up to `time` seconds, then assert it.

    Exceptions raised by the assertion while polling are printed and retried;
    a final failing `assert` surfaces the error if the timeout elapses.
    """
    from datetime import datetime
    from time import sleep

    start = datetime.now()
    while (datetime.now() - start).seconds < time:
        sleep(1)  # Limit CPU usage
        try:
            if assertion():
                return
        except BaseException as e:
            print("Raise : ", e)
            continue
    if msg:
        print(msg)
    assert assertion()


def test_skipped_jobs():
    """A second submission of an unchanged scenario must skip both jobs."""
    scenario_config = build_skipped_jobs_config()
    with patch("sys.argv", ["prog"]):
        Core().run()
    scenario = tp.create_scenario(scenario_config)
    scenario.input.write(2)
    scenario.submit()
    assert len(tp.get_jobs()) == 2
    for job in tp.get_jobs():
        assert job.status == Status.COMPLETED
    # Nothing changed: resubmitting should skip the (skippable) jobs.
    scenario.submit()
    assert len(tp.get_jobs()) == 4
    skipped = []
    for job in tp.get_jobs():
        if job.status != Status.COMPLETED:
            assert job.status == Status.SKIPPED
            skipped.append(job)
    assert len(skipped) == 2


def test_complex_development():
    """Run the complex execution graph (d1..d12 data nodes through tasks t1..t10,
    built by build_complex_config) in development mode and check outputs."""
    _, _, csv_path_sum, excel_path_sum, excel_path_out, csv_path_out = build_complex_required_file_paths()
    scenario_config = build_complex_config()
    with patch("sys.argv", ["prog"]):
        Core().run(force_restart=True)
    scenario = tp.create_scenario(scenario_config)
    tp.submit(scenario)
    csv_sum_res = pd.read_csv(csv_path_sum)
    excel_sum_res = pd.read_excel(excel_path_sum)
    csv_out = pd.read_csv(csv_path_out)
    excel_out = pd.read_excel(excel_path_out)
    assert csv_sum_res.to_numpy().flatten().tolist() == [i * 20 for i in range(1, 11)]
    assert excel_sum_res.to_numpy().flatten().tolist() == [i * 2 for i in range(1, 11)]
    assert average(csv_sum_res["number"] - excel_sum_res["number"]) == csv_out.to_numpy()[0]
    assert average((csv_sum_res["number"] - excel_sum_res["number"]) * 10) == excel_out.to_numpy()[0]
    for path in [csv_path_sum, excel_path_sum, csv_path_out, excel_path_out]:
        os.remove(path)


def test_complex_standlone():
    """Same complex graph as test_complex_development, but executed with the
    standalone (multi-worker) job dispatcher, so completion is polled for."""
    _, _, csv_path_sum, excel_path_sum, excel_path_out, csv_path_out = build_complex_required_file_paths()
    scenario_config = build_complex_config()
    Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
    with patch("sys.argv", ["prog"]):
        Core().run(force_restart=True)
    scenario = tp.create_scenario(scenario_config)
    jobs = tp.submit(scenario)
    # Wait for the asynchronous workers to produce the output files.
    assert_true_after_time(lambda: os.path.exists(csv_path_out) and os.path.exists(excel_path_out))
    assert_true_after_time(lambda: all([job._status == Status.COMPLETED for job in jobs]))
    csv_sum_res = pd.read_csv(csv_path_sum)
    excel_sum_res = pd.read_excel(excel_path_sum)
    csv_out = pd.read_csv(csv_path_out)
    excel_out = pd.read_excel(excel_path_out)
    assert csv_sum_res.to_numpy().flatten().tolist() == [i * 20 for i in range(1, 11)]
    assert excel_sum_res.to_numpy().flatten().tolist() == [i * 2 for i in range(1, 11)]
    assert average(csv_sum_res["number"] - excel_sum_res["number"]) == csv_out.to_numpy()[0]
    assert average((csv_sum_res["number"] - excel_sum_res["number"]) * 10) == excel_out.to_numpy()[0]
    for path in [csv_path_sum, excel_path_sum, csv_path_out, excel_path_out]:
        os.remove(path)


def test_churn_classification_development():
    """All jobs of the churn-classification scenario complete in development mode."""
    scenario_cfg = build_churn_classification_config()
    with patch("sys.argv", ["prog"]):
        Core().run(force_restart=True)
    scenario = tp.create_scenario(scenario_cfg)
    jobs = tp.submit(scenario)
    # Log which task failed (if any) before asserting overall success.
    for job in jobs:
        if not job.is_completed():
            print(job._task.config_id)
    assert all([job.is_completed() for job in jobs])


def test_churn_classification_standalone():
    """Churn-classification scenario completes under the standalone dispatcher."""
    scenario_cfg = build_churn_classification_config()
    Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
    with patch("sys.argv", ["prog"]):
        Core().run(force_restart=True)
    scenario = tp.create_scenario(scenario_cfg)
    jobs = tp.submit(scenario)
    assert_true_after_time(lambda: os.path.exists(scenario.results._path))
    assert_true_after_time(lambda: all([job._status == Status.COMPLETED for job in jobs]), time=15)
|
from typing import Optional

from flask_testing import TestCase

from taipy.rest import Rest
from tests.shared_test_cases.arima import build_arima_config


class BaseTestCase(TestCase):
    """flask_testing base: boots the taipy REST app in testing mode."""

    def create_app(self):
        rest = Rest()
        rest._app.config["TESTING"] = True
        return rest._app


class RestTest(BaseTestCase):
    """End-to-end checks of the REST API's scenario endpoints."""

    def _create(self, entity: str, config_id: str):
        """POST /api/v1/<entity>?config_id=<config_id> and return the response."""
        return self.client.post(f"/api/v1/{entity}?config_id={config_id}")

    def _get(self, entity: str, id: Optional[str] = None):
        """GET one entity by id, or the whole collection when id is None."""
        url = f"/api/v1/{entity}"
        if id:
            url = f"{url}/{id}"
        return self.client.get(url)

    def test_create_scenario_should_create_every_entity(self):
        build_arima_config()
        response = self._create("scenarios", "Arima_scenario")
        assert response.status_code == 201
        assert response.json["message"] == "Scenario was created."
        # Creating the scenario must also materialize its tasks and data nodes.
        all_scenarios = self._get("scenarios")
        all_data_nodes = self._get("datanodes")
        all_tasks = self._get("tasks")
        assert len(all_scenarios.json) == 1
        assert len(all_data_nodes.json) == 4
        assert len(all_tasks.json) == 2

    def test_submit_scenario(self):
        build_arima_config()
        response = self._create("scenarios", "Arima_scenario")
        assert response.status_code == 201
        scenario_id = response.json["scenario"]["id"]
        response = self.client.post(f"/api/v1/scenarios/submit/{scenario_id}")
        assert response.status_code == 200
        assert response.json == {"message": f"Scenario {scenario_id} was submitted."}
        # Every job created by the submission should have completed.
        all_jobs = self._get("jobs")
        for jb in all_jobs.json:
            assert jb["status"] == "Status.COMPLETED"
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
import taipy as tp

from tests.shared_test_cases.arima.config import build_arima_config


def test_submit_scenario_submit_success():
    """Create and submit an ARIMA scenario; check outputs and entity counts."""
    arima_scenario_config = build_arima_config()
    scenario = tp.create_scenario(arima_scenario_config)
    tp.submit(scenario)
    # The submitted run must have produced forecast values.
    assert scenario.forecast_values.read() is not None
    # The ARIMA graph holds 2 tasks and 4 data nodes; one job per task.
    assert len(tp.get_tasks()) == 2
    assert len(tp.get_jobs()) == 2
    assert len(tp.get_data_nodes()) == 4
|
def clean_subparser():
    """Reset the taipy CLI's nested subparsers between tests.

    argparse keeps global state on the shared ``_CLI._parser``; without this
    cleanup, a test registering subcommands would clash with leftovers from a
    previous test.

    Bug fix: the second cleanup loop removed entries from
    ``argument_group._group_actions`` while iterating that same list, which
    skips the element immediately following each removal (the first loop
    already iterated a ``.copy()``). Both loops now iterate a copy.
    """
    # Imported lazily so this helper module stays importable (and testable)
    # without taipy installed, and without triggering CLI setup at import time.
    from taipy._cli._base_cli import _CLI

    if getattr(_CLI._parser, "_subparsers", None):
        # Loop over all subparsers to find the one that has nested-subparsers
        # and positional arguments.
        for choice in _CLI._parser._subparsers._group_actions[0].choices.values():
            # Remove nested _subparsers
            choice._subparsers = None
            # Remove positional arguments.
            # The "==SUPPRESS==" is a hack to identify nested-subparsers as
            # positional arguments.
            to_remove = ["application_main_file", "==SUPPRESS=="]
            for action in choice._actions.copy():
                opts = action.option_strings
                if (opts and opts[0] in to_remove) or action.dest in to_remove:
                    choice._remove_action(action)
            for argument_group in choice._action_groups:
                # Iterate a copy: removing from the live list while iterating
                # it would skip the element after each removal.
                for group_action in argument_group._group_actions.copy():
                    opts = group_action.option_strings
                    if (opts and opts[0] in to_remove) or group_action.dest in to_remove:
                        argument_group._group_actions.remove(group_action)


def assert_true_after_time(assertion, msg=None, time=120):
    """Poll ``assertion`` for up to ``time`` seconds, then assert it.

    Prints a progress line while waiting. Exceptions raised by the assertion
    are printed and retried after a short sleep; a final ``assert`` surfaces
    the failure once the timeout elapses.
    """
    from datetime import datetime
    from time import sleep

    start = datetime.now()
    while (datetime.now() - start).seconds < time:
        print(f"waiting {(datetime.now() - start).seconds} seconds...", end="\r")
        try:
            if assertion():
                print(f"waiting {(datetime.now() - start).seconds} seconds...")
                return
        except BaseException as e:
            print("Raise : ", e)
            sleep(1)  # Limit CPU usage
            continue
    print(f"waiting {(datetime.now() - start).seconds} seconds...")
    if msg:
        print(msg)
    assert assertion()
|
import os

import modin.pandas as modin_pd
import numpy as np
import pandas as pd

import taipy.core as tp
from taipy.config import Config


def test_excel():
    """Round-trip a single-sheet Excel data node under each exposed type.

    For pandas (default), a custom Row class, numpy, and modin: read the
    input node, write/clear the output node, then submit the scenario and
    verify the task reproduced the input data in the output node.
    """
    from tests.shared_test_cases.single_excel_sheet import (
        EXCEL_SINGLE_SHEET_INPUT_PATH,
        EXCEL_SINGLE_SHEET_OUTPUT_PATH,
        ROW_COUNT,
        Row,
        scenario_cfg,
        scenario_cfg_2,
        scenario_cfg_3,
        scenario_cfg_4,
    )

    # Reference data loaded directly with each backend for comparison.
    pandas_data = pd.read_excel(EXCEL_SINGLE_SHEET_INPUT_PATH)
    numpy_data = pandas_data.to_numpy()
    modin_data = modin_pd.read_excel(EXCEL_SINGLE_SHEET_INPUT_PATH)
    custom_data = [
        Row(int(v.id), int(v.age), float(v.rating))
        for i, v in pd.read_excel(EXCEL_SINGLE_SHEET_INPUT_PATH).iterrows()
    ]

    tp.clean_all_entities_by_version(None)

    # 📊 Without exposed type (pandas is the default exposed type)
    scenario_1 = tp.create_scenario(scenario_cfg)
    input_data_node_1 = scenario_1.input_excel_single_sheet_dataset_1
    output_data_node_1 = scenario_1.output_excel_single_sheet_dataset_1
    read_data_1 = input_data_node_1.read()
    assert len(read_data_1) == ROW_COUNT
    assert pandas_data.equals(read_data_1)
    assert output_data_node_1.read() is None
    output_data_node_1.write(read_data_1)
    assert pandas_data.equals(output_data_node_1.read())
    # Writing None yields an empty DataFrame for the pandas exposed type.
    output_data_node_1.write(None)
    assert output_data_node_1.read().empty
    scenario_1.submit()
    assert pandas_data.equals(output_data_node_1.read())
    os.remove(EXCEL_SINGLE_SHEET_OUTPUT_PATH)

    # 📊 With custom class as exposed type
    def compare_custom_date(read_data, custom_data):
        # Field-by-field equality between two sequences of Row-like objects.
        return [
            row_1.id == row_2.id and row_1.age == row_2.age and row_1.rating == row_2.rating
            for row_1, row_2 in zip(read_data, custom_data)
        ]

    scenario_2 = tp.create_scenario(scenario_cfg_2)
    input_data_node_2 = scenario_2.input_excel_single_sheet_dataset_2
    output_data_node_2 = scenario_2.output_excel_single_sheet_dataset_2
    read_data_2 = input_data_node_2.read()
    assert len(read_data_2) == ROW_COUNT
    assert all(compare_custom_date(read_data_2, custom_data))
    output_data_node_2.write(read_data_2)
    assert len(read_data_2) == ROW_COUNT
    assert all(compare_custom_date(output_data_node_2.read(), custom_data))
    # Writing None yields an empty list for the custom-class exposed type.
    output_data_node_2.write(None)
    assert isinstance(output_data_node_2.read(), list)
    assert len(output_data_node_2.read()) == 0
    scenario_2.submit()
    assert all(compare_custom_date(output_data_node_2.read(), custom_data))
    os.remove(EXCEL_SINGLE_SHEET_OUTPUT_PATH)

    # 📊 With numpy as exposed type
    scenario_3 = tp.create_scenario(scenario_cfg_3)
    input_data_node_3 = scenario_3.input_excel_single_sheet_dataset_3
    output_data_node_3 = scenario_3.output_excel_single_sheet_dataset_3
    read_data_3 = input_data_node_3.read()
    assert len(read_data_3) == ROW_COUNT
    assert np.array_equal(read_data_3, numpy_data)
    assert output_data_node_3.read() is None
    output_data_node_3.write(read_data_3)
    assert np.array_equal(output_data_node_3.read(), numpy_data)
    # Writing None yields an empty ndarray for the numpy exposed type.
    output_data_node_3.write(None)
    assert isinstance(output_data_node_3.read(), np.ndarray)
    assert output_data_node_3.read().size == 0
    scenario_3.submit()
    assert np.array_equal(output_data_node_3.read(), numpy_data)
    os.remove(EXCEL_SINGLE_SHEET_OUTPUT_PATH)

    # 📊 With modin as exposed type
    scenario_4 = tp.create_scenario(scenario_cfg_4)
    input_data_node_4 = scenario_4.input_excel_single_sheet_dataset_4
    output_data_node_4 = scenario_4.output_excel_single_sheet_dataset_4
    read_data_4 = input_data_node_4.read()
    assert len(read_data_4) == ROW_COUNT
    # Compare via pandas conversion since modin frames compare element-wise.
    assert all(modin_data._to_pandas() == read_data_4._to_pandas())
    assert output_data_node_4.read() is None
    output_data_node_4.write(read_data_4)
    assert all(modin_data._to_pandas() == output_data_node_4.read()._to_pandas())
    output_data_node_4.write(None)
    assert output_data_node_4.read().empty
    scenario_4.submit()
    assert all(modin_data._to_pandas() == output_data_node_4.read()._to_pandas())
    os.remove(EXCEL_SINGLE_SHEET_OUTPUT_PATH)
|
import json
import os

import taipy.core as tp
from taipy.config import Config


def test_json():
    """Round-trip JSON data nodes, with and without a custom encoder/decoder.

    For each scenario config: read the input node, write/clear the output
    node, then submit the scenario and verify the task reproduced the input
    data in the output node.
    """
    from tests.shared_test_cases.json_files import (
        JSON_DICT_INPUT_PATH,
        JSON_DICT_OUTPUT_PATH,
        JSON_OBJECT_INPUT_PATH,
        JSON_OBJECT_OUTPUT_PATH,
        ROW_COUNT,
        Row,
        RowDecoder,
        scenario_cfg_1,
        scenario_cfg_2,
    )

    tp.clean_all_entities_by_version(None)

    # Reference data loaded directly with the json module for comparison.
    with open(JSON_DICT_INPUT_PATH, "r") as f:
        json_dict_data = json.load(f)
    with open(JSON_OBJECT_INPUT_PATH, "r") as f:
        json_object_data = json.load(f, cls=RowDecoder)

    # 📝 Without encoder / decoder
    scenario_1 = tp.create_scenario(scenario_cfg_1)
    input_data_node_1 = scenario_1.input_json_dataset_1
    output_data_node_1 = scenario_1.output_json_dataset_1
    read_data_1 = input_data_node_1.read()
    assert len(read_data_1) == ROW_COUNT
    assert json_dict_data == read_data_1
    assert output_data_node_1.read() is None
    output_data_node_1.write(read_data_1)
    assert json_dict_data == output_data_node_1.read()
    output_data_node_1.write(None)
    assert output_data_node_1.read() is None
    scenario_1.submit()
    assert json_dict_data == output_data_node_1.read()
    os.remove(JSON_DICT_OUTPUT_PATH)

    # 📝 With encoder / decoder
    def compare_custom_date(read_data, object_data):
        # Type plus field-by-field equality between decoded Row objects.
        return [
            isinstance(row_1, Row) and row_1.id == row_2.id and row_1.age == row_2.age and row_1.rating == row_2.rating
            for row_1, row_2 in zip(read_data, object_data)
        ]

    scenario_2 = tp.create_scenario(scenario_cfg_2)
    input_data_node_2 = scenario_2.input_json_dataset_2
    output_data_node_2 = scenario_2.output_json_dataset_2
    read_data_2 = input_data_node_2.read()
    assert len(read_data_2) == ROW_COUNT
    assert all(compare_custom_date(read_data_2, json_object_data))
    assert output_data_node_2.read() is None
    output_data_node_2.write(read_data_2)
    assert all(compare_custom_date(output_data_node_2.read(), json_object_data))
    output_data_node_2.write(None)
    assert output_data_node_2.read() is None
    scenario_2.submit()
    assert all(compare_custom_date(output_data_node_2.read(), json_object_data))
    os.remove(JSON_OBJECT_OUTPUT_PATH)
|
from unittest.mock import patch

import pytest

from taipy._entrypoint import _entrypoint
from tests.utils import clean_subparser


@pytest.fixture(autouse=True, scope="function")
def clean_templates():
    # Reset the CLI sub-parsers before each test so argument registration from
    # one test does not leak into the next.
    clean_subparser()
    yield


def test_run_simple_taipy_app_without_taipy_args(capfd):
    """`taipy run` with no extra flags exits 0 and reports default Core/GUI settings."""
    with pytest.raises(SystemExit) as error:
        with patch("sys.argv", ["prog", "run", "tests/simple_application/no_external_args_app.py"]):
            _entrypoint()
    std_out, _ = capfd.readouterr()
    assert error.value.code == 0
    assert "Development mode: Clean all entities of version" in std_out
    assert "Config.core.version_number: " in std_out
    assert "Config.core.mode: development" in std_out
    assert "Config.core.force: False" in std_out
    assert "Config.gui_config.host: 127.0.0.1" in std_out
    assert "Config.gui_config.port: 5000" in std_out
    assert "Config.gui_config.debug: False" in std_out
    assert "Config.gui_config.use_reloader: False" in std_out
    assert "Config.gui_config.ngrok_token: " in std_out
    assert "Config.gui_config.webapp_path: None" in std_out


def test_run_simple_taipy_app_with_taipy_args(capfd):
    """Taipy-owned CLI flags override the default Core/GUI settings."""
    with pytest.raises(SystemExit):
        with patch(
            "sys.argv",
            [
                "prog",
                "run",
                "tests/simple_application/no_external_args_app.py",
                "--experiment",
                "1.0",
                "--force",
                "--host",
                "example.com",
                "--port",
                "5001",
                "--debug",
                "--use-reloader",
                "--ngrok-token",
                "1234567890",
                "--webapp-path",
                "path/webapp",
            ],
        ):
            _entrypoint()
    std_out, _ = capfd.readouterr()
    assert "Config.core.version_number: 1.0" in std_out
    assert "Config.core.mode: experiment" in std_out
    assert "Config.core.force: True" in std_out
    assert "Config.gui_config.host: example.com" in std_out
    assert "Config.gui_config.port: 5001" in std_out
    assert "Config.gui_config.debug: True" in std_out
    assert "Config.gui_config.use_reloader: True" in std_out
    assert "Config.gui_config.ngrok_token: 1234567890" in std_out
    assert "Config.gui_config.webapp_path: path/webapp" in std_out


def test_run_simple_taipy_app_with_taipy_and_external_args(capfd):
    """Arguments after the "external-args" keyword go to the app, not to Taipy."""
    with pytest.raises(SystemExit):
        with patch(
            "sys.argv",
            [
                "prog",
                "run",
                "tests/simple_application/external_args_app.py",
                "--experiment",
                "1.0",
                "--force",
                "--host",
                "example.com",
                "--port",
                "5001",
                "external-args",  # This is the keyword that separates external args from taipy args
                "--mode",
                "inference",
                "--force",
                "yes",
                "--host",
                "user_host.com",
                "--port",
                "8081",
                "--non-conflict-arg",
                "non-conflict-arg-value",
            ],
        ):
            _entrypoint()
    std_out, _ = capfd.readouterr()
    assert "Config.core.mode: experiment" in std_out
    assert "User provided mode: inference" in std_out
    assert "Config.core.force: True" in std_out
    assert "User provided force: yes" in std_out
    assert "Config.gui_config.host: example.com" in std_out
    assert "User provided host: user_host.com" in std_out
    assert "Config.gui_config.port: 5001" in std_out
    assert "User provided port: 8081" in std_out
    assert "User provided non-conflict-arg: non-conflict-arg-value" in std_out
|
import os

import modin.pandas as modin_pd
import numpy as np
import pandas as pd

import taipy.core as tp
from taipy.config import Config


def test_csv():
    """End-to-end test of Taipy CSV data nodes across all exposed types.

    Covers the default (pandas), a custom Row class, numpy, and modin —
    checking read/write round-trips, clearing via ``write(None)``, and
    scenario submission for each configuration.
    """
    from tests.shared_test_cases.csv_files.config import (
        CSV_INPUT_PATH,
        CSV_OUTPUT_PATH,
        ROW_COUNT,
        Row,
        scenario_cfg,
        scenario_cfg_2,
        scenario_cfg_3,
        scenario_cfg_4,
    )

    # Start from a clean repository so entity state is predictable.
    tp.clean_all_entities_by_version(None)

    # Reference data in each of the four representations.
    pandas_data = pd.read_csv(CSV_INPUT_PATH)
    numpy_data = pandas_data.to_numpy()
    modin_data = modin_pd.read_csv(CSV_INPUT_PATH)
    custom_data = [Row(int(v.id), int(v.age), float(v.rating)) for i, v in pd.read_csv(CSV_INPUT_PATH).iterrows()]

    # 📊 Without exposed type (pandas is the default exposed type)
    scenario_1 = tp.create_scenario(scenario_cfg)
    input_data_node_1 = scenario_1.input_csv_dataset_1
    output_data_node_1 = scenario_1.output_csv_dataset_1
    read_data_1 = input_data_node_1.read()
    assert len(read_data_1) == ROW_COUNT
    assert pandas_data.equals(read_data_1)
    assert output_data_node_1.read() is None
    output_data_node_1.write(read_data_1)
    assert pandas_data.equals(output_data_node_1.read())
    # Clearing a pandas-typed node yields an empty DataFrame, not None.
    output_data_node_1.write(None)
    assert output_data_node_1.read().empty
    scenario_1.submit()
    assert pandas_data.equals(output_data_node_1.read())
    os.remove(CSV_OUTPUT_PATH)

    # 📊 With custom class as exposed type
    def compare_custom_date(read_data, custom_data):
        # Element-wise field comparison of Row objects.
        return [
            row_1.id == row_2.id and row_1.age == row_2.age and row_1.rating == row_2.rating
            for row_1, row_2 in zip(read_data, custom_data)
        ]

    scenario_2 = tp.create_scenario(scenario_cfg_2)
    input_data_node_2 = scenario_2.input_csv_dataset_2
    output_data_node_2 = scenario_2.output_csv_dataset_2
    read_data_2 = input_data_node_2.read()
    assert len(read_data_2) == ROW_COUNT
    assert all(compare_custom_date(read_data_2, custom_data))
    output_data_node_2.write(read_data_2)
    assert len(read_data_2) == ROW_COUNT
    assert all(compare_custom_date(output_data_node_2.read(), custom_data))
    # Clearing a custom-class node yields an empty list.
    output_data_node_2.write(None)
    assert isinstance(output_data_node_2.read(), list)
    assert len(output_data_node_2.read()) == 0
    scenario_2.submit()
    assert all(compare_custom_date(output_data_node_2.read(), custom_data))
    os.remove(CSV_OUTPUT_PATH)

    # 📊 With numpy as exposed type
    scenario_3 = tp.create_scenario(scenario_cfg_3)
    input_data_node_3 = scenario_3.input_csv_dataset_3
    output_data_node_3 = scenario_3.output_csv_dataset_3
    read_data_3 = input_data_node_3.read()
    assert len(read_data_3) == ROW_COUNT
    assert np.array_equal(read_data_3, numpy_data)
    assert output_data_node_3.read() is None
    output_data_node_3.write(read_data_3)
    assert np.array_equal(output_data_node_3.read(), numpy_data)
    # Clearing a numpy-typed node yields an empty ndarray.
    output_data_node_3.write(None)
    assert isinstance(output_data_node_3.read(), np.ndarray)
    assert output_data_node_3.read().size == 0
    scenario_3.submit()
    assert np.array_equal(output_data_node_3.read(), numpy_data)
    os.remove(CSV_OUTPUT_PATH)

    # 📊 With modin as exposed type
    scenario_4 = tp.create_scenario(scenario_cfg_4)
    input_data_node_4 = scenario_4.input_csv_dataset_4
    output_data_node_4 = scenario_4.output_csv_dataset_4
    read_data_4 = input_data_node_4.read()
    assert len(read_data_4) == ROW_COUNT
    # Modin frames are compared through their pandas conversion.
    assert all(modin_data._to_pandas() == read_data_4._to_pandas())
    assert output_data_node_4.read() is None
    output_data_node_4.write(read_data_4)
    assert all(modin_data._to_pandas() == output_data_node_4.read()._to_pandas())
    output_data_node_4.write(None)
    assert output_data_node_4.read().empty
    scenario_4.submit()
    assert all(modin_data._to_pandas() == output_data_node_4.read()._to_pandas())
    os.remove(CSV_OUTPUT_PATH)
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
import random import time from datetime import datetime from typing import Dict, List import pandas as pd def failing_job(historical_daily_temperature: pd.DataFrame): time.sleep(2) print("----- Prepared to raise exception -----") raise Exception def predict(model, dates: List[datetime]) -> Dict[str, List[float]]: res = [t + random.uniform(0, 3) for t in model.forecast(len(dates))] return {"result": res}
|
from datetime import datetime from taipy import Frequency, Scope from taipy.config.config import Config from .algorithms import failing_job, predict historical_data_set = Config.configure_csv_data_node( id="historical_data_set", path="tests//shared_test_cases/arima/daily-min-temperatures.csv", scope=Scope.GLOBAL ) arima_model = Config.configure_data_node(id="arima_model") dates_to_forecast = Config.configure_data_node( id="dates_to_forecast", scope=Scope.SCENARIO, default_data=[datetime(1991, 1, 1).isoformat()] ) forecast_values = Config.configure_excel_data_node( id="forecast_values", has_header=False, path="tests//shared_test_cases/arima/res.xlsx" ) arima_fail_algo = Config.configure_task( id="arima_training", input=historical_data_set, function=failing_job, output=arima_model ) arima_scoring_algo = Config.configure_task( id="arima_scoring", input=[arima_model, dates_to_forecast], function=predict, output=forecast_values ) arima_sequence = Config.configure_sequence(id="arima_sequences", task_configs=[arima_fail_algo, arima_scoring_algo]) arima_scenario_config = Config.configure_scenario( id="Arima_scenario", sequence_configs=[arima_sequence], frequency=Frequency.DAILY )
|
from .algorithms import * from .config import *
|
import random import time from datetime import datetime from typing import Dict, List import pandas as pd from statsmodels.tsa.arima.model import ARIMA def train(historical_daily_temperature: pd.DataFrame): print("----- Started training -----") time.sleep(2) for _ in range(2): print("----- Model is in training -----") return ARIMA(endog=historical_daily_temperature["Temp"].to_numpy(), order=(1, 1, 0)).fit() def predict(model, dates: List[datetime]) -> Dict[str, List]: res = [t + random.uniform(0, 3) for t in model.forecast(len(dates))] return {"result": res}
|
from datetime import datetime from taipy.config import Frequency, Scope from taipy.config.config import Config from .algorithms import predict, train def build_arima_config(): CSV_INPUT_PATH = "tests/shared_test_cases/arima/daily-min-temperatures.csv" XLSX_OUTPUT_PATH = "tests/shared_test_cases/arima/res.xlsx" historical_data_set = Config.configure_csv_data_node(id="historical_data_set", path=CSV_INPUT_PATH, scope=Scope.GLOBAL) arima_model = Config.configure_data_node(id="arima_model") dates_to_forecast = Config.configure_data_node( id="dates_to_forecast", scope=Scope.SCENARIO, default_data=[datetime(1991, 1, 1).isoformat()] ) forecast_values = Config.configure_excel_data_node(id="forecast_values", has_header=False, path=XLSX_OUTPUT_PATH) arima_training_algo = Config.configure_task( id="arima_training", input=historical_data_set, function=train, output=arima_model ) arima_scoring_algo = Config.configure_task( id="arima_scoring", input=[arima_model, dates_to_forecast], function=predict, output=forecast_values ) arima_scenario_config = Config.configure_scenario( id="Arima_scenario", task_configs=[arima_training_algo, arima_scoring_algo], frequency=Frequency.DAILY ) return arima_scenario_config
|
from .algorithms import * from .config import *
|
import pandas as pd def algorithm(df: pd.DataFrame) -> pd.DataFrame: return df
|
import dataclasses

from taipy.config.common.frequency import Frequency
from taipy.config.config import Config

from .algorithms import algorithm

# Fixture paths and size shared by the CSV test cases.
CSV_INPUT_PATH = "tests/shared_test_cases/csv_files/input_1000.csv"
CSV_OUTPUT_PATH = "tests/shared_test_cases/csv_files/output_1000.csv"
ROW_COUNT = 1000


@dataclasses.dataclass
class Row:
    """One CSV record; ``__post_init__`` coerces each field to its declared type."""

    id: int
    age: int
    rating: float

    def __post_init__(self):
        # CSV values may arrive as strings; coerce them to the annotated types.
        for field in dataclasses.fields(self):
            value = getattr(self, field.name)
            if not isinstance(value, field.type):
                setattr(self, field.name, field.type(value))


# Re-enable Config updates before declaring the data nodes below.
Config.unblock_update()

# Config for Pandas
input_dataset_cfg = Config.configure_csv_data_node(id="input_csv_dataset_1", path=CSV_INPUT_PATH, has_header=True)
output_dataset_cfg = Config.configure_csv_data_node(id="output_csv_dataset_1", path=CSV_OUTPUT_PATH, has_header=True)
task_cfg = Config.configure_task(id="t1", input=input_dataset_cfg, function=algorithm, output=output_dataset_cfg)
scenario_cfg = Config.configure_scenario(id="s1", task_configs=[task_cfg], frequency=Frequency.DAILY)

# Config for Custom class
input_dataset_cfg_2 = Config.configure_csv_data_node(
    id="input_csv_dataset_2", path=CSV_INPUT_PATH, has_header=True, exposed_type=Row
)
output_dataset_cfg_2 = Config.configure_csv_data_node(
    id="output_csv_dataset_2", path=CSV_OUTPUT_PATH, has_header=True, exposed_type=Row
)
task_cfg_2 = Config.configure_task(id="t2", input=input_dataset_cfg_2, function=algorithm, output=output_dataset_cfg_2)
scenario_cfg_2 = Config.configure_scenario(id="s2", task_configs=[task_cfg_2], frequency=Frequency.DAILY)

# Config for Numpy
input_dataset_cfg_3 = Config.configure_csv_data_node(
    id="input_csv_dataset_3", path=CSV_INPUT_PATH, has_header=True, exposed_type="numpy"
)
output_dataset_cfg_3 = Config.configure_csv_data_node(
    id="output_csv_dataset_3", path=CSV_OUTPUT_PATH, has_header=True, exposed_type="numpy"
)
task_cfg_3 = Config.configure_task(id="t3", input=input_dataset_cfg_3, function=algorithm, output=output_dataset_cfg_3)
scenario_cfg_3 = Config.configure_scenario(id="s3", task_configs=[task_cfg_3], frequency=Frequency.DAILY)

# Config for Modin
input_dataset_cfg_4 = Config.configure_csv_data_node(
    id="input_csv_dataset_4", path=CSV_INPUT_PATH, has_header=True, exposed_type="modin"
)
output_dataset_cfg_4 = Config.configure_csv_data_node(
    id="output_csv_dataset_4", path=CSV_OUTPUT_PATH, has_header=True, exposed_type="modin"
)
task_cfg_4 = Config.configure_task(id="t4", input=input_dataset_cfg_4, function=algorithm, output=output_dataset_cfg_4)
scenario_cfg_4 = Config.configure_scenario(id="s4", task_configs=[task_cfg_4], frequency=Frequency.DAILY)
|
from .algorithms import * from .config import *
|
import pandas as pd def algorithm(df: pd.DataFrame) -> pd.DataFrame: return df
|
import dataclasses

from taipy.config.common.frequency import Frequency
from taipy.config.config import Config

from .algorithms import algorithm

# Fixture paths, size, and sheet names for the multi-sheet Excel test cases.
EXCEL_INPUT_PATH = "tests/shared_test_cases/multi_excel_sheets/input_1000_multi_sheets.xlsx"
EXCEL_OUTPUT_PATH = "tests/shared_test_cases/multi_excel_sheets/output_1000.xlsx"
ROW_COUNT = 1000
SHEET_NAMES = ["Sheet 0", "Sheet 1", "Sheet 2", "Sheet 3", "Sheet 4", "Sheet 5"]


@dataclasses.dataclass
class Row:
    """One Excel record; ``__post_init__`` coerces each field to its declared type."""

    id: int
    age: int
    rating: float

    def __post_init__(self):
        # Cell values may arrive as strings; coerce them to the annotated types.
        for field in dataclasses.fields(self):
            value = getattr(self, field.name)
            if not isinstance(value, field.type):
                setattr(self, field.name, field.type(value))


# Re-enable Config updates before declaring the data nodes below.
Config.unblock_update()

# Scenario s1: default (pandas) exposed type.
input_dataset_cfg_1 = Config.configure_excel_data_node(
    id="input_excel_multi_sheet_dataset_1", path=EXCEL_INPUT_PATH, has_header=True, sheet_name=SHEET_NAMES
)
output_dataset_cfg_1 = Config.configure_excel_data_node(
    id="output_excel_multi_sheet_dataset_1", path=EXCEL_OUTPUT_PATH, has_header=True, sheet_name=SHEET_NAMES
)
task_cfg = Config.configure_task(id="t1", input=input_dataset_cfg_1, function=algorithm, output=output_dataset_cfg_1)
scenario_cfg = Config.configure_scenario(id="s1", task_configs=[task_cfg], frequency=Frequency.DAILY)

# Scenario s2: custom Row class as exposed type.
input_dataset_cfg_2 = Config.configure_excel_data_node(
    id="input_excel_multi_sheet_dataset_2",
    path=EXCEL_INPUT_PATH,
    has_header=True,
    exposed_type=Row,
    sheet_name=SHEET_NAMES,
)
output_dataset_cfg_2 = Config.configure_excel_data_node(
    id="output_excel_multi_sheet_dataset_2",
    path=EXCEL_OUTPUT_PATH,
    has_header=True,
    exposed_type=Row,
    sheet_name=SHEET_NAMES,
)
task_cfg_2 = Config.configure_task(id="t2", input=input_dataset_cfg_2, function=algorithm, output=output_dataset_cfg_2)
scenario_cfg_2 = Config.configure_scenario(id="s2", task_configs=[task_cfg_2], frequency=Frequency.DAILY)

# Scenario s3: numpy as exposed type.
input_dataset_cfg_3 = Config.configure_excel_data_node(
    id="input_excel_multi_sheet_dataset_3",
    path=EXCEL_INPUT_PATH,
    has_header=True,
    exposed_type="numpy",
    sheet_name=SHEET_NAMES,
)
output_dataset_cfg_3 = Config.configure_excel_data_node(
    id="output_excel_multi_sheet_dataset_3",
    path=EXCEL_OUTPUT_PATH,
    has_header=True,
    exposed_type="numpy",
    sheet_name=SHEET_NAMES,
)
task_cfg_3 = Config.configure_task(id="t3", input=input_dataset_cfg_3, function=algorithm, output=output_dataset_cfg_3)
scenario_cfg_3 = Config.configure_scenario(id="s3", task_configs=[task_cfg_3], frequency=Frequency.DAILY)

# Scenario s4: modin as exposed type.
input_dataset_cfg_4 = Config.configure_excel_data_node(
    id="input_excel_multi_sheet_dataset_4",
    path=EXCEL_INPUT_PATH,
    has_header=True,
    exposed_type="modin",
    sheet_name=SHEET_NAMES,
)
output_dataset_cfg_4 = Config.configure_excel_data_node(
    id="output_excel_multi_sheet_dataset_4",
    path=EXCEL_OUTPUT_PATH,
    has_header=True,
    exposed_type="modin",
    sheet_name=SHEET_NAMES,
)
task_cfg_4 = Config.configure_task(id="t4", input=input_dataset_cfg_4, function=algorithm, output=output_dataset_cfg_4)
scenario_cfg_4 = Config.configure_scenario(id="s4", task_configs=[task_cfg_4], frequency=Frequency.DAILY)
|
from .algorithms import * from .config import *
|
import pandas as pd def algorithm(df: pd.DataFrame) -> pd.DataFrame: return df
|
import dataclasses

from taipy.config.common.frequency import Frequency
from taipy.config.config import Config

from .algorithms import algorithm

# Fixture paths, size, and sheet name for the single-sheet Excel test cases.
EXCEL_SINGLE_SHEET_INPUT_PATH = "tests/shared_test_cases/single_excel_sheet/input_1000.xlsx"
EXCEL_SINGLE_SHEET_OUTPUT_PATH = "tests/shared_test_cases/single_excel_sheet/output_1000.xlsx"
ROW_COUNT = 1000
SHEET_NAME = "Sheet1"


@dataclasses.dataclass
class Row:
    """One Excel record; ``__post_init__`` coerces each field to its declared type."""

    id: int
    age: int
    rating: float

    def __post_init__(self):
        # Cell values may arrive as strings; coerce them to the annotated types.
        for field in dataclasses.fields(self):
            value = getattr(self, field.name)
            if not isinstance(value, field.type):
                setattr(self, field.name, field.type(value))


# Re-enable Config updates before declaring the data nodes below.
Config.unblock_update()

# Scenario s1: default (pandas) exposed type.
input_dataset_cfg_1 = Config.configure_excel_data_node(
    id="input_excel_single_sheet_dataset_1", path=EXCEL_SINGLE_SHEET_INPUT_PATH, has_header=True, sheet_name=SHEET_NAME
)
output_dataset_cfg_1 = Config.configure_excel_data_node(
    id="output_excel_single_sheet_dataset_1",
    path=EXCEL_SINGLE_SHEET_OUTPUT_PATH,
    has_header=True,
    sheet_name=SHEET_NAME,
)
task_cfg = Config.configure_task(id="t1", input=input_dataset_cfg_1, function=algorithm, output=output_dataset_cfg_1)
scenario_cfg = Config.configure_scenario(id="s1", task_configs=[task_cfg], frequency=Frequency.DAILY)

# Scenario s2: custom Row class as exposed type.
input_dataset_cfg_2 = Config.configure_excel_data_node(
    id="input_excel_single_sheet_dataset_2",
    path=EXCEL_SINGLE_SHEET_INPUT_PATH,
    has_header=True,
    exposed_type=Row,
    sheet_name=SHEET_NAME,
)
output_dataset_cfg_2 = Config.configure_excel_data_node(
    id="output_excel_single_sheet_dataset_2",
    path=EXCEL_SINGLE_SHEET_OUTPUT_PATH,
    has_header=True,
    exposed_type=Row,
    sheet_name=SHEET_NAME,
)
task_cfg_2 = Config.configure_task(id="t2", input=input_dataset_cfg_2, function=algorithm, output=output_dataset_cfg_2)
scenario_cfg_2 = Config.configure_scenario(id="s2", task_configs=[task_cfg_2], frequency=Frequency.DAILY)

# Scenario s3: numpy as exposed type.
input_dataset_cfg_3 = Config.configure_excel_data_node(
    id="input_excel_single_sheet_dataset_3",
    path=EXCEL_SINGLE_SHEET_INPUT_PATH,
    has_header=True,
    exposed_type="numpy",
    sheet_name=SHEET_NAME,
)
output_dataset_cfg_3 = Config.configure_excel_data_node(
    id="output_excel_single_sheet_dataset_3",
    path=EXCEL_SINGLE_SHEET_OUTPUT_PATH,
    has_header=True,
    exposed_type="numpy",
    sheet_name=SHEET_NAME,
)
task_cfg_3 = Config.configure_task(id="t3", input=input_dataset_cfg_3, function=algorithm, output=output_dataset_cfg_3)
scenario_cfg_3 = Config.configure_scenario(id="s3", task_configs=[task_cfg_3], frequency=Frequency.DAILY)

# Scenario s4: modin as exposed type.
input_dataset_cfg_4 = Config.configure_excel_data_node(
    id="input_excel_single_sheet_dataset_4",
    path=EXCEL_SINGLE_SHEET_INPUT_PATH,
    has_header=True,
    exposed_type="modin",
    sheet_name=SHEET_NAME,
)
output_dataset_cfg_4 = Config.configure_excel_data_node(
    id="output_excel_single_sheet_dataset_4",
    path=EXCEL_SINGLE_SHEET_OUTPUT_PATH,
    has_header=True,
    exposed_type="modin",
    sheet_name=SHEET_NAME,
)
task_cfg_4 = Config.configure_task(id="t4", input=input_dataset_cfg_4, function=algorithm, output=output_dataset_cfg_4)
scenario_cfg_4 = Config.configure_scenario(id="s4", task_configs=[task_cfg_4], frequency=Frequency.DAILY)
|
from .algorithms import * from .config import *
|
def algorithm(data): return data
|
import dataclasses from taipy.config.common.frequency import Frequency from taipy.config.config import Config from .algorithms import algorithm PICKLE_DICT_INPUT_PATH = "tests/shared_test_cases/pickle_files/input_dict_1000.p" PICKLE_DICT_OUTPUT_PATH = "tests/shared_test_cases/pickle_files/output_dict_1000.p" PICKLE_LIST_INPUT_PATH = "tests/shared_test_cases/pickle_files/input_object_1000.p" PICKLE_LIST_OUTPUT_PATH = "tests/shared_test_cases/pickle_files/output_object_1000.p" ROW_COUNT = 1000 @dataclasses.dataclass class Row: id: int age: int rating: float Config.unblock_update() input_dataset_cfg_1 = Config.configure_pickle_data_node(id="input_pickle_dataset_1", path=PICKLE_DICT_INPUT_PATH) output_dataset_cfg_1 = Config.configure_pickle_data_node(id="output_pickle_dataset_1", path=PICKLE_DICT_OUTPUT_PATH) task_cfg_1 = Config.configure_task(id="t1", input=input_dataset_cfg_1, function=algorithm, output=output_dataset_cfg_1) scenario_cfg_1 = Config.configure_scenario(id="s1", task_configs=[task_cfg_1], frequency=Frequency.DAILY) input_dataset_cfg_2 = Config.configure_pickle_data_node(id="input_pickle_dataset_2", path=PICKLE_LIST_INPUT_PATH) output_dataset_cfg_2 = Config.configure_pickle_data_node(id="output_pickle_dataset_2", path=PICKLE_LIST_OUTPUT_PATH) task_cfg_2 = Config.configure_task(id="t2", input=input_dataset_cfg_2, function=algorithm, output=output_dataset_cfg_2) scenario_cfg_2 = Config.configure_scenario(id="s2", task_configs=[task_cfg_2], frequency=Frequency.DAILY)
|
from .algorithms import * from .config import * from .utils import *
|
import pickle import random from tests.shared_test_cases.pickle_files import Row def gen_list_of_dict_input_pickle(path, n): data = [] for i in range(n): row = {"id": i + 1, "age": random.randint(10, 99), "rating": round(random.uniform(0, 10), 2)} data.append(row) pickle.dump(data, open(path, "wb")) def gen_list_of_objects_input_pickle(path, n): data = [] for i in range(n): row = Row(i + 1, random.randint(10, 99), round(random.uniform(0, 10), 2)) data.append(row) pickle.dump(data, open(path, "wb"))
|
def algorithm(data): return data
|
from taipy.config.common.frequency import Frequency from taipy.config.config import Config from .algorithms import algorithm from .utils import RowDecoder, RowEncoder JSON_DICT_INPUT_PATH = "tests/shared_test_cases/json_files/input_dict_1000.json" JSON_DICT_OUTPUT_PATH = "tests/shared_test_cases/json_files/output_dict_1000.json" JSON_OBJECT_INPUT_PATH = "tests/shared_test_cases/json_files/input_object_1000.json" JSON_OBJECT_OUTPUT_PATH = "tests/shared_test_cases/json_files/output_object_1000.json" ROW_COUNT = 1000 Config.unblock_update() input_dataset_cfg_1 = Config.configure_json_data_node(id="input_json_dataset_1", path=JSON_DICT_INPUT_PATH) output_dataset_cfg_1 = Config.configure_json_data_node(id="output_json_dataset_1", path=JSON_DICT_OUTPUT_PATH) task_cfg_1 = Config.configure_task(id="t1", input=input_dataset_cfg_1, function=algorithm, output=output_dataset_cfg_1) scenario_cfg_1 = Config.configure_scenario(id="s1", task_configs=[task_cfg_1], frequency=Frequency.DAILY) input_dataset_cfg_2 = Config.configure_json_data_node( id="input_json_dataset_2", path=JSON_OBJECT_INPUT_PATH, decoder=RowDecoder ) output_dataset_cfg_2 = Config.configure_json_data_node( id="output_json_dataset_2", path=JSON_OBJECT_OUTPUT_PATH, encoder=RowEncoder, decoder=RowDecoder ) task_cfg_2 = Config.configure_task(id="t2", input=input_dataset_cfg_2, function=algorithm, output=output_dataset_cfg_2) scenario_cfg_2 = Config.configure_scenario(id="s2", task_configs=[task_cfg_2], frequency=Frequency.DAILY)
|
from .algorithms import * from .config import * from .utils import *
|
import json import random import time from dataclasses import dataclass @dataclass class Row: id: int age: int rating: float class RowEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, Row): return {"id": obj.id, "age": obj.age, "rating": obj.rating, "__type__": "Row"} return json.JSONEncoder.default(self, obj) class RowDecoder(json.JSONDecoder): def __init__(self, *args, **kwargs): json.JSONDecoder.__init__(self, object_hook=self.object_hook, *args, **kwargs) def object_hook(self, d): if "__type__" in d and d["__type__"] == "Row": return Row(d["id"], d["age"], d["rating"]) def timer(f): def wrapper(*args, **kwargs): print(f"⏳ {f.__name__}") start = time.time() result = f(*args, **kwargs) end = time.time() elapsed = round(end - start, 4) print(f"\t✔️ {elapsed} seconds") return result return wrapper def get_list_of_dicts(n): data = [] for i in range(n): row = {"id": i + 1, "age": random.randint(10, 99), "rating": round(random.uniform(0, 10), 2)} data.append(row) return data def get_list_of_objects(n): data = [] for i in range(n): row = Row(i + 1, random.randint(10, 99), round(random.uniform(0, 10), 2)) data.append(row) return data
|
from unittest.mock import patch

import taipy.core.taipy as tp
from taipy import Config
from taipy.core import Core
from taipy.core.config import JobConfig
from taipy.core.job.status import Status
from tests.utils import assert_true_after_time


def mult_by_2(a):
    """Identity task function used in the skip-jobs pipeline.

    NOTE(review): despite the name, this returns its input unchanged; the
    tests below only assert job statuses, never values — confirm before
    changing it to actually multiply.
    """
    return a


def build_skipped_jobs_config():
    """Build a two-task chain (input -> intermediate -> output), both tasks skippable."""
    input_config = Config.configure_data_node(id="input")
    intermediate_config = Config.configure_data_node(id="intermediate")
    output_config = Config.configure_data_node(id="output")
    task_config_1 = Config.configure_task("first", mult_by_2, input_config, intermediate_config, skippable=True)
    task_config_2 = Config.configure_task("second", mult_by_2, intermediate_config, output_config, skippable=True)
    scenario_config = Config.configure_scenario("scenario", task_configs=[task_config_1, task_config_2])
    return scenario_config


class TestSkipJobs:
    """Verify skippable tasks are skipped on resubmission across repo/mode combinations."""

    @staticmethod
    def __test():
        # Shared body: the first submission completes both jobs; a second
        # submission with unchanged inputs must skip both.
        scenario_config = build_skipped_jobs_config()
        with patch("sys.argv", ["prog"]):
            Core().run()
        scenario = tp.create_scenario(scenario_config)
        scenario.input.write(2)
        scenario.submit()
        assert len(tp.get_jobs()) == 2
        for job in tp.get_jobs():
            assert_true_after_time(job.is_completed, msg=f"job {job.id} is not completed. Status: {job.status}")
        scenario.submit()
        assert len(tp.get_jobs()) == 4
        skipped = []
        for job in tp.get_jobs():
            if job.status != Status.COMPLETED:
                assert_true_after_time(job.is_skipped, msg=f"job {job.id} is not skipped. Status: {job.status}")
                skipped.append(job)
        # Exactly the two jobs of the second submission are skipped.
        assert len(skipped) == 2

    def test_development_fs_repo(self):
        self.__test()

    def test_development_sql_repo(self, tmp_sqlite):
        Config.configure_global_app(repository_type="sql", repository_properties={"db_location": tmp_sqlite})
        self.__test()

    def test_standalone_fs_repo(self):
        Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
        self.__test()

    def test_standalone_sql_repo(self, tmp_sqlite):
        Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
        Config.configure_global_app(repository_type="sql", repository_properties={"db_location": tmp_sqlite})
        self.__test()
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.

import os
from unittest.mock import patch

import pandas as pd

import taipy.core.taipy as tp
from taipy import Config
from taipy.core import Core
from taipy.core.config import JobConfig
from tests.test_complex.utils.algos import average
from tests.test_complex.utils.config_builders import build_complex_config, build_complex_required_file_paths
from tests.utils import assert_true_after_time


class TestComplexApp:
    """Run the complex CSV/Excel pipeline end-to-end under each repo/mode combination."""

    @staticmethod
    def __test():
        # Shared body: run the scenario, wait for completion, then check the
        # numeric contents of the four produced files.
        _, _, csv_path_sum, excel_path_sum, excel_path_out, csv_path_out = build_complex_required_file_paths()
        scenario_config = build_complex_config()
        with patch("sys.argv", ["prog"]):
            Core().run(force_restart=True)
        scenario = tp.create_scenario(scenario_config)
        jobs = tp.submit(scenario)
        for job in jobs:
            assert_true_after_time(job.is_completed, msg=f"job {job.id} is not completed. Status: {job.status}.")
        csv_sum_res = pd.read_csv(csv_path_sum)
        excel_sum_res = pd.read_excel(excel_path_sum)
        csv_out = pd.read_csv(csv_path_out)
        excel_out = pd.read_excel(excel_path_out)
        # Expected values per the pipeline built by build_complex_config.
        assert csv_sum_res.to_numpy().flatten().tolist() == [i * 20 for i in range(1, 11)]
        assert excel_sum_res.to_numpy().flatten().tolist() == [i * 2 for i in range(1, 11)]
        assert average(csv_sum_res["number"] - excel_sum_res["number"]) == csv_out.to_numpy()[0]
        assert average((csv_sum_res["number"] - excel_sum_res["number"]) * 10) == excel_out.to_numpy()[0]
        # Clean up the produced files so subsequent runs start fresh.
        for path in [csv_path_sum, excel_path_sum, csv_path_out, excel_path_out]:
            os.remove(path)

    def test_development_fs_repo(self):
        self.__test()

    def test_development_sql_repo(self, tmp_sqlite):
        Config.configure_global_app(repository_type="sql", repository_properties={"db_location": tmp_sqlite})
        self.__test()

    def test_standalone_fs_repo(self):
        Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
        self.__test()

    def test_standalone_sql_repo(self, tmp_sqlite):
        Config.configure_global_app(repository_type="sql", repository_properties={"db_location": tmp_sqlite})
        Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
        self.__test()
|
from unittest.mock import patch import taipy.core.taipy as tp from taipy import Config from taipy.core import Core from taipy.core.config import JobConfig from tests.test_complex.utils.config_builders import build_churn_classification_config from tests.utils import assert_true_after_time class TestChurnClassification: @staticmethod def __test(): scenario_cfg = build_churn_classification_config() with patch("sys.argv", ["prog"]): Core().run(force_restart=True) scenario = tp.create_scenario(scenario_cfg) jobs = tp.submit(scenario) for job in jobs: assert_true_after_time( job.is_completed, msg=f"job {job.id} is not completed. Status: {job.status}.", time=30 ) def test_development_fs_repo(self): self.__test() def test_development_sql_repo(self, tmp_sqlite): Config.configure_global_app(repository_type="sql", repository_properties={"db_location": tmp_sqlite}) self.__test() def test_standalone_fs_repo(self): Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) self.__test() def test_standalone_sql_repo(self, tmp_sqlite): Config.configure_global_app(repository_type="sql", repository_properties={"db_location": tmp_sqlite}) Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2) self.__test()
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
# Smoke-test script: starts the Core service, lets the Gui parse its own CLI
# arguments, then prints the effective core and GUI configuration values so an
# external harness can check how arguments were resolved.
from taipy import Config, Core, Gui

if __name__ == "__main__":
    core = Core()
    core.run()
    gui = Gui()
    # Parse taipy's command-line options into gui._config.config without
    # actually starting the web server.
    gui._config._handle_argparse()
    print(f"Config.core.version_number: {Config.core.version_number}")
    print(f"Config.core.mode: {Config.core.mode}")
    print(f"Config.core.force: {Config.core.force}")
    print(f"Config.gui_config.host: {gui._config.config.get('host', None)}")
    print(f"Config.gui_config.port: {gui._config.config.get('port', None)}")
    print(f"Config.gui_config.debug: {gui._config.config.get('debug', None)}")
    print(f"Config.gui_config.use_reloader: {gui._config.config.get('use_reloader', None)}")
    print(f"Config.gui_config.ngrok_token: {gui._config.config.get('ngrok_token', None)}")
    print(f"Config.gui_config.webapp_path: {gui._config.config.get('webapp_path', None)}")
|
# Harness script checking that user-defined argparse options can coexist with
# taipy's own CLI options: both parsers read sys.argv and each should get the
# values it owns (conflicting flags like --host/--port go to both).
import argparse

from taipy import Config, Core, Gui

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--mode", dest="mode", type=str, default="training")
    parser.add_argument("--force", type=str, default="no")
    parser.add_argument("--host", dest="host", type=str, default="user_default_host.com")
    parser.add_argument("--port", type=int, default=8080)
    parser.add_argument("--non-conflict-arg", type=str, default="")
    core = Core()
    core.run()
    gui = Gui()
    # Let taipy consume its known options first...
    gui._config._handle_argparse()
    # ...then parse the user's options, ignoring anything unknown.
    user_args, _ = parser.parse_known_args()
    print(f"Config.core.mode: {Config.core.mode}")
    print(f"User provided mode: {user_args.mode}")
    print(f"Config.core.force: {Config.core.force}")
    print(f"User provided force: {user_args.force}")
    print(f"Config.gui_config.host: {gui._config.config.get('host', None)}")
    print(f"User provided host: {user_args.host}")
    print(f"Config.gui_config.port: {gui._config.config.get('port', None)}")
    print(f"User provided port: {user_args.port}")
    print(f"User provided non-conflict-arg: {user_args.non_conflict_arg}")
|
# Taipy GUI application clustering 1M synthetic points with Dask-ML K-Means.
# An initial clustering is computed at import time; the button re-runs the
# clustering through a Taipy scenario with the slider's cluster count.
import taipy as tp
from taipy.gui import Gui, notify
from taipy.config import Config
import dask_ml.datasets
import dask_ml.cluster
import pandas as pd

# Number of blob centers and of K-Means clusters for the initial run.
n_clusters = 3

data = dask_ml.datasets.make_blobs(
    n_samples=1000000, chunks=1000000, random_state=0, centers=n_clusters
)
X, _ = data
km = dask_ml.cluster.KMeans(n_clusters=n_clusters)
km.fit(X)

# Downsample 1-in-1000 points for the chart.
visual_data = pd.DataFrame(
    {"x": X[::1000, 0], "y": X[::1000, 1], "color": km.labels_[::1000]}
)

# The scenario pipeline (data generation + fit) is defined in config.toml.
Config.load("config.toml")
scenario_object = Config.scenarios["scenario"]


def on_button(state):
    """Button callback: run the K-Means scenario with the slider's value."""
    notify(state, "info", "Running K-Means...")
    scenario = tp.create_scenario(scenario_object)
    # NOTE(review): the slider value is written to both the "centers" and
    # "n_clusters" data nodes — presumably the synthetic data uses one blob
    # center per requested cluster; confirm against config.toml.
    scenario.centers.write(state.n_clusters)
    scenario.n_clusters.write(state.n_clusters)
    tp.submit(scenario)
    state.X = scenario.dataset.read()
    state.km = scenario.km.read()
    state.visual_data = pd.DataFrame(
        {
            "x": state.X[::1000, 0],
            "y": state.X[::1000, 1],
            "color": state.km.labels_[::1000],
        }
    )
    notify(state, "success", "Done!")


page = """
# Scaling K-Means with **Dask**{: .color-secondary} and **Taipy**{: .color-primary}

Number of clusters: <|{n_clusters}|slider|min=1|max=10|>

<|Run K-Means|button|on_action=on_button|>

<|{visual_data}|chart|mode=markers|x=x|y=y|color=color|rebuild|>
"""

tp.Core().run()
Gui(page).run()
|
# Task functions for the Dask K-Means scenario.
import dask
import dask_ml.cluster  # explicit import: importing dask_ml.datasets alone does not load the cluster submodule
import dask_ml.datasets


def generate_data(centers: int):
    """
    Generates synthetic data for clustering.

    Args:
    - centers (int): number of clusters to generate

    Returns:
    - X (dask.array): array of shape (n_samples, n_features)
    """
    X, _ = dask_ml.datasets.make_blobs(
        n_samples=1000000, chunks=1000000, random_state=0, centers=centers
    )
    # persist() keeps the generated blocks in memory so the downstream fit
    # does not regenerate them lazily.
    return X.persist()


def fit(X: dask.array, n_clusters: int):
    """
    Fit a k-means clustering model.

    Args:
    - X (dask.array): array of shape (n_samples, n_features)
    - n_clusters (int): number of clusters to fit

    Returns:
    - km (dask_ml.cluster.KMeans): k-means clustering model
    """
    km = dask_ml.cluster.KMeans(n_clusters=n_clusters)
    km.fit(X)
    return km
|
#!/usr/bin/env python

"""The setup script."""

import json
import os
from pathlib import Path

from setuptools import find_namespace_packages, find_packages, setup
from setuptools.command.build_py import build_py

readme = Path("README.md").read_text()

# version.json carries {"major", "minor", "patch"} plus an optional "ext"
# pre-release suffix appended as a fourth dotted component.
with open(f"src{os.sep}taipy{os.sep}gui{os.sep}version.json") as version_file:
    version = json.load(version_file)
    version_string = f'{version.get("major", 0)}.{version.get("minor", 0)}.{version.get("patch", 0)}'
    if vext := version.get("ext"):
        version_string = f"{version_string}.{vext}"

requirements = [
    "flask>=3.0.0,<3.1",
    "flask-cors>=4.0.0,<5.0",
    "flask-socketio>=5.3.6,<6.0",
    "markdown>=3.4.4,<4.0",
    "pandas>=2.0.0,<3.0",
    "python-dotenv>=1.0.0,<1.1",
    "pytz>=2021.3,<2022.2",
    "tzlocal>=3.0,<5.0",
    "backports.zoneinfo>=0.2.1,<0.3;python_version<'3.9'",
    "gevent>=23.7.0,<24.0",
    "gevent-websocket>=0.10.1,<0.11",
    "kthread>=0.2.3,<0.3",
    "taipy-config@git+https://git@github.com/Avaiga/taipy-config.git@develop",
    "gitignore-parser>=0.1,<0.2",
    "simple-websocket>=0.10.1,<1.0",
    "twisted>=23.8.0,<24.0",
]

test_requirements = ["pytest>=3.8"]

extras_require = {
    "ngrok": ["pyngrok>=5.1,<6.0"],
    "image": [
        "python-magic>=0.4.24,<0.5;platform_system!='Windows'",
        "python-magic-bin>=0.4.14,<0.5;platform_system=='Windows'",
    ],
    "arrow": ["pyarrow>=10.0.1,<11.0"],
}


def _build_webapp():
    # Build the JS front-end only when the bundled webapp is missing, so a
    # source build triggers npm but a prebuilt sdist/wheel does not.
    already_exists = Path("./src/taipy/gui/webapp/index.html").exists()
    if not already_exists:
        os.system("cd frontend/taipy-gui/dom && npm ci")
        os.system("cd frontend/taipy-gui && npm ci --omit=optional && npm run build")


class NPMInstall(build_py):
    # build_py hook that ensures the webapp is built before Python packaging.
    def run(self):
        _build_webapp()
        build_py.run(self)


setup(
    author="Avaiga",
    author_email="dev@taipy.io",
    python_requires=">=3.8",
    classifiers=[
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Natural Language :: English",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
    ],
    description="Low-code library to create graphical user interfaces on the Web for your Python applications.",
    long_description=readme,
    long_description_content_type="text/markdown",
    install_requires=requirements,
    license="Apache License 2.0",
    include_package_data=True,
    keywords="taipy-gui",
    name="taipy-gui",
    package_dir={"": "src"},
    packages=find_namespace_packages(where="src") + find_packages(include=["taipy", "taipy.gui", "taipy.gui.*"]),
    test_suite="tests",
    tests_require=test_requirements,
    url="https://github.com/avaiga/taipy-gui",
    version=version_string,
    zip_safe=False,
    extras_require=extras_require,
    cmdclass={"build_py": NPMInstall},
)
|
# ############################################################
# Generate Python interface definition files
# ############################################################
import json
import os
import typing as t

# ############################################################
# Generate gui pyi file (gui/gui.pyi)
# ############################################################
gui_py_file = "./src/taipy/gui/gui.py"
gui_pyi_file = gui_py_file + "i"

# stubgen produces a skeleton .pyi next to the source file.
os.system(f"pipenv run stubgen {gui_py_file} --no-import --parse-only --export-less -o ./")

from src.taipy.gui.config import Config

# Expand Gui.run()'s **kwargs into one keyword argument per Config entry,
# using the class name for concrete types and a shortened typing repr for
# generic ones.
gui_config = "".join(
    f", {k}: {v.__name__} = ..."
    if "<class" in str(v)
    else f", {k}: {str(v).replace('typing', 't').replace('src.taipy.gui.config.', '')} = ..."
    for k, v in Config.__annotations__.items()
)

# Rewrite the stubgen-generated run() signature in place.
replaced_content = ""
with open(gui_pyi_file, "r") as file:
    for line in file:
        if "def run(" in line:
            line = line.replace(
                ", run_server: bool = ..., run_in_thread: bool = ..., async_mode: str = ..., **kwargs", gui_config
            )
        replaced_content = replaced_content + line

with open(gui_pyi_file, "w") as write_file:
    write_file.write(replaced_content)

# ############################################################
# Generate Page Builder pyi file (gui/builder/__init__.pyi)
# ############################################################
builder_py_file = "./src/taipy/gui/builder/__init__.py"
builder_pyi_file = builder_py_file + "i"

with open("./src/taipy/gui/viselements.json", "r") as file:
    viselements = json.load(file)
with open("./tools/builder/block.txt", "r") as file:
    block_template = file.read()
with open("./tools/builder/control.txt", "r") as file:
    control_template = file.read()

os.system(f"pipenv run stubgen {builder_py_file} --no-import --parse-only --export-less -o ./")

with open(builder_pyi_file, "a") as file:
    file.write("from ._element import _Element, _Block\n")


def get_properties(element, viselements) -> t.List[t.Dict[str, t.Any]]:
    # Collect the element's own properties plus, recursively, those of every
    # element it inherits from (searched in undocumented, blocks, controls).
    properties = element["properties"]
    if "inherits" not in element:
        return properties
    for inherit in element["inherits"]:
        inherit_element = next((e for e in viselements["undocumented"] if e[0] == inherit), None)
        if inherit_element is None:
            inherit_element = next((e for e in viselements["blocks"] if e[0] == inherit), None)
        if inherit_element is None:
            inherit_element = next((e for e in viselements["controls"] if e[0] == inherit), None)
        if inherit_element is None:
            raise RuntimeError(f"Can't find element with name {inherit}")
        properties += get_properties(inherit_element[1], viselements)
    return properties


def build_doc(element: t.Dict[str, t.Any]):
    # Format one property's docstring line; indent continuation lines so the
    # rendered stub docstring stays aligned.
    if "doc" not in element:
        return ""
    doc = str(element["doc"]).replace("\n", f'\n{16*" "}')
    return f"{element['name']} ({element['type']}): {doc} {'(default: '+element['default_value'] + ')' if 'default_value' in element else ''}"


# Emit one stub class per control, deduplicating properties and skipping
# indexed property names (those containing "[").
for control_element in viselements["controls"]:
    name = control_element[0]
    property_list = []
    property_names = []
    for property in get_properties(control_element[1], viselements):
        if property["name"] not in property_names and "[" not in property["name"]:
            property_list.append(property)
            property_names.append(property["name"])
    properties = ", ".join([f"{p} = ..." for p in property_names])
    doc_arguments = f"\n{12*' '}".join([build_doc(p) for p in property_list])
    # append properties to __init__.pyi
    with open(builder_pyi_file, "a") as file:
        file.write(
            control_template.replace("{{name}}", name)
            .replace("{{properties}}", properties)
            .replace("{{doc_arguments}}", doc_arguments)
        )

# Same generation pass for block elements, using the block template.
for block_element in viselements["blocks"]:
    name = block_element[0]
    property_list = []
    property_names = []
    for property in get_properties(block_element[1], viselements):
        if property["name"] not in property_names and "[" not in property["name"]:
            property_list.append(property)
            property_names.append(property["name"])
    properties = ", ".join([f"{p} = ..." for p in property_names])
    doc_arguments = f"{8*' '}".join([build_doc(p) for p in property_list])
    # append properties to __init__.pyi
    with open(builder_pyi_file, "a") as file:
        file.write(
            block_template.replace("{{name}}", name)
            .replace("{{properties}}", properties)
            .replace("{{doc_arguments}}", doc_arguments)
        )

# Normalize the generated stubs with the project's formatters.
os.system(f"pipenv run isort {gui_pyi_file}")
os.system(f"pipenv run black {gui_pyi_file}")
os.system(f"pipenv run isort {builder_pyi_file}")
os.system(f"pipenv run black {builder_pyi_file}")
|
# Pytest plumbing for e2e tests: two CLI options and the session fixtures
# exposing their values.
import pytest


def pytest_addoption(parser):
    """Register the e2e command-line options on pytest's parser."""
    option_specs = (
        ("--e2e-base-url", "/", "base url for e2e testing"),
        ("--e2e-port", "5000", "port for e2e testing"),
    )
    for flag, default_value, help_text in option_specs:
        parser.addoption(flag, action="store", default=default_value, help=help_text)


@pytest.fixture(scope="session")
def e2e_base_url(request):
    """Base URL passed via --e2e-base-url (defaults to "/")."""
    return request.config.getoption("--e2e-base-url")


@pytest.fixture(scope="session")
def e2e_port(request):
    """Port passed via --e2e-port (defaults to "5000")."""
    return request.config.getoption("--e2e-port")
|
"""Unit test package for taipy."""
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
import os
import sys
from importlib.util import find_spec
from pathlib import Path

import pandas as pd  # type: ignore
import pytest
from flask import Flask, g


def pytest_configure(config):
    # When running from the editable ./src layout and no installed "taipy"
    # package shadows it, alias each src.taipy.gui module under the public
    # "taipy.gui" name so both import paths resolve to the same modules.
    if (find_spec("src") and find_spec("src.taipy")) and (not find_spec("taipy") or not find_spec("taipy.gui")):
        import src.taipy.gui
        import src.taipy.gui._renderers.builder
        import src.taipy.gui._warnings
        import src.taipy.gui.builder
        import src.taipy.gui.data.decimator.lttb
        import src.taipy.gui.data.decimator.minmax
        import src.taipy.gui.data.decimator.rdp
        import src.taipy.gui.data.decimator.scatter_decimator
        import src.taipy.gui.data.utils
        import src.taipy.gui.extension
        import src.taipy.gui.utils._map_dict
        import src.taipy.gui.utils._variable_directory
        import src.taipy.gui.utils.expr_var_name

        sys.modules["taipy.gui._warnings"] = sys.modules["src.taipy.gui._warnings"]
        sys.modules["taipy.gui._renderers.builder"] = sys.modules["src.taipy.gui._renderers.builder"]
        sys.modules["taipy.gui.utils._variable_directory"] = sys.modules["src.taipy.gui.utils._variable_directory"]
        sys.modules["taipy.gui.utils.expr_var_name"] = sys.modules["src.taipy.gui.utils.expr_var_name"]
        sys.modules["taipy.gui.utils._map_dict"] = sys.modules["src.taipy.gui.utils._map_dict"]
        sys.modules["taipy.gui.extension"] = sys.modules["src.taipy.gui.extension"]
        sys.modules["taipy.gui.data.utils"] = sys.modules["src.taipy.gui.data.utils"]
        sys.modules["taipy.gui.data.decimator.lttb"] = sys.modules["src.taipy.gui.data.decimator.lttb"]
        sys.modules["taipy.gui.data.decimator.rdp"] = sys.modules["src.taipy.gui.data.decimator.rdp"]
        sys.modules["taipy.gui.data.decimator.minmax"] = sys.modules["src.taipy.gui.data.decimator.minmax"]
        sys.modules["taipy.gui.data.decimator.scatter_decimator"] = sys.modules[
            "src.taipy.gui.data.decimator.scatter_decimator"
        ]
        sys.modules["taipy.gui"] = sys.modules["src.taipy.gui"]
        sys.modules["taipy.gui.builder"] = sys.modules["src.taipy.gui.builder"]


# Shared fixture data, loaded once at collection time.
csv = pd.read_csv(
    f"{Path(Path(__file__).parent.resolve())}{os.path.sep}current-covid-patients-hospital.csv", parse_dates=["Day"]
)
small_dataframe_data = {"name": ["A", "B", "C"], "value": [1, 2, 3]}


@pytest.fixture(scope="function")
def csvdata():
    yield csv


@pytest.fixture(scope="function")
def small_dataframe():
    yield small_dataframe_data


@pytest.fixture(scope="function")
def gui(helpers):
    # Local import so the pytest_configure aliasing above runs first.
    from taipy.gui import Gui

    gui = Gui()
    yield gui
    # Delete Gui instance and state of some classes after each test
    gui.stop()
    helpers.test_cleanup()


@pytest.fixture
def helpers():
    from .helpers import Helpers

    return Helpers


@pytest.fixture
def test_client():
    flask_app = Flask("Test App")

    # Create a test client using the Flask application configured for testing
    with flask_app.test_client() as testing_client:
        # Establish an application context
        with flask_app.app_context():
            g.client_id = "test client id"
            yield testing_client  # this is where the testing happens!
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
import inspect

from taipy.gui import Gui, Html


def test_simple_html(gui: Gui, helpers):
    """Rendering an Html page should emit only the <body> content as JSX."""
    # html_string = "<html><head></head><body><h1>test</h1><taipy:field value=\"test\"/></body></html>"
    html_string = "<html><head></head><body><h1>test</h1></body></html>"
    gui._set_frame(inspect.currentframe())
    gui.add_page("test", Html(html_string))
    gui.run(run_server=False)
    client = gui._server.test_client()
    jsx = client.get("/taipy-jsx/test").json["jsx"]
    # The surrounding <html>/<head>/<body> wrappers are stripped.
    assert jsx == "<h1>test</h1>"
|
# Tests of the Markdown parser's error handling: each malformed snippet must
# degrade gracefully into the expected fallback JSX fragments.
# (Removed unused `import pytest` — no pytest API is referenced here.)
from taipy.gui import Gui


def test_invalid_control_name(gui: Gui, helpers):
    # Unknown control names render an explicit syntax-error marker.
    md_string = "<|invalid|invalid|>"
    expected_list = ["INVALID SYNTAX - Control is 'invalid'"]
    helpers.test_control_md(gui, md_string, expected_list)


def test_value_to_negated_property(gui: Gui, helpers):
    # "not <prop>=true" inverts the property value.
    md_string = "<|button|not active=true|>"
    expected_list = ["<Button", "active={false}"]
    helpers.test_control_md(gui, md_string, expected_list)


def test_invalid_property_value(gui: Gui, helpers):
    # An unparsable first fragment falls back to an empty button label.
    md_string = "<|button|let's try that!|>"
    expected_list = ["<Button", 'label="<Empty>"']
    helpers.test_control_md(gui, md_string, expected_list)


def test_unclosed_block(gui: Gui, helpers):
    # An unclosed block tag is auto-closed as a Part.
    md_string = "<|"
    expected_list = ["<Part", "</Part>"]
    helpers.test_control_md(gui, md_string, expected_list)


def test_opening_unknown_block(gui: Gui, helpers):
    # An unknown block name becomes a Part with that className.
    md_string = "<|unknown"
    expected_list = ["<Part", 'className="unknown"']
    helpers.test_control_md(gui, md_string, expected_list)


def test_closing_unknown_block(gui: Gui, helpers):
    # A dangling closer produces a diagnostic div.
    md_string = "|>"
    expected_list = ["<div>", "No matching opened tag", "</div>"]
    helpers.test_control_md(gui, md_string, expected_list)


def test_md_link(gui: Gui, helpers):
    # Standard Markdown links still render as anchors.
    md_string = "[content](link)"
    expected_list = ["<a", 'href="link"', "content</a>"]
    helpers.test_control_md(gui, md_string, expected_list)
|
import pytest

from taipy.gui.utils._bindings import _Bindings


def test_exception_binding_twice(gui, test_client):
    # Binding the same variable name twice in one scope must raise ValueError.
    bind = _Bindings(gui)
    bind._new_scopes()
    bind._bind("x", 10)
    with pytest.raises(ValueError):
        bind._bind("x", 10)


def test_exception_binding_invalid_name(gui):
    # A name that is not a valid Python identifier must be rejected.
    bind = _Bindings(gui)
    bind._new_scopes()
    with pytest.raises(ValueError):
        bind._bind("invalid identifier", 10)
|
# Removed the stray `from email import message` — an accidental (likely
# auto-completed) import that nothing in this file uses.
import pytest

from taipy.gui._page import _Page


def test_exception_page(gui):
    """Rendering a _Page that has a route but no renderer must raise."""
    page = _Page()
    page._route = "page1"
    with pytest.raises(RuntimeError, match="Can't render page page1: no renderer found"):
        page.render(gui)
|
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
import os
import tempfile
from unittest.mock import patch

import pytest

from taipy.config import Config
from taipy.config._config import _Config
from taipy.config._serializer._toml_serializer import _TomlSerializer
from taipy.config.checker._checker import _Checker
from taipy.config.checker.issue_collector import IssueCollector
from taipy.gui import Gui


class NamedTemporaryFile:
    # Re-readable temp file: written and closed at construction (delete=False
    # so it survives the close), removed when garbage-collected.
    def __init__(self, content=None):
        with tempfile.NamedTemporaryFile("w", delete=False) as fd:
            if content:
                fd.write(content)
            self.filename = fd.name

    def read(self):
        with open(self.filename, "r") as fp:
            return fp.read()

    def __del__(self):
        # Best-effort cleanup of the on-disk file.
        os.unlink(self.filename)


def init_config():
    # Reset the global Config singleton to pristine state, then re-inject the
    # [gui] section so configure_gui() is available again.
    Config.unblock_update()
    Config._default_config = _Config()._default_config()
    Config._python_config = _Config()
    Config._file_config = None
    Config._env_file_config = None
    Config._applied_config = _Config._default_config()
    Config._collector = IssueCollector()
    Config._serializer = _TomlSerializer()
    _Checker._checkers = []

    from taipy.config import _inject_section
    from taipy.gui._default_config import default_config
    from taipy.gui._gui_section import _GuiSection

    _inject_section(
        _GuiSection,
        "gui_config",
        _GuiSection(property_list=list(default_config)),
        [("configure_gui", _GuiSection._configure)],
        add_to_unconflicted_sections=True,
    )


@pytest.fixture(scope="function", autouse=True)
def cleanup_test(helpers):
    # Fresh Config before AND after every test in this module.
    init_config()
    helpers.test_cleanup()
    yield
    init_config()
    helpers.test_cleanup()


def test_gui_service_arguments_hierarchy():
    # Precedence, weakest to strongest:
    # defaults < Gui.run() kwargs < Config.configure_gui() < TOML [gui] < CLI.
    # Test default configuration
    gui = Gui()
    gui.run(run_server=False)
    service_config = gui._config.config
    assert not service_config["allow_unsafe_werkzeug"]
    assert service_config["async_mode"] == "gevent"
    assert service_config["change_delay"] is None
    assert service_config["chart_dark_template"] is None
    assert service_config["dark_mode"]
    assert service_config["dark_theme"] is None
    assert not service_config["debug"]
    assert not service_config["extended_status"]
    assert service_config["favicon"] is None
    assert not service_config["flask_log"]
    assert service_config["host"] == "127.0.0.1"
    assert service_config["light_theme"] is None
    assert service_config["margin"] is None
    assert service_config["ngrok_token"] == ""
    assert service_config["notification_duration"] == 3000
    assert service_config["propagate"]
    assert service_config["run_browser"]
    assert not service_config["run_in_thread"]
    assert not service_config["run_server"]
    assert not service_config["single_client"]
    assert not service_config["system_notification"]
    assert service_config["theme"] is None
    assert service_config["time_zone"] is None
    assert service_config["title"] is None
    assert service_config["upload_folder"] is None
    assert not service_config["use_arrow"]
    assert not service_config["use_reloader"]
    assert service_config["watermark"] == "Taipy inside"
    assert service_config["webapp_path"] is None
    assert service_config["port"] == 5000
    gui.stop()
    # Override default configuration by explicit defined arguments in Gui.run()
    gui = Gui()
    gui.run(run_server=False, watermark="", host="my_host", port=5001)
    service_config = gui._config.config
    assert service_config["watermark"] == ""
    assert service_config["host"] == "my_host"
    assert service_config["port"] == 5001
    gui.stop()
    # Override Gui.run() arguments by explicit defined arguments in Config.configure_gui()
    Config.configure_gui(dark_mode=False, host="my_2nd_host", port=5002)
    gui = Gui()
    gui.run(run_server=False, watermark="", host="my_host", port=5001)
    service_config = gui._config.config
    assert not service_config["dark_mode"]
    assert service_config["host"] == "my_2nd_host"
    assert service_config["watermark"] == ""
    assert service_config["port"] == 5002
    gui.stop()
    # Override Config.configure_gui() arguments by loading a TOML file with [gui] section
    toml_config = NamedTemporaryFile(
        content="""
[TAIPY]

[gui]
host = "my_3rd_host"
port = 5003
use_reloader = "true:bool"
"""
    )
    Config.load(toml_config.filename)
    gui = Gui()
    gui.run(run_server=False, host="my_host", port=5001)
    service_config = gui._config.config
    assert service_config["host"] == "my_3rd_host"
    assert service_config["port"] == 5003
    assert service_config["use_reloader"]
    gui.stop()
    # Override TOML configuration file with CLI arguments
    with patch("sys.argv", ["prog", "--host", "my_4th_host", "--port", "5004", "--no-reloader", "--debug"]):
        gui = Gui()
        gui.run(run_server=False, host="my_host", port=5001)
        service_config = gui._config.config
        assert service_config["host"] == "my_4th_host"
        assert service_config["port"] == 5004
        assert not service_config["use_reloader"]
        assert service_config["debug"]
        gui.stop()


def test_clean_config():
    gui_config = Config.configure_gui(dark_mode=False)
    assert Config.gui_config is gui_config
    gui_config._clean()
    # Check if the instance before and after _clean() is the same
    assert Config.gui_config is gui_config
    assert gui_config.dark_mode is None
    assert gui_config.properties == {}
|
# Removed unused `import pytest` — this module uses no pytest API directly.
import inspect
import warnings

from taipy.gui import Gui


def test_no_ignore_file(gui: Gui):
    """Without a .taipyignore file, static resources are served normally."""
    with warnings.catch_warnings(record=True):
        gui._set_frame(inspect.currentframe())
        gui.run(run_server=False)
        client = gui._server.test_client()
        response = client.get("/resource.txt")
        assert (
            response.status_code == 200
        ), f"file resource.txt request status should be 200 but is {response.status_code}"
|
# Removed unused `import pytest` — this module uses no pytest API directly.
import inspect
import warnings

from taipy.gui import Gui


def test_ignore_file_found(gui: Gui):
    """A resource listed in .taipyignore must not be served (404)."""
    with warnings.catch_warnings(record=True):
        gui._set_frame(inspect.currentframe())
        gui.run(run_server=False)
        client = gui._server.test_client()
        response = client.get("/resource.txt")
        assert (
            response.status_code == 404
        ), f"file resource.txt request status should be 404 but is {response.status_code}"


def test_ignore_file_not_found(gui: Gui):
    """A resource NOT listed in .taipyignore is still served (200)."""
    with warnings.catch_warnings(record=True):
        gui._set_frame(inspect.currentframe())
        gui.run(run_server=False)
        client = gui._server.test_client()
        response = client.get("/resource2.txt")
        assert (
            response.status_code == 200
        ), f"file resource2.txt request status should be 200 but is {response.status_code}"
|
import inspect
import time
from urllib.request import urlopen

from taipy.gui import Gui


# this hangs in github
def test_run_thread(gui: Gui, helpers):
    # Start the server in a background thread, poll until the port answers,
    # fetch a page, stop, then verify the server can be restarted cleanly.
    gui._set_frame(inspect.currentframe())
    gui.add_page("page1", "# first page")
    gui.run(run_in_thread=True, run_browser=False)
    while not helpers.port_check():
        time.sleep(0.1)
    assert ">first page</h1>" in urlopen("http://127.0.0.1:5000/taipy-jsx/page1").read().decode("utf-8")
    gui.stop()
    # Wait for the port to be released before restarting.
    while helpers.port_check():
        time.sleep(0.1)
    gui.run(run_in_thread=True, run_browser=False)
    while not helpers.port_check():
        time.sleep(0.1)
    assert ">first page</h1>" in urlopen("http://127.0.0.1:5000/taipy-jsx/page1").read().decode("utf-8")
|
# Removed unused `import pytest` — this module uses no pytest API directly.
from taipy.gui import Gui


def test_add_shared_variables(gui: Gui):
    """Shared variable names are registered once, without duplicates."""
    Gui.add_shared_variable("var1", "var2")
    assert isinstance(gui._Gui__shared_variables, list)
    assert len(gui._Gui__shared_variables) == 2
    # Re-adding the same names must not create duplicates.
    Gui.add_shared_variables("var1", "var2")
    assert len(gui._Gui__shared_variables) == 2
|
import json

from taipy.gui.gui import Gui


def test_multiple_instance():
    # Two Gui instances must serve their own pages independently.
    gui1 = Gui("<|gui1|>")
    gui2 = Gui("<|gui2|>")
    gui1.run(run_server=False)
    gui2.run(run_server=False)
    client1 = gui1._server.test_client()
    client2 = gui2._server.test_client()
    assert_multiple_instance(client1, 'value="gui1"')
    assert_multiple_instance(client2, 'value="gui2"')


def assert_multiple_instance(client, expected_value):
    # Fetch the root page's JSX and check it contains the instance's value.
    response = client.get("/taipy-jsx/TaiPy_root_page")
    response_data = json.loads(response.get_data().decode("utf-8", "ignore"))
    assert response.status_code == 200
    assert isinstance(response_data, dict)
    assert "jsx" in response_data
    assert expected_value in response_data["jsx"]
|
from taipy.gui.utils._variable_directory import _MODULE_NAME_MAP, _variable_decode, _variable_encode


def test_variable_encode_decode():
    # Encoding appends _TPMDL_<index>, where <index> maps back to the module
    # name via _MODULE_NAME_MAP; decoding reverses the mapping.
    assert _variable_encode("x", "module") == "x_TPMDL_0"
    assert _MODULE_NAME_MAP[0] == "module"
    assert _variable_decode("x_TPMDL_0") == ("x", "module")
    # With no module, the name round-trips unchanged.
    assert _variable_encode("x", None) == "x"
    assert _variable_decode("x") == ("x", None)
    # The TpExPr_ prefix is kept on encode but stripped by decode.
    assert _variable_encode("TpExPr_x", "module1") == "TpExPr_x_TPMDL_1"
    assert _MODULE_NAME_MAP[1] == "module1"
    assert _variable_decode("TpExPr_x_TPMDL_1") == ("x", "module1")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once they become available.