Spaces:
Build error — the Space fails to build, most likely because the code below uses the legacy `gr.inputs` / `gr.outputs` namespaces, which were removed in Gradio 3.x.
# Step 6.1: Define the input components — one slider per model feature.
# The legacy ``gr.inputs``/``gr.outputs`` namespaces were removed in
# Gradio 3.x (referencing them raises AttributeError at import time,
# which breaks the Space build); the top-level component classes
# ``gr.Slider``/``gr.Textbox``/``gr.Image`` are the supported replacements.
import gradio as gr

input_module1 = gr.Slider(1, 100, step=5, label="Longitude")
input_module2 = gr.Slider(1, 100, step=5, label="Latitude")
# NOTE(review): Feature3..Feature8 correspond, in order, to the model
# columns housing_median_age, total_rooms, total_bedrooms, population,
# households, median_income (see machine_learning_pipeline) — consider
# renaming the labels so users know what they are entering.
input_module3 = gr.Slider(1, 100, step=5, label="Feature3")
input_module4 = gr.Slider(1, 100, step=5, label="Feature4")
input_module5 = gr.Slider(1, 100, step=5, label="Feature5")
input_module6 = gr.Slider(1, 100, step=5, label="Feature6")
input_module7 = gr.Slider(1, 100, step=5, label="Feature7")
input_module8 = gr.Slider(1, 100, step=5, label="Feature8")

# Step 6.2: Define the output components.
output_module1 = gr.Textbox(label="Output Text")  # model prediction
output_module2 = gr.Image(label="Output Image")   # diagnostic plot
def machine_learning_pipeline(input1, input2, input3, input4, input5, input6, input7, input8):
    """Score one housing record with the pre-trained decision-tree model.

    The eight slider values are interpreted, in order, as the training
    columns: longitude, latitude, housing_median_age, total_rooms,
    total_bedrooms, population, households, median_income.

    Returns:
        tuple: (model prediction array, path to a saved diagnostic plot),
        matching the two Gradio output components (Textbox, Image).

    Requires ``minmax_scaler.pkl`` and ``tree_reg.pkl`` in the working
    directory (pickles produced by the training notebook).
    """
    import pickle

    import matplotlib.pyplot as plt
    import numpy as np
    import pandas as pd

    print("Start ml processing")
    print(input1, input2, input3, input4, input5, input6, input7, input8)

    # 1. Collect the submission into a single-row frame whose column
    #    names/order match the frame the scaler and model were fit on.
    new_feature = np.array([[input1, input2, input3, input4,
                             input5, input6, input7, input8]])
    print(new_feature)
    test_set = pd.DataFrame(new_feature,
                            columns=['longitude', 'latitude', 'housing_median_age',
                                     'total_rooms', 'total_bedrooms', 'population',
                                     'households', 'median_income'])

    # 2. Mirror the training-time preprocessing: drop rows missing
    #    total_bedrooms, then scale. Important: only transform() here —
    #    never re-fit the scaler on unseen data.
    test_set_clean = test_set.dropna(subset=["total_bedrooms"])
    with open('minmax_scaler.pkl', 'rb') as f:
        scaler = pickle.load(f)
    test_features_normalized = scaler.transform(test_set_clean)
    print("test_features_normalized: ",test_features_normalized)

    # 3. Load the pre-trained tree model and predict.
    with open('tree_reg.pkl', 'rb') as f:
        tree_reg = pickle.load(f)
    test_predictions_trees = tree_reg.predict(test_features_normalized)
    print("test_predictions_trees: ",test_predictions_trees)

    # 4. Render a small diagnostic plot of the first two inputs for the
    #    Image output. A fresh figure is created and closed each call so
    #    points do not accumulate across Gradio requests (the original
    #    drew onto the implicit global figure, which leaks state).
    fig = plt.figure()
    plt.plot([input1], [input2])
    plt.savefig('test.png')
    plt.close(fig)

    # 5. Send the prediction and the plot path back to the interface.
    return test_predictions_trees, 'test.png'
# Step 6.3: Wire the components to the pipeline and launch the app.
demo = gr.Interface(
    fn=machine_learning_pipeline,
    inputs=[
        input_module1,
        input_module2,
        input_module3,
        input_module4,
        input_module5,
        input_module6,
        input_module7,
        input_module8,
    ],
    outputs=[output_module1, output_module2],
)
demo.launch(debug=True)