Upload 10 files
- Untitled-1.ipynb +101 -0
- Untitled-2.ipynb +75 -0
- Untitled-3.ipynb +73 -0
- app (2).py +174 -0
- best_model.pkl +3 -0
- database (1).py +58 -0
- inspection_model.pkl +3 -0
- machining_model.pkl +3 -0
- model.pkl +3 -0
- requirements.txt +4 -0
Untitled-1.ipynb
ADDED
@@ -0,0 +1,101 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Test MSE: 1.9502589114484195\n",
      "0.9252566690286127\n"
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.ensemble import HistGradientBoostingRegressor\n",
    "from sklearn.metrics import mean_squared_error, r2_score\n",
    "import pickle\n",
    "\n",
    "# Load the dataset from CSV\n",
    "selected_columns = ['Process type', 'Part Od', 'Part ID', 'Part Width', 'Finish Wt', 'Input Weight']\n",
    "data = pd.read_csv('/Users/abhijay/Desktop/EY_INTERNSHIP/Data_4.csv', usecols=selected_columns)\n",
    "\n",
    "# Assuming 'Input Weight' is the target variable, and other columns are features\n",
    "X = data.drop(columns=['Input Weight'])\n",
    "y = data['Input Weight']\n",
    "\n",
    "# Split the data into training and testing sets (80% train, 20% test)\n",
    "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n",
    "\n",
    "from sklearn.model_selection import RandomizedSearchCV\n",
    "from scipy.stats import uniform, randint\n",
    "\n",
    "# Define the parameter grid\n",
    "param_dist = {\n",
    "    'learning_rate': uniform(0.01, 0.2),\n",
    "    'max_iter': randint(100, 1000),\n",
    "    'max_leaf_nodes': randint(10, 50),\n",
    "    'max_depth': randint(3, 10),\n",
    "    'min_samples_leaf': randint(1, 20),\n",
    "    'max_bins': randint(50, 256),\n",
    "    'l2_regularization': uniform(0, 1),\n",
    "    'early_stopping': [True, False]\n",
    "}\n",
    "\n",
    "# Instantiate the model\n",
    "model = HistGradientBoostingRegressor()\n",
    "\n",
    "# Instantiate the RandomizedSearchCV object\n",
    "random_search = RandomizedSearchCV(model, param_distributions=param_dist, n_iter=100, cv=5, random_state=42, n_jobs=-1)\n",
    "\n",
    "# Fit the random search model\n",
    "random_search.fit(X_train, y_train)\n",
    "\n",
    "# Get the best estimator\n",
    "best_model = random_search.best_estimator_\n",
    "\n",
    "# Use the best model to predict on the test set\n",
    "y_pred = best_model.predict(X_test)\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "from sklearn.metrics import mean_squared_error\n",
    "mse_test = mean_squared_error(y_test, y_pred)\n",
    "r_square = r2_score(y_test, y_pred)\n",
    "print(\"Test MSE: \", mse_test)\n",
    "print(r_square)\n",
    "\n",
    "\n",
    "with open('best_model.pkl', 'wb') as model_file:\n",
    "    pickle.dump(best_model, model_file)\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "base",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
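Note: the notebook above tunes a HistGradientBoostingRegressor for 'Input Weight' and pickles it as best_model.pkl, which "app (2).py" later loads. As a minimal sketch of how that artifact could be reloaded and queried for a single part, assuming the same feature columns the notebook trains on and the casting/forging encoding used in the app (all example values below are made up):

import pickle
import pandas as pd

# Load the tuned regressor saved by the notebook above.
with open('best_model.pkl', 'rb') as model_file:
    input_weight_model = pickle.load(model_file)

# One hypothetical part, using the training feature columns
# ('Input Weight' was the target, so it is not included here).
part = pd.DataFrame({
    'Process type': [1],   # assumed encoding from app (2).py: 1 = forging, 0 = casting
    'Part Od': [120.0],
    'Part ID': [60.0],
    'Part Width': [45.0],
    'Finish Wt': [3.2],
})

predicted_input_weight = input_weight_model.predict(part)[0]
print(f"Predicted input weight: {predicted_input_weight:.3f}")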
Untitled-2.ipynb
ADDED
@@ -0,0 +1,75 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Machining Time Test MSE: 1.0179070252739131e-11\n"
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.ensemble import HistGradientBoostingRegressor\n",
    "from sklearn.metrics import mean_squared_error\n",
    "import pickle\n",
    "\n",
    "# Load the dataset from CSV\n",
    "selected_columns = ['Process type', 'Part Od', 'Part ID', 'Part Width', 'Finish Wt', 'Input Weight',\n",
    "                    'Raw material cost', 'Process cost', 'Machining Time']  # Add more columns as needed\n",
    "data = pd.read_csv('/Users/abhijay/Desktop/EY_INTERNSHIP/Data_4.csv', usecols=selected_columns)  # Replace 'your_dataset.csv' with the path to your CSV file\n",
    "\n",
    "# For predicting machining time, assume 'Machining Time' is the target variable\n",
    "X_machining = data.drop(columns=['Machining Time'])\n",
    "y_machining = data['Machining Time']\n",
    "\n",
    "# Split the data for machining time into training and testing sets (80% train, 20% test)\n",
    "X_machining_train, X_machining_test, y_machining_train, y_machining_test = train_test_split(X_machining, y_machining, test_size=0.2, random_state=42)\n",
    "\n",
    "# Train the model for machining time\n",
    "model_machining = HistGradientBoostingRegressor()\n",
    "model_machining.fit(X_machining_train, y_machining_train)\n",
    "\n",
    "# Evaluate the model\n",
    "\n",
    "test_predictions_machining = model_machining.predict(X_machining_test)\n",
    "\n",
    "test_mse_machining = mean_squared_error(y_machining_test, test_predictions_machining)\n",
    "\n",
    "\n",
    "print(\"Machining Time Test MSE:\", test_mse_machining)\n",
    "\n",
    "# Save the model to disk\n",
    "with open('machining_model.pkl', 'wb') as model_file:\n",
    "    pickle.dump(model_machining, model_file)\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "base",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
Untitled-3.ipynb
ADDED
@@ -0,0 +1,73 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Inspection Time Test MSE: 4.0716280667897373e-13\n"
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.ensemble import HistGradientBoostingRegressor\n",
    "from sklearn.metrics import mean_squared_error\n",
    "import pickle\n",
    "# Load the dataset from CSV\n",
    "selected_columns = ['Process type', 'Part Od', 'Part ID', 'Part Width', 'Finish Wt', 'Input Weight',\n",
    "                    'Raw material cost', 'Process cost', 'Machining Time', 'Machining cost ', 'Inspection Time']  # Add more columns as needed\n",
    "data = pd.read_csv('/Users/abhijay/Desktop/EY_INTERNSHIP/Data_4.csv', usecols=selected_columns)  # Replace 'your_dataset.csv' with the path to your CSV file\n",
    "\n",
    "# For predicting inspection time, assume 'Inspection Time' is the target variable\n",
    "X_inspection = data.drop(columns=['Inspection Time'])\n",
    "y_inspection = data['Inspection Time']\n",
    "\n",
    "# Split the data for inspection time into training and testing sets (80% train, 20% test)\n",
    "X_inspection_train, X_inspection_test, y_inspection_train, y_inspection_test = train_test_split(X_inspection, y_inspection, test_size=0.2, random_state=42)\n",
    "\n",
    "# Train the model for inspection time\n",
    "model_inspection = HistGradientBoostingRegressor()\n",
    "model_inspection.fit(X_inspection_train, y_inspection_train)\n",
    "\n",
    "# Evaluate the model\n",
    "\n",
    "test_predictions_inspection = model_inspection.predict(X_inspection_test)\n",
    "\n",
    "test_mse_inspection = mean_squared_error(y_inspection_test, test_predictions_inspection)\n",
    "\n",
    "print(\"Inspection Time Test MSE:\", test_mse_inspection)\n",
    "\n",
    "# Save the model to disk\n",
    "with open('inspection_model.pkl', 'wb') as model_file:\n",
    "    pickle.dump(model_inspection, model_file)\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "base",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
app (2).py
ADDED
@@ -0,0 +1,174 @@
import streamlit as st
import pandas as pd
import pickle
import os
from database import create_connection, initialize_database, insert_data, fetch_data

# Database setup
conn = create_connection('example.db')
initialize_database(conn)

# Load the trained models
current_dir = os.path.dirname(os.path.abspath(__file__))

with open(os.path.join(current_dir, 'best_model.pkl'), 'rb') as model_file:
    input_weight_model = pickle.load(model_file)
with open(os.path.join(current_dir, 'machining_model.pkl'), 'rb') as model_file:
    machining_model = pickle.load(model_file)
with open(os.path.join(current_dir, 'inspection_model.pkl'), 'rb') as model_file:
    inspection_model = pickle.load(model_file)

# Final landed cost based on grade type
final_landed_cost = {
    '1 MT XX (25-95 dia)': 103,
    '1 MT XX (100-210 dia)': 113,
    '1 MT YY (25-95 dia)': 160,
    '1 MT YY (100-125 dia)': 173,
    '1 MT XY (25-95 dia))': 106,
    '1 MT 8319 (100-210 dia)': 116,
    '1 MT 8319': 104
}

# Function to calculate raw material cost
def calculate_raw_material_cost(process_type, input_weight, grade_type):
    if process_type == 0:  # 0 represents casting
        return 0
    elif process_type == 1:  # 1 represents forging
        return input_weight * final_landed_cost[grade_type]

# Function to calculate process cost
def calculate_process_cost(process_type, input_weight):
    if process_type == 0:  # 0 represents casting
        return (input_weight * (120.57788 / 1000) * 1000)
    elif process_type == 1:  # 1 represents forging
        return input_weight * 30

# Streamlit interface
st.title("EX-Works Calculator")

# Page navigation
pages = ["Home", "Vendor Data", "Material Data", "RM Cost Data", "Supplier Data"]
page = st.sidebar.selectbox("Select Page", pages)

if page == "Home":
    st.write("Welcome to the EX-Works Calculator application.")

elif page == "Vendor Data":
    st.header("Vendor Data")
    vendor_name = st.text_input("Vendor Name")
    vendor_type = st.text_input("Vendor Type")
    gst_no = st.number_input("GST NO")
    contact_person_name = st.text_input("CONTACT PERSON/NAME")
    address = st.text_input("ADDRESS")
    city = st.text_input("CITY")
    panno = st.text_input("PAN NO")

    if st.button("Add Vendor"):
        vendor_data = pd.DataFrame({'vendor_name': [vendor_name], 'vendor_type': [vendor_type], 'GST_NO': [gst_no], 'Contact_person_name': [contact_person_name], 'address': [address], 'city': [city], 'pan_no': [panno]})
        insert_data(conn, 'vendor_data', vendor_data)
        st.success("Vendor data added successfully")

elif page == "Material Data":
    st.header("Material Data")
    part_id = st.number_input("Part ID")
    part_no = st.number_input("Part Number")
    scf = st.selectbox("SCF", options=[0, 1])
    process_type = st.selectbox("Process Type", options=[0, 1])
    part_od = st.number_input("Part Outer Dimension")
    part_width = st.number_input("Part Width")
    part_inner_dimension = st.number_input("Part Inner Dimension")
    material_spec = st.selectbox("Material Specification", options=[0, 1])
    finish_wt = st.number_input("Finish Weight")
    green_drg_no = st.selectbox("Green DRG Number", options=[0, 1])

    if st.button("Add Material"):
        material_data = pd.DataFrame({'Part_id': [part_id], 'part_no': [part_no], 'scf': [scf], 'process_type': [process_type], 'part_od': [part_od], 'part_width': [part_width], 'part_inner_dimension': [part_inner_dimension], 'material_specification': [material_spec], 'finish_wt': [finish_wt], 'green_drg_no': [green_drg_no]})
        insert_data(conn, 'material_data', material_data)
        st.success("Material data added successfully")


elif page == "RM Cost Data":
    st.header("RM Cost Data")
    rm_type = st.text_input("RM Type")
    rm_cost = st.number_input("RM Cost", min_value=0.0, step=0.01)
    vendor_id = st.number_input("Vendor ID", min_value=1, step=1)

    if st.button("Add RM Cost Data"):
        rm_cost_data = pd.DataFrame({'rm_type': [rm_type], 'rm_cost': [rm_cost], 'vendor_id': [vendor_id]})
        insert_data(conn, 'rm_cost_data', rm_cost_data)
        st.success("RM cost data added successfully")

elif page == "Supplier Data":
    st.header("Supplier Data")
    part_no = st.number_input("Part No", min_value=1, step=1)
    process_type = st.selectbox("Process Type", options=[0, 1])
    part_od = st.number_input("Part OD", min_value=0.0, step=0.1)
    part_id = st.number_input("Part ID", min_value=0.0, step=0.1)
    part_width = st.number_input("Part Width", min_value=0, step=1)
    finish_wt = st.number_input("Finish Wt", min_value=0.0, step=0.1)
    grade_type = st.selectbox("Grade Type", options=list(final_landed_cost.keys()))
    material_id = st.number_input("Material ID", min_value=1, step=1)

    if st.button("Calculate and Add Supplier Data"):
        # Prepare the input data for prediction
        input_data = pd.DataFrame({
            'Process type': [process_type],
            'Part Od': [part_od],
            'Part ID': [part_id],
            'Part Width': [part_width],
            'Finish Wt': [finish_wt]
        })

        # Predict the input weight
        predicted_input_weight = input_weight_model.predict(input_data)[0]

        # Calculate raw material cost
        raw_material_cost = calculate_raw_material_cost(process_type, predicted_input_weight, grade_type)

        # Calculate process cost
        process_cost = calculate_process_cost(process_type, predicted_input_weight)

        # Prepare the data for machining time prediction
        machining_data = pd.DataFrame({
            'Process type': [process_type],
            'Part Od': [part_od],
            'Part ID': [part_id],
            'Part Width': [part_width],
            'Finish Wt': [finish_wt],
            'Input Weight': [predicted_input_weight],
            'Raw material cost': [raw_material_cost],
            'Process cost': [process_cost]
        })

        # Predict the machining time
        predicted_machining_time = machining_model.predict(machining_data)[0]

        # Calculate machining cost
        machining_cost = predicted_machining_time * 375.71

        # Calculate scrap recovery
        scrap_recovery = (predicted_input_weight - finish_wt) * 11.5

        # Prepare the data for inspection time prediction
        inspection_data = pd.DataFrame({
            'Process type': [process_type],
            'Part Od': [part_od],
            'Part ID': [part_id],
            'Part Width': [part_width],
            'Finish Wt': [finish_wt],
            'Input Weight': [predicted_input_weight],
            'Raw material cost': [raw_material_cost],
            'Process cost': [process_cost],
            'Machining Time': [predicted_machining_time],
            'Machining cost': [machining_cost],
            'Scrap recovery': [scrap_recovery]
        })

        # Predict the inspection time
        predicted_inspection_time = inspection_model.predict(inspection_data)[0]

        # Calculate inspection cost
        inspection_cost = predicted_inspection_time * 375.71

        # Calculate total mg cost
        total_mg_cost = raw_material_cost + process_cost + machining_cost + scrap_recovery + inspection_cost
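Note: the uploaded file ends at the total_mg_cost line above (the last identifier is completed to inspection_cost, which is defined two lines earlier; nothing after that point is in the upload). The Supplier Data page chains the three pickled models with a few fixed rates taken from the code: the grade-specific landed cost, 30 per unit of input weight for forging process cost, 375.71 per unit of machining/inspection time, and 11.5 per unit of scrap weight. As a worked illustration of that same arithmetic with made-up stand-in numbers (no Streamlit or models involved), assuming the forging branch and the '1 MT XX (25-95 dia)' grade:

# Hypothetical figures standing in for the model predictions in app (2).py.
predicted_input_weight = 4.0      # stand-in for input_weight_model.predict(...)
finish_wt = 3.2                   # entered by the user
predicted_machining_time = 0.5    # stand-in for machining_model.predict(...)
predicted_inspection_time = 0.1   # stand-in for inspection_model.predict(...)
grade_rate = 103                  # final_landed_cost['1 MT XX (25-95 dia)']

raw_material_cost = predicted_input_weight * grade_rate   # forging branch
process_cost = predicted_input_weight * 30                # forging branch
machining_cost = predicted_machining_time * 375.71
scrap_recovery = (predicted_input_weight - finish_wt) * 11.5
inspection_cost = predicted_inspection_time * 375.71

total_mg_cost = (raw_material_cost + process_cost + machining_cost
                 + scrap_recovery + inspection_cost)
print(total_mg_cost)  # 412.0 + 120.0 + 187.855 + 9.2 + 37.571 = 766.626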
best_model.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3892c1949117792f374863ca4257ef20e1f6844e59bc74afbb582fdb2a05fe14
size 59325
database (1).py
ADDED
@@ -0,0 +1,58 @@
import sqlite3
import pandas as pd

def create_connection(db_file):
    """Create a database connection to the SQLite database."""
    conn = sqlite3.connect(db_file)
    return conn

def initialize_database(conn):
    """Initialize the database with tables and constraints."""
    with conn:
        conn.execute('''CREATE TABLE IF NOT EXISTS vendor_data (
                            vendor_type TEXT NOT NULL,
                            vendor_name TEXT PRIMARY KEY,
                            GST_NO INTEGER NOT NULL,
                            Contact_person_name TEXT NOT NULL,
                            address TEXT NOT NULL,
                            city TEXT NOT NULL,
                            pan_no TEXT NOT NULL CHECK(length(pan_no) <= 10)
                        )''')

        conn.execute('''CREATE TABLE IF NOT EXISTS material_data (
                            Part_id INTEGER PRIMARY KEY AUTOINCREMENT,
                            part_no INTEGER NOT NULL,
                            scf TEXT NOT NULL,
                            process_type TEXT NOT NULL,
                            part_od REAL NOT NULL,
                            part_width INTEGER NOT NULL,
                            part_inner_dimension REAL NOT NULL,
                            material_specification TEXT NOT NULL,
                            finish_wt REAL NOT NULL,
                            green_drg_no INTEGER NOT NULL
                        )''')

        conn.execute('''CREATE TABLE IF NOT EXISTS rm_cost_data (
                            grade_type TEXT NOT NULL,
                            usd_cif REAL NOT NULL,
                            rate REAL NOT NULL,
                            final_landed_cost REAL
                        )''')

        conn.execute('''CREATE TABLE IF NOT EXISTS supplier_data (
                            input_weight REAL,
                            process_code TEXT,
                            machining_time REAL,
                            inspection_time REAL,
                            process_cost REAL,
                            machining_cost REAL,
                            inspection_cost REAL
                        )''')

def insert_data(conn, table, data):
    """Insert data into SQLite database."""
    data.to_sql(table, conn, if_exists='append', index=False)

def fetch_data(query, conn):
    """Fetch data from SQLite database."""
    return pd.read_sql_query(query, conn)
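Note: the upload names this file "database (1).py", but "app (2).py" imports it as a module named database, so it presumably runs as database.py on disk. A minimal sketch of how its helpers fit together outside the Streamlit app, using the vendor_data table and column names defined above (the row values are made up):

import pandas as pd
from database import create_connection, initialize_database, insert_data, fetch_data

# Create (or open) the SQLite file and make sure the tables exist.
conn = create_connection('example.db')
initialize_database(conn)

# Append one made-up vendor row; column names must match the vendor_data table.
vendor_row = pd.DataFrame({
    'vendor_type': ['Forging'],
    'vendor_name': ['Acme Forge'],
    'GST_NO': [123456789],
    'Contact_person_name': ['J. Doe'],
    'address': ['Plot 12, Industrial Area'],
    'city': ['Pune'],
    'pan_no': ['ABCDE1234F'],   # CHECK constraint: at most 10 characters
})
insert_data(conn, 'vendor_data', vendor_row)

# Read it back as a DataFrame.
print(fetch_data("SELECT * FROM vendor_data", conn))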
inspection_model.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2ff7355298024e9f4ed93d65a428bb4714bcf66d30d385a45f34ebb22a7465ce
size 35374
machining_model.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b4034770f0a2a9059ec948eb8b16e09a18b6d28ffcf79137c83ec446b0ba70a1
size 34353
model.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a559e548fd44ecd683f11b9476b7b4c5fd68681a4fb81fb72039b40bb115d968
size 515
requirements.txt
ADDED
@@ -0,0 +1,4 @@
streamlit==1.32.2
pandas==1.5.3
scikit-learn==1.2.2