Update README.md
Browse files

README.md — CHANGED

@@ -114,13 +114,38 @@ The model expects input data in JSON format with the following fields:
Before (old version, hunk 1):

| 114 | ### Inference Procedure
| 115 |
| 116 | ```python
| 117 | -from …
| 118 | import pandas as pd
| 119 |
| 120 | # Load the preprocessor and trained models
| 121 | -preprocessor = load(…)
| 122 | -numerical_model = load(…)
| 123 | -categorical_model = load(…)
| 124 |
| 125 | # Example input data
| 126 | new_data = {
@@ -145,9 +170,35 @@ numerical_predictions = numerical_model.predict(input_transformed)

Before (old version, hunk 2):

| 145 | # Make categorical predictions
| 146 | categorical_predictions = categorical_model.predict(input_transformed)
| 147 |
| 148 | # Decode categorical predictions
| 149 | -…
| 150 | -…
| 151 |
| 152 | # Combine predictions into a single dictionary
| 153 | predictions_combined = {**{col: numerical_predictions[0, i] for i, col in enumerate(numerical_targets)}, **categorical_predictions_decoded}
After (new version, hunk 1):

| 114 | ### Inference Procedure
| 115 |
| 116 | ```python
| 117 | +from huggingface_hub import hf_hub_download
| 118 | import pandas as pd
| 119 | +from joblib import load
| 120 | +import numpy as np
| 121 | +from sklearn.preprocessing import LabelEncoder
| 122 | +from googletrans import Translator
| 123 | +
| 124 | +# Initialize translator
| 125 | +translator = Translator()
| 126 | +
| 127 | +# Download models from Hugging Face Hub
| 128 | +preprocessor_path = hf_hub_download(repo_id='your-username/your-repo', filename='preprocessor.joblib')
| 129 | +numerical_model_path = hf_hub_download(repo_id='your-username/your-repo', filename='numerical_model.joblib')
| 130 | +categorical_model_path = hf_hub_download(repo_id='your-username/your-repo', filename='categorical_model.joblib')
| 131 |
| 132 | # Load the preprocessor and trained models
| 133 | +preprocessor = load(preprocessor_path)
| 134 | +numerical_model = load(numerical_model_path)
| 135 | +categorical_model = load(categorical_model_path)
| 136 | +
| 137 | +# Define categorical targets (same as used during training)
| 138 | +categorical_targets = [
| 139 | +    'Lime Application - Instruction',
| 140 | +    'Lime Application',
| 141 | +    'Organic Matter Application - Instruction',
| 142 | +    'Organic Matter Application',
| 143 | +    '1st Application',
| 144 | +    '1st Application - Type fertilizer (1)',
| 145 | +    '1st Application - Type fertilizer (2)',
| 146 | +    '2nd Application',
| 147 | +    '2nd Application - Type fertilizer (1)'
| 148 | +]
| 149 |
| 150 | # Example input data
| 151 | new_data = {
After (new version, hunk 2):

| 170 | # Make categorical predictions
| 171 | categorical_predictions = categorical_model.predict(input_transformed)
| 172 |
| 173 | +# Load label encoders from Hugging Face Hub (if they are saved separately)
| 174 | +label_encoders = {col: load(hf_hub_download(repo_id='your-username/your-repo', filename=f'label_encoder_{col}.joblib')) for col in categorical_targets}
| 175 | +
| 176 | # Decode categorical predictions
| 177 | +categorical_predictions_decoded = {}
| 178 | +for i, col in enumerate(categorical_targets):
| 179 | +    le = label_encoders[col]
| 180 | +    try:
| 181 | +        decoded_labels = le.inverse_transform(categorical_predictions[:, i])
| 182 | +        # Translate to English
| 183 | +        translated_labels = [translator.translate(label, dest='en').text for label in decoded_labels]
| 184 | +        categorical_predictions_decoded[col] = translated_labels
| 185 | +    except ValueError as e:
| 186 | +        print(f"Error decoding predictions for {col}: {e}")
| 187 | +        categorical_predictions_decoded[col] = ["Unknown"] * len(categorical_predictions[:, i])
| 188 | +
| 189 | +# Define numerical targets (same as used during training)
| 190 | +numerical_targets = [
| 191 | +    'Nitrogen (N) Need',
| 192 | +    'Phosphorus (P2O5) Need',
| 193 | +    'Potassium (K2O) Need',
| 194 | +    'Organic Matter Need',
| 195 | +    'Lime Need',
| 196 | +    'Lime Application - Requirement',
| 197 | +    'Organic Matter Application - Requirement',
| 198 | +    '1st Application - Requirement (1)',
| 199 | +    '1st Application - Requirement (2)',
| 200 | +    '2nd Application - Requirement (1)'
| 201 | +]
| 202 |
| 203 | # Combine predictions into a single dictionary
| 204 | predictions_combined = {**{col: numerical_predictions[0, i] for i, col in enumerate(numerical_targets)}, **categorical_predictions_decoded}