File size: 1,537 Bytes
8ec5561
 
 
 
 
cb3a3a4
 
 
 
 
 
 
 
5169ec3
 
8ec5561
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5169ec3
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
import os

import pandas as pd
from imblearn.over_sampling import SMOTE
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder

# Import LightGBM, installing it on the fly if it is missing.
# NOTE: the original used IPython's "!pip install lightgbm" magic, which is
# a SyntaxError outside a notebook, and a bare `except:` that would also
# swallow KeyboardInterrupt/SystemExit. Catch only ImportError and invoke
# pip via the current interpreter so the right environment is targeted.
try:
    import lightgbm as lgb
except ImportError:
    import subprocess
    import sys

    subprocess.check_call([sys.executable, "-m", "pip", "install", "lightgbm"])
    import lightgbm as lgb

    
# Load the dataset (assumes an Excel sheet with a 'target' column —
# TODO confirm against the actual file).
file_path = 'datasets'  # Update with your dataset path
df = pd.read_excel(file_path + '/dataset.xlsx')

# Separate features from the label column.
X = df.drop('target', axis=1)
y = df['target']

# Encode class labels to contiguous integers 0..k-1, as LightGBM's
# multiclass objective requires.
le = LabelEncoder()
y_enc = le.fit_transform(y)

# Hold out 20% for validation. Because the target is imbalanced (SMOTE is
# applied below), stratify so every class keeps the same proportion in both
# splits — otherwise a rare class could be under- or unrepresented in
# validation. random_state pins the split for reproducibility.
X_train, X_val, y_train, y_val = train_test_split(
    X, y_enc, test_size=0.2, random_state=42, stratify=y_enc
)

# Oversample the training split only with SMOTE (never the validation split,
# which must reflect the real class distribution).
# NOTE: the `n_jobs` argument was deprecated in imbalanced-learn 0.10 and
# removed in 0.12 — passing it raises TypeError on current versions, so it
# is dropped. random_state makes the synthetic samples reproducible,
# matching the seeded split above.
# NOTE(review): sampling_strategy='minority' resamples only the single
# rarest class; for a multiclass target consider 'not majority' — confirm
# intent before changing.
smote = SMOTE(sampling_strategy='minority', random_state=42)
X_train_res, y_train_res = smote.fit_resample(X_train, y_train)

# Define LightGBM parameters for multiclass classification.
params = {
    'objective': 'multiclass',
    'num_class': len(le.classes_),   # one class per encoded label
    'metric': 'multi_logloss',
    'boosting_type': 'gbdt',
    'num_leaves': 31,
    'learning_rate': 0.05,
    'feature_fraction': 0.9,
    'bagging_fraction': 0.8,
    'bagging_freq': 5,
    'verbose': -1
}

# Build LightGBM datasets. reference=train_data makes the validation set
# reuse the training set's feature binning, which LightGBM requires for a
# consistent evaluation.
train_data = lgb.Dataset(X_train_res, label=y_train_res)
val_data = lgb.Dataset(X_val, label=y_val, reference=train_data)

# Train with early stopping on validation multi_logloss.
# NOTE: the `early_stopping_rounds` keyword was removed from lgb.train in
# LightGBM 4.x; the callbacks API below works on both 3.x and 4.x.
num_round = 1000
bst = lgb.train(
    params,
    train_data,
    num_boost_round=num_round,
    valid_sets=[val_data],
    callbacks=[lgb.early_stopping(stopping_rounds=10)],
)

# Save the trained model, creating the output folder first so save_model
# does not fail with FileNotFoundError on a fresh checkout.
os.makedirs('inference', exist_ok=True)
bst.save_model('inference/lgbm_model.txt')