# ProjectE-11 / app.py
# (Hugging Face Spaces upload header, commented out — the scraped lines
#  "MuzammilMax's picture / Upload app.py / 0615822 verified" were not valid Python.)
# -*- coding: utf-8 -*-
"""chem-sim.ipynb
Automatically generated by Colab.
Original file is located at
https://colab.research.google.com/drive/1rpq0orE7c2E_K8SsmIeH6gxNjw8ucycA
# Chem simulation using scipy
"""
# !pip install tensorflow==2.15
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy.integrate import solve_ivp
import random
import tensorflow as tf
"""# Dataset
$$
\displaystyle
k = A \cdot e^{-\frac{E_a}{RT}}
$$
k : Rate constant (what we're solving for)
A : Pre-exponential factor (frequency factor)
Ea : Activation energy (J/mol)
R : Gas constant 8.314 J/(mol·K)
T : Temperature (in Kelvin)
| temp: Kelvin | pH: 0–14 scale | Ea: in kJ/mol | A_factor: 1/s |
## Zero order
"""
def zero(t, y, k):
    """Zero-order A -> C: rates are constant, independent of concentration."""
    A, B, C = y  # unpack for interface consistency; the rates ignore the state
    return [-k, 0, k]
"""## First Order"""
def first(t, y, k):
    """First-order A -> C: rate proportional to [A]."""
    A, B, C = y
    rate = k * A
    return [-rate, 0, rate]
def decay_first(t, y, k):
    """First-order decay of A with no tracked product (B and C stay constant)."""
    A, B, C = y
    return [-k * A, 0, 0]
def reversible_first(t, y, k, k_1):
    """Reversible first-order A <-> C (forward rate k, reverse rate k_1)."""
    A, B, C = y
    return [-k * A + k_1 * C, 0, k * A - k_1 * C]
"""## Second Order"""
def second1(t, y, k):
    """Second order overall, A + B -> C: rate = k[A][B]."""
    A, B, C = y
    rate = k * A * B
    return [-rate, -rate, rate]
def second2(t, y, k):
    """Second order in A, 2A -> C: A disappears twice as fast as C forms."""
    A, B, C = y
    return [-2 * k * A**2, 0, +k * A**2]
def reversible_second1(t, y, k, k_1):
    """Reversible A + B <-> C: net forward flux k[A][B] - k_1[C]."""
    A, B, C = y
    return [-k * A * B + k_1 * C, -k * A * B + k_1 * C, +k * A * B - k_1 * C]
def reversible_second2(t, y, k, k_1):
    """Reversible 2A <-> C: stoichiometric factor 2 on the A terms."""
    A, B, C = y
    return [-2 * k * A**2 + 2 * k_1 * C, 0, +k * A**2 - k_1 * C]
"""## Third order"""
def third1(t, y, k):
    """Third order in A, 3A -> C: rate = k[A]^3."""
    A, B, C = y
    return [-3 * k * A**3, 0, +k * A**3]
def third2(t, y, k):
    """Third order overall, 2A + B -> C: rate = k[A]^2[B]."""
    A, B, C = y
    return [-2 * k * A**2 * B, -1 * k * A**2 * B, +k * A**2 * B]
def reversible_third1(t, y, k, k_1):
    """Reversible 3A <-> C: stoichiometric factor 3 on the A terms."""
    A, B, C = y
    return [-3 * k * A**3 + 3 * k_1 * C, 0, +k * A**3 - k_1 * C]
def reversible_third2(t, y, k, k_1):
    """Reversible 2A + B <-> C with forward rate k[A]^2[B] and reverse k_1[C]."""
    A, B, C = y
    return [-2 * k * A**2 * B + 2 * k_1 * C,
            -1 * k * A**2 * B + 1 * k_1 * C,
            +k * A**2 * B - k_1 * C]
"""## functions"""
def compute_k(temp, Ea, A_factor):
    """Arrhenius rate constant k = A * exp(-Ea / (R*T)).

    temp     -- absolute temperature in Kelvin
    Ea       -- activation energy in kJ/mol (converted to J/mol here)
    A_factor -- pre-exponential (frequency) factor, 1/s
    """
    R = 8.314  # gas constant, J/(mol*K)
    return A_factor * np.exp(-(Ea * 1000) / (R * temp))
def ode1(A0, B0, C0, temp, Ea, A_factor):
    """Simulate one randomly-chosen rate law from the given initial state.

    Draws a random reverse-rate constant and one of the 12 rate-law functions,
    integrates it over t in [0, 8] at 11 evenly spaced points, and returns
    (t, A(t), B(t), C(t), k, k_1, is_reversible, order_label).
    """
    y0 = [A0, B0, C0]
    k = compute_k(temp, Ea, A_factor)
    k_1 = k * random.uniform(0.5, 0.9)  # reverse rate, a random fraction of k
    t_span = (0, 8)
    t_eval = np.linspace(0, 8, 11)

    # (rhs function, is_reversible flag, order label), indexed by a random draw.
    choices = [
        (zero, 0, 'zero'),
        (first, 0, 'first'),
        (decay_first, 0, 'first'),
        (reversible_first, 1, 'first'),
        (second1, 0, 'second'),
        (second2, 0, 'second'),
        (reversible_second1, 1, 'second'),
        (reversible_second2, 1, 'second'),
        (third1, 0, 'third'),
        (third2, 0, 'third'),
        (reversible_third1, 1, 'third'),
        (reversible_third2, 1, 'third'),
    ]
    func_name, is_reversible, order = choices[random.randint(0, 11)]

    # Reversible laws take both rate constants; irreversible ones only k.
    rate_args = (k, k_1) if is_reversible else (k,)
    solution = solve_ivp(func_name, t_span, y0, args=rate_args, t_eval=t_eval)
    return (solution.t, solution.y[0], solution.y[1], solution.y[2],
            k, k_1, is_reversible, order)
"""## dataframe"""
"""## dataframe"""
# Build the training set: 100k randomly parameterised reactions, each solved
# at 11 time points by ode1, then flattened into one row per reaction.
results = []
for _ in range(100000):
    A0 = round(random.uniform(1.0, 10.0), 2)
    B0 = round(random.uniform(0.0, 5.0), 2)
    C0 = round(random.uniform(0.0, 5.0), 2)
    temp = random.randint(270, 280)
    pH = round(random.uniform(1.0, 14.0), 2)
    Ea = random.randint(90, 100)
    A_factor = round(random.uniform(2e16, 5e17), 2)
    pressure = round(random.uniform(0.5, 5.0), 2)
    weight = round(random.uniform(20, 200), 1)
    structure = random.choice(['Linear', 'Ring', 'Branched', 'Unknown'])
    catalyst = random.choice(['None', 'Enzyme', 'Acid', 'Base'])
    time, A, B, C, k, k_1, is_reversible, order = ode1(A0, B0, C0, temp, Ea, A_factor)
    row = {
        'order': order,
        'temp': temp,
        'pH': pH,
        'Ea': Ea,
        'A_factor': A_factor,
        'pressure': pressure,
        'log_pressure': np.log(pressure),
        'weight': weight,
        'structure': structure,
        'catalyst': catalyst,
        'is_reversible': is_reversible,
        'k': k,
        'k_1': k_1,
    }
    # Flatten the 11-point profiles into columns A0..A10, B0..B10, C0..C10.
    for label, series in (('A', A), ('B', B), ('C', C)):
        row.update({f'{label}{i}': series[i] for i in range(11)})
    results.append(row)
df_train = pd.DataFrame(results)
df_train.to_csv('chem_data_train.csv', index=False)
df_train
# Build the held-out test set the same way as the training set, but with
# 20k samples and a separate CSV.
results = []
for _ in range(20000):
    A0 = round(random.uniform(1.0, 10.0), 2)
    B0 = round(random.uniform(0.0, 5.0), 2)
    C0 = round(random.uniform(0.0, 5.0), 2)
    temp = random.randint(270, 280)
    pH = round(random.uniform(1.0, 14.0), 2)
    Ea = random.randint(90, 100)
    A_factor = round(random.uniform(2e16, 5e17), 2)
    pressure = round(random.uniform(0.5, 5.0), 2)
    weight = round(random.uniform(20, 200), 1)
    structure = random.choice(['Linear', 'Ring', 'Branched', 'Unknown'])
    catalyst = random.choice(['None', 'Enzyme', 'Acid', 'Base'])
    time, A, B, C, k, k_1, is_reversible, order = ode1(A0, B0, C0, temp, Ea, A_factor)
    row = {
        'order': order,
        'temp': temp,
        'pH': pH,
        'Ea': Ea,
        'A_factor': A_factor,
        'pressure': pressure,
        'log_pressure': np.log(pressure),
        'weight': weight,
        'structure': structure,
        'catalyst': catalyst,
        'is_reversible': is_reversible,
        'k': k,
        'k_1': k_1,
    }
    # Flatten the 11-point profiles into columns A0..A10, B0..B10, C0..C10.
    for label, series in (('A', A), ('B', B), ('C', C)):
        row.update({f'{label}{i}': series[i] for i in range(11)})
    results.append(row)
df_test = pd.DataFrame(results)
df_test.to_csv('chem_data_test.csv', index=False)
df_test
"""- To concatenate df_test and df_train into df"""
df = pd.concat([df_test, df_train])
df
"""# Machine learning
## Data preparation
- removing 'structure' and 'catalyst' from dataframe
- mapping 0 to zero , 1 to first, 2 to second and 3 to third in order column
- mapping structure and catalyst
"""
# Integer encodings for the three string-valued columns.
structure_map = {'Linear': 0, 'Ring': 1, 'Branched': 2, 'Unknown': 3}
catalyst_map = {'None': 0, 'Enzyme': 1, 'Acid': 2, 'Base': 3}
order_map = {'zero': 0, 'first': 1, 'second': 2, 'third': 3}
for column, mapping in (('structure', structure_map),
                        ('catalyst', catalyst_map),
                        ('order', order_map)):
    df[column] = df[column].map(mapping)
df
"""- creating x and y datasets for train and test"""
# Features are every column except the target; the target is the reaction order.
X = df.drop(columns=['order'])
y = df['order']

from sklearn.model_selection import train_test_split

# 80/20 split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

from sklearn.preprocessing import StandardScaler

# Fit the scaler on the training split only, then apply the same statistics
# to both splits to avoid leakage.
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
"""## Models"""
# from sklearn.metrics import accuracy_score
"""### Logistic Regression"""
# from sklearn.linear_model import LogisticRegression
# lr = LogisticRegression(max_iter=1000, C=10, penalty='l2')
# lr.fit(X_train_scaled, y_train)
# lr_pred = lr.predict(X_test_scaled)
# print("Logistic Regression Accuracy:", accuracy_score(y_test, lr_pred))
"""### RandomForestClassifier"""
# from sklearn.ensemble import RandomForestClassifier
# rf = RandomForestClassifier(class_weight='balanced', random_state=42, n_estimators=200, max_depth=None)
# rf.fit(X_train, y_train)
# rf_pred = rf.predict(X_test)
# print("RandomForestClassifier Accuracy:", accuracy_score(y_test, rf_pred))
"""### Gradient Boosting Classifier"""
# from sklearn.ensemble import GradientBoostingClassifier
# gb = GradientBoostingClassifier(n_estimators=200, max_depth=5, random_state=42)
# gb.fit(X_train, y_train)
# gb_pred = gb.predict(X_test)
# print("Gradient Boosting Accuracy:", accuracy_score(y_test, gb_pred))
"""### Support Vector Classifier"""
# from sklearn.svm import SVC
# svc = SVC(C=10, kernel='rbf', class_weight='balanced')
# svc.fit(X_train_scaled, y_train)
# svc_pred = svc.predict(X_test_scaled)
# print("SVC Accuracy:", accuracy_score(y_test, svc_pred))
"""### K-Nearest Neighbors"""
# from sklearn.neighbors import KNeighborsClassifier
# knn = KNeighborsClassifier(n_neighbors=7, weights='uniform')
# knn.fit(X_train_scaled, y_train)
# knn_pred = knn.predict(X_test_scaled)
# print("KNN Accuracy:", accuracy_score(y_test, knn_pred))
"""### XG Boost"""
# from xgboost import XGBClassifier
# xgb_model = XGBClassifier(learning_rate=0.1, max_depth=7, n_estimators=200, eval_metric='mlogloss', random_state=42)
# xgb_model.fit(X_train, y_train)
# xgb_pred = xgb_model.predict(X_test)
# print("XGBoost Accuracy:", accuracy_score(y_test, xgb_pred))
"""### Hyperparameter tuning"""
# from sklearn.linear_model import LogisticRegression
# from sklearn.svm import SVC
# from sklearn.neighbors import KNeighborsClassifier
# from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
# import xgboost as xgb
# models = {
# 'LogisticRegression': LogisticRegression(class_weight='balanced', max_iter=1000),
# 'SVC': SVC(class_weight='balanced'),
# 'KNN': KNeighborsClassifier(),
# 'RandomForest': RandomForestClassifier(class_weight='balanced', random_state=42),
# 'GradientBoosting': GradientBoostingClassifier(random_state=42),
# 'XGBoost': xgb.XGBClassifier(eval_metric='mlogloss', random_state=42)
# }
# param_grids = {
# 'LogisticRegression': {
# 'C': [0.1, 1, 10],
# 'penalty': ['l2']
# },
# 'SVC': {
# 'C': [0.1, 1, 10],
# 'kernel': ['linear', 'rbf']
# },
# 'KNN': {
# 'n_neighbors': [3, 5, 7],
# 'weights': ['uniform', 'distance']
# },
# 'RandomForest': {
# 'n_estimators': [100, 200],
# 'max_depth': [5, 10, None]
# },
# 'GradientBoosting': {
# 'n_estimators': [100, 200],
# 'max_depth': [3, 5, 7]
# },
# 'XGBoost': {
# 'n_estimators': [100, 200],
# 'max_depth': [3, 5, 7],
# 'learning_rate': [0.05, 0.1]
# }
# }
# from sklearn.model_selection import GridSearchCV
# best_models = {}
# for name, model in models.items():
# print(f"Running GridSearch for {name}...")
# grid = GridSearchCV(model, param_grids[name], cv=5, scoring='accuracy')
# if name in ['LogisticRegression', 'SVC', 'KNN']:
# grid.fit(X_train_scaled, y_train)
# else:
# grid.fit(X_train, y_train)
# best_models[name] = grid.best_estimator_
# print(f"Best params for {name}:", grid.best_params_)
# print("Best CV Score:", grid.best_score_)
# print("=====================================")
"""### BEST PARAMS
==========================================================================
- LogisticRegression
==========================================================================
Best params for LogisticRegression: {'C': 10, 'penalty': 'l2'}
Best CV Score: 0.8008333333333335
==========================================================================
- SVC
==========================================================================
Best params for SVC: {'C': 10, 'kernel': 'rbf'}
Best CV Score: 0.8791666666666668
==========================================================================
- KNN
==========================================================================
Best params for KNN: {'n_neighbors': 7, 'weights': 'uniform'}
Best CV Score: 0.5670833333333334
==========================================================================
- RandomForest
==========================================================================
Best params for RandomForest: {'max_depth': None, 'n_estimators': 200}
Best CV Score: 0.8362499999999999
==========================================================================
- GradientBoosting
==========================================================================
Best params for GradientBoosting: {'max_depth': 5, 'n_estimators': 200}
Best CV Score: 0.8945833333333333
==========================================================================
- XGBOOST
==========================================================================
Best params for XGBOOST: {'learning_rate': 0.1, 'max_depth': 7, 'n_estimators': 200}
Best CV Score: 0.8950000000000001
==========================================================================
## DNN
"""
csv_columns = ['temp', 'pH', 'Ea', 'A_factor', 'pressure', 'log_pressure', 'weight', 'structure', 'catalyst', 'is_reversible', 'k', 'k_1']
# All four orders the simulator can emit (matches n_classes=4 in the DNN below);
# the original list was missing 'Zero_Order'.
classes = ['Zero_Order', 'First_Order', 'Second_Order', 'Third_Order']
train_path = './chem_data_train.csv'
# BUG FIX: test_path previously pointed at chem_data_train.csv, so the "test"
# set was the training data and every evaluation metric was meaningless.
test_path = './chem_data_test.csv'
train = pd.read_csv(train_path)
test = pd.read_csv(test_path)
train.head()
"""- Fill missing values in the 'catalyst' column
- NaN values arenot accepted by classifier thats why convert every Nan values to none
- the species column is now gone
"""
if 'order' in train.columns:
train_y = train.pop('order')
if 'order' in test.columns:
test_y = test.pop('order')
train['catalyst'] = train['catalyst'].fillna('None')
test['catalyst'] = test['catalyst'].fillna('None')
train.head()
"""- Define categorical and numerical feature columns
- Assining each string a numerical uinque value because our dumb ahh model canot understand english
"""
CATEGORICAL_COLUMNS = ['structure', 'catalyst'] #columns that have strings
NUMERIC_COLUMNS = ['temp', 'pH', 'Ea', 'A_factor', 'pressure', 'log_pressure', 'weight',
'is_reversible', 'k', 'k_1', 'A0', 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8', 'A9', 'A10',
'B0', 'B1', 'B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'B8', 'B9', 'B10',
'C0', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9', 'C10'] #columns that have numerical values
feature_columns = []
for feature_name in CATEGORICAL_COLUMNS:
vocabulary = train[feature_name].unique()
cat_column = tf.feature_column.categorical_column_with_vocabulary_list(feature_name, vocabulary)
indicator_column = tf.feature_column.indicator_column(cat_column) #it creates binary coolumns that will be mapped in to feature columns and it will be steamlined to our DNN model
feature_columns.append(indicator_column)
for feature_name in NUMERIC_COLUMNS:
feature_columns.append(tf.feature_column.numeric_column(feature_name, dtype=tf.float32))
print(feature_columns)
import logging
# Raise TensorFlow's logger to INFO so estimator training progress
# (loss / global step messages) is printed during classifier.train below.
tf.get_logger().setLevel(logging.INFO)
"""- setting up input function
- convert the inputs to a dataset
"""
def input_fn(features, labels, training=True, batch_size=500):
    """Turn a features DataFrame (+ labels) into a batched tf.data.Dataset.

    During training the dataset is shuffled and repeated indefinitely (the
    caller bounds the run via `steps`); otherwise it is consumed once, in order.
    """
    ds = tf.data.Dataset.from_tensor_slices((dict(features), labels))
    return ds.shuffle(3000).repeat().batch(batch_size) if training else ds.batch(batch_size)
"""- Normalize the numerical features in the training data"""
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
train_normalized = train.copy()
train_normalized[NUMERIC_COLUMNS] = scaler.fit_transform(train[NUMERIC_COLUMNS])
test_normalized = test.copy()
test_normalized[NUMERIC_COLUMNS] = scaler.transform(test[NUMERIC_COLUMNS])
"""- Convert the 'order' labels to numerical values"""
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
train_y_encoded = le.fit_transform(train_y) #we used sckit label encoder to encode the values
# Deep fully-connected classifier over the engineered feature columns.
# NOTE(review): tf.estimator is deprecated in newer TF releases — this code
# assumes a version (~2.15, per the pip comment at the top) where it still works.
classifier = tf.estimator.DNNClassifier(
    feature_columns=feature_columns,
    hidden_units=[50, 40],  # two hidden layers: 50 units, then 40
    n_classes=4, # We have 4 classes: zero, first, second, third
    optimizer=tf.keras.optimizers.legacy.RMSprop(learning_rate=0.001))
# input_fn shuffles and repeats in training mode, so `steps` (not epochs)
# bounds the run at 3000 batches of 500 samples.
classifier.train(
    input_fn=lambda: input_fn(train_normalized, train_y_encoded, training=True),
    steps=3000
)
# BUG FIX: the original called le.fit_transform here, REFITTING the encoder on
# the test labels. If the test set's label set or ordering differed from
# training, the integer mapping would silently diverge and the evaluation would
# score against wrong class ids. transform() reuses the training-time mapping.
test_y_encoded = le.transform(test_y)
classifier.evaluate(input_fn=lambda: input_fn(test_normalized, test_y_encoded, training=False))
"""- accuracy = 0.99983335
## Interactive
"""
def predict_order(inputs):
    """Predict the reaction order for a single sample dict using the trained DNN.

    Parameters
    ----------
    inputs : dict
        One feature record with the training columns: scalars for every
        NUMERIC_COLUMNS entry plus 'structure' / 'catalyst' strings.

    Returns
    -------
    str or None
        The decoded order label (e.g. 'first'), or None if prediction fails.
    """
    try:
        input_df = pd.DataFrame(inputs, index=[0])
        # Apply the SAME scaler that was fitted on the training data.
        input_df[NUMERIC_COLUMNS] = scaler.transform(input_df[NUMERIC_COLUMNS])

        # BUG FIX: the original reused input_fn(..., labels=None), but
        # tf.data.Dataset.from_tensor_slices((features, None)) raises because
        # None is not a valid dataset component. Prediction needs a
        # features-only pipeline.
        def pred_input_fn():
            return tf.data.Dataset.from_tensor_slices(dict(input_df)).batch(1)

        predictions = classifier.predict(input_fn=pred_input_fn)
        class_name = None
        for pred_dict in predictions:
            class_id = pred_dict['class_ids'][0]
            probability = pred_dict['probabilities'][class_id]
            # Map the integer class id back to its string label.
            class_name = le.inverse_transform([class_id])[0]
            print('Order is "{}" ({:.1f}%)'.format(class_name, 100 * probability))
        return class_name
    except Exception as e:
        # Boundary handler for the interactive UI: report and degrade gracefully.
        print(f"An error occurred: {e}")
        return None
# Example input record for a quick smoke test of predict_order.
example_inputs = {
    'temp': 277,
    'pH': 6.5,
    'Ea': 93,
    'A_factor': 4.2e17,
    'pressure': 3.0,
    'log_pressure': 1.1,
    'weight': 150,
    'structure': 'Ring',
    'catalyst': 'Acid',
    'is_reversible': 1,
    'k': 0.05,
    'k_1': 0.02,
}
# Synthetic concentration profiles: A falls 5.0 -> 0.0 in steps of 0.5,
# B falls 2.0 -> 0.0 in steps of 0.2, C rises 1.0 -> 3.0 in steps of 0.2.
example_inputs.update({f'A{i}': round(5.0 - 0.5 * i, 1) for i in range(11)})
example_inputs.update({f'B{i}': round(2.0 - 0.2 * i, 1) for i in range(11)})
example_inputs.update({f'C{i}': round(1.0 + 0.2 * i, 1) for i in range(11)})
predict_order(example_inputs)
"""- ode2"""
def ode2(A0, B0, C0, temp, Ea, A_factor, is_reversible, predicted_order):
    """Integrate the rate law matching a predicted order and reversibility.

    Picks a concrete RHS function from (predicted_order, is_reversible, B0)
    and solves it over t in [0, 8] at 11 points.

    Returns (t, A(t), B(t), C(t), k, k_1).
    """
    y0 = [A0, B0, C0]
    k = compute_k(temp, Ea, A_factor)
    k_1 = k * random.uniform(0.5, 0.9)  # reverse rate, same convention as ode1
    t_span = (0, 8)
    t_eval = np.linspace(0, 8, 11)

    # B0 > 0 selects the bimolecular variant (A + B ...); otherwise the
    # A-only variant (2A / 3A ...) is used.
    has_B = B0 > 0
    if predicted_order == 'zero':
        func_name = zero
    elif predicted_order == 'first':
        func_name = reversible_first if is_reversible else first
    elif predicted_order == 'second':
        if is_reversible:
            func_name = reversible_second1 if has_B else reversible_second2
        else:
            func_name = second1 if has_B else second2
    elif predicted_order == 'third':
        if is_reversible:
            func_name = reversible_third2 if has_B else reversible_third1
        else:
            func_name = third2 if has_B else third1
    else:
        raise ValueError(f"Could not determine ODE function for predicted order: {predicted_order}")

    # The zero-order RHS only accepts k, even when flagged reversible.
    if is_reversible and predicted_order != 'zero':
        rate_args = (k, k_1)
    else:
        rate_args = (k,)
    solution = solve_ivp(func_name, t_span, y0, args=rate_args, t_eval=t_eval)
    return solution.t, solution.y[0], solution.y[1], solution.y[2], k, k_1
"""### Gradio"""
import gradio as gr
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import solve_ivp
import random
import tensorflow as tf
from sklearn.preprocessing import StandardScaler, LabelEncoder
# Assuming all the necessary functions (compute_k, ode2, predict_order, etc.) and models are defined and trained in the previous cells.
def run_simulation_and_plot(temp, Ea, A_factor_base, A_factor_exponent, A_factor_std_perc, pH, pressure, is_reversible, structure, catalyst, A0, B0, C0):
    """Gradio callback: simulate a reaction, predict its order, and plot it.

    Returns a (summary string, matplotlib Figure) pair for the two outputs
    wired up in the Gradio interface below.
    """
    # --- 1. Data Preparation for Prediction ---
    # Reconstruct A_factor from user-friendly inputs (base * 10^exponent).
    A_factor = A_factor_base * (10**A_factor_exponent)
    A_factor_std = A_factor * (A_factor_std_perc / 100)
    # Add randomness to A_factor using standard deviation
    A_factor_randomized = np.random.normal(A_factor, A_factor_std)
    k = compute_k(temp, Ea, A_factor_randomized)
    k_1 = k * 0.7 # Using a fixed ratio for k_1 for consistency
    # Simulate reaction to get concentration data for prediction; "zero" is a
    # placeholder order used only to generate the A/B/C feature profiles.
    time_sim, A_sim, B_sim, C_sim, _, _ = ode2(A0, B0, C0, temp, Ea, A_factor_randomized, int(is_reversible), "zero")
    # Assemble the feature record in the same schema as the training columns.
    # NOTE(review): 'weight' is hard-coded to 150 — no UI control feeds it.
    inputs = {
        'temp': temp, 'pH': pH, 'Ea': Ea, 'A_factor': A_factor_randomized,
        'pressure': pressure, 'log_pressure': np.log(pressure), 'weight': 150,
        'structure': structure, 'catalyst': catalyst, 'is_reversible': int(is_reversible),
        'k': k, 'k_1': k_1,
        'A0': A_sim[0], 'A1': A_sim[1], 'A2': A_sim[2], 'A3': A_sim[3], 'A4': A_sim[4],
        'A5': A_sim[5], 'A6': A_sim[6], 'A7': A_sim[7], 'A8': A_sim[8], 'A9': A_sim[9], 'A10': A_sim[10],
        'B0': B_sim[0], 'B1': B_sim[1], 'B2': B_sim[2], 'B3': B_sim[3], 'B4': B_sim[4],
        'B5': B_sim[5], 'B6': B_sim[6], 'B7': B_sim[7], 'B8': B_sim[8], 'B9': B_sim[9], 'B10': B_sim[10],
        'C0': C_sim[0], 'C1': C_sim[1], 'C2': C_sim[2], 'C3': C_sim[3], 'C4': C_sim[4],
        'C5': C_sim[5], 'C6': C_sim[6], 'C7': C_sim[7], 'C8': C_sim[8], 'C9': C_sim[9], 'C10': C_sim[10],
    }
    # --- 2. Prediction ---
    predicted_order = predict_order(inputs)
    # --- 3. Final Simulation with Predicted Order ---
    # Re-solve with the predicted order so the plot matches the model's answer.
    time_final, A_final, B_final, C_final, _, _ = ode2(A0, B0, C0, temp, Ea, A_factor_randomized, int(is_reversible), predicted_order)
    # --- 4. Plotting ---
    plt.style.use('seaborn-v0_8-whitegrid')
    fig, ax = plt.subplots(figsize=(10, 6))
    ax.plot(time_final, A_final, 'o-', label='[A]', color='royalblue', markersize=5)
    ax.plot(time_final, B_final, 's--', label='[B]', color='forestgreen', markersize=5)
    ax.plot(time_final, C_final, '^-.', label='[C]', color='darkorange', markersize=5)
    ax.set_xlabel('Time (s)', fontsize=12)
    ax.set_ylabel('Concentration (M)', fontsize=12)
    ax.set_title(f'🧪 Concentration vs. Time (Predicted Order: {predicted_order})', fontsize=14)
    ax.legend(loc='best', fontsize=10)
    ax.grid(True, which='both', linestyle='--', linewidth=0.5)
    # Add watermark
    fig.text(0.99, 0.01, 'pinl',
             fontsize=12, color='gray',
             ha='right', va='bottom', alpha=0.5)
    return f"Predicted Order: {predicted_order}", fig
# --- 5. Gradio Interface ---
with gr.Blocks(theme=gr.themes.Soft()) as iface:
    # BUG FIX: the original passed elem_id="title" "made by Team PinlAI" —
    # adjacent string literals concatenate, so elem_id silently became
    # "titlemade by Team PinlAI". The credit now lives in the visible text.
    gr.Markdown("# Project E-11: 🧪 Chemical Reaction Simulator — made by Team PinlAI", elem_id="title")
    gr.Markdown("An interactive tool to predict reaction orders and visualize concentration changes over time.", elem_id="subtitle")
    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### ⚙️ Reaction Parameters")
            temp = gr.Slider(270, 280, value=277, label="🌡️ Temperature (K)")
            Ea = gr.Slider(90, 100, value=93, label="⚡ Activation Energy (kJ/mol)")
            A_factor_base = gr.Slider(1, 9, value=4, label="🅰️ Pre-exponential Factor (Base)")
            A_factor_exponent = gr.Slider(16, 18, value=17, step=1, label="🅰️ Pre-exponential Factor (Exponent)")
            A_factor_std_perc = gr.Slider(0, 50, value=10, label="📈 A Factor Std Dev (%)")
            pH = gr.Slider(1.0, 14.0, value=6.5, label="💧 pH")
            pressure = gr.Slider(0.5, 5.0, value=3.0, label="💨 Pressure (atm)")
            is_reversible = gr.Checkbox(label="🔄 Reversible Reaction")
            structure = gr.Dropdown(['Linear', 'Ring', 'Branched', 'Unknown'], label="🧬 Molecular Structure")
            catalyst = gr.Dropdown(['None', 'Enzyme', 'Acid', 'Base'], label="🔬 Catalyst")
        with gr.Column(scale=1):
            gr.Markdown("### ⚛️ Initial Concentrations")
            A0 = gr.Slider(0.0, 10.0, value=5.0, label="[A]₀")
            B0 = gr.Slider(0.0, 10.0, value=2.0, label="[B]₀")
            C0 = gr.Slider(0.0, 10.0, value=1.0, label="[C]₀")
    with gr.Row():
        predict_button = gr.Button("🚀 Predict & Plot", variant="primary")
    with gr.Row():
        with gr.Column(scale=2):
            order_output = gr.Textbox(label="📊 Predicted Reaction Order")
            plot_output = gr.Plot(label="📈 Concentration vs. Time")
    # Wire the button to the simulation callback: 13 inputs, 2 outputs.
    predict_button.click(
        fn=run_simulation_and_plot,
        inputs=[temp, Ea, A_factor_base, A_factor_exponent, A_factor_std_perc, pH, pressure, is_reversible, structure, catalyst, A0, B0, C0],
        outputs=[order_output, plot_output]
    )
iface.launch(debug=True)
"""### Streamlit"""
# !npm install -g localtunnel
# !streamlit run /content/app.py &>/content/logs.txt & #this starts the loca server
# get_ipython().run_line_magic('shell', 'curl https://loca.lt/mytunnelpassword') #getting ur home pass πŸ₯Ά
# !npx localtunnel --port 8501 #the tunnel