|
|
import streamlit as st |
|
|
from ultralytics import YOLO |
|
|
from streamlit_option_menu import option_menu |
|
|
import tensorflow as tf |
|
|
import json |
|
|
from streamlit_lottie import st_lottie |
|
|
from PIL import Image,ImageFont,ImageDraw |
|
|
from tensorflow.keras.models import load_model |
|
|
import numpy as np |
|
|
from tensorflow.keras.utils import custom_object_scope |
|
|
import tensorflow_addons as tfa |
|
|
from joblib import load |
|
|
from sklearn.feature_extraction.text import TfidfVectorizer |
|
|
|
|
|
def create_in():
    """Factory returning a fresh InstanceNormalization layer.

    Registered under custom_object_scope so saved Keras models that
    contain InstanceNormalization layers can be deserialized.
    """
    layer = tfa.layers.InstanceNormalization()
    return layer
|
|
|
|
|
def img_prep(paths, size=256):
    """Load images and prepare them for model input.

    Args:
        paths: iterable of image file paths (anything PIL.Image.open accepts).
        size: target edge length in pixels. Defaults to 256, preserving the
            original hard-coded behavior; parameterized so callers can reuse
            this instead of duplicating the loop for other resolutions.

    Returns:
        list of PIL.Image.Image objects, each resized to (size, size) and
        converted to RGB.
    """
    # Resize before .convert('RGB') to match the original call order
    # (palette/greyscale images are resampled in their native mode first).
    return [Image.open(p).resize((size, size)).convert('RGB') for p in paths]
|
|
|
|
|
def img_prep_YOLO(paths, size):
    """Open each image in *paths*, resize to (size, size) and force RGB.

    Used to prepare inputs at the resolution the YOLO model was trained on.
    Returns a list of PIL.Image.Image objects.
    """
    prepared = []
    for path in paths:
        image = Image.open(path)
        image = image.resize((size, size)).convert('RGB')
        prepared.append(image)
    return prepared
|
|
|
|
|
def model_out(img, model_path):
    """Run a saved image-to-image Keras model on a single image.

    Scales *img* from [0, 255] into the [-1, 1] range the generator expects,
    loads the model at *model_path* (resolving custom InstanceNormalization
    layers via create_in), then maps the prediction back to [0, 1] so
    Streamlit can display it.

    Returns the first (only) image of the predicted batch.
    """
    batch = np.expand_dims(np.asarray(img) / 127.5 - 1.0, axis=0)
    with custom_object_scope({'InstanceNormalization': create_in}):
        model = load_model(model_path)
    # (pred + 1) * 127.5 maps [-1, 1] -> [0, 255]; /255 normalizes for st.image.
    prediction = (model.predict(batch) + 1) * 127.5 / 255
    return prediction[0]
|
|
|
|
|
def yolo_out(model, img):
    """Run YOLO detection on *img* and draw labelled boxes onto it.

    Args:
        model: path to a trained ultralytics YOLO weights file (.pt).
        img: PIL.Image.Image to annotate (modified in place).

    Returns:
        The annotated image.

    Fixes vs. the original:
    - iterates over ALL detections instead of only the first box of each
      result (other detections were silently dropped);
    - skips results with zero detections instead of raising IndexError on
      an empty tensor;
    - formats the confidence as a number instead of a raw tensor repr.
    """
    detector = YOLO(model)
    results = detector(img)
    draw = ImageDraw.Draw(img)
    for result in results:
        boxes = result.boxes
        for cls_id, conf, xyxy in zip(boxes.cls, boxes.conf, boxes.xyxy):
            label = arr[int(cls_id)]
            x1, y1, x2, y2 = (float(v) for v in xyxy)
            draw.rectangle([x1, y1, x2, y2], outline="black", width=5)
            # Label centred horizontally on the box, just above its top edge.
            text_position = ((x1 + x2) / 2, y1 - 10)
            draw.text(text_position, f'{label} {float(conf):.2f}', fill="red", font=font)
    return img
|
|
|
|
|
# Font used for YOLO bounding-box labels.
font_size = 40
try:
    font = ImageFont.truetype("arial.ttf", size=font_size)
except OSError:
    # BUG FIX: arial.ttf is not installed on most Linux hosts (e.g. Streamlit
    # Cloud), and the unguarded truetype() call crashed the whole app at
    # import time. Fall back to PIL's built-in bitmap font instead.
    font = ImageFont.load_default()

# Class-index -> letter mapping for the hand-sign detector (A-Z).
arr = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
       'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
|
|
|
|
|
def lottiemaker(path):
    """Load a Lottie animation JSON file and return its parsed content."""
    with open(path) as handle:
        animation = json.load(handle)
    return animation
|
|
|
|
|
def loadimg(path):
    """Open an image file and return it resized to a 150x150 thumbnail."""
    return Image.open(path).resize((150, 150))
|
|
|
|
|
|
|
|
# Horizontal top navigation bar for the four portfolio pages.
selected = option_menu(
    menu_title=None,
    options=['About me', 'Tools and Experience', 'Projects', 'Contacts'],
    icons=['house', 'gear', 'book', 'phone'],
    default_index=0,
    orientation="horizontal",
)
|
|
|
|
|
if selected == 'About me':
    # Intro header: animation on the left, greeting on the right.
    left_col, right_col = st.columns(2)
    with left_col:
        st_lottie(lottiemaker('Animation - 1704777936737.json'), speed=1, width=400, height=370)
    with right_col:
        st.write('# Hi, I am Pallab Ghosh')
        st.header('Let me introduce myself')

    # Short bio below the header row.
    st.write('I am Pallab Ghosh from West Bengal, India')
    st.write('I am a Self Taught Machine Learning Engineer and Programmer')
    st.write("My passion include Machine Learning, Programming, Gaming, Football, Badminton, Maths, etc")
    st.write('I am currently in High school')
    st.write('I have been learning Machine learning since 2021 and have Made more than 50 projects in the field of Machine Learning')
    st.write('---')
|
|
elif selected == 'Tools and Experience': |
|
|
st.header('Tools and Expertise') |
|
|
col1,col2 = st.columns(2) |
|
|
with col1: |
|
|
st.write('Below are the tools I use to train AI and the Machine Learning and Deep Learning models I have expertise in') |
|
|
with col2: |
|
|
st_lottie(lottiemaker('Tools.json'),speed=1,width=300,height=200) |
|
|
|
|
|
st.write('# Programming Languages') |
|
|
_,col1,col2,_ = st.columns(4) |
|
|
with col1: |
|
|
st.button('\n \t Python \t \n') |
|
|
with col2: |
|
|
st.button('\n \t MySQL \t \n') |
|
|
|
|
|
st.write('# Machine Learning Frameworks') |
|
|
col1,col2,col3,col4,col5 = st.columns(5) |
|
|
|
|
|
with col1: |
|
|
st.button('\n \t Tensorflow \t \n') |
|
|
with col2: |
|
|
st.button('\n \t Keras \t \n') |
|
|
with col3: |
|
|
st.button('\n \t Scikit-Learn \t \n') |
|
|
with col3: |
|
|
st.button('\n \t Mediapipe \t \n') |
|
|
with col3: |
|
|
st.button('\n \t CVzone \t \n') |
|
|
|
|
|
st.write('# Machine Learning Algorithim') |
|
|
col1,col2 = st.columns(2) |
|
|
with col1: |
|
|
st.button("\n \t Linear Regression \t \n") |
|
|
with col2: |
|
|
st.button('\n \t Logistic Regression \t \n') |
|
|
|
|
|
col3,col4 = st.columns(2) |
|
|
with col3: |
|
|
st.button('\n \t Naive Bayes \t \n') |
|
|
with col4: |
|
|
st.button('\n \t Decision Tree \t \n') |
|
|
|
|
|
col1,col2 = st.columns(2) |
|
|
|
|
|
with col1: |
|
|
st.button('\n \t Random Forest \t \n') |
|
|
with col2: |
|
|
st.button('\n \t K Nearest Neighbour \t \n') |
|
|
|
|
|
st.button('\n \t Support Vector Machine \t \n') |
|
|
|
|
|
st.write('# Deep Learning Architectures') |
|
|
col1,col2,col3,col4 = st.columns(4) |
|
|
with col1: |
|
|
st.button('Artificial Neural Network') |
|
|
with col2: |
|
|
st.button("Convolutional Neural Network") |
|
|
with col3: |
|
|
st.button("Deep Convolutional Genertive Adversarial Network") |
|
|
with col4: |
|
|
st.button("Cycle Consistent Generative Adversarial Network") |
|
|
|
|
|
col1,col2,col3,col4 = st.columns(4) |
|
|
with col1: |
|
|
st.button("Paired Image Translation using Pix2Pix") |
|
|
with col2: |
|
|
st.button("Object Detection using YOLO") |
|
|
with col3: |
|
|
st.button("Unet model for semantic segmentation") |
|
|
with col4: |
|
|
st.button("Segmentation using YOLO") |
|
|
|
|
|
col1,col2,col3,col4 = st.columns(4) |
|
|
with col1: |
|
|
st.button("\n \t StyleGan \t \n") |
|
|
with col2: |
|
|
st.button("Facebook Prophet model for timeceries prediction") |
|
|
with col3: |
|
|
st.button("Bidirectional Encoder Representations from Transformers /n (BERT)") |
|
|
with col4: |
|
|
st.button("Recurrent Neural Network(RNN) and Long Stort Term Memory(LSTM)") |
|
|
|
|
|
col2,col3,col4 = st.columns(3) |
|
|
|
|
|
with col2: |
|
|
st.button("Hand Landmarks detection") |
|
|
with col3: |
|
|
st.button("Body Landmarks detection") |
|
|
with col4: |
|
|
st.button("Face Landmarks detction") |
|
|
|
|
|
st.write('# Data Preprocessing and Visualisation') |
|
|
col1,col2,col3,col4 = st.columns(4) |
|
|
with col1: |
|
|
st.button('\n \t Numpy \t \n') |
|
|
with col2: |
|
|
st.button('\n \t Pandas \t \n') |
|
|
with col3: |
|
|
st.button('\n \t Pillow \t \n') |
|
|
with col4: |
|
|
st.button('\n \t Matplotlib \t \n') |
|
|
|
|
|
st.write('# Model Deployment') |
|
|
|
|
|
_,col1,_ = st.columns(3) |
|
|
|
|
|
with col1: |
|
|
st.button('\n \t Streamlit \t \n') |
|
|
|
|
|
st.write("---") |
|
|
|
|
|
elif selected == 'Projects': |
|
|
st.header('Projects') |
|
|
st.write('I have made more than 50 projects in fields including CNN, GAN, Machine Learning, Yolo object detection and segmentation, Pix2pix, Cyclegan and more') |
|
|
st.write('Below are some models(one from each category)') |
|
|
|
|
|
options = {'Cyclegan':1, 'DCGAN':2,'Unet':3,'Machine Learning Text classifier':4,'YOLO object detection':5} |
|
|
selected_option = st.selectbox('Select an option', options) |
|
|
|
|
|
if selected_option == 'Unet': |
|
|
st.header('Description') |
|
|
st.write('This is a UNET model that segments the waterbodies in the image') |
|
|
st.header('Architecture') |
|
|
st.write('The model is of unet architecture to preserve the spatial informations in the images after applying Conv2D') |
|
|
|
|
|
imgs = img_prep(['unet1.jpg','unet2.jpg']) |
|
|
col1,col2 = st.columns(2) |
|
|
with col1: |
|
|
st.image(imgs[0],use_column_width=False) |
|
|
with col2: |
|
|
st.image(imgs[1],use_column_width=False) |
|
|
|
|
|
img_options = {'img1':1, 'img2':2} |
|
|
img_selected_option = st.selectbox('Select an Image', img_options) |
|
|
if img_selected_option == 'img1': |
|
|
st.image(model_out(imgs[0],'Portfolio Projects/FloodAreaSegmentationUnetPix2Pix.h5'),use_column_width=False) |
|
|
if img_selected_option == 'img2': |
|
|
st.image(model_out(imgs[1],'Portfolio Projects/FloodAreaSegmentationUnetPix2Pix.h5'),use_column_width=False) |
|
|
|
|
|
if selected_option == 'YOLO object detection': |
|
|
st.header('Description') |
|
|
st.write('This is an YOLO mdoel which can detect different hand sign in an image') |
|
|
st.header('Architecture') |
|
|
st.write('The model uses YOLOv8 to do the detection') |
|
|
|
|
|
imgs = img_prep_YOLO(['YOLOB.jpg','YOLOC.jpg'],416) |
|
|
col1,col2 = st.columns(2) |
|
|
with col1: |
|
|
st.image(imgs[0],use_column_width=False) |
|
|
with col2: |
|
|
st.image(imgs[1],use_column_width=False) |
|
|
|
|
|
img_options = {'img1':1, 'img2':2} |
|
|
img_selected_option = st.selectbox('Select an Image', img_options) |
|
|
if img_selected_option == 'img1': |
|
|
st.image(yolo_out('Portfolio Projects/HandSignDetector.pt',imgs[0]),use_column_width=False) |
|
|
if img_selected_option == 'img2': |
|
|
st.image(yolo_out('Portfolio Projects/HandSignDetector.pt',imgs[1]),use_column_width=False) |
|
|
|
|
|
elif selected_option == 'Machine Learning Text classifier': |
|
|
|
|
|
st.header('Spam Detection using Naive Bayes Classifier') |
|
|
vectorizer = load('Portfolio Projects/tfidf_vectorizer.joblib') |
|
|
user_input = st.text_input("Enter some text:", "") |
|
|
if user_input is not None: |
|
|
x = vectorizer.transform([user_input]) |
|
|
model = load('Portfolio Projects/Naive_Bayes_Spam_Detection.joblib') |
|
|
pred = model.predict(x) |
|
|
if pred[0] == 1: |
|
|
st.write('The entered text is a Spam') |
|
|
elif pred[0] == 0: |
|
|
st.write('The entered text is not a Spam') |
|
|
else: |
|
|
st.write('Error, Try again') |
|
|
|
|
|
elif selected_option == 'Cyclegan': |
|
|
st.header('Description') |
|
|
st.write('This is a CycleGAN model that turns an input image into an Image of a Monet Painting') |
|
|
st.header('Architecture') |
|
|
st.write('The Generator model is the prebuild tensorflow pix2pix generator model') |
|
|
st.write('The Discriminator model is the discriminator model from the same module') |
|
|
|
|
|
imgs = img_prep(['cyclegan1.jpg','cyclegan2.jpg']) |
|
|
col1,col2 = st.columns(2) |
|
|
with col1: |
|
|
st.image(imgs[0],use_column_width=False) |
|
|
with col2: |
|
|
st.image(imgs[1],use_column_width=False) |
|
|
|
|
|
img_options = {'img1':1, 'img2':2} |
|
|
img_selected_option = st.selectbox('Select an Image', img_options) |
|
|
if img_selected_option == 'img1': |
|
|
st.image(model_out(imgs[0],'Portfolio Projects/photo2monet2.h5'),use_column_width=False) |
|
|
if img_selected_option == 'img2': |
|
|
st.image(model_out(imgs[1],'Portfolio Projects/photo2monet2.h5'),use_column_width=False) |
|
|
|
|
|
elif selected_option == 'DCGAN': |
|
|
|
|
|
st.header('Description') |
|
|
st.write('This is a DCGAN model that turns a random noise vector into an Image of a dog(that sometimes turns out disformed) using Convolutional2D Transpose layers') |
|
|
st.header('Architecture') |
|
|
st.write('The Generator model is just a set of Convolutional2D Transpose, BatchNormalization and Leaky relu') |
|
|
st.write('The Discriminator model is a very simple model with Convolutional2D, Dropout and Leaky Relu') |
|
|
model = tf.keras.models.load_model('Portfolio Projects/doggen3.h5') |
|
|
|
|
|
|
|
|
button_clicked = st.button("Generate") |
|
|
|
|
|
|
|
|
if button_clicked: |
|
|
|
|
|
seed = tf.random.normal((1, 100)) |
|
|
pred = model.predict(seed) |
|
|
pred = pred * 0.5 + 0.5 |
|
|
pred = np.squeeze(pred) |
|
|
st.image(pred,use_column_width=True) |
|
|
|
|
|
st.write('To see other projects, You can Visit my profile on HuggingFace') |
|
|
st.link_button('\n \t \t \t HuggingFace Account \t \t \r \n','https://huggingface.co/Beasto') |
|
|
elif selected == 'Contacts': |
|
|
st.header('Contact me') |
|
|
col1,col2,col3,col4 = st.columns(4) |
|
|
with col1: |
|
|
st.link_button('\n \t \t \t Instagram \t \t \r \n','https://www.instagram.com/i_suck_at_coding.256/') |
|
|
with col2: |
|
|
st.link_button('\n \t \t \t Github \t \t \r \n','https://github.com/Beastojenisto') |
|
|
with col3: |
|
|
st.link_button('\n \t \t \t Twitter/X \t \t \r \n','https://twitter.com/Isuckatcodinboi') |
|
|
with col4: |
|
|
st.link_button('\n \t \t \t HuggingFace \t \t \r \n','https://huggingface.co/Beasto') |