Spaces:
Sleeping
Sleeping
import numpy as np
import streamlit as st
import tensorflow as tf
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from PIL import Image
from tensorflow.keras.preprocessing import image
from tensorflow.keras.utils import img_to_array, load_img
# Sidebar header: school logo, a student-info toggle button, and the
# model-selection menu that drives the rest of the page.
logo = Image.open("keyce.jpg").resize((400, 100))
st.sidebar.image(logo)

show_student_infos = st.sidebar.button('👦🏻 INFOS ETUDIANT')
if show_student_infos:
    st.sidebar.markdown(
        '<h3 style="color: red;"> 👦🏻 YEWO FEUTCHOU HIROSHI (Master 2 - IABD)</h3>',
        unsafe_allow_html=True,
    )

menu_options = [
    'CHOISIR UN MODELE ICI 🔻',
    '✅ CLASSIFICATION TENSORFLOW',
    '✅ CLASSIFICATION PYTORCH',
]
i = st.sidebar.selectbox("Menu", menu_options)
if i == 'CHOISIR UN MODELE ICI 🔻':
    # Landing page: course title plus the two framework logos side by side.
    st.title('➤ CONTROLE CONTINU DE CNN ◀')
    st.title('Classif Tensorflow vs Pytorch')

    tf_logo = Image.open("tensorflow.png").resize((600, 400))
    torch_logo = Image.open("pytorch.png").resize((600, 400))

    col1, col2 = st.columns(2)
    with col1:
        st.image(tf_logo, caption="TensorFlow", use_container_width=True)
    with col2:
        st.image(torch_logo, caption="Pytorch", use_container_width=True)
| elif i == '✅ CLASSIFICATION TENSORFLOW': | |
| tensor = Image.open("tensorflow.png") | |
| tensor = tensor.resize((600, 100)) | |
| st.image(tensor) | |
| st.title("Classification avec TensorFlow") | |
| upload_file = st.sidebar.file_uploader('Choisissez une radiographie...',type=['jpg','jpeg','png']) | |
| generated_pred = st.sidebar.button('📈 PREDICTION') | |
| model = tf.keras.models.load_model('modelTensor.h5') | |
| classes_p = {'CYST': 0, | |
| 'NORMAL': 1, | |
| 'TUMOR': 2, | |
| 'STONE': 3} | |
| if upload_file: | |
| st.image(upload_file,caption='Image telechargee', use_container_width =True) | |
| test_image = image.load_img(upload_file,target_size=(64,64)) | |
| image_array = img_to_array(test_image) | |
| image_array = np.expand_dims(image_array,axis=0) | |
| else: | |
| st.markdown('<h3>⏳ Attente de la radiographie ... </h3>', unsafe_allow_html=True) | |
| if generated_pred: | |
| predictions = model.predict(image_array) | |
| classes = np.argmax(predictions[0]) | |
| for key,value in classes_p.items(): | |
| if value == classes: | |
| st.sidebar.title(f'Je peux dire avec certitude que cette radio est ➤ {key}') | |
| elif i=='✅ CLASSIFICATION PYTORCH': | |
| # Définition du modèle (Doit être identique à celui utilisé lors de l'entraînement) | |
| class MyModel(nn.Module): | |
| def __init__(self, num_classes=4): # Modifier selon le nombre de classes | |
| super(MyModel, self).__init__() | |
| self.conv_relu_stack = nn.Sequential( | |
| nn.Conv2d(in_channels=3,out_channels=16,kernel_size=5), # (250-5-0)/1 +1 = 246 in_channels=3 car photo couleur | |
| nn.ReLU(), | |
| nn.MaxPool2d(kernel_size=2,stride=2) , # (246-2)/2 +1 = 123 | |
| nn.Conv2d(in_channels=16,out_channels=32,kernel_size=5), #(123-5-0)/1 +1 =119 | |
| nn.MaxPool2d(kernel_size=2,stride=2), #(119-2)/2 + 1 = 59.5 | |
| nn.Conv2d(in_channels=32,out_channels=64,kernel_size=5), #(59,5-5-0)/1 +1 = 55,5 | |
| nn.MaxPool2d(kernel_size=2,stride=2)) #(55,5-2)/2 +1 = 26,75 NOUS ALLONS PRENDRE 26 | |
| self.linear_relu_stack = nn.Sequential( | |
| nn.Linear(in_features=64*27*27, out_features=500), | |
| nn.ReLU(), | |
| nn.Linear(in_features=500, out_features=100), | |
| nn.Linear(in_features=100,out_features=4)) | |
| def forward(self, x): | |
| x = self.conv_relu_stack(x) | |
| x = torch.flatten(x,1) | |
| logits = self.linear_relu_stack(x) | |
| return logits | |
| # Fonction pour charger le modèle | |
| def load_model(): | |
| model = MyModel(4) # Modifier selon le nombre de classes | |
| model.load_state_dict(torch.load("modelpytoch.pth", map_location=torch.device("cpu"), weights_only=True)) | |
| model.eval() # Mode évaluation | |
| return model | |
| model = load_model() | |
| # Transformation des images | |
| transform = transforms.Compose([ | |
| transforms.Resize((250, 250)), # Modifier selon la taille utilisée à l'entraînement | |
| transforms.ToTensor(), | |
| transforms.Normalize((0.5),0.5) | |
| ]) | |
| py = Image.open("pytorch.png") | |
| py = py.resize((600, 100)) | |
| st.image(py) | |
| # Interface utilisateur Streamlit | |
| st.title("Classification avec PyTorch") | |
| uploaded_file = st.sidebar.file_uploader("Choisissez une radiographie...", type=["jpg", "png", "jpeg"]) | |
| generated_pred = st.sidebar.button('📈 PREDICTION') | |
| if uploaded_file: | |
| image = Image.open(uploaded_file).convert("RGB") | |
| st.image(image, caption="Image chargée", use_container_width=True) | |
| # Prétraitement de l'image | |
| img_tensor = transform(image).unsqueeze(0) # Ajout d'une dimension batch | |
| else: | |
| st.markdown('<h3>⏳Attente de la radiographie ... </h3>', unsafe_allow_html=True) | |
| if generated_pred: | |
| # Prédiction | |
| with torch.no_grad(): | |
| output = model(img_tensor) | |
| predicted_class = torch.argmax(output, dim=1).item() | |
| classes_p = {'CYST': 0,'NORMAL': 1,'TUMOR': 2,'STONE': 3} | |
| for key, value in classes_p.items(): | |
| if value == predicted_class: | |
| st.sidebar.title(f'Je peux dire avec certitude que cette radio est ➤ {key}') |