Spaces:
Sleeping
Sleeping
import streamlit code from streamlit branch
Browse files- .gitattributes +6 -0
- Dockerfile +4 -2
- README.md +30 -0
- architecture.png +3 -0
- requirements.txt +4 -1
- streamlit/Home.py +51 -0
- streamlit/pages/Predictions.py +157 -0
- streamlit/pages/Sources.py +36 -0
.gitattributes
CHANGED
|
@@ -1 +1,7 @@
|
|
|
|
|
| 1 |
*.parquet filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.png filter=lfs diff=lfs merge=lfs -text
|
| 2 |
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
architecture.png filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
data/2024_semester2_merged_v2.parquet filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
data/meteo_cleaned_pivoted.parquet filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
data/pollutants_cleaned_pivoted.parquet filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
data/traffic_cleaned_pivoted.parquet filter=lfs diff=lfs merge=lfs -text
|
Dockerfile
CHANGED
|
@@ -12,7 +12,9 @@ RUN pip install --no-cache-dir --upgrade pip \
|
|
| 12 |
&& pip install --no-cache-dir -r requirements.txt
|
| 13 |
|
| 14 |
# Make a volume mount point for the input/output CSV files
|
| 15 |
-
VOLUME ["/app/input_data.csv", "/app/output_data.csv"]
|
|
|
|
|
|
|
| 16 |
|
| 17 |
# Run the application (by default, run the main ETL process)
|
| 18 |
-
CMD ["
|
|
|
|
| 12 |
&& pip install --no-cache-dir -r requirements.txt
|
| 13 |
|
| 14 |
# Make a volume mount point for the input/output CSV files
|
| 15 |
+
# VOLUME ["/app/input_data.csv", "/app/output_data.csv"]
|
| 16 |
+
|
| 17 |
+
EXPOSE 7860
|
| 18 |
|
| 19 |
# Run the application (by default, run the main ETL process)
|
| 20 |
+
CMD ["streamlit", "run", "streamlit/Home.py", "--server.port=7860", "--server.address=0.0.0.0"]
|
README.md
CHANGED
|
@@ -1,3 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
# Guide de Configuration Jenkins et Pipeline CI/CD
|
| 2 |
|
| 3 |
## 📌 Introduction
|
|
@@ -14,6 +25,9 @@ Ce projet implémente un pipeline CI/CD dans Jenkins pour exécuter un processus
|
|
| 14 |
├── jenkins/
|
| 15 |
│ └── Jenkinsfile # Pipeline Jenkins pour CI/CD
|
| 16 |
│
|
|
|
|
|
|
|
|
|
|
| 17 |
├── tests/
|
| 18 |
│ ├── Dockerfile # Dockerfile pour lancer les tests
|
| 19 |
│ ├── requirements.txt # Dépendances spécifiques aux tests
|
|
@@ -83,3 +97,19 @@ Ou ajoutez-les directement dans Jenkins :
|
|
| 83 |
Ce pipeline CI/CD garantit l'intégration et le déploiement automatisé du processus ETL en utilisant Jenkins et Docker.
|
| 84 |
|
| 85 |
🔥 N'hésitez pas à adapter les configurations en fonction de votre environnement !
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: Quality Air Streamlit App
|
| 3 |
+
emoji: 💨
|
| 4 |
+
colorFrom: green
|
| 5 |
+
colorTo: blue
|
| 6 |
+
sdk: streamlit
|
| 7 |
+
python_version: "3.9"
|
| 8 |
+
app_file: streamlit/Home.py
|
| 9 |
+
pinned: false
|
| 10 |
+
---
|
| 11 |
+
|
| 12 |
# Guide de Configuration Jenkins et Pipeline CI/CD
|
| 13 |
|
| 14 |
## 📌 Introduction
|
|
|
|
| 25 |
├── jenkins/
|
| 26 |
│ └── Jenkinsfile # Pipeline Jenkins pour CI/CD
|
| 27 |
│
|
| 28 |
+
├── streamlit/
|
| 29 |
+
│ └── (streamlit files) # Each file corresponds to a panel in the streamlit app
|
| 30 |
+
│
|
| 31 |
├── tests/
|
| 32 |
│ ├── Dockerfile # Dockerfile pour lancer les tests
|
| 33 |
│ ├── requirements.txt # Dépendances spécifiques aux tests
|
|
|
|
| 97 |
Ce pipeline CI/CD garantit l'intégration et le déploiement automatisé du processus ETL en utilisant Jenkins et Docker.
|
| 98 |
|
| 99 |
🔥 N'hésitez pas à adapter les configurations en fonction de votre environnement !
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
## 🏗️ A word on streamlit
|
| 103 |
+
|
| 104 |
+
To get the streamlit app to function on HuggingFace, we need to add the HF repo as a remote repository:
|
| 105 |
+
```
|
| 106 |
+
git remote set-url --add origin https://huggingface.co/spaces/martper56/streamlit_app
|
| 107 |
+
```
|
| 108 |
+
|
| 109 |
+
Then we need to push the code in the development branch as the main branch on streamlit:
|
| 110 |
+
|
| 111 |
+
```
|
| 112 |
+
git push -f https://huggingface.co/spaces/martper56/streamlit_app development:main
|
| 113 |
+
```
|
| 114 |
+
|
| 115 |
+
|
architecture.png
ADDED
|
Git LFS Details
|
requirements.txt
CHANGED
|
@@ -5,4 +5,7 @@ apache-airflow-providers-postgres
|
|
| 5 |
apache-airflow-providers-amazon
|
| 6 |
scikit-learn
|
| 7 |
psycopg[binary]
|
| 8 |
-
python-dotenv
|
|
|
|
|
|
|
|
|
|
|
|
| 5 |
apache-airflow-providers-amazon
|
| 6 |
scikit-learn
|
| 7 |
psycopg[binary]
|
| 8 |
+
python-dotenv
|
| 9 |
+
streamlit
|
| 10 |
+
matplotlib
|
| 11 |
+
boto3
|
streamlit/Home.py
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import streamlit as st

##################################################
# Default page configuration.
##################################################
st.set_page_config(page_title="Jedha dsl-ft-32 Final Project", layout="wide", )


def _spacer(count):
    """Emit `count` empty markdown lines for vertical spacing."""
    for _ in range(count):
        st.markdown("")


##################################################
# App
##################################################
# Team introduction: one markdown line per member, rendered in order.
st.title("Welcome to our final project at Jedha Bootcamp")
st.markdown("Permettez-nous de nous presenter !")
_spacer(1)

_TEAM_BIOS = (
    "Pooven CALINGHEE - [LinkedIn](https://www.linkedin.com/in/pooven-calinghee-87636479/) - 10+ annees dans le developpement web avec specialisation AWS et cloud",
    " Martin PERON - [LinkedIN](https://www.linkedin.com/in/martin-péron-b3b9725b/) - 10+ annees dans la data. Data scientist puis ML engineer",
    "Adrien LEQUILLER - [LinkedIn](https://www.linkedin.com/in/adrien-lequiller-4a9b6a81/) - 8+ annees en tant que Data analyst",
    "Alex LASNIER - [LinkedIn](https://www.linkedin.com/in/alex-lasnier) - 10+ annees dans l'industrie dont 3 dans le domaine de la data",
)
for bio in _TEAM_BIOS:
    st.markdown(bio)
_spacer(1)

# Project pitch and architecture diagram.
st.header("Prevision de la qualite de l'air")
st.markdown(
    "Le but du projet final est de développer d’une application de Machine Learning embarquée permettant d’anticiper les risques de pollution sur l’ensemble du territoire français, à partir des données météorologiques et du trafic routier")

_spacer(3)

# left_co, cent_co, last_co = st.columns(3)
# with cent_co:
st.image("architecture.png")
_spacer(3)

st.markdown(
    "Apres un premier nettoyage des donnees et le merge en un seul fichier parquet, premiers processing pour entrainer le(s) modele(s) de ML. \n\n"
    "Selection des features pour les predictions ==> température, humidité, vent, pression cote meteo et niveau de traffic cote traffic\n\n"
    "Les outputs seront donc la prediction des niveaux des polluants ==> NOx, O3, PM10 et PM25\n\n"
    "Selection de l'API pour faire du temps reel ==> Rennes")

_spacer(3)
st.markdown("Main page = presentation equipe et projet \n\n"
            "Ajout de la prediction sur main page ou sur une autre page??\n\n"
            "Une page pour montrer les sources apres nettoyage ==> OK\n\n"
            "")
|
streamlit/pages/Predictions.py
ADDED
|
@@ -0,0 +1,157 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
import json

import boto3
import pandas as pd
import matplotlib.pyplot as plt
import streamlit as st

# Titre de la page
st.title("Predictions - ville de Rennes")

# S3 settings. SECURITY FIX: the AWS access key and secret were previously
# hardcoded here and committed to source control — those credentials are
# compromised (this file is public) and must be rotated in IAM. Read them
# from the environment instead; when unset, boto3 falls back to its default
# credential chain (env vars, ~/.aws/credentials, instance profile).
AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY")
# Bucket/prefix are not secrets; the bucket can still be overridden per env.
BUCKET_NAME = os.environ.get("S3_BUCKET_NAME", "jedha-quality-air")
PREFIX = "datasets/output/"
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
# @st.cache_data(show_spinner=True)
def load_data_from_s3():
    """Fetch the most recent prediction JSON from S3 and wrap it in a frame.

    Lists every object under ``PREFIX`` in ``BUCKET_NAME``, downloads the
    one with the newest ``LastModified`` timestamp, and parses its JSON
    payload into a one-row DataFrame. The prediction date and time are
    recovered from the file name, which is assumed to look like
    ``"YYYYmmdd-HHMMSS_prediction_data.json"`` — TODO confirm against the
    producer job.

    Returns:
        (df, formatted_date, formatted_time): the one-row DataFrame
        (augmented with "date" and "heure UTC" columns), a ``datetime.date``
        and a ``datetime.time``.

    Raises:
        FileNotFoundError: if no object exists under the prefix. (The
        original code only printed a message in that case and then crashed
        with a NameError on the undefined ``content``/``latest_key``.)
    """
    s3 = boto3.client(
        "s3",
        aws_access_key_id=AWS_ACCESS_KEY_ID,
        aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
    )
    response = s3.list_objects_v2(Bucket=BUCKET_NAME, Prefix=PREFIX)

    # Fail loudly when the folder is empty instead of falling through to a
    # NameError further down.
    if "Contents" not in response:
        raise FileNotFoundError(
            f"Aucun fichier trouvé dans s3://{BUCKET_NAME}/{PREFIX}"
        )

    # Most recently modified object wins.
    latest_file = max(response["Contents"], key=lambda x: x["LastModified"])
    latest_key = latest_file["Key"]

    latest_object = s3.get_object(Bucket=BUCKET_NAME, Key=latest_key)
    content = latest_object["Body"].read().decode("utf-8")
    data = json.loads(content)

    # The file name encodes the prediction timestamp, e.g. "20250711-064810".
    filename = latest_key.split("/")[-1]
    datetime_str = filename.split("_")[0]
    date_str = datetime_str[:8]
    time_str = datetime_str[9:]
    formatted_date = pd.to_datetime(date_str, format="%Y%m%d").date()
    formatted_time = pd.to_datetime(time_str, format="%H%M%S").time()

    df = pd.DataFrame([data])
    df["date"] = formatted_date
    df["heure UTC"] = formatted_time
    return df, formatted_date, formatted_time


# Chargement des données
df, formatted_date, formatted_time = load_data_from_s3()
|
| 60 |
+
# Air-quality thresholds per pollutant, in µg/m³. Each entry is
# (lower bound inclusive, upper bound exclusive, hex color, quality label).
pollution_seuils = {
    "PM25": [
        (0, 10, "#4ee3dc", "Bon"),
        (10, 20, "#53c8b5", "Moyen"),
        (20, 25, "#f3dd57", "Dégradé"),
        (25, 50, "#f47d61", "Mauvais"),
        (50, 75, "#b22133", "Très mauvais"),
        (75, float("inf"), "#7d2e8e", "Extrêmement mauvais"),
    ],
    "PM10": [
        (0, 20, "#4ee3dc", "Bon"),
        (20, 40, "#53c8b5", "Moyen"),
        (40, 50, "#f3dd57", "Dégradé"),
        (50, 100, "#f47d61", "Mauvais"),
        (100, 150, "#b22133", "Très mauvais"),
        (150, float("inf"), "#7d2e8e", "Extrêmement mauvais"),
    ],
    "NOX": [
        (0, 40, "#4ee3dc", "Bon"),
        (40, 90, "#53c8b5", "Moyen"),
        (90, 120, "#f3dd57", "Dégradé"),
        (120, 230, "#f47d61", "Mauvais"),
        (230, 340, "#b22133", "Très mauvais"),
        (340, float("inf"), "#7d2e8e", "Extrêmement mauvais"),
    ],
    "O3": [
        (0, 50, "#4ee3dc", "Bon"),
        (50, 100, "#53c8b5", "Moyen"),
        (100, 130, "#f3dd57", "Dégradé"),
        (130, 240, "#f47d61", "Mauvais"),
        (240, 380, "#b22133", "Très mauvais"),
        (380, float("inf"), "#7d2e8e", "Extrêmement mauvais"),
    ],
}


# Fonction pour récupérer la couleur selon les seuils
def get_pollution_color(polluant, valeur):
    """Return the ``(hex color, quality label)`` pair for a pollutant value.

    Args:
        polluant: key of ``pollution_seuils`` ("PM25", "PM10", "NOX", "O3").
        valeur: concentration in µg/m³.

    Returns:
        A 2-tuple ``(couleur, libelle)``. Unknown pollutants and
        out-of-range values (e.g. negatives) fall back to the neutral grey
        ``("#cccccc", "Inconnu")``.
    """
    # Bug fix: the fallback used to return a bare string, which broke every
    # caller that unpacks ``color, libelle``; an unknown pollutant key also
    # crashed while iterating ``None``. Default to an empty list and always
    # return a tuple.
    for bas, haut, couleur, libelle in pollution_seuils.get(polluant, []):
        if bas <= valeur < haut:
            return couleur, libelle
    return "#cccccc", "Inconnu"  # fallback gris
|
| 102 |
+
# --- Prediction table -------------------------------------------------
st.subheader("Données de prédiction")
st.dataframe(df)
for _ in range(3):
    st.markdown("")

# --- Bar chart of the four predicted pollutant levels -----------------
polluants = ["O3", "NOX", "PM10", "PM25"]
valeurs = [df[col].iloc[0] for col in polluants]

couleurs = ["#8da0cb", "#fc8d62", "#66c2a5", "#a6d854"]  # muted pastel tones

fig, ax = plt.subplots(figsize=(6, 3))  # compact figure
bars = ax.bar(polluants, valeurs, color=couleurs)

# The chart title carries the prediction date and time.
titre = f"Pollution le {formatted_date} à {formatted_time.strftime('%H:%M:%S')}"
ax.set_title(titre, fontsize=14)
ax.set_xlabel("Polluants", fontsize=12)
ax.set_ylabel("Valeurs", fontsize=12)

# Annotate each bar with its value, slightly above the bar top.
max_val = max(valeurs)
for rect, mesure in zip(bars, valeurs):
    ax.text(
        rect.get_x() + rect.get_width() / 2,
        rect.get_height() + max_val * 0.02,
        f"{mesure:.1f}",
        ha="center",
        fontsize=8,
    )

# Center the figure with a 1-2-1 Streamlit column layout.
left_co, cent_co, right_co = st.columns([1, 2, 1])

with cent_co:
    st.pyplot(fig)

# --- Colored air-quality badges, one per pollutant --------------------
st.subheader("Qualité de l’air")

for polluant, mesure in zip(polluants, valeurs):
    color, libelle = get_pollution_color(polluant, mesure)

    st.markdown(
        f"""
        <div style='background-color:{color};padding:10px 15px;
        margin:6px 0;border-radius:10px;
        color:black;font-weight:bold;font-size:16px;'>
        {polluant} : {mesure:.1f} µg/m³ - {libelle}
        </div>
        """,
        unsafe_allow_html=True
    )
|
streamlit/pages/Sources.py
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
import streamlit as st
import pandas as pd

# Page title
st.title("Fichiers sources d'entrainement (apres premier nettoyage)")


def _load_and_show(section_title, parquet_name):
    """Render a subheader, load the parquet file from data/, display it,
    and return the resulting DataFrame."""
    st.subheader(section_title)
    frame = pd.read_parquet(os.path.join("data", parquet_name))
    st.write(frame)
    return frame


##################################################
# One section per cleaned/pivoted training source.
##################################################
data_pollutants = _load_and_show("Pollutants", "pollutants_cleaned_pivoted.parquet")
data_meteo = _load_and_show("Meteo", "meteo_cleaned_pivoted.parquet")
data_traffic = _load_and_show("Traffic", "traffic_cleaned_pivoted.parquet")
|