cd@bziiit.com committed on
Commit
a594a20
·
1 Parent(s): 9d1c006

Include carbon footprint

Browse files
.gitignore CHANGED
@@ -1,4 +1,6 @@
1
  /__pycache__/*
 
2
  /.qodo/*
3
 
4
- .env
 
 
1
  /__pycache__/*
2
+ */__pycache__/*
3
  /.qodo/*
4
 
5
+ .env
6
+ emissions.csv
app.py CHANGED
@@ -9,6 +9,8 @@ from langchain_core.messages import AIMessageChunk
9
  from typing import Literal
10
  import re
11
 
 
 
12
 
13
  # Charger les variables d'environnement
14
  load_dotenv()
@@ -54,6 +56,8 @@ def process_query(query, architecture: Literal["A", "B", "C"]):
54
  st.markdown(query)
55
 
56
  full_response = ""
 
 
57
 
58
  events = agent.stream(initial_state, config=config, stream_mode="messages")
59
 
@@ -64,17 +68,44 @@ def process_query(query, architecture: Literal["A", "B", "C"]):
64
 
65
  for event in events:
66
  for message in event:
67
- if isinstance(message, AIMessageChunk) and hasattr(message, 'content'):
68
- full_response += message.content # Accumuler les morceaux
69
- # 🛑 Vérification si la réponse contient une requête Cypher
70
- if re.search(r"(?i)(MATCH|CREATE|MERGE|DELETE|CALL)[\s\S]+;", full_response):
 
 
 
 
 
 
 
 
 
 
 
71
 
72
- full_response = full_response = re.sub(r"(?i)(MATCH|CREATE|MERGE|DELETE|CALL)[\s\S]+?;", "", full_response).strip()
73
 
74
 
75
  # 🔄 Affichage final
76
- response_placeholder.markdown(full_response)
77
  st.session_state.chat_history.append({"role": "assistant", "content": full_response})
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
78
 
79
 
80
 
@@ -84,11 +115,35 @@ def display_sidebar():
84
  lien_ressource = "https://www.fnac.com/livre-numerique/a21290809/Gaspard-Boreal-La-Confession-muette"
85
  #st.title("📄 La confession muette")
86
  #st.write("Posez vos questions sur le document.")
87
- st.image(agent_B.get_graph().draw_mermaid_png(), caption="Workflow Graph")
88
  st.markdown("Document de référence 📄 : \nLa confession muette (2025)")
89
  st.markdown("Avec l'aimable autorisation de Gaspard Boréal: \n[Récit d'origine]({})".format(lien_ressource))
90
 
91
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
92
  def display_chat_history():
93
  """Affiche l'historique de chat."""
94
  for message in st.session_state.chat_history:
@@ -100,11 +155,21 @@ def display_chat_history():
100
  st.markdown(message["content"])
101
 
102
  def main():
103
- """Point d'entrée de l'application."""
 
 
 
 
 
 
 
 
 
104
  # Vérifier que les index Pinecone sont prêts
105
  if not check_indexes_ready():
106
  return
107
 
 
108
  st.title("RAG architectures")
109
  st.markdown(
110
  """
@@ -124,6 +189,9 @@ def main():
124
  )
125
 
126
  display_sidebar()
 
 
 
127
 
128
  query = st.chat_input("Posez votre question ici:")
129
  if query:
@@ -136,5 +204,27 @@ def main():
136
 
137
  process_query(query, architecture)
138
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
139
  if __name__ == "__main__":
140
  main()
 
9
  from typing import Literal
10
  import re
11
 
12
+ from carbon.empreinte_carbone import initCarbon, traceImpact, display_cf_comparison
13
+
14
 
15
  # Charger les variables d'environnement
16
  load_dotenv()
 
56
  st.markdown(query)
57
 
58
  full_response = ""
59
+ usages = None
60
+ start_time = time.time() # Start timing
61
 
62
  events = agent.stream(initial_state, config=config, stream_mode="messages")
63
 
 
68
 
69
  for event in events:
70
  for message in event:
71
+ if isinstance(message, AIMessageChunk):
72
+ # Vérifiez le type de message et affichez son contenu
73
+ if hasattr(message, 'content'):
74
+ full_response += message.content
75
+ response_placeholder.markdown(full_response)
76
+
77
+ if hasattr(message, 'usage_metadata'):
78
+ usages = message.usage_metadata
79
+
80
+ latency = time.time() - start_time # Calculate elapsed time
81
+
82
+ # if isinstance(message, AIMessageChunk) and hasattr(message, 'content'):
83
+ # full_response += message.content # Accumuler les morceaux
84
+ # # 🛑 Vérification si la réponse contient une requête Cypher
85
+ # if re.search(r"(?i)(MATCH|CREATE|MERGE|DELETE|CALL)[\s\S]+;", full_response):
86
 
87
+ # full_response = full_response = re.sub(r"(?i)(MATCH|CREATE|MERGE|DELETE|CALL)[\s\S]+?;", "", full_response).strip()
88
 
89
 
90
  # 🔄 Affichage final
91
+ # response_placeholder.markdown(full_response)
92
  st.session_state.chat_history.append({"role": "assistant", "content": full_response})
93
+
94
+ traceImpact(
95
+ st.session_state.current_model,
96
+ trace={
97
+ 'completion_tokens' : usages.get('output_tokens', None),
98
+ 'latency' : latency # Use the measured latency
99
+ }
100
+ )
101
+
102
+ # Mise à jour des tokens + coût
103
+ st.session_state['tokens_metrics']['input_tokens'] += usages.get('input_tokens', 0)
104
+ st.session_state['tokens_metrics']['output_tokens'] += usages.get('output_tokens', 0)
105
+ st.session_state['tokens_metrics']['total_tokens'] += usages.get('total_tokens', 0)
106
+ calculate_tokens_cost()
107
+
108
+ st.rerun()
109
 
110
 
111
 
 
115
  lien_ressource = "https://www.fnac.com/livre-numerique/a21290809/Gaspard-Boreal-La-Confession-muette"
116
  #st.title("📄 La confession muette")
117
  #st.write("Posez vos questions sur le document.")
 
118
  st.markdown("Document de référence 📄 : \nLa confession muette (2025)")
119
  st.markdown("Avec l'aimable autorisation de Gaspard Boréal: \n[Récit d'origine]({})".format(lien_ressource))
120
 
121
 
122
+ # Token metrics containers
123
+ st.sidebar.markdown("### Tokens")
124
+
125
+ # HTML pour le tableau sans index ni colonnes inutiles
126
+ table_html = f"""
127
+ <table style="width:100%">
128
+ <tr><td>Input Tokens</td><td style="text-align: right;"><b>{st.session_state['tokens_metrics'].get('input_tokens', 0)}</b></td></tr>
129
+ <tr><td>Output Tokens</td><td style="text-align: right;"><b>{st.session_state['tokens_metrics'].get('output_tokens', 0)}</b></td></tr>
130
+ <tr><td>Total Tokens</td><td style="text-align: right;"><b>{st.session_state['tokens_metrics'].get('total_tokens', 0)}</b></td></tr>
131
+ <tr><td>Coût</td><td style="text-align: right;"><b>{st.session_state['tokens_cost']} €</b></td></tr>
132
+ </table>
133
+ """
134
+
135
+ st.sidebar.markdown(table_html, unsafe_allow_html=True)
136
+
137
+ st.sidebar.markdown("### Empreinte Carbone")
138
+
139
+ display_cf_comparison(st.sidebar)
140
+
141
+ st.sidebar.markdown("---")
142
+ st.sidebar.image("./assets/bziiit.png", width=100)
143
+ st.sidebar.markdown("2025 : Open source en Licence MIT")
144
+ st.sidebar.markdown("info@bziiit.com")
145
+
146
+
147
  def display_chat_history():
148
  """Affiche l'historique de chat."""
149
  for message in st.session_state.chat_history:
 
155
  st.markdown(message["content"])
156
 
157
  def main():
158
+
159
+ initCarbon()
160
+
161
+ st.session_state['current_model'] = "mistral-large-latest"
162
+
163
+ if "tokens_metrics" not in st.session_state or "chat_history" not in st.session_state :
164
+ initialize_conversation()
165
+
166
+
167
+ # """Point d'entrée de l'application."""
168
  # Vérifier que les index Pinecone sont prêts
169
  if not check_indexes_ready():
170
  return
171
 
172
+
173
  st.title("RAG architectures")
174
  st.markdown(
175
  """
 
189
  )
190
 
191
  display_sidebar()
192
+
193
+ if(st.session_state.chat_history):
194
+ display_chat_history()
195
 
196
  query = st.chat_input("Posez votre question ici:")
197
  if query:
 
204
 
205
  process_query(query, architecture)
206
 
207
+
208
def calculate_tokens_cost(input_price_per_m: float = 1.80, output_price_per_m: float = 5.40) -> None:
    """Recompute the session token cost (EUR) from the accumulated counters.

    Reads st.session_state['tokens_metrics'] and stores the result, rounded
    to 3 decimals, in st.session_state['tokens_cost'].

    Args:
        input_price_per_m: price in EUR per 1M input tokens (generalized from
            the previously hard-coded 1.80; default preserves behavior).
        output_price_per_m: price in EUR per 1M output tokens (was 5.40).
    """
    metrics = st.session_state['tokens_metrics']
    input_tokens = metrics.get('input_tokens', 0)
    output_tokens = metrics.get('output_tokens', 0)
    cost_input = (input_tokens / 1_000_000) * input_price_per_m
    cost_output = (output_tokens / 1_000_000) * output_price_per_m
    st.session_state['tokens_cost'] = round(cost_input + cost_output, 3)
215
+
216
def initialize_conversation():
    """Reset chat history and token accounting in the streamlit session state."""
    st.session_state["chat_history"] = []
    st.session_state["tokens_cost"] = 0
    st.session_state['tokens_metrics'] = {
        'input_tokens': 0,
        'output_tokens': 0,
        'total_tokens': 0,
    }
    # Refresh the displayed cost from the freshly zeroed counters.
    calculate_tokens_cost()
226
+
227
+
228
+
229
  if __name__ == "__main__":
230
  main()
assets/bziiit.png ADDED
carbon/__init__.py ADDED
File without changes
carbon/comparateur.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import streamlit as st
3
+ import pandas as pd
4
+
5
# Equivalence ratios: quantity of each item corresponding to 1 kg of CO2,
# paired with the icon used to render it (icons hosted on impactco2.fr).
dict_comparaison_1kgCO2 = {
    "eau en litre": [374 / 100, "https://impactco2.fr/icons/eauenbouteille.svg"],
    "smartphones": [1.16 / 100, "https://impactco2.fr/icons/smartphone.svg"],
    "voiture en km": [460 / 100, "https://impactco2.fr/icons/voiturethermique.svg"],
    "tgv en km": [34130 / 100, "https://impactco2.fr/icons/tgv.svg"],
}
11
+
12
def compare(value_init, ratio_equivalent):
    """Convert *value_init* (kg CO2) into its equivalent quantity.

    Returns None when *ratio_equivalent* is falsy (0 or None), mirroring
    "no meaningful equivalence available".
    """
    if not ratio_equivalent:
        return None
    return value_init * ratio_equivalent
14
+
15
def display_comparaison(container, value_init, ratio_equivalent, icon, unit):
    """Render one equivalence tile (icon + converted value) into *container*.

    Args:
        container: streamlit container/column to draw into.
        value_init: emission value in kg CO2.
        ratio_equivalent: quantity of *unit* per kg CO2 (see dict_comparaison_1kgCO2).
        icon: URL of the icon image.
        unit: unit label shown next to the value.
    """
    link_url = f"https://impactco2.fr/outils/comparateur?value={value_init}&comparisons=tgv,eauenbouteille,voiturethermique"
    # BUG FIX: the <a> tag was previously never closed, producing invalid HTML.
    container.markdown(
        f"<div style='text-align: center;'><a href='{link_url}' target='_blank'><img src='{icon}' alt='{unit}' width='50'></a></div>",
        unsafe_allow_html=True,
    )
    # BUG FIX: compare() returns None for a falsy ratio, which made the
    # `:.2f` format raise TypeError; skip the value line in that case.
    equivalent = compare(value_init, ratio_equivalent)
    if equivalent is not None:
        container.markdown(
            f"<div style='text-align: center;'><b>{equivalent:.2f}</b> {unit}</div>",
            unsafe_allow_html=True,
        )
20
+
21
def custom_cumsum(ecologits_series, codecarbon_series):
    """Cumulative sum of EcoLogits values, masked by CodeCarbon activity.

    Positions where *codecarbon_series* is 0 yield 0 and do not advance the
    running total; all other positions add the EcoLogits value and emit the
    running total. The result keeps the index of *ecologits_series*.
    """
    running = 0
    values = []
    for eco_val, cc_val in zip(ecologits_series, codecarbon_series):
        if cc_val == 0:
            values.append(0)
            continue
        running += eco_val
        values.append(running)
    return pd.Series(values, index=ecologits_series.index)
31
+
32
+
33
def get_table_empreintes_detailed() -> pd.DataFrame:
    """Build the per-task emissions table shown in the detailed report.

    Reads st.session_state["partial_emissions"] (mapping task -> {"cc": kg,
    "el": kg}), converts values to grams, relabels rows/columns in French,
    and appends a cumulative EcoLogits column plus a grand-total column.
    """
    raw = st.session_state["partial_emissions"]
    # Transpose so each task becomes a row with the "cc"/"el" tracker columns.
    table = pd.DataFrame(raw).T
    table = table.rename(columns={"cc": "CodeCarbon (Cumulative)", "el": "EcoLogits (par requete)"})
    # kg -> g eq. CO2. NOTE(review): DataFrame.applymap is deprecated since
    # pandas 2.1 (DataFrame.map replaces it); kept for older-pandas support.
    table = table.applymap(lambda cell: cell * 1000)
    table = table.round(2)
    table = table.rename(index={
        "Scrapping": "Collecte de documents et scrapping",
        "extraction_pp": "Extraction des parties prenantes (serveur et IA)",
        "cartographie": "Sauvegarde de la cartographie",
        "chatbot": "Dialogue avec chatBot IA",
        "download_rapport": "Téléchargement cartographie"
    })
    # Cumulative EcoLogits column, zeroed wherever CodeCarbon measured nothing.
    table["EcoLogits (Cumulative)"] = custom_cumsum(
        table["EcoLogits (par requete)"], table["CodeCarbon (Cumulative)"]
    )
    table['Consommation Totale'] = table["CodeCarbon (Cumulative)"] + table["EcoLogits (Cumulative)"]
    return table.round(2)
carbon/empreinte_carbone.py ADDED
@@ -0,0 +1,158 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import base64
2
+ import pandas as pd
3
+ import altair as alt
4
+
5
+ import streamlit as st
6
+ from codecarbon import EmissionsTracker
7
+ from ecologits.tracers.utils import compute_llm_impacts
8
+
9
+ from .comparateur import get_table_empreintes_detailed, display_comparaison, dict_comparaison_1kgCO2
10
+
11
+
12
def initCarbon():
    """Initialise carbon-tracking objects in the streamlit session state.

    Idempotent: each key is created only on first call for the session.
    """
    if "emission" not in st.session_state:
        # Start one CodeCarbon tracker per session to measure server energy.
        tracker = EmissionsTracker()
        tracker.start()
        st.session_state["emission"] = tracker

    if "partial_emissions" not in st.session_state:
        # Per-task accumulators: "cc" = CodeCarbon (server), "el" = EcoLogits (LLM).
        st.session_state["partial_emissions"] = {
            "chatbot": {"cc": 0, "el": 0},
        }
23
+
24
def traceImpact(model_name, trace):
    """Estimate the footprint of one LLM request and accumulate it.

    Args:
        model_name: Mistral model identifier passed to EcoLogits.
        trace: dict with 'completion_tokens' (output token count; may be None
            when the provider sent no usage metadata — TODO confirm how
            compute_llm_impacts handles None) and 'latency' in seconds.

    Side effect: adds the GWP estimate (kg CO2 eq.) to
    st.session_state["partial_emissions"]["chatbot"]["el"].
    """
    # BUG FIX: removed leftover debug `print(trace)`.
    impact = compute_llm_impacts(
        provider="mistralai",
        model_name=model_name,
        output_token_count=trace['completion_tokens'],
        request_latency=trace['latency'],
    )

    # Only accumulate when a global-warming-potential value was produced.
    if impact.gwp and impact.gwp.value:
        st.session_state["partial_emissions"]["chatbot"]["el"] += impact.gwp.value
36
+
37
# Helper used when embedding local SVG icons directly into HTML.
def load_svg_as_base64(file_path):
    """Return the Base64-encoded contents of *file_path* as a str."""
    with open(file_path, "rb") as handle:
        raw_bytes = handle.read()
    return base64.b64encode(raw_bytes).decode()
42
+
43
def color_scale(val):
    """Map an emission value (g eq. CO2) to a pandas-Styler CSS string.

    Buckets: <=1 green, <=10 light green, <=50 yellow, <=100 orange,
    otherwise red. The placeholder string '-' marks an empty cell and
    receives no styling.
    """
    if val == '-':
        # BUG FIX: previously returned the literal 'background-color: {color}'
        # (missing f-prefix and `color` undefined here), i.e. invalid CSS.
        # An empty string is the Styler convention for "no style".
        return ''
    elif val <= 1:
        color = '#008571'  # green
    elif val <= 10:
        color = '#83c2b8'  # light green
    elif val <= 50:
        color = '#efcd82'  # light yellow
    elif val <= 100:
        color = '#f2aa56'  # light orange
    else:
        color = '#e87a58'  # light red
    return f'background-color: {color};color:white'
57
+
58
def display_cf_comparison(stm: st):
    """Render the carbon-footprint summary: totals table + ADEME equivalences.

    *stm* is the streamlit container to draw into (e.g. st or st.sidebar).
    NOTE(review): the totals table is written to st.sidebar directly rather
    than to *stm* — confirm this is intentional for non-sidebar callers.
    """
    # Server-side emissions measured by CodeCarbon, in kg CO2 eq.
    # NOTE(review): stop() is invoked on every render — presumably the tracker
    # keeps accumulating between calls; verify against CodeCarbon docs.
    serveur_emission = st.session_state['emission'].stop()
    # Sum of per-task LLM emissions estimated by EcoLogits (kg CO2 eq.).
    emission_api = sum(value["el"] for value in st.session_state["partial_emissions"].values())

    if serveur_emission is None:
        serveur_emission = 0
    if emission_api is None:
        emission_api = 0
    total_emission = serveur_emission + emission_api

    # Share of each source; avoid dividing by zero before any measurement.
    if total_emission == 0:
        pourcentage_api = 0
        pourcentage_serveur = 0
    else:
        pourcentage_api = emission_api / total_emission
        pourcentage_serveur = serveur_emission / total_emission

    # Plain HTML table (no index, no extra columns); values shown in grams.
    table_html = f"""
    <table style="width:100%">
        <tr><td colspan="2" style="text-align: center;"><b>{total_emission*1000:.2f}</b> g eq. CO2</td></tr>
        <tr><td style="text-align: center;">Empreinte serveur (via CodeCarbon)</td><td style="text-align: right;"><b>{serveur_emission*1000:.2f}</b> ({pourcentage_serveur:.2%})</td></tr>
        <tr><td style="text-align: center;">Empreinte IA (via EcoLogits)</td><td style="text-align: right;"><b>{emission_api*1000:.2f}</b> ({pourcentage_api:.2%})</td></tr>
    </table>
    """

    st.sidebar.markdown(table_html, unsafe_allow_html=True)

    c1, c2, c3 = stm.columns([1, 1, 1])
    c2.write("---")

    stm.markdown("**Votre équivalence**")
    col1, col2, col3 = stm.columns([1, 1, 1])
    # Ratios are per kg CO2; the *1000 factors switch the displayed unit
    # (litres -> ml for water, km -> m for the car).
    display_comparaison(col1, total_emission, dict_comparaison_1kgCO2["eau en litre"][0]*1000, dict_comparaison_1kgCO2["eau en litre"][1], "ml")
    display_comparaison(col2, total_emission, dict_comparaison_1kgCO2["tgv en km"][0], dict_comparaison_1kgCO2["tgv en km"][1], "km")
    display_comparaison(col3, total_emission, dict_comparaison_1kgCO2["voiture en km"][0]*1000, dict_comparaison_1kgCO2["voiture en km"][1], "m")
    stm.markdown("\n")
    stm.markdown(
        f"""
        Fournis par **ADEME**
        <a href='https://www.ademe.fr' target='_blank'><img src='https://www.ademe.fr/wp-content/uploads/2022/11/ademe-logo-2022-1.svg' alt='svg' width='30' height='30' style='margin-left: 10px;'>
        """,
        unsafe_allow_html=True
    )
115
+
116
def display_carbon_footprint():
    """Render the full-page carbon report: summary, per-task table, pie chart."""
    st.title("EMPREINTE ÉNERGÉTIQUE DE L'APPLICATION IA CARTO RSE")
    display_cf_comparison(st)
    table = get_table_empreintes_detailed()

    # Show '-' for tasks that produced no measurable emissions.
    table.replace({0.00: '-'}, inplace=True)
    styled_df = table[['Consommation Totale']].rename(columns={'Consommation Totale': 'Consommation totale (g eqCo2)'})
    styled_df = styled_df.round(2)
    # NOTE(review): Styler.applymap is deprecated since pandas 2.1 (use .map);
    # kept here for compatibility with older pandas versions.
    styled_df = styled_df.style.applymap(color_scale, subset=['Consommation totale (g eqCo2)'])
    st.markdown("---")
    st.markdown("### DÉTAIL PAR TÂCHE")
    st.table(styled_df)
    with st.expander("Plus de détails"):
        st.table(table)

    st.markdown("### SYNTHESE (Dialogue IA et non IA)")

    serveur_emission = st.session_state['emission'].stop()
    emission_api = sum([value["el"] for value in st.session_state["partial_emissions"].values()])
    # BUG FIX: removed leftover debug print and added the same guards the
    # sibling display_cf_comparison uses — tracker.stop() may return None and
    # total_emission may be 0 before any measurement (ZeroDivisionError).
    if serveur_emission is None:
        serveur_emission = 0
    total_emission = serveur_emission + emission_api
    if total_emission == 0:
        pourcentage_api = 0
        pourcentage_serveur = 0
    else:
        pourcentage_api = emission_api / total_emission
        pourcentage_serveur = serveur_emission / total_emission

    # Pie chart comparing non-IA (server) vs IA shares of the footprint.
    df = pd.DataFrame({"Catégorie": ["Identification + dessin", "IA (extraction pp + dialogue)"], "valeur": [pourcentage_serveur, pourcentage_api]})
    color_scale_alt = alt.Scale(domain=['Identification + dessin', 'IA (extraction pp + dialogue)'], range=['#011166', '#63abdf'])

    base = alt.Chart(df).encode(
        theta=alt.Theta(field="valeur", type="quantitative", stack=True),
        color=alt.Color(field="Catégorie", type="nominal", scale=color_scale_alt),
    )

    pie = base.mark_arc(outerRadius=100)
    text = base.mark_text(radius=150, fill="black", align='center', baseline='middle', fontSize=20).encode(alt.Text(field="valeur", type="quantitative", format=".2%"))

    chart = alt.layer(pie, text, data=df).resolve_scale(theta="independent")
    st.altair_chart(chart, use_container_width=True)
158
+
langchain-neo4j.ipynb CHANGED
@@ -240,7 +240,7 @@
240
  ],
241
  "metadata": {
242
  "kernelspec": {
243
- "display_name": "Python 3.10.16 ('python_env')",
244
  "language": "python",
245
  "name": "python3"
246
  },
@@ -254,12 +254,7 @@
254
  "name": "python",
255
  "nbconvert_exporter": "python",
256
  "pygments_lexer": "ipython3",
257
- "version": "3.10.16"
258
- },
259
- "vscode": {
260
- "interpreter": {
261
- "hash": "61b9aa377fb83f63bad7741d096e61bd539c2bb481b5373a09d4b308f1781ad2"
262
- }
263
  }
264
  },
265
  "nbformat": 4,
 
240
  ],
241
  "metadata": {
242
  "kernelspec": {
243
+ "display_name": "Python 3",
244
  "language": "python",
245
  "name": "python3"
246
  },
 
254
  "name": "python",
255
  "nbconvert_exporter": "python",
256
  "pygments_lexer": "ipython3",
257
+ "version": "3.12.2"
 
 
 
 
 
258
  }
259
  },
260
  "nbformat": 4,