Spaces:
Running
Running
File size: 3,782 Bytes
197f5ec 017c2ee 197f5ec 017c2ee 403ed1b 197f5ec 87d384c 017c2ee 197f5ec faf13f8 8a56d57 197f5ec f1d9ee8 faf13f8 8a56d57 f1d9ee8 faf13f8 197f5ec faf13f8 8a56d57 faf13f8 197f5ec 8a56d57 197f5ec 8a56d57 faf13f8 8a56d57 197f5ec 8a56d57 66e8595 8a56d57 66e8595 8a56d57 66e8595 faf13f8 8a56d57 197f5ec 8a56d57 197f5ec 017c2ee 197f5ec 017c2ee 197f5ec 017c2ee 197f5ec |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 |
import streamlit as st
from ecologits.tracers.utils import llm_impacts
from src.impacts import get_impacts, display_impacts, display_equivalent_ghg, display_equivalent_energy
from src.utils import format_impacts
from src.content import WARNING_CLOSED_SOURCE, WARNING_MULTI_MODAL, WARNING_BOTH, HOW_TO_TEXT
from src.models import load_models
from src.constants import PROMPTS
def calculator_mode():
    """Render the calculator tab: model picker, architecture warnings, and
    the environmental impacts / equivalences for one example prompt.

    Pure Streamlit UI — takes no arguments and returns nothing; all output
    is rendered as side effects on the current Streamlit page.
    """
    st.expander("How to use this calculator?", expanded=False).markdown(HOW_TO_TEXT)

    with st.container(border=True):
        df = load_models(filter_main=True)

        col1, col2, col3 = st.columns(3)
        with col1:
            providers_clean = list(df["provider_clean"].unique())
            provider = st.selectbox(
                label="Provider",
                options=providers_clean,
                index=providers_clean.index("OpenAI"),  # default selection
            )
        with col2:
            # Only models belonging to the chosen provider.
            model = st.selectbox(
                label="Model",
                options=df[df["provider_clean"] == provider]["name_clean"].unique(),
            )
        with col3:
            # Holds the prompt *label*; the token count is looked up below.
            prompt_label = st.selectbox("Example prompt", [x[0] for x in PROMPTS])

        # WARNING DISPLAY — select the single matching row once instead of
        # re-filtering the dataframe for every derived value.
        selected = df[
            (df["provider_clean"] == provider) & (df["name_clean"] == model)
        ].iloc[0]
        provider_raw = selected["provider"]
        model_raw = selected["name"]

        warn_arch = bool(selected["warning_arch"])
        warn_multi_modal = bool(selected["warning_multi_modal"])
        # The three combinations are mutually exclusive, so chain them.
        if warn_arch and warn_multi_modal:
            st.warning(WARNING_BOTH, icon="⚠️")
        elif warn_arch:
            st.warning(WARNING_CLOSED_SOURCE, icon="⚠️")
        elif warn_multi_modal:
            st.warning(WARNING_MULTI_MODAL, icon="⚠️")

    try:
        # Map the selected prompt label back to its output-token count.
        token_count = next(x[1] for x in PROMPTS if x[0] == prompt_label)
        impacts = llm_impacts(
            provider=provider_raw,
            model_name=model_raw,
            output_token_count=token_count,
            # NOTE(review): fixed latency constant — presumably chosen so the
            # estimate is token-bound rather than latency-bound; confirm
            # against ecologits' llm_impacts semantics.
            request_latency=100000,
        )
        impacts, _, _ = format_impacts(impacts)

        with st.container(border=True):
            st.markdown('<h3 align = "center">Environmental impacts</h3>', unsafe_allow_html=True)
            display_impacts(impacts)

        with st.container(border=False):
            st.markdown('<h3 align = "center">Equivalences</h3>', unsafe_allow_html=True)
            st.markdown('<p align = "center">Making this request to the LLM is equivalent to the following actions :</p>', unsafe_allow_html=True)
            page = st.radio(' ', ['Energy' , 'GHG'], horizontal=True)
            with st.container(border=True):
                if page == 'Energy':
                    display_equivalent_energy(impacts)
                else:
                    display_equivalent_ghg(impacts)
    except Exception:
        # Broad catch is deliberate: any lookup/estimation failure degrades
        # to a user-facing error rather than a stack trace in the UI.
        st.error('Could not find the model in the repository. Please try another model.')