# jennifee — initial commit (7976296, verified)
import math # For access to infinity
import gradio # For building the interface
import pandas # For working with tables
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline # For LLMS
# Instantiate the model that we'll be calling. This is a tiny one!
# SmolLM2-135M-Instruct is small enough to run on CPU for demo purposes.
MODEL_ID = "HuggingFaceTB/SmolLM2-135M-Instruct"
# The tokenizer is loaded separately so _format_chat can use its chat template.
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
# Wrap model + tokenizer in a text-generation pipeline; this is the single
# entry point the rest of the app uses for inference (see _llm_generate).
pipe = pipeline(
task="text-generation",
model=AutoModelForCausalLM.from_pretrained(
MODEL_ID,
),
tokenizer=tokenizer
)
# Create a function to calculate the Drake Equation
def drake_equation(R: float, fp: float, ne: float, fl: float, fi: float, fc: float, L: float) -> dict:
    """
    Calculate the Drake Equation: N = R * fp * ne * fl * fi * fc * L.

    Parameters:
        R: the rate at which stars are born [stars per year]
        fp: the fraction of stars that host planets
        ne: the number of habitable planets per planetary system
        fl: the fraction of those planets where life occurs
        fi: the fraction of life that evolves intelligence
        fc: the fraction of intelligent life that develops communication capabilities
        L: the average length of time civilizations are detectable [years]

    Returns:
        A dict with two keys:
        - "results": {"N": N}, the raw numeric answer (consumed by run_once/llm_explain)
        - "verdict": a human-readable label mapped to N (displayed verbatim)
        where N is the number of civilizations in our galaxy with which
        communication might be possible.
    """
    N = R * fp * ne * fl * fi * fc * L
    return dict(
        results={
            "N": N,
        },
        verdict={
            "Based on your input, the number of alien civilizations that communication may be possible with is": N,
        }
    )
# This helper function applies a chat format to help the LLM understand what is going on
def _format_chat(system_prompt: str, user_prompt: str) -> str:
    """Render a system + user message pair using the tokenizer's chat template.

    Parameters:
        system_prompt: instructions that set the assistant's behavior.
        user_prompt: the actual question/data for this request.

    Returns:
        The untokenized prompt string, with the generation prompt appended so
        the model continues as the assistant.
    """
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt},
    ]
    # tokenize=False -> return a plain string (the pipeline tokenizes later);
    # add_generation_prompt=True -> append the assistant-turn marker.
    # (Removed an unused `template = getattr(tokenizer, "chat_template", None)`
    # local that was never read.)
    return tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )
# This function asks the LLM for a single sampled completion.
def _llm_generate(prompt: str, max_tokens: int) -> str:
    """Generate one completion for *prompt* and return only the new text."""
    generations = pipe(
        prompt,
        max_new_tokens=max_tokens,
        do_sample=True,
        temperature=0.5,
        return_full_text=False,
    )
    # The pipeline returns a list of candidates; we requested one, so take it.
    first_candidate = generations[0]
    return first_candidate["generated_text"]
# This function generates an explanation of the results
def llm_explain(results: dict, inputs: list) -> str:
    """Ask the LLM for a one-sentence explanation of a Drake Equation result.

    Parameters:
        results: dict returned by drake_equation; reads results["results"]["N"].
        inputs: the seven raw inputs [R, fp, ne, fl, fi, fc, L], in order.

    Returns:
        The model's generated explanation text (may contain newlines; the
        caller keeps only the first line).
    """
    R, fp, ne, fl, fi, fc, L = inputs
    # Only the numeric results are fed to the prompt; the "verdict" text is
    # displayed elsewhere, so the previously-unused `v` local was removed.
    r = results["results"]
    system_prompt = (
        "You explain the implications of Drake Equation calculation to a smart college student."
        "You comment on the implication of their results and how many or few extraterrestrial civilizations are identified."
        "You always return CONCISE responses, only one sentence."
    )
    # Fixed a stray trailing space after the `ne` line and dropped a no-op
    # empty-string literal at the end of the original prompt.
    user_prompt = (
        f"The rate at which stars are born is {R}.\n"
        f"The fraction of stars that host planets is {fp}.\n"
        f"The number of habitable planets per planetary system is {ne}.\n"
        f"The fraction of those planets where life occurs is {fl}.\n"
        f"The fraction of life that evolves intelligence is {fi}.\n"
        f"The fraction of intelligent life that develops communication capabilities is {fc}.\n"
        f"The average length of time civilizations are detectable is {L}.\n"
        f"The number of civilizations in our galaxy with which communication may be possible is {r['N']}.\n"
        "Explain the results of this Drake Equation calculation in ONE friendly sentence for a non-expert"
    )
    formatted = _format_chat(system_prompt, user_prompt)
    return _llm_generate(formatted, max_tokens=128)
# This function ties everything together (evaluation, LLM explanation, output)
# and is the main entry point for the GUI.
def run_once(R, fp, ne, fl, fi, fc, L):
    """Evaluate the Drake Equation and return (results table, one-line narrative)."""
    raw_inputs = [R, fp, ne, fl, fi, fc, L]
    outcome = drake_equation(
        R=float(R),
        fp=float(fp),
        ne=float(ne),
        fl=float(fl),
        fi=float(fi),
        fc=float(fc),
        L=float(L),
    )
    # One-row table with the rounded deterministic answer for the Dataframe widget.
    n_rounded = round(outcome["results"]["N"], 3)
    table = pandas.DataFrame([{
        "The number of civilizations in our galaxy with which communication may be possible": n_rounded,
    }])
    # Keep only the first line of the LLM output to enforce a one-sentence answer.
    narrative = llm_explain(outcome, raw_inputs).split("\n")[0]
    return table, narrative
# Last but not least, here's the UI! Blocks gives us explicit layout control.
with gradio.Blocks() as demo:
    # Let's start by adding a title and introduction
    gradio.Markdown(
        "# Run and Explain the Drake Equation"
    )
    gradio.Markdown(
        "This app runs the Drake Equation calculation for estimating extraterrestrial life and returns a natural language description of the results"
    )
    # This row contains all seven physical parameters, in the same order that
    # drake_equation and run_once expect them.
    with gradio.Row():
        R = gradio.Number(value=0.0, label="Rate at which stars are born [stars per year]")
        fp = gradio.Number(value=0.0, label="Fraction of stars that host planets")
        ne = gradio.Number(value=0.0, label="Number of habitable planets per planetary system")
        fl = gradio.Number(value=0.0, label="Fraction of those planets where life occurs")
        fi = gradio.Number(value=0.0, label="Fraction of life that evolves intelligence")
        fc = gradio.Number(value=0.0, label="Fraction of intelligent life that develops communication capabilities")
        L = gradio.Number(value=0.0, label="Average length of time civilizations are detectable [years]")
    # Add a button to click to run the interface
    run_btn = gradio.Button("Compute")
    # These are the outputs. We use both a dataframe (for tabular info) and a
    # markdown box for the narrative text from the LLM.
    results_df = gradio.Dataframe(label="Numerical results (deterministic)", interactive=False)
    explain_md = gradio.Markdown(label="Explanation")
    # Run the calculations when the button is clicked; outputs map 1:1 to the
    # (DataFrame, str) tuple returned by run_once.
    run_btn.click(fn=run_once, inputs=[R, fp, ne, fl, fi, fc, L], outputs=[results_df, explain_md])
    # Finally, add a few representative example parameter sets users can load.
    gradio.Examples(
        examples=[
            [3.0, 0.95, 5.0, 0.01, 0.01, 0.5, 100000.0],
            [1.5, 0.75, 3.0, 0.0001, 0.001, 0.01, 10000.0],
            [4.0, 0.9, 6.5, 0.000005, 0.05, 0.2, 5000000.0],
        ],
        inputs=[R, fp, ne, fl, fi, fc, L],
        label="Representative cases",
        examples_per_page=3,
        cache_examples=False,  # each run calls the LLM, so don't pre-compute
    )
# Standard script guard: launch the app only when executed directly.
if __name__ == "__main__":
    demo.launch()