# Femma / app.py
import gradio as gr
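# Keras 3 selects its backend from the KERAS_BACKEND environment variable
# ("jax", "tensorflow", or "torch"). If a specific backend is required, it must
# be set before keras_nlp is imported; an optional sketch:
#   import os; os.environ["KERAS_BACKEND"] = "jax"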
import keras_nlp
print("Modules loaded!")
"""
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
print("Fetching model...")
model = keras_nlp.models.GemmaCausalLM.from_preset("hf://bhashwarsengupta/gemma-2-2b-finance")
print("model successfully loaded!")
context = """You are an intelligent personal finance assistant
designed to help users understand various financial concepts.
You are supposed to provide concise and easy-to-understand explanations for the requested questions,
ensuring the users feel informed and confident about managing their money.
Keep your answers limited to 50-100 words.
If you receive any non-finance related query, please return the following response:
\"Unrelated Topic\""""
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
):
    # Assemble the prompt: system context, then the chat history as
    # alternating Question/Answer turns, then the new question.
    messages = f"Context: {system_message}\n"
    for user_msg, bot_msg in history:
        if user_msg:
            messages += f"Question: {user_msg}\n"
        if bot_msg:
            messages += f"Answer: {bot_msg}\n"
    messages += f"Question: {message}\nAnswer: "

    print("Generating response...")
    # Note: in keras_nlp, max_length caps the *total* sequence length
    # (prompt + completion), not just the newly generated tokens.
    output = model.generate(
        messages,
        max_length=max_tokens,
    )
    print("Response generated!")
    print(output)

    # generate() returns the prompt plus the completion; split on the last
    # "Answer: " to keep only the newly generated answer.
    response = output.rsplit("Answer: ", 1)[-1]
    return response
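# A minimal local smoke test (hypothetical, not wired into the Space's UI):
# calling respond() directly with an empty history should print a short answer.
# The question below is illustrative only.
#
#   print(respond("What is compound interest?", [], context, max_tokens=256))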
"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value=context, label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=50, step=1, label="Max new tokens"),
        # gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        # gr.Slider(
        #     minimum=0.1,
        #     maximum=1.0,
        #     value=0.95,
        #     step=0.05,
        #     label="Top-p (nucleus sampling)",
        # ),
    ],
)
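# Note on the commented-out sliders above: keras_nlp's generate() does not take
# temperature/top-p keyword arguments; sampling is configured on the model
# instead. A sketch, assuming the keras_nlp samplers API:
#   model.compile(sampler=keras_nlp.samplers.TopPSampler(p=0.95))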
if __name__ == "__main__":
    demo.launch()