# ParisForming — a Streamlit chat app impersonating a Terraform-expert Paris Hilton.
# (The "Spaces: Sleeping" lines here were Hugging Face Spaces status-banner
# residue from the page capture, not part of the program.)
import streamlit as st
import os
from openai import OpenAI

# UI assets.
# NOTE(review): FAVICON renders as "π" — this looks like a mis-encoded emoji
# from the original source; confirm the intended glyph before shipping.
FAVICON = "π"
LANDING_PAGE_IMAGE = "static/paris.png"
SIDEBAR_IMAGE = "https://www.gifcen.com/wp-content/uploads/2021/09/paris-hilton-gif-11.gif"
AVATAR_URL = "https://upload.wikimedia.org/wikipedia/commons/thumb/b/ba/Paris_Hilton_mug_shot_%282007%29.jpg/250px-Paris_Hilton_mug_shot_%282007%29.jpg"

# OpenAI-compatible client routed through Hugging Face's Nebius provider.
# Deliberately uses os.environ[...] (not .get) so a missing HF_TOKEN fails
# fast at startup with a KeyError instead of failing on the first request.
client = OpenAI(
    base_url="https://router.huggingface.co/nebius/v1",
    api_key=os.environ["HF_TOKEN"],
)
# App title / page chrome.
st.set_page_config(
    page_title="ParisForming",
    page_icon=FAVICON,
)

# Sidebar: app name, tagline and decorative GIF.
with st.sidebar:
    st.title(":red[Paris]Forming 0.1.0")
    st.caption("Chat with the most iconic Terraform expert on the market. Newbie or master, she will always be your BFF.")
    st.image(SIDEBAR_IMAGE)

# Main landing area.
st.title("Learn Terraform with Paris")
st.image(LANDING_PAGE_IMAGE)
# Store LLM-generated responses. Seed the conversation once per session with
# a persona instruction followed by a canned greeting.
# NOTE(review): the persona instruction uses role "user" rather than "system";
# Gemma-family models reject the "system" role, so this is likely deliberate —
# confirm before changing.
if "messages" not in st.session_state:  # idiomatic: no .keys() needed
    st.session_state.messages = [
        {
            "role": "user",
            # Typo fixed: "Iaac" -> "IaC" (Infrastructure as Code).
            "content": "You are an AI system designed to impersonate a version of Paris Hilton expert on Terraform, the famous IaC tool",
        },
        {
            "role": "assistant",
            "content": "Hi honey, Think of me as your personal guide to building the most *bling-tastic* cloud infra. π Forget your boring VMs and blueprints, we're gonna be **resource-optimising minimalists** with a sprinkle of glam throwbacks.",
        },
    ]
# Display chat messages, skipping the hidden persona instruction at index 0.
# Assistant turns get the Paris avatar; user turns keep Streamlit's default.
for message in st.session_state.messages[1:]:
    avatar = AVATAR_URL if message["role"] == "assistant" else None
    with st.chat_message(message["role"], avatar=avatar):
        st.write(message["content"])
# Function for generating the LLM response.
def generate_stream(messages: list):
    """Return a streaming chat-completion iterator for *messages*.

    Delegates to the module-level OpenAI-compatible ``client``; the caller
    consumes the yielded chunks with ``st.write_stream``.
    """
    return client.chat.completions.create(
        model="google/gemma-2-2b-it-fast",
        messages=messages,
        stream=True,
    )
# User-provided prompt: append it to history and echo it in the chat pane.
if prompt := st.chat_input():
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.write(prompt)

# Generate a new response only when the user spoke last (the last stored
# message is not from the assistant). Stream the reply, then persist it so
# it is replayed on the next Streamlit rerun.
if st.session_state.messages[-1]["role"] != "assistant":
    with st.chat_message("assistant", avatar=AVATAR_URL):
        stream = generate_stream(st.session_state.messages)
        response = st.write_stream(stream)
    message = {"role": "assistant", "content": response}
    st.session_state.messages.append(message)