File size: 2,528 Bytes
904e442
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
import streamlit as st
import os


from openai import OpenAI


# UI assets: favicon emoji, landing-page hero image, sidebar GIF, and the
# avatar shown next to the assistant's chat messages.
FAVICON = "๐Ÿ‘‘"
LANDING_PAGE_IMAGE = "static/paris.png" 
SIDEBAR_IMAGE = "https://www.gifcen.com/wp-content/uploads/2021/09/paris-hilton-gif-11.gif" 
AVATAR_URL = "https://upload.wikimedia.org/wikipedia/commons/thumb/b/ba/Paris_Hilton_mug_shot_%282007%29.jpg/250px-Paris_Hilton_mug_shot_%282007%29.jpg" 



# OpenAI-compatible client pointed at the Hugging Face inference router
# (Nebius provider). Reads the HF_TOKEN environment variable; a missing
# token raises KeyError at import time, which fails fast with a clear cause.
client = OpenAI(
    base_url="https://router.huggingface.co/nebius/v1",
    api_key=os.environ["HF_TOKEN"],
)


# App title / browser-tab configuration; must run before other st.* calls.
st.set_page_config(
    page_title="ParisForming",
    page_icon=FAVICON
)


# Sidebar: app name/version badge, a short pitch for the persona, and a GIF.
with st.sidebar:
    st.title(":red[Paris]Forming 0.1.0")
    st.caption("Chat with the most iconic Terraform expert on the market. Newbie or master, she will always be your BFF.")
    st.image(SIDEBAR_IMAGE)
    
# Main landing content rendered above the chat history.
st.title("Learn Terraform with Paris")
st.image(LANDING_PAGE_IMAGE)
# Store LLM generated responses.
# Seed the conversation on first load: messages[0] carries the persona
# instruction and messages[1] is the assistant's canned greeting. The
# display loop below skips messages[0], so the instruction never shows
# in the UI but is still sent to the model on every completion call.
# NOTE(review): the instruction uses role "user" rather than "system" —
# presumably because Gemma-family chat templates reject a system role;
# confirm against the endpoint before "fixing" this.
if "messages" not in st.session_state.keys():
    st.session_state.messages = [
        {
            "role": "user",
            "content": "You are an AI system designed to impersonate a version of Paris Hilton expert on Terraform, the famous Iaac tool"
        },
        {
            "role": "assistant",
            "content": "Hi honey, Think of me as your personal guide to building the most *bling-tastic* cloud infra. ๐Ÿ’–  Forget your boring VMs and blueprints, we're gonna be **resource-optimising minimalists** with a sprinkle of glam throwbacks."
        }
    ]


# Render the chat history, skipping the hidden persona-instruction
# message at index 0. Assistant turns get the custom avatar; user
# turns keep Streamlit's default one.
for msg in st.session_state.messages[1:]:
    role = msg["role"]
    avatar = AVATAR_URL if role == "assistant" else None
    with st.chat_message(role, avatar=avatar):
        st.write(msg["content"])

  
# Function for generating LLM responses.
def generate_stream(messages: list, model: str = "google/gemma-2-2b-it-fast"):
    """Start a streaming chat completion for the given conversation.

    Args:
        messages: Chat history as a list of ``{"role", "content"}`` dicts,
            including the hidden persona-instruction message.
        model: Model identifier on the inference router. Defaults to the
            app's standard Gemma 2 model, so existing callers are unchanged.

    Returns:
        The streaming response iterator produced by the OpenAI-compatible
        client (consumed by ``st.write_stream``).
    """
    return client.chat.completions.create(
        model=model,
        messages=messages,
        stream=True,
    )


# Capture a new user prompt: persist it to the history, then echo it
# immediately so it appears in the current render pass.
if prompt := st.chat_input():
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.write(prompt)


# When the conversation ends on a non-assistant turn, stream the reply.
# st.write_stream renders tokens as they arrive and returns the full
# concatenated text, which is then appended to the history.
if st.session_state.messages[-1]["role"] != "assistant":
    with st.chat_message("assistant", avatar=AVATAR_URL):
        response = st.write_stream(generate_stream(st.session_state.messages))
    st.session_state.messages.append({"role": "assistant", "content": response})