lajota13 commited on
Commit
904e442
·
verified ·
1 Parent(s): 24565d1

Create fe.py

Browse files

added fe file

Files changed (1) hide show
  1. fe.py +81 -0
fe.py ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import streamlit as st
import os


from openai import OpenAI


# Browser-tab icon for the app.
FAVICON = "👑"
# Hero image shown on the landing page (local static asset; assumes the
# file exists relative to the working directory — TODO confirm).
LANDING_PAGE_IMAGE = "static/paris.png"
# Animated GIF rendered in the sidebar.
SIDEBAR_IMAGE = "https://www.gifcen.com/wp-content/uploads/2021/09/paris-hilton-gif-11.gif"
# Avatar image used for assistant chat bubbles.
AVATAR_URL = "https://upload.wikimedia.org/wikipedia/commons/thumb/b/ba/Paris_Hilton_mug_shot_%282007%29.jpg/250px-Paris_Hilton_mug_shot_%282007%29.jpg"


# OpenAI-compatible client pointed at the Hugging Face inference router
# (Nebius provider).  Reads the token from the environment; raises
# KeyError at import time if HF_TOKEN is unset (fail fast on startup).
client = OpenAI(
    base_url="https://router.huggingface.co/nebius/v1",
    api_key=os.environ["HF_TOKEN"],
)
# App title
# Page metadata; st.set_page_config must be the first Streamlit call
# in the script, so keep it ahead of all other st.* statements.
st.set_page_config(
    page_title="ParisForming",
    page_icon=FAVICON
)


# Sidebar: app name, tagline, and mascot GIF.
with st.sidebar:
    st.title(":red[Paris]Forming 0.1.0")
    st.caption("Chat with the most iconic Terraform expert on the market. Newbie or master, she will always be your BFF.")
    st.image(SIDEBAR_IMAGE)

# Main-page header and landing image.
st.title("Learn Terraform with Paris")
st.image(LANDING_PAGE_IMAGE)
# Store LLM generated responses.
# Seed the conversation once per session: a persona-priming instruction
# followed by the assistant's canned greeting.  The priming message is
# sent with role "user" — NOTE(review): presumably because the chosen
# gemma-2 endpoint rejects a "system" role; confirm.  The history render
# loop elsewhere in this file skips messages[0] so it stays hidden.
# Fix: membership test directly on session_state — `.keys()` was redundant.
if "messages" not in st.session_state:
    st.session_state.messages = [
        {
            "role": "user",
            "content": "You are an AI system designed to impersonate a version of Paris Hilton expert on Terraform, the famous Iaac tool"
        },
        {
            "role": "assistant",
            "content": "Hi honey, Think of me as your personal guide to building the most *bling-tastic* cloud infra. 💖 Forget your boring VMs and blueprints, we're gonna be **resource-optimising minimalists** with a sprinkle of glam throwbacks."
        }
    ]
# Render the visible chat history.  The first entry is the hidden
# persona-priming message, so start from index 1.
for msg in st.session_state.messages[1:]:
    role = msg["role"]
    face = AVATAR_URL if role == "assistant" else None
    with st.chat_message(role, avatar=face):
        st.write(msg["content"])
58
+
def generate_stream(messages: list):
    """Start a streaming chat completion for *messages* and return the stream.

    Delegates to the module-level OpenAI-compatible ``client``; the caller
    consumes the stream incrementally (e.g. via ``st.write_stream``).
    """
    model_id = "google/gemma-2-2b-it-fast"
    completion_stream = client.chat.completions.create(
        model=model_id,
        messages=messages,
        stream=True,
    )
    return completion_stream
67
+
# Handle a new user prompt: record it in the history and echo it back.
user_text = st.chat_input()
if user_text:
    st.session_state.messages.append({"role": "user", "content": user_text})
    with st.chat_message("user"):
        st.write(user_text)


# If the conversation ends on a non-assistant turn, stream a reply,
# render it as it arrives, and persist the full text in the history.
history = st.session_state.messages
if history[-1]["role"] != "assistant":
    with st.chat_message("assistant", avatar=AVATAR_URL):
        reply_text = st.write_stream(generate_stream(history))
        history.append({"role": "assistant", "content": reply_text})