Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,114 +1,44 @@
|
|
|
|
|
|
|
|
| 1 |
import streamlit as st
|
| 2 |
-
import
|
| 3 |
-
|
| 4 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 5 |
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
<style>
|
| 9 |
-
body {
|
| 10 |
-
background-color: #1a1a1a;
|
| 11 |
-
color: #ffffff;
|
| 12 |
-
}
|
| 13 |
-
.stApp {
|
| 14 |
-
background-color: #1a1a1a;
|
| 15 |
-
}
|
| 16 |
-
.chat-container {
|
| 17 |
-
background-color: #2c2c2c;
|
| 18 |
-
border-radius: 10px;
|
| 19 |
-
padding: 20px;
|
| 20 |
-
margin-top: 20px;
|
| 21 |
-
transition: all 0.3s ease;
|
| 22 |
-
}
|
| 23 |
-
.chat-container:hover {
|
| 24 |
-
box-shadow: 0 0 20px rgba(0, 255, 255, 0.5);
|
| 25 |
-
}
|
| 26 |
-
.stTextInput > div > input {
|
| 27 |
-
background-color: #3a3a3a;
|
| 28 |
-
color: #ffffff;
|
| 29 |
-
border: 1px solid #00ffff;
|
| 30 |
-
border-radius: 5px;
|
| 31 |
-
transition: border-color 0.3s ease;
|
| 32 |
-
}
|
| 33 |
-
.stTextInput > div > input:focus {
|
| 34 |
-
border-color: #ff00ff;
|
| 35 |
-
}
|
| 36 |
-
.stButton > button {
|
| 37 |
-
background-color: #00ffff;
|
| 38 |
-
color: #000000;
|
| 39 |
-
border-radius: 5px;
|
| 40 |
-
transition: background-color 0.3s ease;
|
| 41 |
-
}
|
| 42 |
-
.stButton > button:hover {
|
| 43 |
-
background-color: #ff00ff;
|
| 44 |
-
}
|
| 45 |
-
.message {
|
| 46 |
-
background-color: #3a3a3a;
|
| 47 |
-
border-radius: 10px;
|
| 48 |
-
padding: 10px;
|
| 49 |
-
margin: 10px 0;
|
| 50 |
-
animation: fadeIn 0.5s ease-in;
|
| 51 |
-
}
|
| 52 |
-
@keyframes fadeIn {
|
| 53 |
-
from { opacity: 0; transform: translateY(10px); }
|
| 54 |
-
to { opacity: 1; transform: translateY(0); }
|
| 55 |
-
}
|
| 56 |
-
</style>
|
| 57 |
-
""", unsafe_allow_html=True)
|
| 58 |
|
| 59 |
-
#
|
| 60 |
-
|
| 61 |
-
st.session_state.messages = []
|
| 62 |
|
| 63 |
-
|
| 64 |
-
st.
|
| 65 |
-
st.markdown("Explore Tesla's AI use cases and ask questions about manufacturing innovations.")
|
| 66 |
|
| 67 |
-
|
| 68 |
-
st.header("AI Use Case Report")
|
| 69 |
-
try:
|
| 70 |
-
with open("tesla_ai_use_cases/tesla_ai_use_case_report.md", "r") as f:
|
| 71 |
-
report_content = f.read()
|
| 72 |
-
st.markdown(report_content)
|
| 73 |
-
except FileNotFoundError:
|
| 74 |
-
st.error("Report file not found. Please run the main script first.")
|
| 75 |
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
|
| 80 |
-
|
| 81 |
-
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
|
| 85 |
-
|
| 86 |
-
|
| 87 |
-
|
| 88 |
-
|
| 89 |
-
|
| 90 |
-
|
| 91 |
-
if submit_button and prompt:
|
| 92 |
-
# Add user message
|
| 93 |
-
st.session_state.messages.append({"role": "User", "content": prompt})
|
| 94 |
-
|
| 95 |
-
# Generate response (placeholder until LLM is fine-tuned)
|
| 96 |
-
try:
|
| 97 |
-
generator = pipeline("text-generation", model="tesla_ai_use_cases/fine_tuned_tesla_llm")
|
| 98 |
-
response = generator(prompt, max_length=200, num_return_sequences=1)[0]["generated_text"]
|
| 99 |
-
except Exception as e:
|
| 100 |
-
response = f"LLM not yet fine-tuned. Placeholder response: Ask about Gigapress, 4680 cells, or AI use cases. Error: {str(e)}"
|
| 101 |
-
|
| 102 |
-
# Add AI response
|
| 103 |
-
st.session_state.messages.append({"role": "AI", "content": response})
|
| 104 |
-
|
| 105 |
-
# Rerun to update chat
|
| 106 |
-
st.rerun()
|
| 107 |
-
|
| 108 |
-
st.markdown('</div>', unsafe_allow_html=True)
|
| 109 |
|
| 110 |
-
|
| 111 |
-
st.
|
| 112 |
-
st.sidebar.markdown("""
|
| 113 |
-
This chatbot uses a fine-tuned LLM to answer questions about Tesla's manufacturing processes, such as Gigapress, 4680 cells, and AI-driven factory optimization. The data is sourced from Tesla's website, Wikipedia, and industry reports.
|
| 114 |
-
""")
|
|
|
|
# tesla_chatbot.py
"""Streamlit chatbot that answers Tesla-related questions via the OpenAI API."""

import os

import streamlit as st
import openai

# Read the key from the environment; fall back to the original placeholder so
# existing setups that edited this line in place keep working.
# NOTE(review): never commit a real API key to source control.
openai.api_key = os.getenv("OPENAI_API_KEY", "YOUR_OPENAI_API_KEY")

# System prompt that scopes the assistant to Tesla topics only.
BASE_PROMPT = """
You are a helpful Tesla assistant.
You can answer questions about Tesla cars (Model S, 3, X, Y), technology (Autopilot, Full Self Driving), Tesla Energy products (Solar Roof, Powerwall), service, and company information.

If you don't know the answer or it's not Tesla-related, say "I'm sorry, I can only assist with Tesla-related queries."

Answer concisely and clearly.
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# --- Streamlit page chrome -------------------------------------------------
# NOTE(review): the emoji in these strings appear mojibake-encoded in the
# source — verify the intended glyphs; reproduced here exactly as found.
st.set_page_config(page_title="Tesla Chatbot πβ‘", page_icon="π")

st.title("π Tesla Chatbot")
st.write("Ask me anything about Tesla cars, products, and services!")

# Free-text question box; second argument is the default (empty) value.
user_input = st.text_input("You:", "")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# --- Ask button: send the question to OpenAI and display the reply ---------
if st.button("Ask"):
    if user_input.strip() == "":
        # Reject empty / whitespace-only input before spending an API call.
        st.warning("Please type a question.")
    else:
        with st.spinner("Thinking..."):
            try:
                # NOTE(review): openai.ChatCompletion was removed in
                # openai>=1.0 — pin openai<1.0 in requirements, or migrate to
                # openai.OpenAI().chat.completions.create(...).
                response = openai.ChatCompletion.create(
                    model="gpt-3.5-turbo",  # You can use gpt-4 if you have access
                    messages=[
                        {"role": "system", "content": BASE_PROMPT},
                        {"role": "user", "content": user_input},
                    ],
                    temperature=0.5,
                    max_tokens=300,
                )
            except Exception as exc:
                # Surface auth/network/API failures (certain with the
                # placeholder key) instead of crashing the Streamlit run.
                st.error(f"OpenAI request failed: {exc}")
            else:
                reply = response['choices'][0]['message']['content']
                st.success(reply)
|
|
|
|
|
|
|
|