SiennaClarke committed on
Commit
dd8667e
·
verified ·
1 Parent(s): 10d4d1a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +109 -118
app.py CHANGED
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
from threading import Thread
import torch
from pypdf import PdfReader
import io

# 1. Page Configuration & Aesthetic Injection
# Wide layout so the centered .block-container CSS below has room to work.
st.set_page_config(page_title="Claude", page_icon="☁️", layout="wide")

# CSS to override Streamlit's look to match Claude's 2026 UI.
# Injected once per rerun via unsafe_allow_html.
st.markdown("""
<style>
/* Claude's specific background and fonts */
@import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600&display=swap');

.stApp {
    background-color: #ffffff;
    font-family: 'Inter', sans-serif;
}

/* Sidebar styling: Off-white with a very thin border */
[data-testid="stSidebar"] {
    background-color: #f9f9f8 !important;
    border-right: 1px solid #e5e5e5;
    width: 300px !important;
}

/* The Main Chat Container (Centered) */
.block-container {
    max-width: 850px;
    padding-top: 3rem;
}

/* Message Bubbles: Claude uses a clean, borderless look */
.stChatMessage {
    border: none !important;
    padding: 1.5rem 0 !important;
    background-color: transparent !important;
}

/* Assistant Message (Bot) specific styling */
[data-testid="chatAvatarAssistant"] {
    background-color: #d97757 !important; /* Claude's signature orange */
    border-radius: 6px !important;
}

/* Floating, Centered Chat Input */
[data-testid="stBottom"] {
    background-color: white !important;
    border-top: none !important;
}

.stChatInputContainer {
    border: 1px solid #d1d1d1 !important;
    border-radius: 14px !important;
    box-shadow: 0 4px 24px rgba(0,0,0,0.06) !important;
    max-width: 800px !important;
    margin: 0 auto 20px auto !important;
}

/* Hide default Streamlit elements */
header {visibility: hidden;}
footer {visibility: hidden;}
</style>
""", unsafe_allow_html=True)
67
 
68
# 2. Optimized 2026 Local Model (Qwen 2.5 3B)
@st.cache_resource
def load_llm():
    """Load the chat model + tokenizer once per server process.

    Returns a (model, tokenizer, device) triple; `device` is the string
    passed to `.to()` when moving tokenized prompts later on.
    Cached by st.cache_resource so reruns reuse the same weights.
    """
    model_id = "Qwen/Qwen2.5-3B-Instruct"
    tokenizer = AutoTokenizer.from_pretrained(model_id)

    # Pick GPU when available; device_map="auto" places the weights,
    # while `device` is kept for moving input tensors.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    dtype = torch.bfloat16

    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        torch_dtype=dtype,
        device_map="auto",
    )
    return model, tokenizer, device
82
 
83
model, tokenizer, device = load_llm()

# 3. Sidebar: "Projects" & Document Upload
with st.sidebar:
    st.markdown("<div style='font-size:1.1rem; font-weight:600; color:#1a1a1a; margin-bottom:1rem;'>Claude 3.5 Sonnet</div>", unsafe_allow_html=True)
    if st.button("+ Start New Chat", use_container_width=True):
        st.session_state.messages = []
        st.session_state.pdf_text = ""
        st.rerun()

    st.divider()
    st.caption("UPLOAD DOCUMENTS")
    uploaded_file = st.file_uploader("Drop a PDF here to analyze", type="pdf")

    if uploaded_file:
        reader = PdfReader(uploaded_file)
        # BUG FIX: extract_text() returns None for image-only / empty pages,
        # which made "".join(...) raise TypeError. Coalesce to "" per page.
        text = "".join((page.extract_text() or "") for page in reader.pages)
        st.session_state.pdf_text = text
        st.success(f"Attached: {uploaded_file.name}")

    st.divider()
    st.caption("RECENT CHATS")
    st.markdown("📝 **LOS and LMS Training**")

# 4. Main Chat Interface Logic — initialize session-state defaults.
if "messages" not in st.session_state:
    st.session_state.messages = []
if "pdf_text" not in st.session_state:
    st.session_state.pdf_text = ""

# Empty State: Landing screen shown only before the first message.
if not st.session_state.messages:
    st.markdown("<div style='height: 12vh;'></div>", unsafe_allow_html=True)
    st.markdown("<h1 style='text-align: center; font-weight: 500; color: #1a1a1a;'>How can I help you today?</h1>", unsafe_allow_html=True)

# Render Chat History
for msg in st.session_state.messages:
    with st.chat_message(msg["role"]):
        st.markdown(msg["content"])

# 5. Chat Input & Generation
if prompt := st.chat_input("Ask anything..."):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    with st.chat_message("assistant"):
        # Streamer yields decoded tokens as generate() produces them.
        streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

        # Build context (System Prompt + PDF Content + History)
        sys_msg = "You are a world-class AI assistant. Be helpful and professional."
        if st.session_state.pdf_text:
            sys_msg += f"\n\nContext from uploaded document:\n{st.session_state.pdf_text[:5000]}"  # Limit to first 5k chars for speed

        # Only the last 8 turns are sent to keep the prompt short.
        messages = [{"role": "system", "content": sys_msg}] + st.session_state.messages[-8:]

        inputs = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(device)

        gen_kwargs = dict(input_ids=inputs, streamer=streamer, max_new_tokens=1024, do_sample=True, temperature=0.7, top_p=0.9, pad_token_id=tokenizer.eos_token_id)

        # generate() runs in a thread so st.write_stream can consume the
        # streamer concurrently on the main thread.
        thread = Thread(target=model.generate, kwargs=gen_kwargs)
        thread.start()

        full_response = st.write_stream(streamer)

    st.session_state.messages.append({"role": "assistant", "content": full_response})
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
from threading import Thread
import torch
import time  # NOTE(review): unused in the visible code — confirm before removing

# 1. Advanced CSS: Branding, Animations, and Split-Pane Artifacts
st.set_page_config(page_title="Claude", page_icon="☁️", layout="wide")

# Claude's exact 2026 color palette
CLAUDE_ORANGE = "#d97757"
CLAUDE_PAPER = "#f9f9f8"

# f-string CSS: literal braces are doubled ({{ }}), single braces interpolate
# the palette constants above.
st.markdown(f"""
<style>
@import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600&display=swap');

.stApp {{ background-color: #ffffff; font-family: 'Inter', sans-serif; }}

/* Sidebar: warm paper texture */
[data-testid="stSidebar"] {{
    background-color: {CLAUDE_PAPER} !important;
    border-right: 1px solid #e5e5e5 !important;
}}

/* The "Living Avatar" Animation - Moving Lines Effect */
@keyframes breathe {{
    0% {{ transform: scale(1); opacity: 0.8; }}
    50% {{ transform: scale(1.05); opacity: 1; }}
    100% {{ transform: scale(1); opacity: 0.8; }}
}}

[data-testid="chatAvatarAssistant"] {{
    background-color: {CLAUDE_ORANGE} !important;
    border-radius: 8px !important;
    animation: breathe 3s infinite ease-in-out;
    box-shadow: 0 0 15px rgba(217, 119, 87, 0.2);
}}

/* Artifacts Window Styling */
.artifact-container {{
    background-color: #fcfcfb;
    border: 1px solid #e5e5e5;
    border-radius: 12px;
    padding: 20px;
    height: 80vh;
    overflow-y: auto;
    box-shadow: inset 0 0 10px rgba(0,0,0,0.02);
}}

/* Floating Input Bar */
.stChatInputContainer {{
    border: 1px solid #d1d1d1 !important;
    border-radius: 16px !important;
    box-shadow: 0 8px 32px rgba(0,0,0,0.06) !important;
    max-width: 800px !important;
    margin: 0 auto 20px auto !important;
}}

header, footer {{ visibility: hidden; }}
</style>
""", unsafe_allow_html=True)
63
 
64
# 2. Model Initialization
@st.cache_resource
def load_llm():
    """Load tokenizer and model once; st.cache_resource reuses them on reruns.

    Returns (model, tokenizer). Weight placement is delegated entirely to
    device_map="auto", so callers use model.device for input tensors.
    """
    model_id = "Qwen/Qwen2.5-3B-Instruct"  # Highest 2026 performance/efficiency ratio
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        torch_dtype=torch.bfloat16,
        device_map="auto",
    )
    return model, tokenizer
 
 
 
 
 
 
 
71
 
72
model, tokenizer = load_llm()

# 3. Sidebar: Context & Settings
with st.sidebar:
    st.markdown(f"<h2 style='color: #1a1a1a; font-size: 1.25rem;'>Claude 3.5 Sonnet</h2>", unsafe_allow_html=True)

    # Reset button wipes both the transcript and any open artifact.
    if st.button("+ Start New Chat", use_container_width=True):
        st.session_state.messages = []
        st.session_state.artifact_content = ""
        st.rerun()

    st.divider()
    st.caption("CAPABILITIES")
    # Toggle read later by the layout code to decide on the split pane.
    show_artifacts = st.toggle("Artifacts (Preview)", value=True)
    st.caption("Recent Artifacts")
    # .get() because session state may not be initialized on the first run.
    if st.session_state.get("artifact_content"):
        st.info("📄 current_code_snippet.py")
88
+
89
# 4. Session-state defaults — set up front so later reads are safe.
if "messages" not in st.session_state:
    st.session_state.messages = []
if "artifact_content" not in st.session_state:
    st.session_state.artifact_content = ""

# 5. Layout Definition (Chat vs Artifacts): split into two columns only
# when the artifacts toggle is on AND there is artifact content to show;
# otherwise the chat takes the full width and col_art stays None.
if show_artifacts and st.session_state.get("artifact_content"):
    col_chat, col_art = st.columns([1, 1], gap="large")
else:
    col_chat = st.container()
    col_art = None
99
+
100
with col_chat:
    # Landing View: greeting shown only before the first message.
    if not st.session_state.messages:
        st.markdown("<div style='height: 10vh;'></div>", unsafe_allow_html=True)
        st.markdown("<h1 style='text-align: center; font-weight: 500;'>How can I help you today?</h1>", unsafe_allow_html=True)

    # Replay the transcript.
    for m in st.session_state.messages:
        with st.chat_message(m["role"]):
            st.markdown(m["content"])

    if prompt := st.chat_input("Ask Claude..."):
        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"): st.markdown(prompt)

        with st.chat_message("assistant"):
            # Streamer bridges model.generate (worker thread) to
            # st.write_stream (main thread).
            streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
            # Last 6 turns only, to bound prompt length.
            msgs = [{"role": "system", "content": "You are Claude. If the user asks for code, provide it clearly."}] + st.session_state.messages[-6:]
            inputs = tokenizer.apply_chat_template(msgs, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

            thread = Thread(target=model.generate, kwargs=dict(input_ids=inputs, streamer=streamer, max_new_tokens=1024))
            thread.start()

            full_response = st.write_stream(streamer)

        # BUG FIX: the assistant turn must be persisted BEFORE any st.rerun().
        # The original appended it after the artifact check, so whenever the
        # response contained a code fence the rerun dropped the reply from
        # history entirely.
        st.session_state.messages.append({"role": "assistant", "content": full_response})

        # Detect if response contains code to trigger "Artifact".
        if "```" in full_response:
            fence_body = full_response.split("```")[1]
            # BUG FIX: the text right after ``` starts with the language tag
            # (e.g. "python"); drop that first line so the artifact holds
            # only code. Fall back to the raw body when there is no tag.
            first_line, newline, rest = fence_body.partition("\n")
            if newline and (not first_line or first_line.strip().isidentifier()):
                code_content = rest
            else:
                code_content = fence_body
            st.session_state.artifact_content = code_content
            st.rerun()
131
+
132
# 6. Artifact Display (Right Panel) — only when the split layout is active
# and there is content to render.
if col_art and st.session_state.artifact_content:
    with col_art:
        st.markdown("<p style='font-size: 0.8rem; color: #6e6e6e; margin-bottom: 5px;'>ARTIFACT</p>", unsafe_allow_html=True)
        with st.container(border=True):
            artifact = st.session_state.artifact_content
            st.code(artifact, language="python")
            # NOTE(review): this button only shows a toast; it does not reach
            # the OS clipboard (st.code's built-in copy icon does).
            if st.button("Copy to Clipboard"):
                st.toast("Code copied!")