Redfire-1234 committed on
Commit
2cab077
·
verified ·
1 Parent(s): 542d090

Update src/streamlit_app.py

Browse files
Files changed (1) hide show
  1. src/streamlit_app.py +117 -40
src/streamlit_app.py CHANGED
@@ -1,40 +1,117 @@
1
import altair as alt
import numpy as np
import pandas as pd
import streamlit as st

"""
# Welcome to Streamlit!

Edit `/streamlit_app.py` to customize this app to your heart's desire :heart:.
If you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community
forums](https://discuss.streamlit.io).

In the meantime, below is an example of what you can do with just a few lines of code:
"""

# Interactive controls: sample count along the spiral and how many full
# turns it makes (defaults: 1100 points, 31 turns).
num_points = st.slider("Number of points in spiral", 1, 10000, 1100)
num_turns = st.slider("Number of turns in spiral", 1, 300, 31)

# Parametric spiral: the parameter t runs 0..1 and doubles as the radius,
# so the curve winds outward from the origin.
t = np.linspace(0, 1, num_points)
angle = 2 * np.pi * num_turns * t

spiral = pd.DataFrame({
    "x": t * np.cos(angle),
    "y": t * np.sin(angle),
    "idx": t,
    "rand": np.random.randn(num_points),
})

# Scatter plot: color tracks position along the spiral, point size is random.
chart = (
    alt.Chart(spiral, height=700, width=700)
    .mark_point(filled=True)
    .encode(
        x=alt.X("x", axis=None),
        y=alt.Y("y", axis=None),
        color=alt.Color("idx", legend=None, scale=alt.Scale()),
        size=alt.Size("rand", legend=None, scale=alt.Scale(range=[1, 150])),
    )
)
st.altair_chart(chart)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Generator script: materializes a Streamlit chat UI as `app.py`.
# The generated app loads the Qwen2.5-1.5B-Instruct base model, applies a
# LoRA adapter pulled from the Hugging Face Hub, and serves a simple
# single-turn chat loop over it.
#
# NOTE(review): several emoji literals below look mojibake'd (e.g. "πŸ€–");
# they are reproduced verbatim from the original source — confirm the
# intended glyphs and re-save this file as UTF-8 if so.
app_code = '''import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel

st.set_page_config(
    page_title="AI Agent Chatbot V2",
    page_icon="πŸ€–",
    layout="wide"
)

BASE_MODEL = "Qwen/Qwen2.5-1.5B-Instruct"
LORA_REPO = "Redfire-1234/AI-agent-v2"  # Updated repo name

if "messages" not in st.session_state:
    st.session_state.messages = []

@st.cache_resource
def load_model():
    """Load tokenizer, fp16 base model, and LoRA adapter once per process."""
    tokenizer = AutoTokenizer.from_pretrained(LORA_REPO)
    base_model = AutoModelForCausalLM.from_pretrained(
        BASE_MODEL,
        torch_dtype=torch.float16,
        device_map="auto",
        low_cpu_mem_usage=True
    )
    model = PeftModel.from_pretrained(base_model, LORA_REPO)
    model.eval()
    return tokenizer, model

def generate_response(tokenizer, model, user_input):
    """Generate one assistant reply for a single-turn user message."""
    messages = [{"role": "user", "content": user_input}]

    try:
        prompt = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True
        )
    except Exception:
        # Tokenizers without a chat template fall back to a plain prompt.
        prompt = f"User: {user_input}\\nAssistant:"

    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=200,
            temperature=0.7,
            do_sample=True,
            top_p=0.9,
            repetition_penalty=1.1,
            pad_token_id=tokenizer.eos_token_id
        )

    # Slice off the prompt tokens so only newly generated text is decoded.
    new_tokens = outputs[0][inputs['input_ids'].shape[1]:]
    reply = tokenizer.decode(new_tokens, skip_special_tokens=True)
    return reply.strip()

# UI
col1, col2 = st.columns([6, 1])
with col1:
    st.title("πŸ€– AI Agent Chatbot V2")
    st.caption("✨ Improved with Natural Conversations")
with col2:
    if st.button("πŸ—‘οΈ New Chat", use_container_width=True):
        st.session_state.messages = []
        st.rerun()

with st.spinner("Loading model..."):
    tokenizer, model = load_model()

chat_container = st.container()
with chat_container:
    if len(st.session_state.messages) == 0:
        st.info("πŸ‘‹ **Welcome!** Ask me anything.\\n\\n⚠️ Type 'q' to end conversation")

    for message in st.session_state.messages:
        with st.chat_message(message["role"], avatar="πŸ‘€" if message["role"]=="user" else "πŸ€–"):
            st.write(message["content"])

user_input = st.chat_input("Type your message... (Type 'q' to end)")

if user_input:
    if user_input.strip().lower() == 'q':
        st.session_state.messages.append({
            "role": "assistant",
            "content": "πŸ‘‹ Goodbye! Click 'New Chat' to start fresh."
        })
        st.rerun()

    st.session_state.messages.append({"role": "user", "content": user_input})

    with st.spinner("Thinking..."):
        reply = generate_response(tokenizer, model, user_input)

    st.session_state.messages.append({"role": "assistant", "content": reply})
    st.rerun()

with st.sidebar:
    st.header("ℹ️ About")
    st.write("**Version:** 2.0 - Natural Conversations")
    st.write("**Base Model:** Qwen 2.5 1.5B")
    st.write("**Fine-tuned:** Yes (LoRA)")

    st.divider()

    st.header("πŸ“Š Stats")
    st.metric("Messages", len(st.session_state.messages))

    st.divider()

    st.info("✨ **New in V2:**\\n- Natural greetings\\n- Better responses\\n- No more artifacts")
'''

# Explicit encoding is required: app_code contains non-ASCII characters,
# and the platform-default codec (e.g. cp1252 on Windows) would raise
# UnicodeEncodeError on write.
with open("app.py", "w", encoding="utf-8") as f:
    f.write(app_code)