Baldezo313 committed on
Commit 1f19aec · verified · 1 Parent(s): 97a039a

Update src/streamlit_app.py

Files changed (1)
  1. src/streamlit_app.py +57 -39
src/streamlit_app.py CHANGED
@@ -1,40 +1,58 @@
- import altair as alt
- import numpy as np
- import pandas as pd
  import streamlit as st
-
- """
- # Welcome to Streamlit!
-
- Edit `/streamlit_app.py` to customize this app to your heart's desire :heart:.
- If you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community
- forums](https://discuss.streamlit.io).
-
- In the meantime, below is an example of what you can do with just a few lines of code:
- """
-
- num_points = st.slider("Number of points in spiral", 1, 10000, 1100)
- num_turns = st.slider("Number of turns in spiral", 1, 300, 31)
-
- indices = np.linspace(0, 1, num_points)
- theta = 2 * np.pi * num_turns * indices
- radius = indices
-
- x = radius * np.cos(theta)
- y = radius * np.sin(theta)
-
- df = pd.DataFrame({
-     "x": x,
-     "y": y,
-     "idx": indices,
-     "rand": np.random.randn(num_points),
- })
-
- st.altair_chart(alt.Chart(df, height=700, width=700)
-     .mark_point(filled=True)
-     .encode(
-         x=alt.X("x", axis=None),
-         y=alt.Y("y", axis=None),
-         color=alt.Color("idx", legend=None, scale=alt.Scale()),
-         size=alt.Size("rand", legend=None, scale=alt.Scale(range=[1, 150])),
-     ))
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ import torch
+
+ st.set_page_config(page_title="OpenChat Bot", page_icon="💬")
+ st.title("🧠 Chatbot - OpenChat 3.5")
+
+ @st.cache_resource
+ def load_model():
+     model_name = "openchat/openchat-3.5-0106"
+     tokenizer = AutoTokenizer.from_pretrained(model_name)
+     model = AutoModelForCausalLM.from_pretrained(
+         model_name,
+         torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
+         device_map="auto"
+     )
+     return tokenizer, model
+
+ tokenizer, model = load_model()
+
+ if "messages" not in st.session_state:
+     st.session_state.messages = [{"role": "assistant", "content": "Bonjour ! Pose-moi une question."}]
+
+ # Afficher les messages précédents
+ for msg in st.session_state.messages:
+     with st.chat_message(msg["role"]):
+         st.markdown(msg["content"])
+
+ def generate_response(prompt, history):
+     history_text = ""
+     for m in history:
+         speaker = "User" if m["role"] == "user" else "Assistant"
+         history_text += f"{speaker}: {m['content']}\n"
+     full_prompt = history_text + f"User: {prompt}\nAssistant:"
+
+     inputs = tokenizer(full_prompt, return_tensors="pt", truncation=True, max_length=2048)
+     inputs = {k: v.to(model.device) for k, v in inputs.items()}
+
+     output = model.generate(
+         **inputs,
+         max_new_tokens=200,
+         do_sample=True,
+         temperature=0.7,
+         top_p=0.95
+     )
+     decoded = tokenizer.decode(output[0], skip_special_tokens=True)
+     return decoded.split("Assistant:")[-1].strip()
+
+ user_input = st.chat_input("Posez votre question ici...")
+ if user_input:
+     st.chat_message("user").markdown(user_input)
+     st.session_state.messages.append({"role": "user", "content": user_input})
+
+     with st.chat_message("assistant"):
+         with st.spinner("OpenChat réfléchit..."):
+             response = generate_response(user_input, st.session_state.messages)
+             st.markdown(response)
+             st.session_state.messages.append({"role": "assistant", "content": response})
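
Review note on the new generate_response: the user message is appended to st.session_state.messages before the function is called, so the history loop already emits the latest "User:" turn and the trailing f"User: {prompt}\nAssistant:" repeats it inside full_prompt. Below is a minimal sketch (not part of this commit) of one way to avoid that duplication; it assumes the module-level tokenizer and model from load_model() and that the history passed in already ends with the current user message, so the separate prompt argument is dropped for illustration.

# Sketch only (not part of this commit): build the prompt from the chat
# history alone, so the latest user turn is not emitted twice.
def generate_response(history):
    history_text = ""
    for m in history:
        speaker = "User" if m["role"] == "user" else "Assistant"
        history_text += f"{speaker}: {m['content']}\n"
    full_prompt = history_text + "Assistant:"

    # Same tokenization and sampling settings as the committed version.
    inputs = tokenizer(full_prompt, return_tensors="pt", truncation=True, max_length=2048)
    inputs = {k: v.to(model.device) for k, v in inputs.items()}
    output = model.generate(
        **inputs,
        max_new_tokens=200,
        do_sample=True,
        temperature=0.7,
        top_p=0.95,
    )
    decoded = tokenizer.decode(output[0], skip_special_tokens=True)
    return decoded.split("Assistant:")[-1].strip()

Since openchat/openchat-3.5-0106 ships a chat template, tokenizer.apply_chat_template(history, add_generation_prompt=True, return_tensors="pt") could likely replace the manual "User:"/"Assistant:" formatting and track the model's expected prompt format more closely; this is offered as a suggestion, not a required change.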