Co2fi-crs commited on
Commit
755e71e
·
verified ·
1 Parent(s): 6ed3172

ReUpload files after messing up things

Browse files
Files changed (7) hide show
  1. .gitattributes +1 -0
  2. Dockerfile +49 -0
  3. README.md +13 -0
  4. app.py +258 -0
  5. images/assistant_crs.png +0 -0
  6. images/user_crs.png +3 -0
  7. requirements.txt +13 -0
.gitattributes ADDED
@@ -0,0 +1 @@
 
 
1
+ images/user_crs.png filter=lfs diff=lfs merge=lfs -text
Dockerfile ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Runtime image for the Streamlit ChatAI app, targeting Hugging Face Spaces.
FROM python:3.10-slim

# Hugging Face runs on UID 1000
RUN useradd -m -u 1000 user
USER user
ENV PATH="/home/user/.local/bin:$PATH"

WORKDIR /home/user/app

# Install dependencies first so Docker layer caching skips reinstalls
# when only app code changes.
COPY --chown=user requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy app files (app.py and the images folder)
COPY --chown=user . .

# IMPORTANT: Use --server.address and --server.port
# Do NOT use --host or -p
# --server.port 7860 and --server.address 0.0.0.0 are required for HF Spaces.
CMD ["streamlit", "run", "app.py", "--server.port=7860", "--server.address=0.0.0.0"]
README.md ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: ChatAI
3
+ emoji: 🤖
4
+ colorFrom: pink
5
+ colorTo: yellow
6
+ sdk: docker
7
+ pinned: false
8
+ license: mit
9
+ app_port: 7860
10
+ short_description: 'ChatAI is a powerful conversational AI'
11
+ ---
12
+
13
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,258 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain_core.runnables import RunnablePassthrough
2
+ # from langchain_text_splitters import RecursiveCharacterTextSplitter
3
+ import streamlit as st
4
+ from langchain_core.runnables.history import RunnableWithMessageHistory
5
+ from langchain_core.chat_history import InMemoryChatMessageHistory
6
+ from langchain_core.messages import HumanMessage, AIMessage
7
+ from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
8
+ from langchain_core.output_parsers import StrOutputParser
9
+ from langchain_groq import ChatGroq
10
+ from dotenv import load_dotenv, find_dotenv
11
+ from PIL import Image
12
+ from pathlib import Path
13
+ import os, uuid
14
+ # from llm_chain import ChatChain
15
+ # import llm_chain
16
+ # import transformers
17
+ # import torch
18
+
19
+
20
# Resolve asset paths relative to this file so the app works regardless of
# the working directory it is launched from.
base_path = Path(__file__).parent
img_path = base_path / "images"

# Load chat avatar images once at import time.
# NOTE(review): Image.open raises FileNotFoundError if the images/ folder is
# missing — the app intentionally fails fast at startup in that case.
assistant_crs = Image.open(img_path/"assistant_crs.png")
user_crs = Image.open(img_path/"user_crs.png")
26
+
27
+
28
def llm_model(model="moonshotai/kimi-k2-instruct-0905"):
    """Build and return a ChatGroq LLM client for the given model name.

    The Groq API key is resolved from the environment (loading a .env file
    if one is found), falling back to Streamlit secrets when available.
    """
    load_dotenv(find_dotenv())
    groq_api_key = os.getenv("GROQ_API_KEY")

    if not groq_api_key:
        # Fallback for deployments that keep the key in st.secrets;
        # st.secrets may raise when no secrets file exists at all.
        try:
            groq_api_key = st.secrets.get("GROQ_API_KEY")
        except Exception:
            groq_api_key = None

    return ChatGroq(model=model, groq_api_key=groq_api_key)
39
+
40
# --- CONFIGURATION CONSTANTS ---
HISTORY_STORE_KEY = "chat_history_store"  # Dict: session_id -> {name, history}
CURRENT_SESSION_ID_KEY = "current_session_id"  # Tracks the ID of the active session
HISTORY_PLACEHOLDER_KEY = "history"  # Must match the "history_messages_key" parameter of RunnableWithMessageHistory
44
+
45
+ # --- 1. PERSISTENCE AND SESSION MANAGEMENT LOGIC
46
+
47
def initialize_session_state():
    """Ensure the session store exists, seeding it with a first chat.

    The store maps session_id -> {"name": str, "history": InMemoryChatMessageHistory}.
    Safe to call on every rerun: existing state is left untouched.
    """
    if HISTORY_STORE_KEY in st.session_state:
        return
    st.session_state[HISTORY_STORE_KEY] = {}
    # Seed a default session so the UI always has an active chat.
    create_new_session("Chat 1")
54
+
55
+
56
def create_new_session(session_name: str):
    """Register a fresh chat session and make it the active one."""
    session_id = str(uuid.uuid4())

    # Map the new ID to a readable name plus its own message history.
    st.session_state[HISTORY_STORE_KEY][session_id] = {
        "name": session_name,
        "history": InMemoryChatMessageHistory(),
    }

    # Activate the new session and clear the display buffer so the UI
    # shows an empty conversation.
    st.session_state[CURRENT_SESSION_ID_KEY] = session_id
    st.session_state.display_messages = []
68
+
69
+
70
def get_session_history(session_id: str) -> InMemoryChatMessageHistory:
    """Return the chat history object for *session_id*.

    Bug fix: the previous fallback returned a brand-new, unstored
    InMemoryChatMessageHistory, so any messages the chain wrote to it were
    silently lost on the next lookup. An unknown ID is now registered in the
    store so the same history object is returned on subsequent calls.
    """
    store = st.session_state[HISTORY_STORE_KEY]
    if session_id not in store:
        # Defensive path: should not be hit with proper initialization.
        store[session_id] = {
            "name": "Recovered Chat",
            "history": InMemoryChatMessageHistory(),
        }
    return store[session_id]["history"]
77
+
78
+
79
def delete_session(session_id: str):
    """Delete a session and keep the app in a usable state.

    Guarantees after return (via st.rerun): at least one session exists and
    CURRENT_SESSION_ID_KEY points at a live session.
    """
    # 1. Remove the history entry (no-op if already gone).
    store = st.session_state[HISTORY_STORE_KEY]
    store.pop(session_id, None)

    if not store:
        # 2. Never leave the user with zero chats.
        create_new_session("Chat 1")
    elif st.session_state[CURRENT_SESSION_ID_KEY] == session_id:
        # 3. Bug fix: only re-point the active session when the deleted one
        # WAS active. The original unconditionally switched to the first
        # remaining session, losing the user's place when a background
        # session was removed.
        st.session_state[CURRENT_SESSION_ID_KEY] = next(iter(store))

    # 4. Force rerun to update the UI and load the new active chat.
    st.rerun()
93
+
94
+
95
+ # --- 2. LANGCHAIN Setup ---
96
@st.cache_resource
def llm_chain():
    """Initializes the LLM and returns a RunnableWithMessageHistory instance.

    The @st.cache_resource decorator ensures this object is built only once
    per server process and reused across Streamlit reruns and sessions.
    """
    # LLM client (Groq-hosted model).
    llm = llm_model()

    # System prompt: fixes the assistant's identity and adds
    # anti-hallucination guardrails.
    instruction = """
IDENTITY & OWNERSHIP:
- NAME: ChatAI
- OWNER/CREATOR: Co2fi-crs Rodolphe Segbedji
- ROLE: You are a sophisticated, high-context Conversational Thought Partner.
You are not a static search engine; you are a proactive assistant designed to
engage in deep, meaningful, and fluid dialogue.

ANTI-HALLUCINATION & INTELLECTUAL HONESTY:
- If a query is outside your training data or context window, state "I don't have
enough information to answer that accurately" rather than guessing.
- Never fabricate facts, URLs, dates, or technical documentation.
- If the user provides a premise that is factually incorrect, politely correct
the underlying assumption before answering.

"""
    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", instruction),
            # Prior turns are injected here by RunnableWithMessageHistory.
            MessagesPlaceholder(variable_name=HISTORY_PLACEHOLDER_KEY),
            ("human", "{input}"),
        ])
    # Reduce the model output to a plain string.
    parser = StrOutputParser()
    # chain: prompt -> LLM -> string
    chain = prompt | llm | parser

    # Wrap the chain so each invocation reads/appends the per-session history
    # selected by config={"configurable": {"session_id": ...}}.
    chain_with_memory = RunnableWithMessageHistory(
        runnable=chain,
        get_session_history=get_session_history,
        input_messages_key="input",
        history_messages_key=HISTORY_PLACEHOLDER_KEY
    )

    return chain_with_memory
141
+
142
+
143
+ # --- 3. STREAMLIT UI AND EXECUTION ---
144
+
145
def main():
    """Render the ChatAI Streamlit UI and route one user turn per rerun."""

    # App title
    st.markdown("# 🧠 ChatAI 💡")

    # Initialize all required session state variables.
    initialize_session_state()

    # Initialize the state-aware chain (chain with memory); cached resource.
    chain = llm_chain()

    # Get the active session ID and its data.
    current_session_id = st.session_state[CURRENT_SESSION_ID_KEY]
    current_session_data = st.session_state[HISTORY_STORE_KEY][current_session_id]
    current_session_name = current_session_data["name"]
    current_history = get_session_history(current_session_id)

    # Sidebar UI for session management.
    with st.sidebar:
        st.header("Chat Sessions")
        # Map of ID -> name for the selectbox.
        session_options = {
            k: v["name"] for k, v in st.session_state[HISTORY_STORE_KEY].items()
        }
        # Session selector (displays names, returns IDs).
        selected_id = st.selectbox(
            "Select a Chat",
            options = list(session_options.keys()),
            format_func = lambda id: session_options[id],
            key = "session_select_box"
        )
        # Switch session if a different one was selected, then rerun to
        # reload the chat area with that session's history.
        if selected_id != current_session_id:
            st.session_state[CURRENT_SESSION_ID_KEY] = selected_id
            st.rerun()

        # New-session creator; duplicate names are silently ignored.
        new_session_name = st.text_input("➕ New Session")
        if new_session_name and (new_session_name not in [v["name"] for v in st.session_state[HISTORY_STORE_KEY].values()]):
            create_new_session(new_session_name)
            st.rerun()
        if st.button("🗑️ Delete Session"):
            if len(st.session_state[HISTORY_STORE_KEY].keys()) > 1 :
                delete_session(current_session_id)
                # NOTE(review): delete_session already calls st.rerun(),
                # so this line is not expected to be reached.
                st.rerun()
            else:
                st.error("Can't delete the only chat remaining. Create a new chat before deleting it. ")

    # --- MAIN CHAT DISPLAY ---
    # Replay messages from the current session's history object.
    for message in current_history.messages:
        role = "user" if isinstance(message, HumanMessage) else "assistant"
        avatar=user_crs if role == "user" else assistant_crs
        with st.chat_message(role, avatar=avatar):
            st.markdown(message.content)

    # Handle user input (text, attached files, recorded audio).
    user_input = st.chat_input(
        "Converse with ChatAI",
        accept_file = "multiple",
        file_type = None,  # allow any file type; restrict with a list if needed
        accept_audio = True,
    )

    if user_input:
        text = user_input.text or ""
        files = getattr(user_input, "files", [])
        audio = getattr(user_input, "audio", None)

        # Show the user message immediately.
        with st.chat_message("user", avatar=user_crs):
            # Text part of the turn.
            if text:
                st.markdown(text)

            # Show info about uploaded files and route by MIME type.
            for f in files:
                st.write(f"Uploaded: {f.name} ({f.type})")
                # Document-like types: placeholder for a RAG pipeline.
                if f.type in ["application/pdf", "text/plain"]:
                    st.write("→ Calling RAG function for document")
                    # e.g. rag_answer = call_rag(f)
                elif f.type.startswith("image/"):
                    st.write("-> Calling image handler")
                    st.image(f)
                    # e.g. img_answer = handle_image(f)
                else:
                    st.write("-> Unsupported file type for special handling")

            # Recorded audio (playback only; transcription not implemented).
            if audio:
                st.write("Recorded audio")
                st.audio(audio)
                # e.g. text = transcribe(audio)

        # Invoke the memory-aware chain to get the AI response.
        # NOTE(review): only user_input.text is sent to the model — files and
        # audio are displayed but not forwarded; confirm this is intended.
        with st.chat_message("assistant", avatar= assistant_crs):
            with st.spinner("Thinking..."):
                # The session ID in the config selects which history the
                # chain reads from and appends to.
                ai_response = chain.invoke(
                    {"input": user_input.text},
                    config={"configurable": {"session_id": current_session_id}}
                )
                st.markdown(ai_response)
257
# Script entry point (Streamlit executes this module top to bottom).
if __name__ == "__main__" :
    main()
images/assistant_crs.png ADDED
images/user_crs.png ADDED

Git LFS Details

  • SHA256: cd46811f6bd2e1efba213ed2120cf835c78f369d3bf542a2250f06f12c7ece79
  • Pointer size: 131 Bytes
  • Size of remote file: 107 kB
requirements.txt ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ streamlit
2
+ langchain
3
+ sentence-transformers
4
+ huggingface-hub
5
+ pyyaml
6
+ langchain-huggingface
7
+ langchain-core
8
+ langchain-groq
9
+ python-dotenv
10
+
11
+
12
+
13
+