Rhodham96 committed on
Commit
84f5e0b
·
1 Parent(s): 8685642

first commit of the app

Browse files
Files changed (3) hide show
  1. Dockerfile +18 -0
  2. chat-streamlit-app.py +79 -0
  3. requirements.txt +109 -0
Dockerfile ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Use the official Python 3.11.6 image
FROM python:3.11.6

# Set the working directory BEFORE copying files. The original placed
# WORKDIR last, after every COPY had already landed in the image root /,
# so the directive had no effect on them.
WORKDIR /app

# Install dependencies first so this layer stays cached while app code changes
COPY requirements.txt .
RUN pip install --no-cache-dir --upgrade -r requirements.txt

# Copy the rest of the app
COPY . .

# Expose the port (optional, but tidy)
EXPOSE 7860

# Start the Streamlit app (CMD resolves the script relative to WORKDIR)
CMD ["streamlit", "run", "chat-streamlit-app.py", "--server.port", "7860", "--server.address", "0.0.0.0"]
chat-streamlit-app.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import streamlit as st
import logging
import time
from transformers import pipeline

logging.basicConfig(level=logging.INFO)

# Available models: display label -> Hugging Face model id.
# Disabled entry kept for reference:
# "Meta-LLaMA 3 8B": "meta-llama/Meta-Llama-3-8B-Instruct",
MODEL_DICT = {
    "DistilGPT-2": "distilgpt2",
    "Falcon3-7B-Base": "tiiuae/Falcon3-7B-Base",
}

# Seed per-session state on the first run of the script.
for _key, _default in (("messages", []), ("current_model", None)):
    if _key not in st.session_state:
        st.session_state[_key] = _default
def load_generator(model_name):
    """Build and return a Hugging Face text-generation pipeline for *model_name*."""
    generator = pipeline("text-generation", model=model_name)
    return generator
+
def stream_chat(generator, messages):
    """Generate an assistant reply from the conversation history.

    Args:
        generator: text-generation pipeline; called with the flattened prompt
            and sampling kwargs, expected to return ``[{"generated_text": ...}]``.
        messages: list of ``{"role": ..., "content": ...}`` chat dicts.

    Returns:
        Only the newly generated text — the prompt is not echoed back.

    Raises:
        Exception: re-raised after logging if generation fails.
    """
    try:
        # Flatten the chat history into a single plain-text prompt.
        prompt = "\n".join(f"{msg['role']}: {msg['content']}" for msg in messages)
        output = generator(
            prompt,
            # max_length counts the prompt tokens too and truncates long
            # histories; max_new_tokens bounds only the generated text.
            max_new_tokens=512,
            do_sample=True,
            temperature=0.7,
            # Without this the pipeline prepends the whole prompt to the
            # reply, so the chat UI would redisplay the entire history.
            return_full_text=False,
        )
        return output[0]['generated_text']
    except Exception as e:
        logging.error("Error during generation: %s", e)
        raise  # bare raise preserves the original traceback
+
def main():
    """Render the Streamlit chat UI: pick a model, take input, show the reply."""
    st.title("Chat with Multiple LLMs")
    logging.info("App started")

    # Sidebar for model selection
    selected_model_key = st.sidebar.selectbox("Choose a model", list(MODEL_DICT.keys()))
    selected_model_name = MODEL_DICT[selected_model_key]

    # (Re)load the generator only if the selection changed; the pipeline is
    # cached in session_state so it survives Streamlit reruns.
    if st.session_state.current_model != selected_model_name:
        with st.spinner(f"Loading model {selected_model_key}..."):
            st.session_state.generator = load_generator(selected_model_name)
            st.session_state.current_model = selected_model_name
            st.success(f"Model {selected_model_key} loaded!")

    if prompt := st.chat_input("Your question"):
        st.session_state.messages.append({"role": "user", "content": prompt})
        logging.info(f"User input: {prompt}")

    # Display conversation
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.write(message["content"])

    # Guard the emptiness case: on the very first run, before any input,
    # messages is [] and messages[-1] would raise IndexError.
    if st.session_state.messages and st.session_state.messages[-1]["role"] == "user":
        with st.chat_message("assistant"):
            start_time = time.time()
            logging.info("Generating response")

            with st.spinner("Writing..."):
                try:
                    response_message = stream_chat(st.session_state.generator, st.session_state.messages)
                    duration = time.time() - start_time
                    response_with_duration = f"{response_message}\n\n⏱ Duration: {duration:.2f} seconds"
                    st.session_state.messages.append({"role": "assistant", "content": response_with_duration})
                    st.write(response_with_duration)
                    logging.info(f"Response generated in {duration:.2f}s")

                except Exception as e:
                    # Keep the error in the transcript so the user sees why
                    # the assistant turn is missing.
                    st.session_state.messages.append({"role": "assistant", "content": str(e)})
                    st.error("An error occurred while generating the response.")
                    logging.error(f"Error: {str(e)}")


if __name__ == "__main__":
    main()
requirements.txt ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ aiohappyeyeballs==2.6.1
2
+ aiohttp==3.11.18
3
+ aiosignal==1.3.2
4
+ altair==5.5.0
5
+ annotated-types==0.7.0
6
+ anyio==4.9.0
7
+ attrs==25.3.0
8
+ banks==2.1.2
9
+ beautifulsoup4==4.13.4
10
+ blinker==1.9.0
11
+ cachetools==5.5.2
12
+ certifi==2025.4.26
13
+ charset-normalizer==3.4.2
14
+ click==8.1.8
15
+ colorama==0.4.6
16
+ dataclasses-json==0.6.7
17
+ Deprecated==1.2.18
18
+ dirtyjson==1.0.8
19
+ distro==1.9.0
20
+ filelock==3.18.0
21
+ filetype==1.2.0
22
+ frozenlist==1.6.0
23
+ fsspec==2025.3.2
24
+ gitdb==4.0.12
25
+ GitPython==3.1.44
26
+ greenlet==3.2.1
27
+ griffe==1.7.3
28
+ h11==0.16.0
29
+ hf-xet==1.1.0
30
+ httpcore==1.0.9
31
+ httpx==0.28.1
32
+ huggingface-hub==0.31.1
33
+ idna==3.10
34
+ Jinja2==3.1.6
35
+ jiter==0.9.0
36
+ joblib==1.5.0
37
+ jsonschema==4.23.0
38
+ jsonschema-specifications==2025.4.1
39
+ llama-cloud==0.1.19
40
+ llama-cloud-services==0.6.21
41
+ llama-index==0.12.34
42
+ llama-index-agent-openai==0.4.7
43
+ llama-index-cli==0.4.1
44
+ llama-index-core==0.12.34.post1
45
+ llama-index-embeddings-openai==0.3.1
46
+ llama-index-indices-managed-llama-cloud==0.6.11
47
+ llama-index-llms-ollama==0.5.4
48
+ llama-index-llms-openai==0.3.38
49
+ llama-index-multi-modal-llms-openai==0.4.3
50
+ llama-index-program-openai==0.3.1
51
+ llama-index-question-gen-openai==0.3.0
52
+ llama-index-readers-file==0.4.7
53
+ llama-index-readers-llama-parse==0.4.0
54
+ llama-parse==0.6.21
55
+ MarkupSafe==3.0.2
56
+ marshmallow==3.26.1
57
+ mpmath==1.3.0
58
+ multidict==6.4.3
59
+ mypy_extensions==1.1.0
60
+ narwhals==1.38.0
61
+ nest-asyncio==1.6.0
62
+ networkx==3.4.2
63
+ nltk==3.9.1
64
+ numpy==2.2.5
65
+ ollama==0.4.8
66
+ openai==1.77.0
67
+ packaging==24.2
68
+ pandas==2.2.3
69
+ pillow==11.2.1
70
+ platformdirs==4.3.8
71
+ propcache==0.3.1
72
+ protobuf==6.30.2
73
+ pyarrow==20.0.0
74
+ pydantic==2.11.4
75
+ pydantic_core==2.33.2
76
+ pydeck==0.9.1
77
+ pypdf==5.4.0
78
+ python-dateutil==2.9.0.post0
79
+ python-dotenv==1.1.0
80
+ pytz==2025.2
81
+ PyYAML==6.0.2
82
+ referencing==0.36.2
83
+ regex==2024.11.6
84
+ requests==2.32.3
85
+ rpds-py==0.24.0
86
+ safetensors==0.5.3
87
+ six==1.17.0
88
+ smmap==5.0.2
89
+ sniffio==1.3.1
90
+ soupsieve==2.7
91
+ SQLAlchemy==2.0.40
92
+ streamlit==1.45.0
93
+ striprtf==0.0.26
94
+ sympy==1.14.0
95
+ tenacity==9.1.2
96
+ tiktoken==0.9.0
97
+ tokenizers==0.21.1
98
+ toml==0.10.2
99
+ torch==2.7.0
100
+ tornado==6.4.2
101
+ tqdm==4.67.1
102
+ transformers==4.51.3
103
+ typing-inspect==0.9.0
104
+ typing-inspection==0.4.0
105
+ typing_extensions==4.13.2
106
+ tzdata==2025.2
107
+ urllib3==2.4.0
108
+ wrapt==1.17.2
109
+ yarl==1.20.0