rahulhans commited on
Commit
35a643d
·
verified ·
1 Parent(s): 9ed74ab

Upload 6 files

Browse files
Files changed (6) hide show
  1. README.md +42 -13
  2. app.py +33 -0
  3. chain.py +18 -0
  4. model_loader.py +30 -0
  5. prompt_template.py +12 -0
  6. requirements.txt +6 -0
README.md CHANGED
@@ -1,13 +1,42 @@
1
- ---
2
- title: FeelMate
3
- emoji: 📈
4
- colorFrom: indigo
5
- colorTo: purple
6
- sdk: gradio
7
- sdk_version: 5.25.2
8
- app_file: app.py
9
- pinned: false
10
- license: mit
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # FeelMate 🤗
2
+
3
+ FeelMate is a compassionate chatbot designed to provide emotional support and a safe space for users to express their feelings without judgment.
4
+
5
+ ## Features
6
+
7
+ - 🤝 Empathetic and understanding responses
8
+ - 🔒 Private and secure conversations
9
+ - 💬 Natural and supportive dialogue
10
+ - 🧠 Memory of previous conversations
11
+ - 🎨 Clean and intuitive interface
12
+
13
+ ## Setup
14
+
15
+ 1. Clone this repository
16
+ 2. Install the required dependencies:
17
+ ```bash
18
+ pip install -r requirements.txt
19
+ ```
20
+ 3. Create a `.env` file in the root directory and add your Hugging Face API token:
21
+ ```
22
+ HUGGINGFACEHUB_API_TOKEN=your_api_token_here
23
+ ```
24
+ 4. Run the application:
25
+ ```bash
26
+ python app.py
27
+ ```
28
+
29
+ ## How to Use
30
+
31
+ 1. Launch the application
32
+ 2. Start typing your feelings or concerns in the chat interface
33
+ 3. FeelMate will respond with understanding and support
34
+ 4. Continue the conversation as needed
35
+
36
+ ## Note
37
+
38
+ While FeelMate is designed to provide emotional support, it is not a replacement for professional mental health services. If you're experiencing severe emotional distress, please consider reaching out to a mental health professional.
39
+
40
+ ## License
41
+
42
+ This project is licensed under the MIT License - see the LICENSE file for details.
app.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from chain import get_feelmate_chain
3
+
4
# Build the LangChain conversation chain once at import time so every
# request shares the same memory buffer.
feelmate_chain = get_feelmate_chain()


def chat_with_feelmate(user_message, history):
    """Generate a supportive reply for *user_message* and update the chat history.

    Parameters
    ----------
    user_message : str
        Raw text typed by the user.
    history : list[tuple[str, str]] | None
        Previous (user, bot) exchanges. Gradio's ``gr.State()`` is ``None``
        on the very first call, so it must be normalised to a list here.

    Returns
    -------
    tuple[str, list[tuple[str, str]]]
        The bot's reply and the trimmed, updated history.
    """
    # BUG FIX: gr.State() initialises to None; the original code called
    # history.append(...) on None and raised AttributeError on the first
    # message. Normalise before any use.
    if history is None:
        history = []

    if not user_message.strip():
        return "Please share something with me 💬", history

    # The chain's ConversationBufferMemory records the turn internally.
    response = feelmate_chain.predict(input=user_message)

    # Track the exchange for the UI, keeping only the last 10 turns to
    # bound the payload passed back through gr.State.
    history.append((user_message, response))
    history = history[-10:]

    return response.strip(), history
21
+
22
+ # Gradio interface with memory to display history
23
# Gradio UI wiring: a free-text box for the user's message plus a hidden
# State slot that carries the chat history between calls.
_message_box = gr.Textbox(lines=3, placeholder="Tell me how you're feeling today...")

iface = gr.Interface(
    fn=chat_with_feelmate,
    inputs=[_message_box, gr.State()],
    outputs=[gr.Textbox(), gr.State()],
    title="🫂 FeelMate - Your Emotional Support Buddy",
    description="No judgment. Just kindness, understanding, and a little light. 💜",
    live=True,
)

if __name__ == "__main__":
    iface.launch()
chain.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain.llms import HuggingFacePipeline
2
+ from langchain.chains import ConversationChain
3
+ from langchain.memory import ConversationBufferMemory
4
+ from model_loader import load_openchat
5
+
6
+ def get_feelmate_chain():
7
+ pipe = load_openchat()
8
+ llm = HuggingFacePipeline(pipeline=pipe)
9
+
10
+ memory = ConversationBufferMemory() # stores chat history in memory
11
+
12
+ chain = ConversationChain(
13
+ llm=llm,
14
+ memory=memory,
15
+ verbose=False # You can set to True for debugging
16
+ )
17
+
18
+ return chain
model_loader.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ from transformers import AutoModelForCausalLM, AutoTokenizer
3
+ from langchain_community.llms import HuggingFacePipeline
4
+ from langchain.chains import ConversationChain
5
+ from langchain.memory import ConversationBufferMemory
6
+ from transformers import pipeline
7
+
8
+ def load_openchat():
9
+ # Load the Llama model and tokenizer
10
+ model_name = "meta-llama/Llama-2-7b-chat-hf" # Use your Llama model path or Hugging Face model ID
11
+ model = AutoModelForCausalLM.from_pretrained(model_name)
12
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
13
+
14
+ # Create a pipeline for text generation
15
+ pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
16
+
17
+ # Use LangChain to create a conversation chain with memory
18
+ llm = HuggingFacePipeline(pipeline=pipe)
19
+
20
+ # Set up memory to store the conversation history
21
+ memory = ConversationBufferMemory()
22
+
23
+ # Set up the conversation chain
24
+ conversation_chain = ConversationChain(
25
+ llm=llm,
26
+ memory=memory,
27
+ verbose=True
28
+ )
29
+
30
+ return conversation_chain
prompt_template.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain.prompts import PromptTemplate
2
+
3
+ feelmate_prompt = PromptTemplate(
4
+ input_variables=["user_input"],
5
+ template="""
6
+ You are FeelMate, a kind and empathetic AI friend. You never judge, always listen, and deeply care about the user's emotions.
7
+
8
+ Respond supportively, reflectively, and in a friendly tone. Your goal is to make the user feel heard, understood, and a little better.
9
+
10
+ User: {user_input}
11
+ FeelMate:"""
12
+ )
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ langchain
2
+ transformers
3
+ torch
4
+ accelerate
5
+ gradio
6
+ python-dotenv