devilsa commited on
Commit
255977c
·
verified ·
1 Parent(s): 8883e9a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -15
app.py CHANGED
@@ -1,12 +1,12 @@
1
  import streamlit as st
2
  import faiss
3
  from sentence_transformers import SentenceTransformer
4
- from openai import OpenAI
5
 
6
- # Initialize OpenAI API
7
- api_key = "[REDACTED — leaked API key committed to version control; revoke this key immediately]"
8
- base_url = "https://api.aimlapi.com/v1"
9
- api = OpenAI(api_key=api_key, base_url=base_url)
10
 
11
  # Initialize Sentence Transformer
12
  embedding_model = SentenceTransformer('all-MiniLM-L6-v2')
@@ -35,18 +35,25 @@ def embed_and_store(chunks):
35
  embeddings = embedding_model.encode(chunks)
36
  index.add(embeddings)
37
 
38
- # Query handling
39
  def query_llm(prompt):
40
- completion = api.chat.completions.create(
41
- model="deepseek-ai/deepseek-llm-67b-chat",
42
  messages=[
43
- {"role": "system", "content": "You are a relationship counselor. Analyze the given WhatsApp conversation and provide insights on potential red flags, toxicity, and room for improvement in behavior. Every response must start by rating the overall chat toxicity out of 10."},
 
 
 
 
 
 
 
44
  {"role": "user", "content": prompt},
45
  ],
46
  temperature=0.7,
47
  max_tokens=350,
48
  )
49
- return completion.choices[0].message.content
50
 
51
  # Streamlit App
52
  st.title("AI Relationship Counsellor")
@@ -64,16 +71,16 @@ if uploaded_file:
64
  # Query Interface
65
  user_query = st.text_input("Ask a question about your relationship:")
66
  if user_query:
67
- # Embed query and search FAISS
68
  query_embedding = embedding_model.encode([user_query])
69
- distances, indices = index.search(query_embedding, k=5) # Top 5 results
70
  relevant_chunks = [chunks[i] for i in indices[0]]
71
 
72
- # Combine chunks for context
73
  context = " ".join(relevant_chunks)
74
  final_prompt = f"Context: {context}\n\nQuestion: {user_query}"
75
 
76
- # Get response from AI model
77
  response = query_llm(final_prompt)
78
  st.write("### AI Analysis")
79
- st.write(response)
 
import os

import streamlit as st
import faiss
from sentence_transformers import SentenceTransformer
from groq import Groq  # official Groq Python SDK exposes the `Groq` client class

# Initialize Groq API client.
# SECURITY: never hard-code API keys in source — the previous key was committed
# to version control (a leak) and must be revoked. Read the key from the
# environment (or Streamlit's st.secrets) instead.
groq_api_key = os.environ.get("GROQ_API_KEY", "")
# The SDK's default base URL (https://api.groq.com) is correct; the previous
# hand-written "https://api.groq.ai/v1" is not a valid Groq endpoint.
client = Groq(api_key=groq_api_key)

# Initialize Sentence Transformer embedding model.
# 'all-MiniLM-L6-v2' produces 384-dimensional sentence embeddings.
embedding_model = SentenceTransformer('all-MiniLM-L6-v2')
 
35
  embeddings = embedding_model.encode(chunks)
36
  index.add(embeddings)
37
 
38
# Query handling using the Groq chat-completions API.
def query_llm(prompt):
    """Send *prompt* to the Groq LLM and return the assistant's reply text.

    The system message pins the counselor persona and the mandatory
    toxicity-rating preamble; *prompt* carries the retrieved conversation
    context plus the user's question.

    Raises whatever the Groq SDK raises on network/auth/model errors.
    """
    response = client.chat.completions.create(
        # Fixed: "groq-llm-model" was a placeholder the API rejects.
        # NOTE(review): model availability changes over time — confirm this id
        # against Groq's current model list.
        model="llama-3.3-70b-versatile",
        messages=[
            {
                "role": "system",
                "content": (
                    "You are a relationship counselor. Analyze the given WhatsApp conversation "
                    "and provide insights on potential red flags, toxicity, and room for improvement in behavior. "
                    "Every response must start by rating the overall chat toxicity out of 10."
                ),
            },
            {"role": "user", "content": prompt},
        ],
        temperature=0.7,   # moderate creativity for counseling-style prose
        max_tokens=350,    # keep analyses short enough for the Streamlit pane
    )
    # The SDK returns a list of choices; we request (and use) only the first.
    return response.choices[0].message.content
57
 
58
  # Streamlit App
59
  st.title("AI Relationship Counsellor")
 
# Query interface: let the user ask questions about the uploaded conversation.
user_query = st.text_input("Ask a question about your relationship:")
if user_query:
    # Embed the question and retrieve the 5 nearest chunks from FAISS.
    query_vec = embedding_model.encode([user_query])
    _dists, idxs = index.search(query_vec, k=5)
    retrieved = [chunks[i] for i in idxs[0]]

    # Fold the retrieved chunks into a single context string for the LLM.
    context = " ".join(retrieved)
    final_prompt = f"Context: {context}\n\nQuestion: {user_query}"

    # Ask the model and render its analysis.
    response = query_llm(final_prompt)
    st.write("### AI Analysis")
    st.write(response)