lyimo committed on
Commit
a7638f5
·
verified ·
1 Parent(s): d6bbef9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -15
app.py CHANGED
@@ -4,7 +4,7 @@ import numpy as np
4
  from sentence_transformers import SentenceTransformer
5
  from sklearn.metrics.pairwise import cosine_similarity
6
  import gradio as gr
7
- from together import Together
8
 
9
  # Load pre-trained Sentence Transformer model
10
  model = SentenceTransformer('LaBSE')
@@ -15,23 +15,18 @@ df = pd.read_csv('combined_questions_and_answers.csv')
15
  # Encode all questions in the dataset
16
  question_embeddings = model.encode(df['Question'].tolist())
17
 
18
- # Together API setup
19
- client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
20
 
21
  def llama_query(prompt, system_content):
22
  response = client.chat.completions.create(
23
- model="meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
24
  messages=[
25
  {"role": "system", "content": system_content},
26
  {"role": "user", "content": prompt}
27
  ],
28
- max_tokens=512,
29
- temperature=0.7,
30
- top_p=0.7,
31
- top_k=50,
32
- repetition_penalty=1,
33
- stop=["<|eot_id|>", "<|eom_id|>"],
34
- stream=False
35
  )
36
  return response.choices[0].message.content
37
 
@@ -69,18 +64,18 @@ def get_answer(user_question, threshold=0.35):
69
  return "I'm sorry, but your question doesn't seem to be related to blood donation. Could you please ask a question about blood donation?", 0
70
 
71
  language = detect_language(user_question)
72
-
73
  if language == 'swahili':
74
  english_question = translate_to_english(user_question)
75
  else:
76
  english_question = user_question
77
 
78
  user_embedding = model.encode(english_question)
79
-
80
  similarities = cosine_similarity([user_embedding], question_embeddings)
81
-
82
  max_similarity = np.max(similarities)
83
-
84
  if max_similarity > threshold:
85
  similar_question_idx = np.argmax(similarities)
86
  retrieved_answer = df.iloc[similar_question_idx]['Answer']
 
4
  from sentence_transformers import SentenceTransformer
5
  from sklearn.metrics.pairwise import cosine_similarity
6
  import gradio as gr
7
+ from groq import Groq
8
 
9
  # Load pre-trained Sentence Transformer model
10
  model = SentenceTransformer('LaBSE')
 
15
  # Encode all questions in the dataset
16
  question_embeddings = model.encode(df['Question'].tolist())
17
 
18
+ # Groq API setup
19
+ client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
20
 
21
  def llama_query(prompt, system_content):
22
  response = client.chat.completions.create(
 
23
  messages=[
24
  {"role": "system", "content": system_content},
25
  {"role": "user", "content": prompt}
26
  ],
27
+ model="llama-3.1-70b-versatile",
28
+ max_tokens=800,
29
+ temperature=0.7
 
 
 
 
30
  )
31
  return response.choices[0].message.content
32
 
 
64
  return "I'm sorry, but your question doesn't seem to be related to blood donation. Could you please ask a question about blood donation?", 0
65
 
66
  language = detect_language(user_question)
67
+
68
  if language == 'swahili':
69
  english_question = translate_to_english(user_question)
70
  else:
71
  english_question = user_question
72
 
73
  user_embedding = model.encode(english_question)
74
+
75
  similarities = cosine_similarity([user_embedding], question_embeddings)
76
+
77
  max_similarity = np.max(similarities)
78
+
79
  if max_similarity > threshold:
80
  similar_question_idx = np.argmax(similarities)
81
  retrieved_answer = df.iloc[similar_question_idx]['Answer']