SameerArz committed on
Commit
faa02e7
·
verified ·
1 Parent(s): 1b128f6

Update utils.py

Browse files
Files changed (1) hide show
  1. utils.py +26 -27
utils.py CHANGED
@@ -1,21 +1,17 @@
1
- # utils.py
2
-
3
- from groq import Groq
4
  import os
 
 
5
 
6
- # Initialize Groq client
7
- client = Groq(api_key=os.environ["sk-QOjwnewUPqnUMgDwrVkUNAqgZS4RcEBanCXO6gha0tIwC6s9"])
8
 
9
- def generate_tutor_output(subject, difficulty, student_input, model):
10
- model_dict = {
11
- "OpenAI": "Meta-Llama-3.1-8B-Instruct",
12
- "LLAMA3 70B": "llama3-groq-70b-8192-tool-use-preview",
13
- "Mixtral 8x7B": "mixtral-8x7b-32768"
14
- }
15
- current_model = model_dict.get(model, None)
16
 
17
- if not current_model:
18
- return "Invalid model selected"
 
19
 
20
  prompt = f"""
21
  You are an expert tutor in {subject} at the {difficulty} level.
@@ -29,19 +25,22 @@ def generate_tutor_output(subject, difficulty, student_input, model):
29
  Format your response as a JSON object with keys: "lesson", "question", "feedback"
30
  """
31
 
32
- completion = client.chat.completions.create(
33
- messages=[
34
- {
35
- "role": "system",
36
- "content": f"You are the world's best AI tutor, renowned for your ability to explain complex concepts in an engaging, clear, and memorable way. You specialize in {subject} at the {difficulty} level.",
37
- },
38
- {
39
- "role": "user",
40
- "content": prompt,
41
- }
42
- ],
43
  model=current_model,
44
- max_tokens=1000,
 
 
 
45
  )
 
 
 
 
 
46
 
47
- return completion.choices[0].message.content
 
 
 
 
 
 
 
 
 
1
  import os
2
+ import openai
3
+ import requests
4
 
5
+ # Set the OpenAI API key for GPT-3 or any LLM models you choose
6
+ openai.api_key = os.getenv("OPENAI_API_KEY")
7
 
8
+ # Hugging Face API token for Stable Diffusion
9
+ HUGGINGFACE_TOKEN = os.getenv("HUGGINGFACE_API_TOKEN")
10
+ STABLE_DIFFUSION_MODEL = "CompVis/stable-diffusion-v-1-4-original"
 
 
 
 
11
 
12
+ def generate_tutor_output(subject, difficulty, student_input, model):
13
+ model_dict = {"OpenAI": "gpt-3.5-turbo", "LLAMA3 70B": "llama3-groq-70b-8192-tool-use-preview", "Mixtral 8x7B": "mixtral-8x7b-32768"}
14
+ current_model = model_dict.get(model, "gpt-3.5-turbo")
15
 
16
  prompt = f"""
17
  You are an expert tutor in {subject} at the {difficulty} level.
 
25
  Format your response as a JSON object with keys: "lesson", "question", "feedback"
26
  """
27
 
28
+ response = openai.ChatCompletion.create(
 
 
 
 
 
 
 
 
 
 
29
  model=current_model,
30
+ messages=[
31
+ {"role": "system", "content": f"You are a highly skilled {subject} tutor for {difficulty} level."},
32
+ {"role": "user", "content": prompt}
33
+ ]
34
  )
35
+ return response.choices[0].message['content']
36
+
37
def generate_visual_answer(prompt):
    """Generate an image for *prompt* via the Hugging Face Inference API.

    Args:
        prompt: Text description of the image to generate; sent as the
            ``inputs`` field of the inference request.

    Returns:
        str: On success, the image URL extracted from the JSON response;
        on any failure (HTTP error, network error, unexpected response
        body), a human-readable message beginning with
        "Error generating image:".
    """
    url = f"https://api-inference.huggingface.co/models/{STABLE_DIFFUSION_MODEL}"
    headers = {"Authorization": f"Bearer {HUGGINGFACE_TOKEN}"}

    try:
        # timeout= prevents the call from hanging forever if the inference
        # endpoint stalls (requests has no default timeout).
        response = requests.post(url, headers=headers, json={"inputs": prompt}, timeout=60)
    except requests.RequestException as exc:
        # Previously network failures escaped as uncaught exceptions while
        # HTTP errors returned a string; report both the same way.
        return "Error generating image: " + str(exc)

    if response.status_code != 200:
        return "Error generating image: " + response.text

    try:
        # NOTE(review): assumes the API answers with JSON shaped like
        # [{"url": ...}] — many HF inference deployments return raw image
        # bytes instead; confirm against the actual model endpoint.
        return response.json()[0]["url"]
    except (ValueError, KeyError, IndexError, TypeError) as exc:
        return "Error generating image: unexpected response format: " + str(exc)