garvitcpp commited on
Commit
dc464ea
·
verified ·
1 Parent(s): f1d327b

Update app/services/extraction.py

Browse files
Files changed (1) hide show
  1. app/services/extraction.py +19 -11
app/services/extraction.py CHANGED
@@ -6,7 +6,7 @@ from dotenv import load_dotenv
6
  from difflib import SequenceMatcher
7
 
8
  load_dotenv()
9
- openai.api_key = os.getenv("OPENAI_API_KEY")
10
 
11
  # Define categories from dataset
12
  RECIPE_CATEGORIES = [
@@ -359,18 +359,26 @@ Output:
359
  ]
360
 
361
  # Send the prompt to OpenAI API
362
- response = openai.ChatCompletion.create(
363
- model="gpt-3.5-turbo",
364
- messages=messages,
365
- temperature=0,
366
- max_tokens=150,
367
- top_p=1,
368
- frequency_penalty=0,
369
- presence_penalty=0,
370
- )
371
 
 
 
 
 
 
 
 
 
 
 
 
372
  # Process the response
373
- output_text = response['choices'][0]['message']['content'].strip()
374
 
375
  try:
376
  result = json.loads(output_text)
 
6
  from difflib import SequenceMatcher
7
 
8
  load_dotenv()
9
+ genai.configure(api_key=os.getenv("EXTRACTION_API_KEY"))
10
 
11
  # Define categories from dataset
12
  RECIPE_CATEGORIES = [
 
359
  ]
360
 
361
  # Send the prompt to the Gemini API
362
+ prompt = ""
363
+ for message in messages:
364
+ if message["role"] == "system":
365
+ prompt += message["content"] + "\n\n"
366
+ else:
367
+ prompt += message["content"]
 
 
 
368
 
369
+ # Configure the Gemini model
370
+ model = genai.GenerativeModel('gemini-2.5-flash')
371
+
372
+ # Generate response
373
+ response = model.generate_content(prompt,
374
+ generation_config=genai.types.GenerationConfig(
375
+ temperature=0,
376
+ max_output_tokens=150,
377
+ top_p=1
378
+ ))
379
+
380
  # Process the response
381
+ output_text = response.text.strip()
382
 
383
  try:
384
  result = json.loads(output_text)