mehulr910 commited on
Commit
6f9dac7
·
0 Parent(s):

initial commit

Browse files
backend/__pycache__/main.cpython-310.pyc ADDED
Binary file (6.7 kB). View file
 
backend/__pycache__/temp.cpython-310.pyc ADDED
Binary file (1.59 kB). View file
 
backend/check.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
# Quick CUDA sanity check: report availability and, when a GPU exists, its name.
import torch

print(torch.cuda.is_available())
# BUG FIX: get_device_name(0) raises RuntimeError when no CUDA device is
# present; only query the name when CUDA is actually available.
if torch.cuda.is_available():
    print(torch.cuda.get_device_name(0))
backend/main.py ADDED
@@ -0,0 +1,543 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # from fastapi import FastAPI, HTTPException, Request
2
+ # from fastapi.responses import HTMLResponse, JSONResponse
3
+ # from fastapi.templating import Jinja2Templates
4
+ # from fastapi.middleware.cors import CORSMiddleware
5
+ # from pydantic import BaseModel, Field
6
+ # import os
7
+ # from dotenv import load_dotenv
8
+ # import openai
9
+ # from typing import Optional, List
10
+ # import logging
11
+
12
+ # # Configure logging
13
+ # logging.basicConfig(level=logging.INFO)
14
+ # logger = logging.getLogger(__name__)
15
+
16
+ # # Load environment variables
17
+ # load_dotenv()
18
+
19
+ # # Initialize FastAPI app
20
+ # app = FastAPI(title="AI Recipe Assistant")
21
+
22
+ # # Add CORS middleware
23
+ # app.add_middleware(
24
+ # CORSMiddleware,
25
+ # allow_origins=["*"],
26
+ # allow_credentials=True,
27
+ # allow_methods=["*"],
28
+ # allow_headers=["*"],
29
+ # )
30
+
31
+ # # Setup templates
32
+ # templates = Jinja2Templates(directory="templates")
33
+
34
+ # # Configure OpenAI
35
+ # # openai.api_key = os.getenv("OPENAI_API_KEY")
36
+ # # if not openai.api_key:
37
+ # # raise ValueError("OPENAI_API_KEY environment variable is not set")
38
+
39
+ # class RecipeRequest(BaseModel):
40
+ # query: str = Field(..., min_length=1, description="The recipe to generate")
41
+ # diet_preference: Optional[str] = Field(None, description="Dietary preference (e.g., vegetarian, vegan)")
42
+ # cuisine_type: Optional[str] = Field(None, description="Type of cuisine (e.g., Italian, Mexican)")
43
+
44
+ # class Config:
45
+ # schema_extra = {
46
+ # "example": {
47
+ # "query": "chocolate chip cookies",
48
+ # "diet_preference": "vegetarian",
49
+ # "cuisine_type": "italian"
50
+ # }
51
+ # }
52
+
53
+ # class LearningResource(BaseModel):
54
+ # title: str
55
+ # url: str
56
+ # type: str
57
+
58
+ # class RecipeResponse(BaseModel):
59
+ # recipe: str
60
+ # image_url: str
61
+ # learning_resources: List[LearningResource]
62
+
63
+ # # def generate_recipe(query: str, diet_preference: Optional[str] = None, cuisine_type: Optional[str] = None) -> dict:
64
+ # # logger.info(f"Generating recipe for query: {query}, diet: {diet_preference}, cuisine: {cuisine_type}")
65
+
66
+ # # if not query:
67
+ # # raise HTTPException(status_code=400, detail="Recipe query is required")
68
+
69
+ # # # Create a detailed prompt for the recipe
70
+ # # prompt = f"""Create a detailed recipe for {query}"""
71
+ # # if diet_preference:
72
+ # # prompt += f" that is {diet_preference}"
73
+ # # if cuisine_type:
74
+ # # prompt += f" in {cuisine_type} style"
75
+
76
+ # # prompt += """\n\nFormat the recipe in markdown with the following sections:
77
+ # # 1. Brief Description
78
+ # # 2. Ingredients (as a bulleted list)
79
+ # # 3. Instructions (as numbered steps)
80
+ # # 4. Tips (as a bulleted list)
81
+ # # 5. Nutritional Information (as a bulleted list)
82
+
83
+ # # Use markdown formatting like:
84
+ # # - Headers (###)
85
+ # # - Bold text (**)
86
+ # # - Lists (- and 1.)
87
+ # # - Sections (>)
88
+ # # """
89
+
90
+ # # try:
91
+ # # logger.info(f"Sending prompt to OpenAI: {prompt}")
92
+
93
+ # # # Generate recipe text
94
+ # # completion = openai.chat.completions.create(
95
+ # # model="gpt-3.5-turbo",
96
+ # # messages=[
97
+ # # {"role": "system", "content": "You are a professional chef who provides detailed recipes with ingredients, instructions, nutritional information, and cooking tips. Format your responses in markdown."},
98
+ # # {"role": "user", "content": prompt}
99
+ # # ],
100
+ # # temperature=0.7
101
+ # # )
102
+ # # recipe_text = completion.choices[0].message.content
103
+ # # logger.info("Successfully generated recipe text")
104
+
105
+ # # # Generate recipe image
106
+ # # logger.info("Generating recipe image")
107
+ # # image_response = openai.images.generate(
108
+ # # model="dall-e-3",
109
+ # # prompt=f"Professional food photography of {query}, appetizing, high-quality, restaurant style",
110
+ # # n=1,
111
+ # # size="1024x1024"
112
+ # # )
113
+ # # image_url = image_response.data[0].url
114
+ # # logger.info("Successfully generated recipe image")
115
+
116
+ # # # Get learning resources
117
+ # # learning_resources = get_learning_resources(query)
118
+ # # logger.info("Successfully generated learning resources")
119
+
120
+ # # response_data = {
121
+ # # "recipe": recipe_text,
122
+ # # "image_url": image_url,
123
+ # # "learning_resources": learning_resources
124
+ # # }
125
+
126
+ # # return response_data
127
+ # # except Exception as e:
128
+ # # logger.error(f"Error generating recipe: {str(e)}")
129
+ # # raise HTTPException(status_code=500, detail=str(e))
130
+ # def generate_recipe(query: str, diet_preference: Optional[str] = None, cuisine_type: Optional[str] = None) -> dict:
131
+ # logger.info(f"Generating mock recipe for query: {query}, diet: {diet_preference}, cuisine: {cuisine_type}")
132
+
133
+ # mock_recipe = f"""
134
+ # ### {query.title()} Recipe
135
+
136
+ # > **A quick and easy mock recipe!**
137
+
138
+ # #### Ingredients
139
+ # - 1 cup flour
140
+ # - 2 eggs
141
+ # - 1/2 cup milk
142
+ # - Salt to taste
143
+
144
+ # #### Instructions
145
+ # 1. Mix all ingredients.
146
+ # 2. Cook on medium heat.
147
+ # 3. Serve hot.
148
+
149
+ # #### Tips
150
+ # - Use fresh ingredients.
151
+ # - Adjust salt as per taste.
152
+
153
+ # #### Nutritional Info
154
+ # - Calories: ~200
155
+ # - Protein: 5g
156
+ # - Carbs: 30g
157
+ # """
158
+
159
+ # mock_image_url = "https://via.placeholder.com/600x400.png?text=Recipe+Image"
160
+
161
+ # mock_learning_resources = [
162
+ # {
163
+ # "title": "Mock Cooking Basics",
164
+ # "url": "https://example.com/mock-cooking",
165
+ # "type": "video"
166
+ # },
167
+ # {
168
+ # "title": "Mock Recipe Tips",
169
+ # "url": "https://example.com/mock-tips",
170
+ # "type": "article"
171
+ # }
172
+ # ]
173
+
174
+ # return {
175
+ # "recipe": mock_recipe,
176
+ # "image_url": mock_image_url,
177
+ # "learning_resources": mock_learning_resources
178
+ # }
179
+
180
+ # def get_learning_resources(recipe_name: str) -> list:
181
+ # return [
182
+ # {
183
+ # "title": f"Master the Art of {recipe_name}",
184
+ # "url": f"https://cooking-school.example.com/learn/{recipe_name.lower().replace(' ', '-')}",
185
+ # "type": "video"
186
+ # },
187
+ # {
188
+ # "title": f"Tips and Tricks for Perfect {recipe_name}",
189
+ # "url": f"https://recipes.example.com/tips/{recipe_name.lower().replace(' ', '-')}",
190
+ # "type": "article"
191
+ # }
192
+ # ]
193
+
194
+ # @app.post("/recipe", response_model=RecipeResponse)
195
+ # async def get_recipe(request: RecipeRequest):
196
+ # logger.info(f"Received recipe request: {request}")
197
+ # try:
198
+ # result = generate_recipe(request.query, request.diet_preference, request.cuisine_type)
199
+ # logger.info("Successfully generated recipe response")
200
+ # return result
201
+ # except Exception as e:
202
+ # logger.error(f"Error processing recipe request: {str(e)}")
203
+ # return JSONResponse(
204
+ # status_code=500,
205
+ # content={"detail": str(e)}
206
+ # )
207
+
208
+ # @app.get("/", response_class=HTMLResponse)
209
+ # async def root(request: Request):
210
+ # return templates.TemplateResponse("index.html", {"request": request})
211
+
212
+ # if __name__ == "__main__":
213
+ # import uvicorn
214
+ # uvicorn.run(app, host="0.0.0.0", port=8080)
215
+ # import os
216
+ # import requests
217
+ # from fastapi import FastAPI, Request, HTTPException
218
+ # from fastapi.responses import JSONResponse, HTMLResponse
219
+ # from fastapi.staticfiles import StaticFiles
220
+ # from fastapi.templating import Jinja2Templates
221
+ # from pydantic import BaseModel
222
+ # from typing import Optional
223
+ # from dotenv import load_dotenv
224
+
225
+ # # Load environment variables
226
+ # load_dotenv()
227
+
228
+ # app = FastAPI()
229
+
230
+ # # Setup static + templates
231
+ # os.makedirs("static", exist_ok=True)
232
+ # os.makedirs("templates", exist_ok=True)
233
+
234
+ # app.mount("/static", StaticFiles(directory="static"), name="static")
235
+ # templates = Jinja2Templates(directory="templates")
236
+
237
+ # # Hugging Face config
238
+ # TEXT_MODEL = "facebook/bart-large-cnn"
239
+ # HF_API_KEY = os.getenv("HUGGINGFACE_API_KEY")
240
+
241
+ # class RecipeRequest(BaseModel):
242
+ # ingredients: str
243
+ # diet: Optional[str] = None
244
+ # cuisine: Optional[str] = None
245
+
246
+ # @app.get("/", response_class=HTMLResponse)
247
+ # async def home(request: Request):
248
+ # return templates.TemplateResponse("index.html", {"request": request})
249
+
250
+ # @app.post("/api/generate-recipe")
251
+ # async def generate_recipe(request: RecipeRequest):
252
+ # try:
253
+ # # 👨‍🍳 Smart Prompt
254
+ # prompt = f"""
255
+ # You are a professional chef and recipe writer.
256
+ # Create a full, detailed cooking recipe using the following ingredients: {request.ingredients}.
257
+ # {f"Make sure it is suitable for a {request.diet} diet." if request.diet else ""}
258
+ # {f"The recipe should follow {request.cuisine} cuisine style." if request.cuisine else ""}
259
+
260
+ # Format the response in markdown with the following sections:
261
+ # ### Title
262
+ # ### Description
263
+ # ### Ingredients (as a bulleted list)
264
+ # ### Instructions (as numbered steps)
265
+ # ### Tips
266
+ # ### Nutritional Information (if possible)
267
+
268
+ # Be friendly and helpful in tone.
269
+ # """
270
+
271
+ # # Debug logs
272
+ # print("📤 Prompt Sent:", prompt)
273
+ # print("🧠 Model:", TEXT_MODEL)
274
+
275
+ # headers = {"Authorization": f"Bearer {HF_API_KEY}"}
276
+ # payload = {
277
+ # "inputs": prompt,
278
+ # "parameters": {
279
+ # "max_new_tokens": 250,
280
+ # "temperature": 0.8,
281
+ # "do_sample": True
282
+ # }
283
+ # }
284
+
285
+ # # Send to Hugging Face
286
+ # response = requests.post(
287
+ # f"https://api-inference.huggingface.co/models/{TEXT_MODEL}",
288
+ # headers=headers,
289
+ # json=payload,
290
+ # timeout=30
291
+ # )
292
+
293
+ # # Try JSON parse or show raw text
294
+ # try:
295
+ # result = response.json()
296
+ # except Exception as e:
297
+ # print("❌ Could not parse JSON. Raw response:")
298
+ # print(response.text)
299
+ # raise HTTPException(status_code=500, detail="Invalid response from Hugging Face API")
300
+
301
+ # print("✅ HF JSON Response:", result)
302
+
303
+ # # Handle errors or loading message
304
+ # if "error" in result:
305
+ # raise HTTPException(status_code=503, detail=result["error"])
306
+ # if isinstance(result, dict) and "generated_text" in result:
307
+ # generated = result["generated_text"]
308
+ # elif isinstance(result, list) and "generated_text" in result[0]:
309
+ # generated = result[0]["generated_text"]
310
+ # else:
311
+ # raise HTTPException(status_code=500, detail="No recipe generated by the model.")
312
+
313
+ # return {
314
+ # "recipe": generated.strip(),
315
+ # "image_url": "/static/placeholder.jpg"
316
+ # }
317
+
318
+ # except Exception as e:
319
+ # print(f"🔥 Error: {str(e)}")
320
+ # raise HTTPException(status_code=500, detail=str(e))
321
+
322
+
323
+ # if __name__ == "__main__":
324
+ # import uvicorn
325
+ # uvicorn.run(app, host="0.0.0.0", port=8000, reload=True)
326
+
327
+ #uvicorn main:app --reload
328
+ import os
329
+ import shutil
330
+ from fastapi import FastAPI, Request, UploadFile, File, Form
331
+ from fastapi.responses import JSONResponse, HTMLResponse
332
+ from fastapi.staticfiles import StaticFiles
333
+ from fastapi.templating import Jinja2Templates
334
+ from fastapi.middleware.cors import CORSMiddleware
335
+ from langchain.chains import RetrievalQA
336
+ from langchain_community.document_loaders import TextLoader
337
+ from langchain_community.vectorstores import FAISS
338
+ from langchain_community.embeddings import HuggingFaceEmbeddings
339
+ from langchain.prompts import PromptTemplate
340
+ from langchain_community.llms import HuggingFacePipeline
341
+ from transformers import pipeline
342
+ from dotenv import load_dotenv
343
+ from PyPDF2 import PdfReader
344
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
345
+ from typing import Optional
346
+ import torch
347
+
348
load_dotenv()

app = FastAPI()

# Wide-open CORS so the bundled frontend (or a dev server) can call the API.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

templates = Jinja2Templates(directory="frontend/templates")
app.mount("/static", StaticFiles(directory="frontend/static"), name="static")

# ========== Model & LLM Setup ==========
# transformers pipeline convention: device 0 = first GPU, -1 = CPU.
device = 0 if torch.cuda.is_available() else -1
print(f"🔥 Using device: {'GPU' if device==0 else 'CPU'}")

# Local text-generation model wrapped for LangChain use below.
llm_pipeline = pipeline(
    "text-generation",
    model="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
    max_new_tokens=512,
    temperature=0.7,
    device=device
)

llm = HuggingFacePipeline(pipeline=llm_pipeline)

# FAISS index location (written by /upload, read by /api/rag-recipe) and the
# sentence-transformers model used to embed documents and queries.
DB_PATH = "vector_store"
EMBED_MODEL = "sentence-transformers/all-MiniLM-L6-v2"
embedding = HuggingFaceEmbeddings(model_name=EMBED_MODEL)
380
+
381
# ========== Prompt Templates ==========
# Plain (non-RAG) prompt; filled via .format(query=...) in /api/generate-recipe.
non_rag_prompt = """
You are an expert Indian home chef and AI assistant.
Generate a single, detailed, easy-to-follow recipe based only on the query.
Output must be in **Markdown** format with clear sections:
- Ingredients
- Method
- Nutritional Info
- Cooking Tips

Be friendly and professional. Stop after the recipe.

Query: {query}
"""

# RAG prompt for the RetrievalQA "stuff" chain: {context} receives the
# retrieved recipe notes, {question} the user's query.
rag_prompt_template = PromptTemplate(
    input_variables=["context", "question"],
    template="""
You are an expert Indian home chef and AI assistant.

You are given some cooking knowledge from the user's personal recipe notes in <context>.
Use only the recipes from <context> that match the user's question.

STRICTLY FOLLOW MARKDOWN FORMAT.

✅ If the recipe exists in <context>, use it.
✅ If it needs improvement, create an improved version.
✅ If it does not exist, create a new one.
❌ DO NOT include unrelated recipes.

Always include: Ingredients, Method, Nutritional Info, Cooking Tips.

<context>
{context}
</context>

<user_question>
{question}
</user_question>

<response>
"""
)
424
+
425
+ # ========== Helper Functions ==========
426
def extract_non_rag_output(full_text: str) -> str:
    """Strip the LLM's echoed preamble: keep text from "Ingredients:" onward.

    If the marker never appears, the whole text is returned stripped.
    """
    marker = "Ingredients:"
    start = full_text.find(marker)
    if start == -1:
        return full_text.strip()
    return full_text[start:].strip()
430
+
431
def extract_rag_output(full_text: str) -> str:
    """Strip the RAG chain's echoed preamble from the model output.

    Preference order: cut from "Generate a recipe for:" when present,
    otherwise from "Ingredients:", otherwise return the stripped text.
    """
    for marker in ("Generate a recipe for:", "Ingredients:"):
        pos = full_text.find(marker)
        if pos != -1:
            return full_text[pos:].strip()
    return full_text.strip()
439
+
440
+ # ========== Routes ==========
441
@app.get("/", response_class=HTMLResponse)
async def serve_home(request: Request):
    """Serve the single-page frontend (frontend/templates/index.html)."""
    print("✅ Serving index.html")
    return templates.TemplateResponse("index.html", {"request": request})
445
+
446
@app.post("/upload")
async def upload_recipe_file(file: UploadFile = File(...)):
    """Ingest a .txt or .pdf recipe file into the persisted FAISS store.

    The file is saved under uploaded_files/, its text is extracted
    (PyPDF2 for PDFs, TextLoader for plain text), embedded with the
    module-level `embedding` model, and the resulting index replaces
    the one at DB_PATH. Any other file type gets a 400 response.
    """
    # basename() guards against path traversal via the client-supplied name.
    filename = os.path.basename(file.filename or "")
    if not filename.endswith((".txt", ".pdf")):
        return JSONResponse(status_code=400, content={"error": "Only .txt or .pdf files allowed."})

    os.makedirs("uploaded_files", exist_ok=True)
    save_path = f"uploaded_files/{filename}"
    with open(save_path, "wb") as buffer:
        shutil.copyfileobj(file.file, buffer)

    if filename.endswith(".pdf"):
        # BUG FIX: TextLoader cannot parse PDF binary; extract the text with
        # PyPDF2 and chunk it, matching what /api/rag-recipe does for uploads.
        pdf = PdfReader(save_path)
        text = "\n".join(page.extract_text() or "" for page in pdf.pages)
        splitter = RecursiveCharacterTextSplitter(chunk_size=300, chunk_overlap=50)
        documents = splitter.create_documents([text])
    else:
        documents = TextLoader(save_path).load()

    db = FAISS.from_documents(documents, embedding)
    db.save_local(DB_PATH)

    return {"message": "File uploaded and processed successfully."}
463
+
464
@app.post("/api/generate-recipe")  # NON-RAG
async def generate_recipe(
    ingredients: str = Form(...),
    diet: Optional[str] = Form("Any"),
    cuisine: Optional[str] = Form("Any")
):
    """Generate a recipe straight from the LLM, without retrieval."""
    try:
        query = f"Give me a recipe using these ingredients: {ingredients}. Diet: {diet}, Cuisine: {cuisine}."
        raw_output = llm.invoke(non_rag_prompt.format(query=query))

        return {
            "recipe": extract_non_rag_output(raw_output),
            "image_url": "/static/placeholder.jpg"
        }

    except Exception as e:
        print("❌ Non-RAG failed:", e)
        return JSONResponse(status_code=500, content={"detail": "Internal Server Error"})
483
+
484
@app.post("/api/rag-recipe")  # RAG
async def rag_recipe(
    ingredients: str = Form(...),
    diet: Optional[str] = Form("Any"),
    cuisine: Optional[str] = Form("Any"),
    file: Optional[UploadFile] = File(None)
):
    """Generate a recipe grounded in the user's notes (retrieval-augmented).

    When a file is attached it is indexed on the fly; otherwise the FAISS
    store previously built by /upload is loaded (400 if none exists).
    """
    try:
        query = f"Generate a recipe for: {ingredients}. Diet: {diet}. Cuisine style: {cuisine}."

        if file:
            # Build a throwaway index from the attached document.
            if file.content_type == "application/pdf":
                pdf = PdfReader(file.file)
                extracted_text = "\n".join(page.extract_text() or "" for page in pdf.pages)
            elif file.content_type == "text/plain":
                extracted_text = (await file.read()).decode("utf-8")
            else:
                return JSONResponse(status_code=400, content={"detail": "Only .txt and .pdf supported"})

            splitter = RecursiveCharacterTextSplitter(chunk_size=300, chunk_overlap=50)
            chunks = splitter.create_documents([extracted_text])
            vector_store = FAISS.from_documents(chunks, embedding)
        else:
            # Fall back to the persisted store built by /upload.
            if not os.path.exists(DB_PATH):
                return JSONResponse(
                    status_code=400,
                    content={"detail": "No recipe database found. Please upload a file first."}
                )
            vector_store = FAISS.load_local(DB_PATH, embedding, allow_dangerous_deserialization=True)

        retriever = vector_store.as_retriever(search_type="mmr", k=1)

        qa_chain = RetrievalQA.from_chain_type(
            llm=llm,
            retriever=retriever,
            chain_type="stuff",
            chain_type_kwargs={"prompt": rag_prompt_template}
        )

        # RetrievalQA expects its input under the "query" key.
        answer = qa_chain.invoke({"query": query})["result"]

        return {
            "recipe": extract_rag_output(answer),
            "image_url": "/static/placeholder.jpg"
        }

    except Exception as e:
        print("❌ RAG failed:", e)
        return JSONResponse(status_code=500, content={"detail": str(e)})
537
+
538
if __name__ == "__main__":
    import uvicorn

    # Dev entry point; equivalent CLI: uvicorn backend.main:app --reload
    uvicorn.run("backend.main:app", host="0.0.0.0", port=8080, reload=True)
541
+
542
+
543
+ # uvicorn backend.main:app --reload --port 8008
backend/requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ fastapi>=0.68.0
2
+ uvicorn>=0.15.0
3
+ python-dotenv>=0.19.0
4
+ openai>=1.0.0
5
+ jinja2>=3.0.1
6
+ python-multipart>=0.0.5
7
+ requests>=2.26.0
8
+ pydantic>=2.0.0
backend/static/attach-file.png ADDED
backend/static/placeholder.jpg ADDED
backend/temp.py ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # import os
2
+ # from fastapi import FastAPI, Request, HTTPException
3
+ # from fastapi.responses import HTMLResponse, JSONResponse
4
+ # from fastapi.staticfiles import StaticFiles
5
+ # from fastapi.templating import Jinja2Templates
6
+
7
+ # app = FastAPI()
8
+
9
+ # # Configure paths
10
+ # BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
11
+ # STATIC_DIR = os.path.join(BASE_DIR, "frontend", "static")
12
+ # TEMPLATE_DIR = os.path.join(BASE_DIR, "frontend", "templates")
13
+
14
+ # # Debug paths
15
+ # print(f"Static files directory: {STATIC_DIR} (exists: {os.path.exists(STATIC_DIR)})")
16
+ # print(f"Templates directory: {TEMPLATE_DIR} (exists: {os.path.exists(TEMPLATE_DIR)})")
17
+
18
+ # app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")
19
+ # templates = Jinja2Templates(directory=TEMPLATE_DIR)
20
+
21
+ # @app.get("/", response_class=HTMLResponse)
22
+ # async def home(request: Request):
23
+ # return templates.TemplateResponse("index.html", {"request": request})
24
+
25
+ # # Add API endpoint for recipe generation
26
+ # @app.post("/api/generate-recipe")
27
+ # async def generate_recipe(request: Request):
28
+ # try:
29
+ # data = await request.json()
30
+ # ingredients = data.get("ingredients", "")
31
+ # diet = data.get("diet")
32
+ # cuisine = data.get("cuisine")
33
+
34
+ # # Here you would add your actual recipe generation logic
35
+ # # For now, returning a mock response
36
+ # return JSONResponse({
37
+ # "recipe": f"Mock recipe using: {ingredients}\n\n1. Do something\n2. Then another step\n3. Serve hot!",
38
+ # "image_url": "https://via.placeholder.com/600x400?text=Recipe+Image"
39
+ # })
40
+ # except Exception as e:
41
+ # raise HTTPException(status_code=400, detail=str(e))
42
+
43
+
44
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
45
+ from langchain_huggingface import ChatHuggingFace, HuggingFacePipeline
46
+ from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
47
+
48
model_id = "mistralai/Mistral-7B-Instruct-v0.3"

# Load tokenizer + model; transformers picks device placement and dtype.
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",
    torch_dtype="auto"
)

# Wrap the HF text-generation pipeline as a LangChain chat model.
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=200,
    do_sample=True,
    temperature=0.7
)

llm = HuggingFacePipeline(pipeline=pipe)
modell = ChatHuggingFace(llm=llm)

# Prompt template carrying the rolling conversation history.
prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a wise assistant reply quickly to users prompt."),
    MessagesPlaceholder("history"),
    ("user", "{input}")
])

history = []

# Minimal REPL: type "stop" or "exit" to quit.
while (user_input := input("You: ")) not in ["stop", "exit"]:
    response = modell.invoke(prompt.invoke({"input": user_input, "history": history}))
    print("AI:", response.content)

    # Maintain chat memory
    history.append(("user", user_input))
    history.append(("assistant", response.content))
backend/test_hugf.py ADDED
@@ -0,0 +1,209 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # import os
2
+ # import requests
3
+ # from pathlib import Path
4
+ # from dotenv import load_dotenv
5
+
6
+ # # 1. Find and load the .env file - searches for ANY of these names
7
+ # ENV_FILES = ['.env', '.env.example', 'env.example', 'env.txt']
8
+ # loaded = False
9
+ # for env_file in ENV_FILES:
10
+ # if Path(env_file).exists():
11
+ # load_dotenv(env_file)
12
+ # loaded = True
13
+ # print(f"✅ Loaded environment from: {env_file}")
14
+ # break
15
+
16
+ # if not loaded:
17
+ # print("❌ No .env file found! Please create one with your API key")
18
+ # print(" File should contain either:")
19
+ # print(" HUGGINGFACE_API_KEY=your_api_key_here")
20
+ # print(" OR")
21
+ # print(" HF_API_KEY=your_api_key_here")
22
+ # exit(1)
23
+
24
+ # # 2. Find the API key - checks ALL possible variable names
25
+ # API_KEYS = ['HUGGINGFACE_API_KEY', 'HF_API_KEY', 'API_KEY']
26
+ # api_key = None
27
+ # for key in API_KEYS:
28
+ # api_key = os.getenv(key)
29
+ # if api_key:
30
+ # print(f"🔑 Found API key in variable: {key}")
31
+ # print(f" Key starts with: {api_key[:8]}...")
32
+ # break
33
+
34
+ # if not api_key:
35
+ # print("❌ No API key found in environment variables!")
36
+ # print(" Your .env file should contain:")
37
+ # print(" HUGGINGFACE_API_KEY=your_actual_key_here")
38
+ # print(" OR")
39
+ # print(" HF_API_KEY=your_actual_key_here")
40
+ # exit(1)
41
+
42
+ # # 3. Test with a GUARANTEED small model
43
+ # print("\n🚀 Testing API connection with small model (gpt2)...")
44
+ # try:
45
+ # response = requests.post(
46
+ # "https://api-inference.huggingface.co/models/gpt2",
47
+ # headers={"Authorization": f"Bearer {api_key}"},
48
+ # json={"inputs": "Just say 'API is working'"},
49
+ # timeout=10
50
+ # )
51
+
52
+ # if response.status_code == 200:
53
+ # print("🎉 SUCCESS! API Response:")
54
+ # print(response.json()[0]['generated_text'])
55
+ # else:
56
+ # print(f"❌ API Error (Status {response.status_code}):")
57
+ # print(response.text)
58
+ # print("\n🔧 Solutions:")
59
+ # print("- Wait 1 minute and try again (model may be loading)")
60
+ # print("- Check token at: https://huggingface.co/settings/tokens")
61
+ # print("- Try a different small model")
62
+
63
+ # except Exception as e:
64
+ # print(f"🚨 Connection failed: {str(e)}")
65
+ # print("\n🔧 Check your internet connection and try again")
66
+ # import os
67
+ # import requests
68
+ # from dotenv import load_dotenv
69
+ # import time
70
+
71
+ # # Load environment
72
+ # load_dotenv()
73
+ # API_KEY = os.getenv("HUGGINGFACE_API_KEY")
74
+
75
+ # # Verified working models (2025)
76
+ # MODELS = [
77
+ # "gpt2", # Always available
78
+ # "distilgpt2", # More responsive
79
+ # "facebook/opt-350m", # Structured responses
80
+ # "google/flan-t5-small", # Best for instructions
81
+ # "bert-base-uncased", # Embeddings
82
+ # "deepset/roberta-base-squad2", # QA
83
+ # "distilbert-base-uncased-finetuned-sst-2-english", # Sentiment
84
+ # "nlpconnect/vit-gpt2-image-captioning", # Multimodal
85
+ # "microsoft/DialoGPT-medium", # Chat
86
+ # "Jean-Baptiste/camembert-ner" # NER
87
+ # ]
88
+
89
+ # def test_model(model: str):
90
+ # headers = {"Authorization": f"Bearer {API_KEY}"}
91
+ # test_prompts = {
92
+ # "text-gen": "Respond only with: API_TEST_OK",
93
+ # "embedding": "Test sentence for embeddings",
94
+ # "qa": {"inputs": {"question": "Test?", "context": "Testing"}},
95
+ # "multimodal": {"image": "https://example.com/test.jpg"}
96
+ # }
97
+
98
+ # try:
99
+ # # Get model type from Hugging Face API
100
+ # model_info = requests.get(
101
+ # f"https://huggingface.co/api/models/{model}",
102
+ # timeout=10
103
+ # ).json()
104
+
105
+ # # Select appropriate test
106
+ # pipeline_tags = model_info.get("pipeline_tags", ["unknown"])
107
+ # print(f"\n🚀 Testing {model} ({pipeline_tags})...")
108
+
109
+ # if "text-generation" in pipeline_tags:
110
+ # prompt = test_prompts["text-gen"]
111
+ # elif "feature-extraction" in pipeline_tags:
112
+ # prompt = test_prompts["embedding"]
113
+ # else:
114
+ # prompt = test_prompts["text-gen"] # Default
115
+
116
+ # response = requests.post(
117
+ # f"https://api-inference.huggingface.co/models/{model}",
118
+ # headers=headers,
119
+ # json={"inputs": prompt},
120
+ # timeout=30
121
+ # )
122
+
123
+ # if response.status_code == 200:
124
+ # print(f"✅ Working! Response: {response.json()}")
125
+ # return True
126
+ # else:
127
+ # print(f"❌ Failed (Status {response.status_code}): {response.text[:200]}...")
128
+ # return False
129
+
130
+ # except Exception as e:
131
+ # print(f"🚨 Error testing {model}: {str(e)}")
132
+ # return False
133
+
134
+ # def main():
135
+ # print("🔍 Hugging Face Full Model Test (2025)")
136
+ # print("="*60)
137
+
138
+ # if not API_KEY:
139
+ # print("❌ Error: Set HUGGINGFACE_API_KEY in .env file")
140
+ # return
141
+
142
+ # for model in MODELS:
143
+ # for attempt in range(3): # 3 retries
144
+ # if test_model(model):
145
+ # break
146
+ # time.sleep(10) # Wait between attempts
147
+
148
+ # if __name__ == "__main__":
149
+ # main()
150
+
151
import requests
from dotenv import load_dotenv
import os
import time

# Load environment variables
load_dotenv()
# API token; may be None when unset — main() checks before making requests.
API_KEY = os.getenv("HUGGINGFACE_API_KEY")

MODEL = "google/flan-t5-large"  # Google FLAN-T5 Large model (comment fixed: this is the *large* variant, not Small)
161
+
162
def search_and_answer(prompt: str) -> str:
    """Send *prompt* to the Hugging Face inference API for MODEL and return the answer.

    :param prompt: The user query to process and generate an answer.
    :return: "Answer: <text>" on success, or an "Error: ..." string on failure.
    """
    headers = {"Authorization": f"Bearer {API_KEY}"}

    try:
        response = requests.post(
            f"https://api-inference.huggingface.co/models/{MODEL}",
            headers=headers,
            json={"inputs": prompt},
            timeout=30
        )

        # Check response status
        if response.status_code == 200:
            result = response.json()
            # BUG FIX: the API returns a list of generations on success, but can
            # also return a dict (e.g. {"error": ...}); the old result[0] access
            # raised KeyError/TypeError on dict payloads.
            if isinstance(result, list) and result and isinstance(result[0], dict):
                generated_text = result[0].get("generated_text", "No answer generated.")
            else:
                generated_text = "No answer generated."
            return f"Answer: {generated_text}"
        else:
            return f"Error: Unable to fetch answer (Status: {response.status_code})"

    except Exception as e:
        return f"Error: {str(e)}"
190
+
191
def main():
    """Interactive CLI: read one query and answer it via search_and_answer()."""
    print("🔍 Query Answering Tool Using Google FLAN-T5 Small")
    print("=" * 50)

    # Bail out early when no API token is configured.
    if not API_KEY:
        print("❌ Error: Set HUGGINGFACE_API_KEY in .env file")
        return

    user_query = input("Enter your query: ")
    answer = search_and_answer(user_query)

    print("\n🚀 Result:")
    print(answer)
+ print(answer)
207
+
208
+ if __name__ == "__main__":
209
+ main()
frontend/static/attach-file.png ADDED
frontend/static/placeholder.jpg ADDED
frontend/templates/index.html ADDED
@@ -0,0 +1,248 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!DOCTYPE html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="UTF-8" />
5
+ <meta name="viewport" content="width=device-width, initial-scale=1.0" />
6
+ <title>NPChef</title>
7
+ <link rel="icon" href="https://img.icons8.com/ios-filled/100/meal.png" type="image/png">
8
+
9
+ <!-- Optional: For better display on mobile homescreens -->
10
+ <link rel="apple-touch-icon" href="https://img.icons8.com/ios-filled/100/meal.png">
11
+ <meta name="theme-color" content="#FF8C42">
12
+ <script src="https://cdn.tailwindcss.com"></script>
13
+ </head>
14
+ <body class="bg-orange-50 text-gray-800">
15
+ <!-- Hero Section -->
16
+ <!-- <section class="bg-gradient-to-r from-orange-400 to-orange-500 text-white py-20 pt-20 text-center">
17
+ <h1 class="text-5xl font-bold mb-4">AI Recipe Generator</h1>
18
+ <p class="text-lg">Enter your ingredients below and get amazing recipes!</p>
19
+ </section> -->
20
+ <section class="text-center mb-10 bg-gradient-to-r from-orange-400 to-orange-500 text-white py-10 text-center">
21
+ <img src="https://img.icons8.com/ios-filled/100/meal.png" alt="logo" class="mx-auto w-16 h-16 mb-2">
22
+ <h1 class="text-4xl font-extrabold tracking-tight text-white">NPChef Recipe Assistant</h1>
23
+ <p class="text-sm text-white font-bold mt-2">NPChef Recipe Assistant for NPCs = Non-Player Cooks 😉 </p>
24
+ <!-- <p class="text-sm text-white font-bold mt-2"> Enter your ingredients below and get amazing recipes!</p> -->
25
+ </section>
26
+
27
+ <!-- Input Section -->
28
+ <section class="max-w-3xl mx-auto py-10 px-4">
29
+ <h2 class="text-2xl text-orange-400 font-bold mb-4">Enter your ingredients below and get amazing recipes!</h2>
30
<p class="text-gray-600 mb-6">NOTE! This service is hosted on an open-source server with limited CPU resources, so the responses may be wrong and it may take up to 2 minutes to generate the recipe.</p>
31
+ <form id="recipeForm" class="space-y-4">
32
+ <div class="relative">
33
+ <textarea
34
+ id="ingredientsInput"
35
+ class="w-full p-4 rounded-lg shadow border border-orange-300 focus:ring-2 focus:ring-orange-400"
36
+ rows="4"
37
+ placeholder="e.g. rice, tomato, onion, chicken or just the name of the dish you want to make"
38
+ ></textarea>
39
+ <label for="recipeFile" class="absolute bottom-3 right-3 cursor-pointer">
40
+ <img src="/static/attach-file.png" class="w-6 h-6" alt="Upload" title="Upload your recipe file">
41
+ </label>
42
+ <input type="file" id="recipeFile" name="recipeFile" accept=".txt,.pdf" class="hidden" />
43
+ </div>
44
+
45
+
46
+
47
+ <!-- Info-only Box -->
48
+ <strong class="text-orange-500">New</strong><br>
49
+ <div class="relative max-w-3xl border border-dashed border-orange-400 rounded p-3 bg-orange-50 text-sm text-gray-700" style="margin-top: 0.5rem;" id="uploadInfoBox">
50
+ <!-- Tail triangle -->
51
+ <div class="absolute -top-2 right-5 w-0 h-0 border-l-[6px] border-l-transparent border-r-[6px] border-r-transparent border-b-[8px] border-b-orange-400"></div>
52
+
53
+
54
+ Got your or your mom’s secret recipes? Upload and get detailed versions!
55
+ </div>
56
<!-- Upload status message. (A second hidden <input> here duplicated
     id="recipeFile" from the input next to the textarea — invalid HTML —
     and was followed by an unmatched </div>; both removed.) -->
<p id="uploadMessage" class="text-sm mt-2 text-green-600 hidden"></p>
60
+
61
+ <div class="flex gap-4">
62
+ <button
63
+ type="submit"
64
+ class="bg-orange-500 hover:bg-orange-600 text-white px-6 py-2 rounded-lg shadow"
65
+ >
66
+ Generate Recipe
67
+ </button>
68
+ <button
69
+ type="button"
70
+ onclick="clearInput()"
71
+ class="bg-white text-orange-500 border border-orange-500 hover:bg-orange-100 px-6 py-2 rounded-lg shadow"
72
+ >
73
+ Clear
74
+ </button>
75
+ </div>
76
+
77
+ <!-- Dietary Preferences & Cuisine Type -->
78
+ <div class="mt-6 grid gap-4 md:grid-cols-2">
79
+ <div>
80
+ <label for="diet" class="block font-medium mb-1">Dietary Preference</label>
81
+ <select id="diet" class="w-full p-3 rounded-lg border border-orange-300">
82
+ <option value="">Any</option>
83
+ <option value="vegetarian">Vegetarian</option>
84
+ <option value="vegan">Vegan</option>
85
+ <option value="gluten-free">Gluten-Free</option>
86
+ </select>
87
+ </div>
88
+ <div>
89
+ <label for="cuisine" class="block font-medium mb-1">Cuisine Type</label>
90
+ <select id="cuisine" class="w-full p-3 rounded-lg border border-orange-300">
91
+ <option value="">Any</option>
92
+ <option value="indian">Indian</option>
93
+ <option value="italian">Italian</option>
94
+ <option value="mexican">Mexican</option>
95
+ </select>
96
+ </div>
97
+ </div>
98
+ </form>
99
+
100
+ <!-- Most Used Ingredients -->
101
+ <div class="mt-6">
102
+ <h2 class="text-xl font-semibold mb-2">Most Used Ingredients</h2>
103
+ <div class="flex flex-wrap gap-2">
104
+ <button onclick="fillIngredient('rice')" class="bg-orange-200 hover:bg-orange-300 px-3 py-1 rounded-full">rice</button>
105
+ <button onclick="fillIngredient('onion')" class="bg-orange-200 hover:bg-orange-300 px-3 py-1 rounded-full">onion</button>
106
+ <button onclick="fillIngredient('chicken')" class="bg-orange-200 hover:bg-orange-300 px-3 py-1 rounded-full">chicken</button>
107
+ <button onclick="fillIngredient('garlic')" class="bg-orange-200 hover:bg-orange-300 px-3 py-1 rounded-full">garlic</button>
108
+ <button onclick="fillIngredient('milk')" class="bg-orange-200 hover:bg-orange-300 px-3 py-1 rounded-full">milk</button>
109
+ <button onclick="fillIngredient('bread')" class="bg-orange-200 hover:bg-orange-300 px-3 py-1 rounded-full">bread</button>
110
+ </div>
111
+ </div>
112
+ </section>
113
+
114
+ <!-- Recipe Output Section -->
115
+ <section id="recipeOutput" class="max-w-4xl mx-auto px-4 pb-20">
116
+ <!-- Recipes will appear here -->
117
+ </section>
118
+
119
+ <!-- Footer -->
120
+ <footer class="bg-orange-100 text-center py-9 pt-9 text-sm text-orange-700 mt-20" >
121
+ <!-- Made with ❤️ using AI. Never waste food. -->
122
+ <p class="flex items-center justify-center space-x-1">
123
+ <span>Made with ❤️</span>
124
+ <!-- <svg class="w-5 h-5 text-red-500" fill="currentColor" viewBox="0 0 20 20">
125
+ <path fill-rule="evenodd" d="M3.172 5.172a4 4 0 015.656 0L10 6.343l1.172-1.171a4 4 0 115.656 5.656L10 17.657l-6.828-6.829a4 4 0 010-5.656z" clip-rule="evenodd" />
126
+ </svg> -->
127
+ <span>by <a href="https://mehul-raul.github.io/mehul.dev.portfolio/" class="text-orange-600 hover:text-orange-800 font-medium" target="_blank" rel="noopener noreferrer">Mehul Raul</a></span>
128
+ <span>|</span>
129
+ <span>Never waste food:)</span>
130
+ </p>
131
+ </footer>
132
+
133
+ <script>
134
// Cache the DOM nodes that every handler below operates on.
const form = document.getElementById("recipeForm");
const ingredientsInput = document.getElementById("ingredientsInput");
const output = document.getElementById("recipeOutput");
const dietSelect = document.getElementById("diet");
const cuisineSelect = document.getElementById("cuisine");
// NOTE(review): the markup contains two inputs with id="recipeFile";
// getElementById resolves to the first one (inside the textarea wrapper).
const fileInput = document.getElementById("recipeFile");
140
+
141
// Append `text` to the ingredients box, comma-separating when it already
// has content, then return focus to the box.
function fillIngredient(text) {
  const current = ingredientsInput.value;
  ingredientsInput.value = current ? `${current}, ${text}` : text;
  ingredientsInput.focus();
}
147
+
148
// Reset the query box, the file picker, and any upload status message.
function clearInput() {
  ingredientsInput.value = "";
  ingredientsInput.focus();
  fileInput.value = "";

  // The status element may not exist; only touch it when present.
  const msg = document.getElementById("uploadMessage");
  if (msg) {
    msg.textContent = "";
    msg.classList.add("hidden");
  }
}
159
+
160
// Show inline feedback when the user picks a file; only .txt and .pdf are
// accepted. Uses the cached `fileInput` reference for consistency with the
// rest of the script instead of re-querying the DOM.
fileInput.addEventListener("change", function () {
  const file = this.files[0];
  const message = document.getElementById("uploadMessage");

  if (!file) {
    message.textContent = "No file selected.";
    message.classList.remove("hidden", "text-green-600");
    message.classList.add("text-red-600");
    return;
  }

  // MIME-type allow-list; file.type comes from the browser's detection.
  const allowedTypes = ["text/plain", "application/pdf"];
  if (!allowedTypes.includes(file.type)) {
    message.textContent = "❌ Please upload a .txt or .pdf file only!";
    message.classList.remove("hidden", "text-green-600");
    message.classList.add("text-red-600");
    return;
  }

  message.textContent = `✅ File "${file.name}" uploaded successfully!`;
  message.classList.remove("hidden", "text-red-600");
  message.classList.add("text-green-600");
});
183
+
184
// Submit handler: POST ingredients (and an optional uploaded recipe file)
// to the backend, then render the returned recipe card or an error card.
form.addEventListener("submit", async (e) => {
  e.preventDefault();
  output.innerHTML = `
    <div class='text-center py-6'>
      <div class='inline-block animate-spin rounded-full h-8 w-8 border-t-2 border-b-2 border-orange-500'></div>
      <p class='mt-2 text-orange-500'>Generating your recipe (please wait, this may take up to 2 minutes)...</p>
    </div>`;

  const formData = new FormData();
  formData.append("ingredients", ingredientsInput.value.trim());
  formData.append("diet", dietSelect.value || "");
  formData.append("cuisine", cuisineSelect.value || "");

  let endpoint = "/api/generate-recipe"; // default: non-RAG
  if (fileInput.files.length > 0) {
    formData.append("file", fileInput.files[0]);
    endpoint = "/api/rag-recipe"; // use RAG if file uploaded
  }

  try {
    const response = await fetch(endpoint, {
      method: "POST",
      body: formData,
    });

    if (!response.ok) {
      const error = await response.json();
      throw new Error(error.detail || "Recipe generation failed");
    }

    const data = await response.json();
    const formattedRecipe = data.recipe.replace(/\n/g, "<br>");

    // SECURITY NOTE: the recipe text and image URL are injected as raw HTML.
    // If the backend ever relays user-controlled content verbatim this is an
    // XSS vector — consider escaping before insertion.
    // The image fallback now points at /static/placeholder.jpg, matching the
    // /static/ prefix used elsewhere on this page (was /docs/static/...).
    output.innerHTML = `
      <div class="bg-white shadow rounded-lg overflow-hidden">
        <img src="${data.image_url}" alt="Recipe Image" class="w-full h-48 object-cover" onerror="this.src='/static/placeholder.jpg'">
        <div class="p-6">
          <h3 class="text-2xl font-bold mb-4">Your Generated Recipe</h3>
          <div class="prose max-w-none">${formattedRecipe}</div>
        </div>
      </div>
    `;
  } catch (error) {
    output.innerHTML = `
      <div class="bg-red-50 border-l-4 border-red-500 p-4">
        <div class="flex">
          <div class="flex-shrink-0">
            <svg class="h-5 w-5 text-red-500" viewBox="0 0 20 20" fill="currentColor">
              <path fill-rule="evenodd" d="M10 18a8 8 0 100-16 8 8 0 000 16zM8.707 7.293a1 1 0 00-1.414 1.414L8.586 10l-1.293 1.293a1 1 0 101.414 1.414L10 11.414l1.293 1.293a1 1 0 001.414-1.414L11.414 10l1.293-1.293a1 1 0 00-1.414-1.414L10 8.586 8.707 7.293z" clip-rule="evenodd" />
            </svg>
          </div>
          <div class="ml-3">
            <p class="text-sm text-red-700">${error.message}</p>
          </div>
        </div>
      </div>`;
    console.error("Error:", error);
  }
});
243
+ </script>
244
+
245
+
246
+
247
+ </body>
248
+ </html>